From 747852773f0c8e02facb3be5fae624a445aa6eab Mon Sep 17 00:00:00 2001
From: Rae Sharp
Date: Wed, 28 Jan 2026 18:41:15 -0500
Subject: [PATCH 01/11] Initial commit for a multi-siteconfig for Spaces which
 includes versioned Spaces docs

---
 docs/manuals/spaces/overview.md | 26 -
 .../apis/spaces-api/{latest.md => index.md} | 4 +-
 docs/reference/apis/spaces-api/v1_10.md | 49 --
 docs/reference/apis/spaces-api/v1_11.md | 50 --
 docs/reference/apis/spaces-api/v1_12.md | 75 --
 docs/reference/apis/spaces-api/v1_13.md | 75 --
 docs/reference/apis/spaces-api/v1_14.md | 72 --
 docs/reference/apis/spaces-api/v1_9.md | 51 --
 docusaurus.config.js | 27 +-
 .../concepts/_category_.json | 0
 .../concepts/control-planes.md | 22 +-
 .../concepts/deployment-modes.md | 6 +-
 .../spaces => spaces-docs}/concepts/groups.md | 6 +-
 .../howtos/_category_.json | 0
 .../howtos/api-connector.md | 8 +-
 .../howtos/auto-upgrade.md | 8 +-
 .../automation-and-gitops/_category_.json | 8 +
 .../howtos/automation-and-gitops/overview.md | 138 ++++
 .../howtos/backup-and-restore.md | 25 +-
 .../howtos/cloud-spaces/_category_.json | 0
 .../dedicated-spaces-deployment.md | 2 +-
 .../howtos/cloud-spaces/gitops-on-upbound.md | 14 +-
 .../howtos/control-plane-topologies.md | 6 +
 .../howtos/ctp-connector.md | 8 +-
 .../howtos/debugging-a-ctp.md | 8 +-
 .../howtos/managed-service.md | 2 +-
 .../howtos/mcp-connector-guide.md | 12 +-
 .../howtos/migrating-to-mcps.md | 10 +-
 .../howtos/observability.md | 29 +-
 .../howtos/query-api.md | 8 +-
 .../howtos/secrets-management.md | 6 +
 .../howtos/self-hosted/_category_.json | 0
 .../howtos/self-hosted/administer-features.md | 10 +-
 .../howtos/self-hosted/attach-detach.md | 12 +-
 .../howtos/self-hosted/billing.md | 7 +-
 .../howtos/self-hosted/capacity-licensing.md | 6 +-
 .../howtos/self-hosted/certs.md | 0
 .../howtos/self-hosted/configure-ha.md | 8 +-
 .../howtos/self-hosted/controllers.md | 0
 .../howtos/self-hosted/ctp-audit-logs.md | 14 +-
 .../howtos/self-hosted/declarative-ctps.md | 6 +
 .../howtos/self-hosted/deployment-reqs.md | 0
 .../howtos/self-hosted/dr.md | 11 +-
 .../howtos/self-hosted/gitops-with-argocd.md | 19 +-
 .../self-hosted/managed-spaces-deployment.md | 0
 .../howtos/self-hosted/oidc-configuration.md | 8 +-
 .../howtos/self-hosted/proxies-config.md | 6 +
 .../howtos/self-hosted/query-api.md | 15 +-
 .../howtos/self-hosted/scaling-resources.md | 12 +-
 .../self-hosted-spaces-deployment.md | 0
 .../howtos/self-hosted/space-observability.md | 13 +-
 .../howtos/self-hosted/spaces-management.md | 16 +-
 .../howtos/self-hosted/troubleshooting.md | 2 +-
 .../howtos/self-hosted/use-argo.md | 10 +-
 .../self-hosted/workload-id/_category_.json | 0
 .../workload-id/backup-restore-config.md | 6 +-
 .../self-hosted/workload-id/billing-config.md | 6 +-
 .../self-hosted/workload-id/eso-config.md | 6 +-
 .../howtos/simulations.md | 10 +-
 spaces-docs/overview/_category_.json | 4 +
 spaces-docs/overview/index.md | 14 +
 spaces-docs/reference/_category_.json | 5 +
 spaces-docs/reference/index.md | 72 ++
 .../version-v1.10/concepts/_category_.json | 7 +
 .../version-v1.10/concepts/control-planes.md | 227 ++++++
 .../concepts/deployment-modes.md | 53 ++
 .../version-v1.10/concepts/groups.md | 115 +++
 .../version-v1.10/howtos/_category_.json | 7 +
 .../version-v1.10/howtos/api-connector.md | 413 ++++++++++
 .../version-v1.10/howtos/auto-upgrade.md | 131 ++++
 .../automation-and-gitops/_category_.json | 8 +
 .../howtos/automation-and-gitops/overview.md | 138 ++++
 .../howtos/backup-and-restore.md | 530 +++++++++++++
 .../howtos/cloud-spaces}/_category_.json | 4 +-
 .../dedicated-spaces-deployment.md | 33 +
 .../howtos/cloud-spaces/gitops-on-upbound.md | 318 ++++++++
 .../howtos/control-plane-topologies.md | 566 ++++++++++++++
 .../version-v1.10/howtos/ctp-connector.md | 508 +++++++++++++
 .../version-v1.10/howtos/debugging-a-ctp.md | 128 ++++
 .../version-v1.10/howtos/managed-service.md | 23 +
 .../howtos/mcp-connector-guide.md | 169 ++++
 .../version-v1.10/howtos/migrating-to-mcps.md | 439 +++++++++++
 .../version-v1.10/howtos/observability.md | 395 ++++++++++
 .../version-v1.10/howtos/query-api.md | 320 ++++++++
 .../howtos/secrets-management.md | 719 ++++++++++++++++++
 .../howtos/self-hosted/_category_.json | 11 +
 .../howtos/self-hosted/administer-features.md | 121 +++
 .../howtos/self-hosted/attach-detach.md | 198 +++++
 .../howtos/self-hosted/billing.md | 307 ++++++++
 .../howtos/self-hosted/capacity-licensing.md | 591 ++++++++++++++
 .../version-v1.10/howtos/self-hosted/certs.md | 274 +++++++
 .../howtos/self-hosted/configure-ha.md | 450 +++++++++++
 .../howtos/self-hosted/controllers.md | 389 ++++++++++
 .../howtos/self-hosted/ctp-audit-logs.md | 549 +++++++++++++
 .../howtos/self-hosted/declarative-ctps.md | 110 +++
 .../howtos/self-hosted/deployment-reqs.md | 353 +++++++++
 .../version-v1.10/howtos/self-hosted/dr.md | 412 ++++++++++
 .../howtos/self-hosted/gitops-with-argocd.md | 142 ++++
 .../self-hosted/managed-spaces-deployment.md | 266 +++++++
 .../howtos/self-hosted/oidc-configuration.md | 289 +++++++
 .../howtos/self-hosted/proxies-config.md | 31 +
 .../howtos/self-hosted/query-api.md | 396 ++++++++++
 .../howtos/self-hosted/scaling-resources.md | 184 +++++
 .../self-hosted-spaces-deployment.md | 461 +++++++++++
 .../howtos/self-hosted/space-observability.md | 313 ++++++++
 .../howtos/self-hosted/spaces-management.md | 219 ++++++
 .../howtos/self-hosted/troubleshooting.md | 132 ++++
 .../howtos/self-hosted/use-argo.md | 228 ++++++
 .../self-hosted/workload-id/_category_.json | 11 +
 .../workload-id/backup-restore-config.md | 384 ++++++++++
 .../self-hosted/workload-id/billing-config.md | 454 +++++++++++
 .../self-hosted/workload-id/eso-config.md | 503 ++++++++++++
 .../version-v1.10/howtos/simulations.md | 110 +++
 .../version-v1.10/overview/_category_.json | 4 +
 .../version-v1.10/overview/index.md | 14 +
 .../version-v1.10/reference/_category_.json | 5 +
 .../version-v1.10/reference/index.md | 72 ++
 .../version-v1.11/concepts/_category_.json | 7 +
 .../version-v1.11/concepts/control-planes.md | 227 ++++++
 .../concepts/deployment-modes.md | 53 ++
 .../version-v1.11/concepts/groups.md | 115 +++
 .../version-v1.11/howtos/_category_.json | 7 +
 .../version-v1.11/howtos/api-connector.md | 413 ++++++++++
 .../version-v1.11/howtos/auto-upgrade.md | 131 ++++
 .../automation-and-gitops/_category_.json | 8 +
 .../howtos/automation-and-gitops/overview.md | 138 ++++
 .../howtos/backup-and-restore.md | 530 +++++++++++++
 .../howtos/cloud-spaces/_category_.json | 10 +
 .../dedicated-spaces-deployment.md | 33 +
 .../howtos/cloud-spaces/gitops-on-upbound.md | 318 ++++++++
 .../howtos/control-plane-topologies.md | 566 ++++++++++++++
 .../version-v1.11/howtos/ctp-connector.md | 508 +++++++++++++
 .../version-v1.11/howtos/debugging-a-ctp.md | 128 ++++
 .../version-v1.11/howtos/managed-service.md | 23 +
 .../howtos/mcp-connector-guide.md | 169 ++++
 .../version-v1.11/howtos/migrating-to-mcps.md | 439 +++++++++++
 .../version-v1.11/howtos/observability.md | 395 ++++++++++
 .../version-v1.11/howtos/query-api.md | 320 ++++++++
 .../howtos/secrets-management.md | 719 ++++++++++++++++++
 .../howtos/self-hosted/_category_.json | 11 +
 .../howtos/self-hosted/administer-features.md | 121 +++
 .../howtos/self-hosted/attach-detach.md | 198 +++++
 .../howtos/self-hosted/billing.md | 307 ++++++++
 .../howtos/self-hosted/capacity-licensing.md | 591 ++++++++++++++
 .../version-v1.11/howtos/self-hosted/certs.md | 274 +++++++
 .../howtos/self-hosted/configure-ha.md | 450 +++++++++++
 .../howtos/self-hosted/controllers.md | 389 ++++++++++
 .../howtos/self-hosted/ctp-audit-logs.md | 549 +++++++++++++
 .../howtos/self-hosted/declarative-ctps.md | 110 +++
 .../howtos/self-hosted/deployment-reqs.md | 353 +++++++++
 .../version-v1.11/howtos/self-hosted/dr.md | 412 ++++++++++
 .../howtos/self-hosted/gitops-with-argocd.md | 142 ++++
 .../self-hosted/managed-spaces-deployment.md | 266 +++++++
 .../howtos/self-hosted/oidc-configuration.md | 289 +++++++
 .../howtos/self-hosted/proxies-config.md | 31 +
 .../howtos/self-hosted/query-api.md | 396 ++++++++++
 .../howtos/self-hosted/scaling-resources.md | 184 +++++
 .../self-hosted-spaces-deployment.md | 461 +++++++++++
 .../howtos/self-hosted/space-observability.md | 313 ++++++++
 .../howtos/self-hosted/spaces-management.md | 219 ++++++
 .../howtos/self-hosted/troubleshooting.md | 132 ++++
 .../howtos/self-hosted/use-argo.md | 228 ++++++
 .../self-hosted/workload-id/_category_.json | 11 +
 .../workload-id/backup-restore-config.md | 384 ++++++++++
 .../self-hosted/workload-id/billing-config.md | 454 +++++++++++
 .../self-hosted/workload-id/eso-config.md | 503 ++++++++++++
 .../version-v1.11/howtos/simulations.md | 110 +++
 .../version-v1.11/overview/_category_.json | 4 +
 .../version-v1.11/overview/index.md | 14 +
 .../version-v1.11/reference/_category_.json | 5 +
 .../version-v1.11/reference/index.md | 72 ++
 .../version-v1.12/concepts/_category_.json | 7 +
 .../version-v1.12/concepts/control-planes.md | 227 ++++++
 .../concepts/deployment-modes.md | 53 ++
 .../version-v1.12/concepts/groups.md | 115 +++
 .../version-v1.12/howtos/_category_.json | 7 +
 .../version-v1.12/howtos/api-connector.md | 413 ++++++++++
 .../version-v1.12/howtos/auto-upgrade.md | 131 ++++
 .../automation-and-gitops/_category_.json | 8 +
 .../howtos/automation-and-gitops/overview.md | 138 ++++
 .../howtos/backup-and-restore.md | 530 +++++++++++++
 .../howtos/cloud-spaces/_category_.json | 10 +
 .../dedicated-spaces-deployment.md | 33 +
 .../howtos/cloud-spaces/gitops-on-upbound.md | 318 ++++++++
 .../howtos/control-plane-topologies.md | 566 ++++++++++++++
 .../version-v1.12/howtos/ctp-connector.md | 508 +++++++++++++
 .../version-v1.12/howtos/debugging-a-ctp.md | 128 ++++
 .../version-v1.12/howtos/managed-service.md | 23 +
 .../howtos/mcp-connector-guide.md | 169 ++++
 .../version-v1.12/howtos/migrating-to-mcps.md | 439 +++++++++++
 .../version-v1.12/howtos/observability.md | 395 ++++++++++
 .../version-v1.12/howtos/query-api.md | 320 ++++++++
 .../howtos/secrets-management.md | 719 ++++++++++++++++++
 .../howtos/self-hosted/_category_.json | 11 +
 .../howtos/self-hosted/administer-features.md | 121 +++
 .../howtos/self-hosted/attach-detach.md | 198 +++++
 .../howtos/self-hosted/billing.md | 307 ++++++++
 .../howtos/self-hosted/capacity-licensing.md | 591 ++++++++++++++
 .../version-v1.12/howtos/self-hosted/certs.md | 274 +++++++
 .../howtos/self-hosted/configure-ha.md | 450 +++++++++++
 .../howtos/self-hosted/controllers.md | 389 ++++++++++
 .../howtos/self-hosted/ctp-audit-logs.md | 549 +++++++++++++
 .../howtos/self-hosted/declarative-ctps.md | 110 +++
 .../howtos/self-hosted/deployment-reqs.md | 353 +++++++++
 .../version-v1.12/howtos/self-hosted/dr.md | 412 ++++++++++
 .../howtos/self-hosted/gitops-with-argocd.md | 142 ++++
 .../self-hosted/managed-spaces-deployment.md | 266 +++++++
 .../howtos/self-hosted/oidc-configuration.md | 289 +++++++
 .../howtos/self-hosted/proxies-config.md | 31 +
 .../howtos/self-hosted/query-api.md | 396 ++++++++++
 .../howtos/self-hosted/scaling-resources.md | 184 +++++
 .../self-hosted-spaces-deployment.md | 461 +++++++++++
 .../howtos/self-hosted/space-observability.md | 313 ++++++++
 .../howtos/self-hosted/spaces-management.md | 219 ++++++
 .../howtos/self-hosted/troubleshooting.md | 132 ++++
 .../howtos/self-hosted/use-argo.md | 228 ++++++
 .../self-hosted/workload-id/_category_.json | 11 +
 .../workload-id/backup-restore-config.md | 384 ++++++++++
 .../self-hosted/workload-id/billing-config.md | 454 +++++++++++
 .../self-hosted/workload-id/eso-config.md | 503 ++++++++++++
 .../version-v1.12/howtos/simulations.md | 110 +++
 .../version-v1.12/overview/_category_.json | 4 +
 .../version-v1.12/overview/index.md | 14 +
 .../version-v1.12/reference/_category_.json | 5 +
 .../version-v1.12/reference/index.md | 72 ++
 .../version-v1.13/concepts/_category_.json | 7 +
 .../version-v1.13/concepts/control-planes.md | 227 ++++++
 .../concepts/deployment-modes.md | 53 ++
 .../version-v1.13/concepts/groups.md | 115 +++
 .../version-v1.13/howtos/_category_.json | 7 +
 .../version-v1.13/howtos/api-connector.md | 413 ++++++++++
 .../version-v1.13/howtos/auto-upgrade.md | 131 ++++
 .../automation-and-gitops/_category_.json | 8 +
 .../howtos/automation-and-gitops/overview.md | 138 ++++
 .../howtos/backup-and-restore.md | 530 +++++++++++++
 .../howtos/cloud-spaces/_category_.json | 10 +
 .../dedicated-spaces-deployment.md | 33 +
 .../howtos/cloud-spaces/gitops-on-upbound.md | 318 ++++++++
 .../howtos/control-plane-topologies.md | 566 ++++++++++++++
 .../version-v1.13/howtos/ctp-connector.md | 508 +++++++++++++
 .../version-v1.13/howtos/debugging-a-ctp.md | 128 ++++
 .../version-v1.13/howtos/managed-service.md | 23 +
 .../howtos/mcp-connector-guide.md | 169 ++++
 .../version-v1.13/howtos/migrating-to-mcps.md | 439 +++++++++++
 .../version-v1.13/howtos/observability.md | 395 ++++++++++
 .../version-v1.13/howtos/query-api.md | 320 ++++++++
 .../howtos/secrets-management.md | 719 ++++++++++++++++++
 .../howtos/self-hosted/_category_.json | 11 +
 .../howtos/self-hosted/administer-features.md | 121 +++
 .../howtos/self-hosted/attach-detach.md | 198 +++++
 .../howtos/self-hosted/billing.md | 307 ++++++++
 .../howtos/self-hosted/capacity-licensing.md | 591 ++++++++++++++
 .../version-v1.13/howtos/self-hosted/certs.md | 274 +++++++
 .../howtos/self-hosted/configure-ha.md | 450 +++++++++++
 .../howtos/self-hosted/controllers.md | 389 ++++++++++
 .../howtos/self-hosted/ctp-audit-logs.md | 549 +++++++++++++
 .../howtos/self-hosted/declarative-ctps.md | 110 +++
 .../howtos/self-hosted/deployment-reqs.md | 353 +++++++++
 .../version-v1.13/howtos/self-hosted/dr.md | 412 ++++++++++
 .../howtos/self-hosted/gitops-with-argocd.md | 142 ++++
 .../self-hosted/managed-spaces-deployment.md | 266 +++++++
 .../howtos/self-hosted/oidc-configuration.md | 289 +++++++
 .../howtos/self-hosted/proxies-config.md | 31 +
 .../howtos/self-hosted/query-api.md | 396 ++++++++++
 .../howtos/self-hosted/scaling-resources.md | 184 +++++
 .../self-hosted-spaces-deployment.md | 461 +++++++++++
 .../howtos/self-hosted/space-observability.md | 313 ++++++++
 .../howtos/self-hosted/spaces-management.md | 219 ++++++
 .../howtos/self-hosted/troubleshooting.md | 132 ++++
 .../howtos/self-hosted/use-argo.md | 228 ++++++
 .../self-hosted/workload-id/_category_.json | 11 +
 .../workload-id/backup-restore-config.md | 384 ++++++++++
 .../self-hosted/workload-id/billing-config.md | 454 +++++++++++
 .../self-hosted/workload-id/eso-config.md | 503 ++++++++++++
 .../version-v1.13/howtos/simulations.md | 110 +++
 .../version-v1.13/overview/_category_.json | 4 +
 .../version-v1.13/overview/index.md | 14 +
 .../version-v1.13/reference/_category_.json | 5 +
 .../version-v1.13/reference/index.md | 72 ++
 .../version-v1.14/concepts/_category_.json | 7 +
 .../version-v1.14/concepts/control-planes.md | 227 ++++++
 .../concepts/deployment-modes.md | 53 ++
 .../version-v1.14/concepts/groups.md | 115 +++
 .../version-v1.14/howtos/_category_.json | 7 +
 .../version-v1.14/howtos/api-connector.md | 413 ++++++++++
 .../version-v1.14/howtos/auto-upgrade.md | 131 ++++
 .../automation-and-gitops/_category_.json | 8 +
 .../howtos/automation-and-gitops/overview.md | 138 ++++
 .../howtos/backup-and-restore.md | 530 +++++++++++++
 .../howtos/cloud-spaces/_category_.json | 10 +
 .../dedicated-spaces-deployment.md | 33 +
 .../howtos/cloud-spaces/gitops-on-upbound.md | 318 ++++++++
 .../howtos/control-plane-topologies.md | 566 ++++++++++++++
 .../version-v1.14/howtos/ctp-connector.md | 508 +++++++++++++
 .../version-v1.14/howtos/debugging-a-ctp.md | 128 ++++
 .../version-v1.14/howtos/managed-service.md | 23 +
 .../howtos/mcp-connector-guide.md | 169 ++++
 .../version-v1.14/howtos/migrating-to-mcps.md | 439 +++++++++++
 .../version-v1.14/howtos/observability.md | 395 ++++++++++
 .../version-v1.14/howtos/query-api.md | 320 ++++++++
 .../howtos/secrets-management.md | 719 ++++++++++++++++++
 .../howtos/self-hosted/_category_.json | 11 +
 .../howtos/self-hosted/administer-features.md | 121 +++
 .../howtos/self-hosted/attach-detach.md | 198 +++++
 .../howtos/self-hosted/billing.md | 307 ++++++++
 .../howtos/self-hosted/capacity-licensing.md | 591 ++++++++++++++
 .../version-v1.14/howtos/self-hosted/certs.md | 274 +++++++
 .../howtos/self-hosted/configure-ha.md | 450 +++++++++++
 .../howtos/self-hosted/controllers.md | 389 ++++++++++
 .../howtos/self-hosted/ctp-audit-logs.md | 549 +++++++++++++
 .../howtos/self-hosted/declarative-ctps.md | 110 +++
 .../howtos/self-hosted/deployment-reqs.md | 353 +++++++++
 .../version-v1.14/howtos/self-hosted/dr.md | 412 ++++++++++
 .../howtos/self-hosted/gitops-with-argocd.md | 142 ++++
 .../self-hosted/managed-spaces-deployment.md | 266 +++++++
 .../howtos/self-hosted/oidc-configuration.md | 289 +++++++
 .../howtos/self-hosted/proxies-config.md | 31 +
 .../howtos/self-hosted/query-api.md | 396 ++++++++++
 .../howtos/self-hosted/scaling-resources.md | 184 +++++
 .../self-hosted-spaces-deployment.md | 461 +++++++++++
 .../howtos/self-hosted/space-observability.md | 313 ++++++++
 .../howtos/self-hosted/spaces-management.md | 219 ++++++
 .../howtos/self-hosted/troubleshooting.md | 132 ++++
 .../howtos/self-hosted/use-argo.md | 228 ++++++
 .../self-hosted/workload-id/_category_.json | 11 +
 .../workload-id/backup-restore-config.md | 384 ++++++++++
 .../self-hosted/workload-id/billing-config.md | 454 +++++++++++
 .../self-hosted/workload-id/eso-config.md | 503 ++++++++++++
 .../version-v1.14/howtos/simulations.md | 110 +++
 .../version-v1.14/overview/_category_.json | 4 +
 .../version-v1.14/overview/index.md | 14 +
 .../version-v1.14/reference/_category_.json | 5 +
 .../version-v1.14/reference/index.md | 72 ++
 .../version-v1.15/concepts/_category_.json | 7 +
 .../version-v1.15/concepts/control-planes.md | 227 ++++++
 .../concepts/deployment-modes.md | 53 ++
 .../version-v1.15/concepts/groups.md | 115 +++
 .../version-v1.15/howtos/_category_.json | 7 +
 .../version-v1.15/howtos/api-connector.md | 413 ++++++++++
 .../version-v1.15/howtos/auto-upgrade.md | 131 ++++
 .../automation-and-gitops/_category_.json | 8 +
 .../howtos/automation-and-gitops/overview.md | 138 ++++
 .../howtos/backup-and-restore.md | 530 +++++++++++++
 .../howtos/cloud-spaces/_category_.json | 10 +
 .../dedicated-spaces-deployment.md | 33 +
 .../howtos/cloud-spaces/gitops-on-upbound.md | 318 ++++++++
 .../howtos/control-plane-topologies.md | 566 ++++++++++++++
 .../version-v1.15/howtos/ctp-connector.md | 508 +++++++++++++
 .../version-v1.15/howtos/debugging-a-ctp.md | 128 ++++
 .../version-v1.15/howtos/managed-service.md | 23 +
 .../howtos/mcp-connector-guide.md | 169 ++++
 .../version-v1.15/howtos/migrating-to-mcps.md | 439 +++++++++++
 .../version-v1.15/howtos/observability.md | 395 ++++++++++
 .../version-v1.15/howtos/query-api.md | 320 ++++++++
 .../howtos/secrets-management.md | 719 ++++++++++++++++++
 .../howtos/self-hosted/_category_.json | 11 +
 .../howtos/self-hosted/administer-features.md | 121 +++
 .../howtos/self-hosted/attach-detach.md | 198 +++++
 .../howtos/self-hosted/billing.md | 307 ++++++++
 .../howtos/self-hosted/capacity-licensing.md | 591 ++++++++++++++
 .../version-v1.15/howtos/self-hosted/certs.md | 274 +++++++
 .../howtos/self-hosted/configure-ha.md | 450 +++++++++++
 .../howtos/self-hosted/controllers.md | 389 ++++++++++
 .../howtos/self-hosted/ctp-audit-logs.md | 549 +++++++++++++
 .../howtos/self-hosted/declarative-ctps.md | 110 +++
 .../howtos/self-hosted/deployment-reqs.md | 353 +++++++++
 .../version-v1.15/howtos/self-hosted/dr.md | 412 ++++++++++
 .../howtos/self-hosted/gitops-with-argocd.md | 142 ++++
 .../self-hosted/managed-spaces-deployment.md | 266 +++++++
 .../howtos/self-hosted/oidc-configuration.md | 289 +++++++
 .../howtos/self-hosted/proxies-config.md | 31 +
 .../howtos/self-hosted/query-api.md | 396 ++++++++++
 .../howtos/self-hosted/scaling-resources.md | 184 +++++
 .../self-hosted-spaces-deployment.md | 461 +++++++++++
 .../howtos/self-hosted/space-observability.md | 313 ++++++++
 .../howtos/self-hosted/spaces-management.md | 219 ++++++
 .../howtos/self-hosted/troubleshooting.md | 132 ++++
 .../howtos/self-hosted/use-argo.md | 228 ++++++
 .../self-hosted/workload-id/_category_.json | 11 +
 .../workload-id/backup-restore-config.md | 384 ++++++++++
 .../self-hosted/workload-id/billing-config.md | 454 +++++++++++
 .../self-hosted/workload-id/eso-config.md | 503 ++++++++++++
 .../version-v1.15/howtos/simulations.md | 110 +++
 .../version-v1.15/overview/_category_.json | 4 +
 .../version-v1.15/overview/index.md | 14 +
 .../version-v1.15/reference/_category_.json | 5 +
 .../version-v1.15/reference/index.md | 72 ++
 .../version-v1.9/concepts/_category_.json | 7 +
 .../version-v1.9/concepts/control-planes.md | 227 ++++++
 .../version-v1.9/concepts/deployment-modes.md | 53 ++
 .../version-v1.9/concepts/groups.md | 115 +++
 .../version-v1.9/howtos/_category_.json | 7 +
 .../version-v1.9/howtos/api-connector.md | 413 ++++++++++
 .../version-v1.9/howtos/auto-upgrade.md | 131 ++++
 .../automation-and-gitops/_category_.json | 8 +
 .../howtos/automation-and-gitops/overview.md | 138 ++++
 .../version-v1.9/howtos/backup-and-restore.md | 530 +++++++++++++
 .../howtos/cloud-spaces/_category_.json | 10 +
 .../dedicated-spaces-deployment.md | 33 +
 .../howtos/cloud-spaces/gitops-on-upbound.md | 318 ++++++++
 .../howtos/control-plane-topologies.md | 566 ++++++++++++++
 .../version-v1.9/howtos/ctp-connector.md | 508 +++++++++++++
 .../version-v1.9/howtos/debugging-a-ctp.md | 128 ++++
 .../version-v1.9/howtos/managed-service.md | 23 +
 .../howtos/mcp-connector-guide.md | 169 ++++
 .../version-v1.9/howtos/migrating-to-mcps.md | 439 +++++++++++
 .../version-v1.9/howtos/observability.md | 395 ++++++++++
 .../version-v1.9/howtos/query-api.md | 320 ++++++++
 .../version-v1.9/howtos/secrets-management.md | 719 ++++++++++++++++++
 .../howtos/self-hosted/_category_.json | 11 +
 .../howtos/self-hosted/administer-features.md | 121 +++
 .../howtos/self-hosted/attach-detach.md | 198 +++++
 .../howtos/self-hosted/billing.md | 307 ++++++++
 .../howtos/self-hosted/capacity-licensing.md | 591 ++++++++++++++
 .../version-v1.9/howtos/self-hosted/certs.md | 274 +++++++
 .../howtos/self-hosted/configure-ha.md | 450 +++++++++++
 .../howtos/self-hosted/controllers.md | 389 ++++++++++
 .../howtos/self-hosted/ctp-audit-logs.md | 549 +++++++++++++
 .../howtos/self-hosted/declarative-ctps.md | 110 +++
 .../howtos/self-hosted/deployment-reqs.md | 353 +++++++++
 .../version-v1.9/howtos/self-hosted/dr.md | 412 ++++++++++
 .../howtos/self-hosted/gitops-with-argocd.md | 142 ++++
 .../self-hosted/managed-spaces-deployment.md | 266 +++++++
 .../howtos/self-hosted/oidc-configuration.md | 289 +++++++
 .../howtos/self-hosted/proxies-config.md | 31 +
 .../howtos/self-hosted/query-api.md | 396 ++++++++++
 .../howtos/self-hosted/scaling-resources.md | 184 +++++
 .../self-hosted-spaces-deployment.md | 461 +++++++++++
 .../howtos/self-hosted/space-observability.md | 313 ++++++++
 .../howtos/self-hosted/spaces-management.md | 219 ++++++
 .../howtos/self-hosted/troubleshooting.md | 132 ++++
 .../howtos/self-hosted/use-argo.md | 228 ++++++
 .../self-hosted/workload-id/_category_.json | 11 +
 .../workload-id/backup-restore-config.md | 384 ++++++++++
 .../self-hosted/workload-id/billing-config.md | 454 +++++++++++
 .../self-hosted/workload-id/eso-config.md | 503 ++++++++++++
 .../version-v1.9/howtos/simulations.md | 110 +++
 .../version-v1.9/overview/_category_.json | 4 +
 .../version-v1.9/overview/index.md | 14 +
 .../version-v1.9/reference/_category_.json | 5 +
 .../version-v1.9/reference/index.md | 72 ++
 .../version-v1.10-sidebars.json | 91 +++
 .../version-v1.11-sidebars.json | 91 +++
 .../version-v1.12-sidebars.json | 92 +++
 .../version-v1.13-sidebars.json | 93 +++
 .../version-v1.14-sidebars.json | 96 +++
 .../version-v1.15-sidebars.json | 96 +++
 .../version-v1.9-sidebars.json | 91 +++
 spaces_versions.json | 1 +
 src/sidebars/spaces.js | 96 +++
 src/theme/DocItem/Layout/index.js | 72 ++
 src/theme/DocItem/Layout/layout.module.css | 57 ++
 versions.json | 1 +
 453 files changed, 94106 insertions(+), 498 deletions(-)
 delete mode 100644 docs/manuals/spaces/overview.md
 rename docs/reference/apis/spaces-api/{latest.md => index.md} (95%)
 delete mode 100644 docs/reference/apis/spaces-api/v1_10.md
 delete mode 100644 docs/reference/apis/spaces-api/v1_11.md
 delete mode 100644 docs/reference/apis/spaces-api/v1_12.md
 delete mode 100644 docs/reference/apis/spaces-api/v1_13.md
 delete mode 100644 docs/reference/apis/spaces-api/v1_14.md
 delete mode 100644 docs/reference/apis/spaces-api/v1_9.md
 rename {docs/manuals/spaces => spaces-docs}/concepts/_category_.json (100%)
 rename {docs/manuals/spaces => spaces-docs}/concepts/control-planes.md (92%)
 rename {docs/manuals/spaces => spaces-docs}/concepts/deployment-modes.md (89%)
 rename {docs/manuals/spaces => spaces-docs}/concepts/groups.md (96%)
 rename {docs/manuals/spaces => spaces-docs}/howtos/_category_.json (100%)
 rename {docs/manuals/spaces => spaces-docs}/howtos/api-connector.md (97%)
 rename {docs/manuals/spaces => spaces-docs}/howtos/auto-upgrade.md (88%)
 create mode 100644 spaces-docs/howtos/automation-and-gitops/_category_.json
 create mode 100644 spaces-docs/howtos/automation-and-gitops/overview.md
 rename {docs/manuals/spaces => spaces-docs}/howtos/backup-and-restore.md (95%)
 rename {docs/manuals/spaces => spaces-docs}/howtos/cloud-spaces/_category_.json (100%)
 rename {docs/manuals/spaces => spaces-docs}/howtos/cloud-spaces/dedicated-spaces-deployment.md (93%)
 rename docs/manuals/spaces/howtos/cloud-spaces/gitops.md => spaces-docs/howtos/cloud-spaces/gitops-on-upbound.md (96%)
 rename {docs/manuals/spaces => spaces-docs}/howtos/control-plane-topologies.md (98%)
 rename {docs/manuals/spaces => spaces-docs}/howtos/ctp-connector.md (97%)
 rename {docs/manuals/spaces => spaces-docs}/howtos/debugging-a-ctp.md (92%)
 rename {docs/manuals/spaces => spaces-docs}/howtos/managed-service.md (94%)
 rename {docs/manuals/spaces => spaces-docs}/howtos/mcp-connector-guide.md (91%)
 rename {docs/manuals/spaces => spaces-docs}/howtos/migrating-to-mcps.md (97%)
 rename {docs/manuals/spaces => spaces-docs}/howtos/observability.md (88%)
 rename {docs/manuals/spaces => spaces-docs}/howtos/query-api.md (97%)
 rename {docs/manuals/spaces => spaces-docs}/howtos/secrets-management.md (98%)
 rename {docs/manuals/spaces => spaces-docs}/howtos/self-hosted/_category_.json (100%)
 rename {docs/manuals/spaces => spaces-docs}/howtos/self-hosted/administer-features.md (90%)
 rename {docs/manuals/spaces => spaces-docs}/howtos/self-hosted/attach-detach.md (93%)
 rename {docs/manuals/spaces => spaces-docs}/howtos/self-hosted/billing.md (93%)
 rename {docs/manuals/spaces => spaces-docs}/howtos/self-hosted/capacity-licensing.md (99%)
 rename {docs/manuals/spaces => spaces-docs}/howtos/self-hosted/certs.md (100%)
 rename {docs/manuals/spaces => spaces-docs}/howtos/self-hosted/configure-ha.md (97%)
 rename {docs/manuals/spaces => spaces-docs}/howtos/self-hosted/controllers.md (100%)
 rename {docs/manuals/spaces => spaces-docs}/howtos/self-hosted/ctp-audit-logs.md (96%)
 rename {docs/manuals/spaces => spaces-docs}/howtos/self-hosted/declarative-ctps.md (91%)
 rename {docs/manuals/spaces => spaces-docs}/howtos/self-hosted/deployment-reqs.md (100%)
 rename {docs/manuals/spaces => spaces-docs}/howtos/self-hosted/dr.md (96%)
 rename docs/manuals/spaces/howtos/self-hosted/gitops.md => spaces-docs/howtos/self-hosted/gitops-with-argocd.md (88%)
 rename {docs/manuals/spaces => spaces-docs}/howtos/self-hosted/managed-spaces-deployment.md (100%)
 rename {docs/manuals/spaces => spaces-docs}/howtos/self-hosted/oidc-configuration.md (96%)
 rename {docs/manuals/spaces => spaces-docs}/howtos/self-hosted/proxies-config.md (75%)
 rename {docs/manuals/spaces => spaces-docs}/howtos/self-hosted/query-api.md (97%)
 rename {docs/manuals/spaces => spaces-docs}/howtos/self-hosted/scaling-resources.md (92%)
 rename {docs/manuals/spaces => spaces-docs}/howtos/self-hosted/self-hosted-spaces-deployment.md (100%)
 rename {docs/manuals/spaces => spaces-docs}/howtos/self-hosted/space-observability.md (94%)
 rename {docs/manuals/spaces => spaces-docs}/howtos/self-hosted/spaces-management.md (91%)
 rename {docs/manuals/spaces => spaces-docs}/howtos/self-hosted/troubleshooting.md (99%)
 rename {docs/manuals/spaces => spaces-docs}/howtos/self-hosted/use-argo.md (95%)
 rename {docs/manuals/spaces => spaces-docs}/howtos/self-hosted/workload-id/_category_.json (100%)
 rename {docs/manuals/spaces => spaces-docs}/howtos/self-hosted/workload-id/backup-restore-config.md (98%)
 rename {docs/manuals/spaces => spaces-docs}/howtos/self-hosted/workload-id/billing-config.md (98%)
 rename {docs/manuals/spaces => spaces-docs}/howtos/self-hosted/workload-id/eso-config.md (98%)
 rename {docs/manuals/spaces => spaces-docs}/howtos/simulations.md (88%)
 create mode 100644 spaces-docs/overview/_category_.json
 create mode 100644 spaces-docs/overview/index.md
 create mode 100644 spaces-docs/reference/_category_.json
 create mode 100644 spaces-docs/reference/index.md
 create mode 100644 spaces_versioned_docs/version-v1.10/concepts/_category_.json
 create mode 100644 spaces_versioned_docs/version-v1.10/concepts/control-planes.md
 create mode 100644 spaces_versioned_docs/version-v1.10/concepts/deployment-modes.md
 create mode 100644 spaces_versioned_docs/version-v1.10/concepts/groups.md
 create mode 100644 spaces_versioned_docs/version-v1.10/howtos/_category_.json
 create mode 100644 spaces_versioned_docs/version-v1.10/howtos/api-connector.md
 create mode 100644 spaces_versioned_docs/version-v1.10/howtos/auto-upgrade.md
 create mode 100644 spaces_versioned_docs/version-v1.10/howtos/automation-and-gitops/_category_.json
 create mode 100644 spaces_versioned_docs/version-v1.10/howtos/automation-and-gitops/overview.md
 create mode 100644 spaces_versioned_docs/version-v1.10/howtos/backup-and-restore.md
 rename {docs/manuals/spaces => spaces_versioned_docs/version-v1.10/howtos/cloud-spaces}/_category_.json (63%)
 create mode 100644 spaces_versioned_docs/version-v1.10/howtos/cloud-spaces/dedicated-spaces-deployment.md
 create mode 100644 spaces_versioned_docs/version-v1.10/howtos/cloud-spaces/gitops-on-upbound.md
 create mode 100644 spaces_versioned_docs/version-v1.10/howtos/control-plane-topologies.md
 create mode 100644 spaces_versioned_docs/version-v1.10/howtos/ctp-connector.md
 create mode 100644 spaces_versioned_docs/version-v1.10/howtos/debugging-a-ctp.md
 create mode 100644 spaces_versioned_docs/version-v1.10/howtos/managed-service.md
 create mode 100644 spaces_versioned_docs/version-v1.10/howtos/mcp-connector-guide.md
 create mode 100644 spaces_versioned_docs/version-v1.10/howtos/migrating-to-mcps.md
 create mode 100644 spaces_versioned_docs/version-v1.10/howtos/observability.md
 create mode 100644 spaces_versioned_docs/version-v1.10/howtos/query-api.md
 create mode 100644 spaces_versioned_docs/version-v1.10/howtos/secrets-management.md
 create mode 100644 spaces_versioned_docs/version-v1.10/howtos/self-hosted/_category_.json
 create mode 100644 spaces_versioned_docs/version-v1.10/howtos/self-hosted/administer-features.md
 create mode 100644 spaces_versioned_docs/version-v1.10/howtos/self-hosted/attach-detach.md
 create mode 100644 spaces_versioned_docs/version-v1.10/howtos/self-hosted/billing.md
 create mode 100644 spaces_versioned_docs/version-v1.10/howtos/self-hosted/capacity-licensing.md
 create mode 100644 spaces_versioned_docs/version-v1.10/howtos/self-hosted/certs.md
 create mode 100644 spaces_versioned_docs/version-v1.10/howtos/self-hosted/configure-ha.md
 create mode 100644 spaces_versioned_docs/version-v1.10/howtos/self-hosted/controllers.md
 create mode 100644 spaces_versioned_docs/version-v1.10/howtos/self-hosted/ctp-audit-logs.md
 create mode 100644 spaces_versioned_docs/version-v1.10/howtos/self-hosted/declarative-ctps.md
 create mode 100644 spaces_versioned_docs/version-v1.10/howtos/self-hosted/deployment-reqs.md
 create mode 100644 spaces_versioned_docs/version-v1.10/howtos/self-hosted/dr.md
 create mode 100644 spaces_versioned_docs/version-v1.10/howtos/self-hosted/gitops-with-argocd.md
 create mode 100644 spaces_versioned_docs/version-v1.10/howtos/self-hosted/managed-spaces-deployment.md
 create mode 100644 spaces_versioned_docs/version-v1.10/howtos/self-hosted/oidc-configuration.md
 create mode 100644 spaces_versioned_docs/version-v1.10/howtos/self-hosted/proxies-config.md
 create mode 100644 spaces_versioned_docs/version-v1.10/howtos/self-hosted/query-api.md
 create mode 100644 spaces_versioned_docs/version-v1.10/howtos/self-hosted/scaling-resources.md
 create mode 100644 spaces_versioned_docs/version-v1.10/howtos/self-hosted/self-hosted-spaces-deployment.md
 create mode 100644 spaces_versioned_docs/version-v1.10/howtos/self-hosted/space-observability.md
 create mode 100644 spaces_versioned_docs/version-v1.10/howtos/self-hosted/spaces-management.md
 create mode 100644 spaces_versioned_docs/version-v1.10/howtos/self-hosted/troubleshooting.md
 create mode 100644 spaces_versioned_docs/version-v1.10/howtos/self-hosted/use-argo.md
 create mode 100644 spaces_versioned_docs/version-v1.10/howtos/self-hosted/workload-id/_category_.json
 create mode 100644 spaces_versioned_docs/version-v1.10/howtos/self-hosted/workload-id/backup-restore-config.md
 create mode 100644 spaces_versioned_docs/version-v1.10/howtos/self-hosted/workload-id/billing-config.md
 create mode 100644 spaces_versioned_docs/version-v1.10/howtos/self-hosted/workload-id/eso-config.md
 create mode 100644 spaces_versioned_docs/version-v1.10/howtos/simulations.md
 create mode 100644 spaces_versioned_docs/version-v1.10/overview/_category_.json
 create mode 100644 spaces_versioned_docs/version-v1.10/overview/index.md
 create mode 100644 spaces_versioned_docs/version-v1.10/reference/_category_.json
 create mode 100644 spaces_versioned_docs/version-v1.10/reference/index.md
 create mode 100644 spaces_versioned_docs/version-v1.11/concepts/_category_.json
 create mode 100644 spaces_versioned_docs/version-v1.11/concepts/control-planes.md
 create mode 100644 spaces_versioned_docs/version-v1.11/concepts/deployment-modes.md
 create mode 100644 spaces_versioned_docs/version-v1.11/concepts/groups.md
 create mode 100644 spaces_versioned_docs/version-v1.11/howtos/_category_.json
 create mode 100644 spaces_versioned_docs/version-v1.11/howtos/api-connector.md
 create mode 100644 spaces_versioned_docs/version-v1.11/howtos/auto-upgrade.md
 create mode 100644 spaces_versioned_docs/version-v1.11/howtos/automation-and-gitops/_category_.json
 create mode 100644 spaces_versioned_docs/version-v1.11/howtos/automation-and-gitops/overview.md
 create mode 100644 spaces_versioned_docs/version-v1.11/howtos/backup-and-restore.md
 create mode 100644 spaces_versioned_docs/version-v1.11/howtos/cloud-spaces/_category_.json
 create mode 100644 spaces_versioned_docs/version-v1.11/howtos/cloud-spaces/dedicated-spaces-deployment.md
 create mode 100644 spaces_versioned_docs/version-v1.11/howtos/cloud-spaces/gitops-on-upbound.md
 create mode 100644 spaces_versioned_docs/version-v1.11/howtos/control-plane-topologies.md
 create mode 100644 spaces_versioned_docs/version-v1.11/howtos/ctp-connector.md
 create mode 100644 spaces_versioned_docs/version-v1.11/howtos/debugging-a-ctp.md
 create mode 100644 spaces_versioned_docs/version-v1.11/howtos/managed-service.md
 create mode 100644 spaces_versioned_docs/version-v1.11/howtos/mcp-connector-guide.md
 create mode 100644 spaces_versioned_docs/version-v1.11/howtos/migrating-to-mcps.md
 create mode 100644 spaces_versioned_docs/version-v1.11/howtos/observability.md
 create mode 100644 spaces_versioned_docs/version-v1.11/howtos/query-api.md
 create mode 100644 spaces_versioned_docs/version-v1.11/howtos/secrets-management.md
 create mode 100644 spaces_versioned_docs/version-v1.11/howtos/self-hosted/_category_.json
 create mode 100644 spaces_versioned_docs/version-v1.11/howtos/self-hosted/administer-features.md
 create mode 100644 spaces_versioned_docs/version-v1.11/howtos/self-hosted/attach-detach.md
 create mode 100644 spaces_versioned_docs/version-v1.11/howtos/self-hosted/billing.md
 create mode 100644 spaces_versioned_docs/version-v1.11/howtos/self-hosted/capacity-licensing.md
 create mode 100644 spaces_versioned_docs/version-v1.11/howtos/self-hosted/certs.md
 create mode 100644 spaces_versioned_docs/version-v1.11/howtos/self-hosted/configure-ha.md
 create mode 100644 spaces_versioned_docs/version-v1.11/howtos/self-hosted/controllers.md
 create mode 100644 spaces_versioned_docs/version-v1.11/howtos/self-hosted/ctp-audit-logs.md
 create mode 100644 spaces_versioned_docs/version-v1.11/howtos/self-hosted/declarative-ctps.md
 create mode 100644 spaces_versioned_docs/version-v1.11/howtos/self-hosted/deployment-reqs.md
 create mode 100644 spaces_versioned_docs/version-v1.11/howtos/self-hosted/dr.md
 create mode 100644 spaces_versioned_docs/version-v1.11/howtos/self-hosted/gitops-with-argocd.md
 create mode 100644 spaces_versioned_docs/version-v1.11/howtos/self-hosted/managed-spaces-deployment.md
 create mode 100644 spaces_versioned_docs/version-v1.11/howtos/self-hosted/oidc-configuration.md
 create mode 100644 spaces_versioned_docs/version-v1.11/howtos/self-hosted/proxies-config.md
 create mode 100644 spaces_versioned_docs/version-v1.11/howtos/self-hosted/query-api.md
 create mode 100644 spaces_versioned_docs/version-v1.11/howtos/self-hosted/scaling-resources.md
 create mode 100644 spaces_versioned_docs/version-v1.11/howtos/self-hosted/self-hosted-spaces-deployment.md
 create mode 100644 spaces_versioned_docs/version-v1.11/howtos/self-hosted/space-observability.md
 create mode 100644 spaces_versioned_docs/version-v1.11/howtos/self-hosted/spaces-management.md
 create mode 100644 spaces_versioned_docs/version-v1.11/howtos/self-hosted/troubleshooting.md
 create mode 100644 spaces_versioned_docs/version-v1.11/howtos/self-hosted/use-argo.md
 create mode 100644 spaces_versioned_docs/version-v1.11/howtos/self-hosted/workload-id/_category_.json
 create mode 100644 spaces_versioned_docs/version-v1.11/howtos/self-hosted/workload-id/backup-restore-config.md
 create mode 100644 spaces_versioned_docs/version-v1.11/howtos/self-hosted/workload-id/billing-config.md
 create mode 100644 spaces_versioned_docs/version-v1.11/howtos/self-hosted/workload-id/eso-config.md
 create mode 100644 spaces_versioned_docs/version-v1.11/howtos/simulations.md
 create mode 100644 spaces_versioned_docs/version-v1.11/overview/_category_.json
 create mode 100644 spaces_versioned_docs/version-v1.11/overview/index.md
 create mode 100644 spaces_versioned_docs/version-v1.11/reference/_category_.json
 create mode 100644 spaces_versioned_docs/version-v1.11/reference/index.md
 create mode 100644 spaces_versioned_docs/version-v1.12/concepts/_category_.json
 create mode 100644 spaces_versioned_docs/version-v1.12/concepts/control-planes.md
 create mode 100644 spaces_versioned_docs/version-v1.12/concepts/deployment-modes.md
 create mode 100644 spaces_versioned_docs/version-v1.12/concepts/groups.md
 create mode 100644 spaces_versioned_docs/version-v1.12/howtos/_category_.json
 create mode 100644 spaces_versioned_docs/version-v1.12/howtos/api-connector.md
 create mode 100644 spaces_versioned_docs/version-v1.12/howtos/auto-upgrade.md
 create mode 100644 spaces_versioned_docs/version-v1.12/howtos/automation-and-gitops/_category_.json
 create mode 100644 spaces_versioned_docs/version-v1.12/howtos/automation-and-gitops/overview.md
 create mode 100644 spaces_versioned_docs/version-v1.12/howtos/backup-and-restore.md
 create mode 100644 spaces_versioned_docs/version-v1.12/howtos/cloud-spaces/_category_.json
 create mode 100644 spaces_versioned_docs/version-v1.12/howtos/cloud-spaces/dedicated-spaces-deployment.md
 create mode 100644 spaces_versioned_docs/version-v1.12/howtos/cloud-spaces/gitops-on-upbound.md
 create mode 100644 spaces_versioned_docs/version-v1.12/howtos/control-plane-topologies.md
 create mode 100644 spaces_versioned_docs/version-v1.12/howtos/ctp-connector.md
 create mode 100644 spaces_versioned_docs/version-v1.12/howtos/debugging-a-ctp.md
 create mode 100644 spaces_versioned_docs/version-v1.12/howtos/managed-service.md
 create mode 100644 spaces_versioned_docs/version-v1.12/howtos/mcp-connector-guide.md
 create mode 100644 spaces_versioned_docs/version-v1.12/howtos/migrating-to-mcps.md
 create mode 100644 spaces_versioned_docs/version-v1.12/howtos/observability.md
 create mode 100644 spaces_versioned_docs/version-v1.12/howtos/query-api.md
 create mode 100644 spaces_versioned_docs/version-v1.12/howtos/secrets-management.md
 create mode 100644 spaces_versioned_docs/version-v1.12/howtos/self-hosted/_category_.json
 create mode 100644 spaces_versioned_docs/version-v1.12/howtos/self-hosted/administer-features.md
 create mode 100644 spaces_versioned_docs/version-v1.12/howtos/self-hosted/attach-detach.md
 create mode 100644 spaces_versioned_docs/version-v1.12/howtos/self-hosted/billing.md
 create mode 100644 spaces_versioned_docs/version-v1.12/howtos/self-hosted/capacity-licensing.md
 create mode 100644 spaces_versioned_docs/version-v1.12/howtos/self-hosted/certs.md
 create mode 100644 spaces_versioned_docs/version-v1.12/howtos/self-hosted/configure-ha.md
 create mode 100644 spaces_versioned_docs/version-v1.12/howtos/self-hosted/controllers.md
 create mode 100644 spaces_versioned_docs/version-v1.12/howtos/self-hosted/ctp-audit-logs.md
 create mode 100644 spaces_versioned_docs/version-v1.12/howtos/self-hosted/declarative-ctps.md
 create mode 100644 spaces_versioned_docs/version-v1.12/howtos/self-hosted/deployment-reqs.md
 create mode 100644 spaces_versioned_docs/version-v1.12/howtos/self-hosted/dr.md
 create mode 100644 spaces_versioned_docs/version-v1.12/howtos/self-hosted/gitops-with-argocd.md
 create mode 100644 spaces_versioned_docs/version-v1.12/howtos/self-hosted/managed-spaces-deployment.md
 create mode 100644 spaces_versioned_docs/version-v1.12/howtos/self-hosted/oidc-configuration.md
 create mode 100644 spaces_versioned_docs/version-v1.12/howtos/self-hosted/proxies-config.md
 create mode 100644 spaces_versioned_docs/version-v1.12/howtos/self-hosted/query-api.md
 create mode 100644 spaces_versioned_docs/version-v1.12/howtos/self-hosted/scaling-resources.md
 create mode 100644 spaces_versioned_docs/version-v1.12/howtos/self-hosted/self-hosted-spaces-deployment.md
 create mode 100644 spaces_versioned_docs/version-v1.12/howtos/self-hosted/space-observability.md
 create mode 100644 spaces_versioned_docs/version-v1.12/howtos/self-hosted/spaces-management.md
 create mode 100644 spaces_versioned_docs/version-v1.12/howtos/self-hosted/troubleshooting.md
 create mode 100644 spaces_versioned_docs/version-v1.12/howtos/self-hosted/use-argo.md
 create mode 100644 spaces_versioned_docs/version-v1.12/howtos/self-hosted/workload-id/_category_.json
 create mode 100644 spaces_versioned_docs/version-v1.12/howtos/self-hosted/workload-id/backup-restore-config.md
 create mode 100644 spaces_versioned_docs/version-v1.12/howtos/self-hosted/workload-id/billing-config.md
 create mode 100644 spaces_versioned_docs/version-v1.12/howtos/self-hosted/workload-id/eso-config.md
 create mode 100644 spaces_versioned_docs/version-v1.12/howtos/simulations.md
 create mode 100644 spaces_versioned_docs/version-v1.12/overview/_category_.json
 create mode 100644 spaces_versioned_docs/version-v1.12/overview/index.md
 create mode 100644 spaces_versioned_docs/version-v1.12/reference/_category_.json
 create mode 100644 spaces_versioned_docs/version-v1.12/reference/index.md
 create mode 100644 spaces_versioned_docs/version-v1.13/concepts/_category_.json
 create mode 100644 spaces_versioned_docs/version-v1.13/concepts/control-planes.md
 create mode 100644 spaces_versioned_docs/version-v1.13/concepts/deployment-modes.md
 create mode 100644 spaces_versioned_docs/version-v1.13/concepts/groups.md
 create mode 100644 spaces_versioned_docs/version-v1.13/howtos/_category_.json
 create mode 100644 spaces_versioned_docs/version-v1.13/howtos/api-connector.md
 create mode 100644 spaces_versioned_docs/version-v1.13/howtos/auto-upgrade.md
 create mode 100644 spaces_versioned_docs/version-v1.13/howtos/automation-and-gitops/_category_.json
 create mode 100644 spaces_versioned_docs/version-v1.13/howtos/automation-and-gitops/overview.md
 create mode 100644 spaces_versioned_docs/version-v1.13/howtos/backup-and-restore.md
 create mode 100644 spaces_versioned_docs/version-v1.13/howtos/cloud-spaces/_category_.json
 create mode 100644 spaces_versioned_docs/version-v1.13/howtos/cloud-spaces/dedicated-spaces-deployment.md
 create mode 100644 spaces_versioned_docs/version-v1.13/howtos/cloud-spaces/gitops-on-upbound.md
 create mode 100644 spaces_versioned_docs/version-v1.13/howtos/control-plane-topologies.md
 create mode 100644 spaces_versioned_docs/version-v1.13/howtos/ctp-connector.md
 create mode 100644 spaces_versioned_docs/version-v1.13/howtos/debugging-a-ctp.md
 create mode 100644 spaces_versioned_docs/version-v1.13/howtos/managed-service.md
 create mode 100644 spaces_versioned_docs/version-v1.13/howtos/mcp-connector-guide.md
 create mode 100644 spaces_versioned_docs/version-v1.13/howtos/migrating-to-mcps.md
 create mode 100644 spaces_versioned_docs/version-v1.13/howtos/observability.md
 create mode 100644 spaces_versioned_docs/version-v1.13/howtos/query-api.md
 create mode 100644 spaces_versioned_docs/version-v1.13/howtos/secrets-management.md
 create mode 100644 spaces_versioned_docs/version-v1.13/howtos/self-hosted/_category_.json
 create mode 100644 spaces_versioned_docs/version-v1.13/howtos/self-hosted/administer-features.md
 create mode 100644 spaces_versioned_docs/version-v1.13/howtos/self-hosted/attach-detach.md
 create mode 100644 spaces_versioned_docs/version-v1.13/howtos/self-hosted/billing.md
 create mode 100644 spaces_versioned_docs/version-v1.13/howtos/self-hosted/capacity-licensing.md
 create mode 100644 spaces_versioned_docs/version-v1.13/howtos/self-hosted/certs.md
 create mode 100644 spaces_versioned_docs/version-v1.13/howtos/self-hosted/configure-ha.md
 create mode 100644 spaces_versioned_docs/version-v1.13/howtos/self-hosted/controllers.md
 create mode 100644 spaces_versioned_docs/version-v1.13/howtos/self-hosted/ctp-audit-logs.md
 create mode 100644 spaces_versioned_docs/version-v1.13/howtos/self-hosted/declarative-ctps.md
 create mode 100644 spaces_versioned_docs/version-v1.13/howtos/self-hosted/deployment-reqs.md
 create mode 100644 spaces_versioned_docs/version-v1.13/howtos/self-hosted/dr.md
 create mode 100644 spaces_versioned_docs/version-v1.13/howtos/self-hosted/gitops-with-argocd.md
 create mode 100644 spaces_versioned_docs/version-v1.13/howtos/self-hosted/managed-spaces-deployment.md
 create mode 100644 spaces_versioned_docs/version-v1.13/howtos/self-hosted/oidc-configuration.md
 create mode 100644 spaces_versioned_docs/version-v1.13/howtos/self-hosted/proxies-config.md
 create mode 100644 spaces_versioned_docs/version-v1.13/howtos/self-hosted/query-api.md
 create mode 100644 spaces_versioned_docs/version-v1.13/howtos/self-hosted/scaling-resources.md
 create mode 100644 spaces_versioned_docs/version-v1.13/howtos/self-hosted/self-hosted-spaces-deployment.md
 create mode 100644 spaces_versioned_docs/version-v1.13/howtos/self-hosted/space-observability.md
 create mode 100644 spaces_versioned_docs/version-v1.13/howtos/self-hosted/spaces-management.md
 create mode 100644 spaces_versioned_docs/version-v1.13/howtos/self-hosted/troubleshooting.md
 create mode 100644 spaces_versioned_docs/version-v1.13/howtos/self-hosted/use-argo.md
 create mode 100644 spaces_versioned_docs/version-v1.13/howtos/self-hosted/workload-id/_category_.json
 create mode 100644 spaces_versioned_docs/version-v1.13/howtos/self-hosted/workload-id/backup-restore-config.md
 create mode 100644 spaces_versioned_docs/version-v1.13/howtos/self-hosted/workload-id/billing-config.md
 create mode 100644 spaces_versioned_docs/version-v1.13/howtos/self-hosted/workload-id/eso-config.md
 create mode 100644 spaces_versioned_docs/version-v1.13/howtos/simulations.md
 create mode 100644 spaces_versioned_docs/version-v1.13/overview/_category_.json
 create mode 100644 spaces_versioned_docs/version-v1.13/overview/index.md
 create mode 100644 spaces_versioned_docs/version-v1.13/reference/_category_.json
 create mode 100644 spaces_versioned_docs/version-v1.13/reference/index.md
 create mode 100644 spaces_versioned_docs/version-v1.14/concepts/_category_.json
 create mode 100644 spaces_versioned_docs/version-v1.14/concepts/control-planes.md
 create mode 100644 spaces_versioned_docs/version-v1.14/concepts/deployment-modes.md
 create mode 100644 spaces_versioned_docs/version-v1.14/concepts/groups.md
 create mode 100644 spaces_versioned_docs/version-v1.14/howtos/_category_.json
 create mode 100644 spaces_versioned_docs/version-v1.14/howtos/api-connector.md
 create mode 100644 spaces_versioned_docs/version-v1.14/howtos/auto-upgrade.md
 create mode 100644 spaces_versioned_docs/version-v1.14/howtos/automation-and-gitops/_category_.json
 create mode 100644 spaces_versioned_docs/version-v1.14/howtos/automation-and-gitops/overview.md
 create mode 100644 spaces_versioned_docs/version-v1.14/howtos/backup-and-restore.md
 create mode 100644 spaces_versioned_docs/version-v1.14/howtos/cloud-spaces/_category_.json
 create mode 100644 spaces_versioned_docs/version-v1.14/howtos/cloud-spaces/dedicated-spaces-deployment.md
 create mode 100644 spaces_versioned_docs/version-v1.14/howtos/cloud-spaces/gitops-on-upbound.md
 create mode 100644 spaces_versioned_docs/version-v1.14/howtos/control-plane-topologies.md
 create mode 100644 spaces_versioned_docs/version-v1.14/howtos/ctp-connector.md
 create mode 100644 spaces_versioned_docs/version-v1.14/howtos/debugging-a-ctp.md
 create mode 100644 spaces_versioned_docs/version-v1.14/howtos/managed-service.md
 create mode 100644 spaces_versioned_docs/version-v1.14/howtos/mcp-connector-guide.md
 create mode 100644 spaces_versioned_docs/version-v1.14/howtos/migrating-to-mcps.md
 create mode 100644 spaces_versioned_docs/version-v1.14/howtos/observability.md
 create mode 100644 spaces_versioned_docs/version-v1.14/howtos/query-api.md
 create mode 100644 spaces_versioned_docs/version-v1.14/howtos/secrets-management.md
 create mode 100644 spaces_versioned_docs/version-v1.14/howtos/self-hosted/_category_.json
 create mode 100644 spaces_versioned_docs/version-v1.14/howtos/self-hosted/administer-features.md
 create mode 100644 spaces_versioned_docs/version-v1.14/howtos/self-hosted/attach-detach.md
 create mode 100644 spaces_versioned_docs/version-v1.14/howtos/self-hosted/billing.md
 create mode 100644 spaces_versioned_docs/version-v1.14/howtos/self-hosted/capacity-licensing.md
 create mode 100644 spaces_versioned_docs/version-v1.14/howtos/self-hosted/certs.md
 create mode 100644 spaces_versioned_docs/version-v1.14/howtos/self-hosted/configure-ha.md
 create mode 100644 spaces_versioned_docs/version-v1.14/howtos/self-hosted/controllers.md
 create mode 100644 spaces_versioned_docs/version-v1.14/howtos/self-hosted/ctp-audit-logs.md
 create mode 100644 spaces_versioned_docs/version-v1.14/howtos/self-hosted/declarative-ctps.md
 create mode 100644 spaces_versioned_docs/version-v1.14/howtos/self-hosted/deployment-reqs.md
 create mode 100644 spaces_versioned_docs/version-v1.14/howtos/self-hosted/dr.md
 create mode 100644 spaces_versioned_docs/version-v1.14/howtos/self-hosted/gitops-with-argocd.md
 create mode 100644 spaces_versioned_docs/version-v1.14/howtos/self-hosted/managed-spaces-deployment.md
 create mode 100644 spaces_versioned_docs/version-v1.14/howtos/self-hosted/oidc-configuration.md
 create mode 100644 spaces_versioned_docs/version-v1.14/howtos/self-hosted/proxies-config.md
 create mode 100644 spaces_versioned_docs/version-v1.14/howtos/self-hosted/query-api.md
 create mode 100644 spaces_versioned_docs/version-v1.14/howtos/self-hosted/scaling-resources.md
 create mode 100644 spaces_versioned_docs/version-v1.14/howtos/self-hosted/self-hosted-spaces-deployment.md
 create mode 100644 spaces_versioned_docs/version-v1.14/howtos/self-hosted/space-observability.md
 create mode 100644 spaces_versioned_docs/version-v1.14/howtos/self-hosted/spaces-management.md
 create mode 100644 spaces_versioned_docs/version-v1.14/howtos/self-hosted/troubleshooting.md
 create mode 100644 spaces_versioned_docs/version-v1.14/howtos/self-hosted/use-argo.md
 create mode 100644 spaces_versioned_docs/version-v1.14/howtos/self-hosted/workload-id/_category_.json
 create mode 100644 spaces_versioned_docs/version-v1.14/howtos/self-hosted/workload-id/backup-restore-config.md
 create mode 100644 spaces_versioned_docs/version-v1.14/howtos/self-hosted/workload-id/billing-config.md
 create mode 100644 spaces_versioned_docs/version-v1.14/howtos/self-hosted/workload-id/eso-config.md
 create mode 100644 spaces_versioned_docs/version-v1.14/howtos/simulations.md
 create mode 100644 spaces_versioned_docs/version-v1.14/overview/_category_.json
 create mode 100644 spaces_versioned_docs/version-v1.14/overview/index.md
 create mode 100644 spaces_versioned_docs/version-v1.14/reference/_category_.json
 create mode 100644 spaces_versioned_docs/version-v1.14/reference/index.md
 create mode 100644 spaces_versioned_docs/version-v1.15/concepts/_category_.json
 create mode 100644 spaces_versioned_docs/version-v1.15/concepts/control-planes.md
 create mode 100644 spaces_versioned_docs/version-v1.15/concepts/deployment-modes.md
 create mode 100644 spaces_versioned_docs/version-v1.15/concepts/groups.md
 create mode 100644 spaces_versioned_docs/version-v1.15/howtos/_category_.json
 create mode 100644 spaces_versioned_docs/version-v1.15/howtos/api-connector.md
 create mode 100644 spaces_versioned_docs/version-v1.15/howtos/auto-upgrade.md
 create mode 100644 spaces_versioned_docs/version-v1.15/howtos/automation-and-gitops/_category_.json
 create mode 100644 spaces_versioned_docs/version-v1.15/howtos/automation-and-gitops/overview.md
 create mode 100644 spaces_versioned_docs/version-v1.15/howtos/backup-and-restore.md
 create mode 100644 spaces_versioned_docs/version-v1.15/howtos/cloud-spaces/_category_.json
 create mode 100644 spaces_versioned_docs/version-v1.15/howtos/cloud-spaces/dedicated-spaces-deployment.md
 create mode 100644 spaces_versioned_docs/version-v1.15/howtos/cloud-spaces/gitops-on-upbound.md
 create mode 100644 spaces_versioned_docs/version-v1.15/howtos/control-plane-topologies.md
 create mode 100644 spaces_versioned_docs/version-v1.15/howtos/ctp-connector.md
 create mode 100644 spaces_versioned_docs/version-v1.15/howtos/debugging-a-ctp.md
 create mode 100644 spaces_versioned_docs/version-v1.15/howtos/managed-service.md
 create mode 100644 spaces_versioned_docs/version-v1.15/howtos/mcp-connector-guide.md
 create mode 100644 spaces_versioned_docs/version-v1.15/howtos/migrating-to-mcps.md
 create mode 100644 spaces_versioned_docs/version-v1.15/howtos/observability.md
 create mode 100644 spaces_versioned_docs/version-v1.15/howtos/query-api.md
 create mode 100644 spaces_versioned_docs/version-v1.15/howtos/secrets-management.md
 create mode 100644 spaces_versioned_docs/version-v1.15/howtos/self-hosted/_category_.json
 create mode 100644 spaces_versioned_docs/version-v1.15/howtos/self-hosted/administer-features.md
 create mode 100644 spaces_versioned_docs/version-v1.15/howtos/self-hosted/attach-detach.md
 create mode 100644 spaces_versioned_docs/version-v1.15/howtos/self-hosted/billing.md
 create mode 100644 spaces_versioned_docs/version-v1.15/howtos/self-hosted/capacity-licensing.md
 create mode 100644 spaces_versioned_docs/version-v1.15/howtos/self-hosted/certs.md
 create mode 100644 spaces_versioned_docs/version-v1.15/howtos/self-hosted/configure-ha.md
 create mode 100644 spaces_versioned_docs/version-v1.15/howtos/self-hosted/controllers.md
 create mode 100644 spaces_versioned_docs/version-v1.15/howtos/self-hosted/ctp-audit-logs.md
 create mode 100644 spaces_versioned_docs/version-v1.15/howtos/self-hosted/declarative-ctps.md
 create mode 100644 spaces_versioned_docs/version-v1.15/howtos/self-hosted/deployment-reqs.md
 create mode 100644 spaces_versioned_docs/version-v1.15/howtos/self-hosted/dr.md
 create mode 100644 spaces_versioned_docs/version-v1.15/howtos/self-hosted/gitops-with-argocd.md
 create mode 100644 spaces_versioned_docs/version-v1.15/howtos/self-hosted/managed-spaces-deployment.md
 create mode 100644 spaces_versioned_docs/version-v1.15/howtos/self-hosted/oidc-configuration.md
 create mode 100644 spaces_versioned_docs/version-v1.15/howtos/self-hosted/proxies-config.md
 create mode 100644 spaces_versioned_docs/version-v1.15/howtos/self-hosted/query-api.md
 create mode 100644 spaces_versioned_docs/version-v1.15/howtos/self-hosted/scaling-resources.md
 create mode 100644 spaces_versioned_docs/version-v1.15/howtos/self-hosted/self-hosted-spaces-deployment.md
 create mode 100644 spaces_versioned_docs/version-v1.15/howtos/self-hosted/space-observability.md
 create mode 100644 spaces_versioned_docs/version-v1.15/howtos/self-hosted/spaces-management.md
 create mode 100644 spaces_versioned_docs/version-v1.15/howtos/self-hosted/troubleshooting.md
 create mode 100644 spaces_versioned_docs/version-v1.15/howtos/self-hosted/use-argo.md
 create mode 100644 spaces_versioned_docs/version-v1.15/howtos/self-hosted/workload-id/_category_.json
 create mode 100644 spaces_versioned_docs/version-v1.15/howtos/self-hosted/workload-id/backup-restore-config.md
 create mode 100644 spaces_versioned_docs/version-v1.15/howtos/self-hosted/workload-id/billing-config.md
 create mode 100644 spaces_versioned_docs/version-v1.15/howtos/self-hosted/workload-id/eso-config.md
 create mode 100644 spaces_versioned_docs/version-v1.15/howtos/simulations.md
 create mode 100644 spaces_versioned_docs/version-v1.15/overview/_category_.json
 create mode 100644 spaces_versioned_docs/version-v1.15/overview/index.md
 create mode 100644 spaces_versioned_docs/version-v1.15/reference/_category_.json
 create mode 100644 spaces_versioned_docs/version-v1.15/reference/index.md
 create mode 100644 spaces_versioned_docs/version-v1.9/concepts/_category_.json
 create mode 100644 spaces_versioned_docs/version-v1.9/concepts/control-planes.md
 create mode 100644 spaces_versioned_docs/version-v1.9/concepts/deployment-modes.md
 create mode 100644 spaces_versioned_docs/version-v1.9/concepts/groups.md
 create mode 100644 spaces_versioned_docs/version-v1.9/howtos/_category_.json
 create mode 100644 spaces_versioned_docs/version-v1.9/howtos/api-connector.md
 create mode 100644 spaces_versioned_docs/version-v1.9/howtos/auto-upgrade.md
 create mode 100644 spaces_versioned_docs/version-v1.9/howtos/automation-and-gitops/_category_.json
 create mode 100644 spaces_versioned_docs/version-v1.9/howtos/automation-and-gitops/overview.md
 create mode 100644 spaces_versioned_docs/version-v1.9/howtos/backup-and-restore.md
 create mode 100644 spaces_versioned_docs/version-v1.9/howtos/cloud-spaces/_category_.json
 create mode 100644 spaces_versioned_docs/version-v1.9/howtos/cloud-spaces/dedicated-spaces-deployment.md
 create mode 100644 spaces_versioned_docs/version-v1.9/howtos/cloud-spaces/gitops-on-upbound.md
 create mode 100644 spaces_versioned_docs/version-v1.9/howtos/control-plane-topologies.md
 create mode 100644 spaces_versioned_docs/version-v1.9/howtos/ctp-connector.md
 create mode 100644 spaces_versioned_docs/version-v1.9/howtos/debugging-a-ctp.md
 create mode 100644 spaces_versioned_docs/version-v1.9/howtos/managed-service.md
 create mode 100644 spaces_versioned_docs/version-v1.9/howtos/mcp-connector-guide.md
 create mode 100644 spaces_versioned_docs/version-v1.9/howtos/migrating-to-mcps.md
 create mode 100644 spaces_versioned_docs/version-v1.9/howtos/observability.md
 create mode 100644 spaces_versioned_docs/version-v1.9/howtos/query-api.md
 create mode 100644 spaces_versioned_docs/version-v1.9/howtos/secrets-management.md
 create mode 100644 spaces_versioned_docs/version-v1.9/howtos/self-hosted/_category_.json
 create mode 100644 spaces_versioned_docs/version-v1.9/howtos/self-hosted/administer-features.md
 create mode 100644 spaces_versioned_docs/version-v1.9/howtos/self-hosted/attach-detach.md
 create mode 100644 spaces_versioned_docs/version-v1.9/howtos/self-hosted/billing.md
 create mode 100644 spaces_versioned_docs/version-v1.9/howtos/self-hosted/capacity-licensing.md
 create mode 100644 spaces_versioned_docs/version-v1.9/howtos/self-hosted/certs.md
 create mode 100644 spaces_versioned_docs/version-v1.9/howtos/self-hosted/configure-ha.md
 create mode 100644 spaces_versioned_docs/version-v1.9/howtos/self-hosted/controllers.md
spaces_versioned_docs/version-v1.9/howtos/self-hosted/controllers.md create mode 100644 spaces_versioned_docs/version-v1.9/howtos/self-hosted/ctp-audit-logs.md create mode 100644 spaces_versioned_docs/version-v1.9/howtos/self-hosted/declarative-ctps.md create mode 100644 spaces_versioned_docs/version-v1.9/howtos/self-hosted/deployment-reqs.md create mode 100644 spaces_versioned_docs/version-v1.9/howtos/self-hosted/dr.md create mode 100644 spaces_versioned_docs/version-v1.9/howtos/self-hosted/gitops-with-argocd.md create mode 100644 spaces_versioned_docs/version-v1.9/howtos/self-hosted/managed-spaces-deployment.md create mode 100644 spaces_versioned_docs/version-v1.9/howtos/self-hosted/oidc-configuration.md create mode 100644 spaces_versioned_docs/version-v1.9/howtos/self-hosted/proxies-config.md create mode 100644 spaces_versioned_docs/version-v1.9/howtos/self-hosted/query-api.md create mode 100644 spaces_versioned_docs/version-v1.9/howtos/self-hosted/scaling-resources.md create mode 100644 spaces_versioned_docs/version-v1.9/howtos/self-hosted/self-hosted-spaces-deployment.md create mode 100644 spaces_versioned_docs/version-v1.9/howtos/self-hosted/space-observability.md create mode 100644 spaces_versioned_docs/version-v1.9/howtos/self-hosted/spaces-management.md create mode 100644 spaces_versioned_docs/version-v1.9/howtos/self-hosted/troubleshooting.md create mode 100644 spaces_versioned_docs/version-v1.9/howtos/self-hosted/use-argo.md create mode 100644 spaces_versioned_docs/version-v1.9/howtos/self-hosted/workload-id/_category_.json create mode 100644 spaces_versioned_docs/version-v1.9/howtos/self-hosted/workload-id/backup-restore-config.md create mode 100644 spaces_versioned_docs/version-v1.9/howtos/self-hosted/workload-id/billing-config.md create mode 100644 spaces_versioned_docs/version-v1.9/howtos/self-hosted/workload-id/eso-config.md create mode 100644 spaces_versioned_docs/version-v1.9/howtos/simulations.md create mode 100644 spaces_versioned_docs/version-v1.9/overview/_category_.json create mode 100644 spaces_versioned_docs/version-v1.9/overview/index.md create mode 100644 spaces_versioned_docs/version-v1.9/reference/_category_.json create mode 100644 spaces_versioned_docs/version-v1.9/reference/index.md create mode 100644 spaces_versioned_sidebars/version-v1.10-sidebars.json create mode 100644 spaces_versioned_sidebars/version-v1.11-sidebars.json create mode 100644 spaces_versioned_sidebars/version-v1.12-sidebars.json create mode 100644 spaces_versioned_sidebars/version-v1.13-sidebars.json create mode 100644 spaces_versioned_sidebars/version-v1.14-sidebars.json create mode 100644 spaces_versioned_sidebars/version-v1.15-sidebars.json create mode 100644 spaces_versioned_sidebars/version-v1.9-sidebars.json create mode 100644 spaces_versions.json create mode 100644 src/sidebars/spaces.js create mode 100644 src/theme/DocItem/Layout/index.js create mode 100644 src/theme/DocItem/Layout/layout.module.css create mode 100644 versions.json diff --git a/docs/manuals/spaces/overview.md b/docs/manuals/spaces/overview.md deleted file mode 100644 index 741031d0e..000000000 --- a/docs/manuals/spaces/overview.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -title: Spaces Overview -sidebar_label: Overview -hide_title: true -sidebar_position: 1 -description: Spaces are a host environment in Upbound for control planes-as-a-service ---- -# Overview - - - -Upbound Spaces are a core component of the Upbound platform. A _Space_ is a hosting environment for Upbound control planes-as-a-service. 
Spaces delivers a managed experience for deploying, operating, and scaling control planes powered by [Upbound Crossplane][uxp]. - -Spaces offers [supporting features][features] to help you operate control planes at scale. - -You can choose to run in a Cloud Space, which offers a true SaaS experience, or deploy your own Self-hosted Space. **These options are available for users on a Standard, Enterprise, or Business Critical plan.** - -For more information on Spaces offerings, review the [deployment modes][deployment-modes]. - -[uxp]: /manuals/uxp/overview -[deployment-modes]: /manuals/spaces/concepts/deployment-modes -[cloud-spaces]: /manuals/spaces/overview -[self-hosted-space]: /manuals/spaces/howtos/self-hosted/dr -[features]: /manuals/spaces/howtos/managed-service -[cloud-space-guide]: /manuals/spaces/overview -[self-hosted-space-guide]: /manuals/spaces/howtos/self-hosted/dr diff --git a/docs/reference/apis/spaces-api/latest.md b/docs/reference/apis/spaces-api/index.md similarity index 95% rename from docs/reference/apis/spaces-api/latest.md rename to docs/reference/apis/spaces-api/index.md index 7c5163d29..5e68b0768 100644 --- a/docs/reference/apis/spaces-api/latest.md +++ b/docs/reference/apis/spaces-api/index.md @@ -1,6 +1,6 @@ --- -title: Latest - v1.15 -description: Documentation for the Spaces API resources +title: Spaces API Reference +description: Documentation for the Spaces API resources (v1.15 - Latest) sidebar_position: 1 --- import CrdDocViewer from '@site/src/components/CrdViewer'; diff --git a/docs/reference/apis/spaces-api/v1_10.md b/docs/reference/apis/spaces-api/v1_10.md deleted file mode 100644 index c08d4b329..000000000 --- a/docs/reference/apis/spaces-api/v1_10.md +++ /dev/null @@ -1,49 +0,0 @@ ---- -title: v1.10 -description: Documentation for the Spaces API resources -sidebar_position: 6 ---- -import CrdDocViewer from '@site/src/components/CrdViewer'; - -This page documents the Custom Resource Definitions (CRDs) for the Spaces API. - -## Control Planes -### Control Planes - - -### Control Plane Overrides - - -## Observability -### Shared Telemetry Configs - - -## Policy -### Shared Upbound Policies - - -## Secrets -### Shared External Secrets - - -### Shared Secret Stores - - -## Simulations - - -## Spaces Backups -### Backups - - -### Backup Schedules - - -### Shared Backup Configs - - -### Shared Backups - - -### Shared Backup Schedules - diff --git a/docs/reference/apis/spaces-api/v1_11.md b/docs/reference/apis/spaces-api/v1_11.md deleted file mode 100644 index c0ac509ba..000000000 --- a/docs/reference/apis/spaces-api/v1_11.md +++ /dev/null @@ -1,50 +0,0 @@ ---- -title: v1.11 -description: Documentation for the Spaces API resources -sidebar_position: 5 ---- -import CrdDocViewer from '@site/src/components/CrdViewer'; - -This page documents the Custom Resource Definitions (CRDs) for the Spaces API. 
- -# Spaces API Reference – v1.11 -## Control Planes -### Control Planes - - -### Control Plane Overrides - - -## Observability -### Shared Telemetry Configs - - -## Policy -### Shared Upbound Policies - - -## Secrets -### Shared External Secrets - - -### Shared Secret Stores - - -## Simulations - - -## Spaces Backups -### Backups - - -### Backup Schedules - - -### Shared Backup Configs - - -### Shared Backups - - -### Shared Backup Schedules - diff --git a/docs/reference/apis/spaces-api/v1_12.md b/docs/reference/apis/spaces-api/v1_12.md deleted file mode 100644 index a270c2f3b..000000000 --- a/docs/reference/apis/spaces-api/v1_12.md +++ /dev/null @@ -1,75 +0,0 @@ ---- -title: v1.12 -description: Documentation for the Spaces API resources -sidebar_position: 4 ---- -import CrdDocViewer from '@site/src/components/CrdViewer'; - - -This page documents the Custom Resource Definitions (CRDs) for the Spaces API. - - -## Control Planes -### Control Planes - - -### Control Plane Overrides - - -## Observability -### Shared Telemetry Configs - - -## `pkg` -### Controller Revisions - - -### Controller Runtime Configs - - -### Controllers - - -### Remote Configuration Revisions - - -### Remote Configurations - - -## Policy -### Shared Upbound Policies - - -## References -### Referenced Objects - - -## Scheduling -### Environments - - -## Secrets -### Shared External Secrets - - -### Shared Secret Stores - - -## Simulations - - -## Spaces Backups -### Backups - - -### Backup Schedules - - -### Shared Backup Configs - - -### Shared Backups - - -### Shared Backup Schedules - diff --git a/docs/reference/apis/spaces-api/v1_13.md b/docs/reference/apis/spaces-api/v1_13.md deleted file mode 100644 index 5bc2ab5eb..000000000 --- a/docs/reference/apis/spaces-api/v1_13.md +++ /dev/null @@ -1,75 +0,0 @@ ---- -title: v1.13 -description: Documentation for the Spaces API resources -sidebar_position: 3 ---- -import CrdDocViewer from '@site/src/components/CrdViewer'; - - -This page documents the Custom Resource Definitions (CRDs) for the Spaces API. - - -## Control Planes -### Control Planes - - -### Control Plane Overrides - - -## Observability -### Shared Telemetry Configs - - -## `pkg` -### Controller Revisions - - -### Controller Runtime Configs - - -### Controllers - - -### Remote Configuration Revisions - - -### Remote Configurations - - -## Policy -### Shared Upbound Policies - - -## References -### Referenced Objects - - -## Scheduling -### Environments - - -## Secrets -### Shared External Secrets - - -### Shared Secret Stores - - -## Simulations - - -## Spaces Backups -### Backups - - -### Backup Schedules - - -### Shared Backup Configs - - -### Shared Backups - - -### Shared Backup Schedules - diff --git a/docs/reference/apis/spaces-api/v1_14.md b/docs/reference/apis/spaces-api/v1_14.md deleted file mode 100644 index 17da14e45..000000000 --- a/docs/reference/apis/spaces-api/v1_14.md +++ /dev/null @@ -1,72 +0,0 @@ ---- -title: v1.14 -description: Documentation for the Spaces API resources -sidebar_position: 2 ---- -import CrdDocViewer from '@site/src/components/CrdViewer'; - - -This page documents the Custom Resource Definitions (CRDs) for the Spaces API. 
- - -## Control Planes -### Control Planes - - -## Observability -### Shared Telemetry Configs - - -## `pkg` -### Controller Revisions - - -### Controller Runtime Configs - - -### Controllers - - -### Remote Configuration Revisions - - -### Remote Configurations - - -## Policy -### Shared Upbound Policies - - -## References -### Referenced Objects - - -## Scheduling -### Environments - - -## Secrets -### Shared External Secrets - - -### Shared Secret Stores - - -## Simulations - - -## Spaces Backups -### Backups - - -### Backup Schedules - - -### Shared Backup Configs - - -### Shared Backups - - -### Shared Backup Schedules - diff --git a/docs/reference/apis/spaces-api/v1_9.md b/docs/reference/apis/spaces-api/v1_9.md deleted file mode 100644 index 45b4f65de..000000000 --- a/docs/reference/apis/spaces-api/v1_9.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -title: v1.9 -description: Documentation for the Spaces API resources -sidebar_position: 7 ---- -import CrdDocViewer from '@site/src/components/CrdViewer'; - - -This page documents the Custom Resource Definitions (CRDs) for the Spaces API. - - -## Control Planes -### Control Planes - - -### Control Plane Overrides - - -## Observability -### Shared Telemetry Configs - - -## Policy -### Shared Upbound Policies - - -## Secrets -### Shared External Secrets - - -### Shared Secret Stores - - -## Simulations - - -## Spaces Backups -### Backups - - -### Backup Schedules - - -### Shared Backup Configs - - -### Shared Backups - - -### Shared Backup Schedules - diff --git a/docusaurus.config.js b/docusaurus.config.js index 6c0779f43..cb5810427 100644 --- a/docusaurus.config.js +++ b/docusaurus.config.js @@ -71,11 +71,33 @@ const config = { [ "@docusaurus/plugin-content-docs", { + id: "docs", path: "docs", routeBasePath: "/", sidebarPath: require.resolve("./src/sidebars/main.js"), }, ], + [ + "@docusaurus/plugin-content-docs", + { + id: "spaces", + path: "spaces-docs", + routeBasePath: "/spaces", + sidebarPath: require.resolve("./src/sidebars/spaces.js"), + includeCurrentVersion: true, + }, + ], + [ + "@docusaurus/plugin-client-redirects", + { + redirects: [ + { + from: "/spaces", + to: "/spaces/overview/", + }, + ], + }, + ], "./scripts/plan-plugin.js", function (context, options) { return { @@ -175,7 +197,7 @@ const config = { }, { label: "Spaces", - to: "/manuals/spaces/overview/", + href: "/spaces/", }, { label: "CLI", @@ -200,10 +222,9 @@ const config = { ], }, { - type: "doc", label: "Reference", position: "left", - docId: "reference/index", + to: "/reference/", }, { type: "search", diff --git a/docs/manuals/spaces/concepts/_category_.json b/spaces-docs/concepts/_category_.json similarity index 100% rename from docs/manuals/spaces/concepts/_category_.json rename to spaces-docs/concepts/_category_.json diff --git a/docs/manuals/spaces/concepts/control-planes.md b/spaces-docs/concepts/control-planes.md similarity index 92% rename from docs/manuals/spaces/concepts/control-planes.md rename to spaces-docs/concepts/control-planes.md index 1ce979802..7066343de 100644 --- a/docs/manuals/spaces/concepts/control-planes.md +++ b/spaces-docs/concepts/control-planes.md @@ -13,6 +13,12 @@ Control planes in Upbound are fully isolated Crossplane control plane instances This lets users focus on building their APIs and operating their control planes, while Upbound handles the rest. Each control plane has its own dedicated API server connecting users to their control plane. +:::info API Version Information +This guide applies to all supported versions (v1.9-v1.14+). 
+ +For detailed information about Control Plane API specifications and CRD fields, see the [Spaces API Reference](../../../reference/apis/spaces-api/). For version compatibility details, see the version support policy. +::: + ## Control plane architecture ![Managed Control Plane Architecture](/img/mcp.png) @@ -170,7 +176,7 @@ spec: -For production-grade scenarios, it's recommended you configure your control plane declaratively via Git plus a Continuous Delivery (CD) Engine such as Argo. For guidance on this topic, read [GitOps with control planes][gitops-with-control-planes]. +For production-grade scenarios, it's recommended you configure your control plane declaratively via Git plus a Continuous Delivery (CD) Engine such as Argo. For guidance on this topic, read [GitOps with control planes][gitops-with-control-planes]. @@ -202,10 +208,10 @@ Upbound gives users the ability to configure the collection of telemetry (logs, -[automatic-upgrades]: /manuals/spaces/howtos/auto-upgrade +[automatic-upgrades]: /spaces/howtos/auto-upgrade [release-notes]: https://github.com/upbound/universal-crossplane/releases -[control-plane-group]: /manuals/spaces/concepts/groups -[space]: /manuals/spaces/overview +[control-plane-group]: /spaces/concepts/groups +[space]: /spaces/overview [up-cli]: /reference/cli-reference [cli-reference]: /reference/cli-reference [up-ctx]: /reference/cli-reference @@ -213,9 +219,9 @@ Upbound gives users the ability to configure the collection of telemetry (logs, [cli-documentation]: /manuals/cli/concepts/contexts [upbound-marketplace]: https://marketplace.upbound.io [upbound-marketplace-1]: https://marketplace.upbound.io -[gitops-with-control-planes]: /manuals/spaces/howtos/cloud-spaces/gitops +[gitops-with-control-planes]: /spaces/howtos/cloud-spaces/gitops-on-upbound [connect-control-planes-to-external-services]: /manuals/platform/howtos/oidc -[spaces-documentation]: /manuals/spaces/howtos/secrets-management -[spaces-documentation-1]: /manuals/spaces/howtos/backup-and-restore +[spaces-documentation]: /spaces/howtos/secrets-management +[spaces-documentation-1]: /spaces/howtos/backup-and-restore [otel]: https://otel.com -[spaces-documentation-2]: /manuals/spaces/howtos/observability +[spaces-documentation-2]: /spaces/howtos/observability diff --git a/docs/manuals/spaces/concepts/deployment-modes.md b/spaces-docs/concepts/deployment-modes.md similarity index 89% rename from docs/manuals/spaces/concepts/deployment-modes.md rename to spaces-docs/concepts/deployment-modes.md index 447349862..f5e718f88 100644 --- a/docs/manuals/spaces/concepts/deployment-modes.md +++ b/spaces-docs/concepts/deployment-modes.md @@ -47,7 +47,7 @@ This table lists the cloud service provider regions supported by Upbound.
| --- | --- | | `us-east-1` | Eastern US (Iowa) -[dedicated-spaces]: /manuals/spaces/howtos/cloud-spaces/dedicated-spaces-deployment -[managed-spaces]: /manuals/spaces/howtos/self-hosted/managed-spaces-deployment -[self-hosted-spaces]: /manuals/spaces/howtos/self-hosted/self-hosted-spaces-deployment +[dedicated-spaces]: /spaces/howtos/cloud-spaces/dedicated-spaces-deployment +[managed-spaces]: /spaces/howtos/self-hosted/managed-spaces-deployment +[self-hosted-spaces]: /spaces/howtos/self-hosted/self-hosted-spaces-deployment [console]: /manuals/console/upbound-console/ diff --git a/docs/manuals/spaces/concepts/groups.md b/spaces-docs/concepts/groups.md similarity index 96% rename from docs/manuals/spaces/concepts/groups.md rename to spaces-docs/concepts/groups.md index e750fa006..d2ccacdb3 100644 --- a/docs/manuals/spaces/concepts/groups.md +++ b/spaces-docs/concepts/groups.md @@ -109,7 +109,7 @@ Most Kubernetes clusters come with some set of predefined namespaces. Because a 3. Labeling a Kubernetes namespace with the label `spaces.upbound.io/group=true` -[secrets]: /manuals/spaces/howtos/secrets-management -[backups]: /manuals/spaces/howtos/self-hosted/workload-id/backup-restore-config/ -[space]: /manuals/spaces/overview +[secrets]: /spaces/howtos/secrets-management +[backups]: /spaces/howtos/self-hosted/workload-id/backup-restore-config/ +[space]: /spaces/overview [upbound-context]: /manuals/cli/concepts/contexts diff --git a/docs/manuals/spaces/howtos/_category_.json b/spaces-docs/howtos/_category_.json similarity index 100% rename from docs/manuals/spaces/howtos/_category_.json rename to spaces-docs/howtos/_category_.json diff --git a/docs/manuals/spaces/howtos/api-connector.md b/spaces-docs/howtos/api-connector.md similarity index 97% rename from docs/manuals/spaces/howtos/api-connector.md rename to spaces-docs/howtos/api-connector.md index 67e1daf09..a14468f52 100644 --- a/docs/manuals/spaces/howtos/api-connector.md +++ b/spaces-docs/howtos/api-connector.md @@ -6,10 +6,16 @@ aliases: - /api-connector - /concepts/api-connector --- +:::info API Version Information +This guide covers API Connector, a preview feature available in all supported versions (v1.9-v1.14+). + +For related API specifications and available resources, see the [Spaces API Reference](../../../reference/apis/spaces-api/). For version compatibility details, see the version support policy. +::: + :::warning API Connector is currently in **Preview**. The feature is under active development and subject to breaking changes. Use for testing and evaluation -purposes only. +purposes only. ::: API Connector enables seamless integration between Kubernetes application diff --git a/docs/manuals/spaces/howtos/auto-upgrade.md b/spaces-docs/howtos/auto-upgrade.md similarity index 88% rename from docs/manuals/spaces/howtos/auto-upgrade.md rename to spaces-docs/howtos/auto-upgrade.md index 919eaff48..249056fb4 100644 --- a/docs/manuals/spaces/howtos/auto-upgrade.md +++ b/spaces-docs/howtos/auto-upgrade.md @@ -9,6 +9,12 @@ plan: "standard" Upbound Spaces can automatically upgrade the version of Upbound Crossplane in managed control plane instances. You can edit the `spec.crossplane.autoUpgrade` field in your `ControlPlane` specification with the available release channels below. +:::info API Version Information +This guide applies to all supported versions (v1.9-v1.14+). The `spec.crossplane.autoUpgrade` field has been available since v1.9. + +For ControlPlane API specifications, see the [Spaces API Reference](../../../reference/apis/spaces-api/).
For version compatibility details, see the version support policy. +::: + | Channel | Description | Example | |------------|-----------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------| | **None** | Disables auto upgrades. | _Uses version specified in `spec.crossplane.version`._ | @@ -60,7 +66,7 @@ spec: ## Supported Crossplane versions -Spaces supports the three [preceding minor versions][preceding-minor-versions] from the last supported minor version. For example, if the last supported minor version is `1.14`, minor versions `1.13` and `1.12` are also supported. Versions older than the three most recent minor versions aren't supported. Only supported Crossplane versions are valid specifications for new control planes. +Spaces supports the three [preceding minor versions][preceding-minor-versions] from the last supported minor version. For example, if the last supported minor version is `1.14`, minor versions `1.13` and `1.12` are also supported. Versions older than the three most recent minor versions aren't supported. Only supported Crossplane versions are valid specifications for new control planes. Current Crossplane version support by Spaces version: diff --git a/spaces-docs/howtos/automation-and-gitops/_category_.json b/spaces-docs/howtos/automation-and-gitops/_category_.json new file mode 100644 index 000000000..b65481af6 --- /dev/null +++ b/spaces-docs/howtos/automation-and-gitops/_category_.json @@ -0,0 +1,8 @@ +{ + "label": "Automation & GitOps", + "position": 11, + "collapsed": true, + "customProps": { + "plan": "business" + } +} diff --git a/spaces-docs/howtos/automation-and-gitops/overview.md b/spaces-docs/howtos/automation-and-gitops/overview.md new file mode 100644 index 000000000..57eeb15fc --- /dev/null +++ b/spaces-docs/howtos/automation-and-gitops/overview.md @@ -0,0 +1,138 @@ +--- +title: Automation and GitOps Overview +sidebar_label: Overview +sidebar_position: 1 +description: Guide to automating control plane deployments with GitOps and Argo CD +plan: "business" +--- + +Automating control plane deployments with GitOps enables declarative, version-controlled infrastructure management. This section covers integrating GitOps workflows with Upbound control planes using Argo CD and related tools. + +:::info API Version Information +This guide applies to all supported versions (v1.9-v1.14+). GitOps patterns are compatible across all versions of Upbound Spaces. + +For details on deployment models and their differences, see the [Deployment Modes](../../concepts/deployment-modes.md) concept guide. For version-specific features, see the version support policy. +::: + +## What is GitOps? + +GitOps is an approach for managing infrastructure by: +- **Declaratively describing** desired system state in Git +- **Using controllers** to continuously reconcile actual state with desired state +- **Treating Git as the source of truth** for all configuration and deployments + +Upbound control planes are fully compatible with GitOps patterns and we strongly recommend integrating GitOps in the platforms you build on Upbound. + +## Key Concepts + +### Argo CD +[Argo CD](https://argo-cd.readthedocs.io/) is a popular Kubernetes-native GitOps controller. It continuously monitors Git repositories and automatically applies changes to your infrastructure when commits are detected.
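As a minimal sketch of that reconciliation loop (the repository URL, path, and destination cluster name below are placeholders, not values from this guide), an Argo CD `Application` that syncs a Git directory to a registered control plane looks like this:

```yaml
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: platform-claims
  namespace: argocd
spec:
  project: default
  source:
    # Placeholder repository: the Git repo holding your declarative manifests.
    repoURL: https://github.com/example-org/platform-repo.git
    targetRevision: main
    path: claims
  destination:
    # Placeholder cluster name: a control plane registered with Argo CD.
    name: my-control-plane
    namespace: default
  syncPolicy:
    # Automatically apply new commits, prune deleted resources, and revert drift.
    automated:
      prune: true
      selfHeal: true
```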
+ +### Deployment Models + +The way you configure GitOps depends on your deployment model: + +| Aspect | Cloud Spaces | Self-Hosted Spaces | +|--------|--------------|-------------------| +| **Access Method** | Upbound API with tokens | Kubernetes native (secrets/kubeconfig) | +| **Configuration** | Kubeconfig via `up` CLI | Control plane connection secrets | +| **Setup Complexity** | More involved (API integration) | Simpler (native Kubernetes) | +| **Typical Use Case** | Managing Upbound resources | Managing workloads on control planes | + +## Getting Started + +**Choose your path based on your deployment model:** + +### For Cloud Spaces +If you're using Upbound Cloud Spaces (Dedicated or Managed): +1. Start with [GitOps with Upbound Control Planes](../cloud-spaces/gitops-on-upbound.md) +2. Learn how to integrate Argo CD with Cloud Spaces +3. Manage both control plane infrastructure and Upbound resources declaratively + +### For Self-Hosted Spaces +If you're running self-hosted Spaces: +1. Start with [GitOps with ArgoCD in Self-Hosted Spaces](../self-hosted/gitops-with-argocd.md) +2. Learn how to configure control plane connection secrets +3. Manage workloads deployed to your control planes + +## Common Workflows + +### Workflow 1: Managing Control Planes with GitOps +Create and manage control planes themselves declaratively using provider-kubernetes: + +```yaml +apiVersion: kubernetes.crossplane.io/v1alpha2 +kind: Object +metadata: + name: my-controlplane +spec: + forProvider: + manifest: + apiVersion: spaces.upbound.io/v1beta1 + kind: ControlPlane + # ... control plane configuration +``` + +### Workflow 2: Managing Workloads on Control Planes +Deploy applications and resources to control planes using standard Kubernetes GitOps patterns: + +```yaml +apiVersion: v1 +kind: Namespace +metadata: + name: my-app +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: my-app + namespace: my-app +# ... deployment configuration +``` + +### Workflow 3: Managing Upbound Resources +Use provider-upbound to manage Upbound IAM and repository resources: + +- Teams +- Robots and their team memberships +- Repositories and permissions + +## Advanced Topics + +### Argo CD Plugin for Upbound +Learn more in the [ArgoCD Plugin guide](../self-hosted/use-argo.md) for enhanced integration with self-hosted Spaces. + +### Declarative Control Plane Creation +See [Declaratively create control planes](../self-hosted/declarative-ctps.md) for advanced automation patterns. + +### Consuming Control Plane APIs +Understand how to [consume control plane APIs in your app cluster](../mcp-connector-guide.md) with Argo CD. + +## Prerequisites + +Before implementing GitOps with control planes, ensure you have: + +**For Cloud Spaces:** +- Access to Upbound Cloud Spaces +- `up` CLI installed and configured +- API token with appropriate permissions +- Argo CD or similar GitOps controller running +- Familiarity with Kubernetes RBAC + +**For Self-Hosted Spaces:** +- Self-hosted Spaces deployed and running +- Argo CD deployed in your infrastructure +- Kubectl access to the cluster hosting Spaces +- Understanding of control plane architecture + +## Next Steps + +1. **Choose your deployment model** above +2. **Review the relevant getting started guide** +3. **Set up your GitOps controller** (Argo CD) +4. **Deploy your first automated control plane** +5. **Explore advanced topics** as needed + +:::tip +Start with simple deployments to test your GitOps workflow before moving to production.
Use [simulations](../simulations.md) to preview changes before applying them. ::: diff --git a/docs/manuals/spaces/howtos/backup-and-restore.md b/spaces-docs/howtos/backup-and-restore.md similarity index 95% rename from docs/manuals/spaces/howtos/backup-and-restore.md rename to spaces-docs/howtos/backup-and-restore.md index 01df2d8f8..3b8d026cb 100644 --- a/docs/manuals/spaces/howtos/backup-and-restore.md +++ b/spaces-docs/howtos/backup-and-restore.md @@ -9,6 +9,21 @@ plan: "enterprise" Upbound's _Shared Backups_ is a built-in backup and restore feature. Shared Backups lets you configure automatic schedules for taking snapshots of your control planes. You can restore data from these backups by making new control planes. This guide explains how to use Shared Backups for disaster recovery or upgrade scenarios. +:::info API Version Information & Available Versions +This guide applies to **all supported versions** (v1.9-v1.15+). + +**Select your API version**: +- [Spaces API v1.15 (Latest)](../../../next/reference/apis/spaces-api/) +- [Spaces API v1.14](../../../1.14/reference/apis/spaces-api/) +- [Spaces API v1.13](../../../1.13/reference/apis/spaces-api/) +- [Spaces API v1.12](../../../1.12/reference/apis/spaces-api/) +- [Spaces API v1.11](../../../1.11/reference/apis/spaces-api/) +- [Spaces API v1.10](../../../1.10/reference/apis/spaces-api/) +- [Spaces API v1.9](../../../1.9/reference/apis/spaces-api/) + +For the version support policy and version compatibility details, see the version list above. +::: + ## Benefits The Shared Backups feature provides the following benefits: @@ -226,7 +241,7 @@ the deletion policy. Set the `spec.deletionPolicy` to define backup deletion actions, including the deletion of the backup file from the bucket. The Deletion Policy value defaults -to `Orphan`. Set it to `Delete` to remove uploaded files in the bucket. For more +to `Orphan`. Set it to `Delete` to remove uploaded files in the bucket. For more information on the backup and restore process, review the [Spaces API documentation][spaces-api-documentation].
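A minimal sketch of the deletion policy in context, assuming the `Backup` resource shape from the Spaces backups API (the group/version string and the control plane reference below are illustrative, so confirm the exact fields against the Spaces API reference for your version):

```yaml
apiVersion: spaces.upbound.io/v1alpha1  # illustrative; check your Space's installed CRDs
kind: Backup
metadata:
  name: pre-upgrade-backup
  namespace: default  # the control plane group
spec:
  # Illustrative reference to the control plane to snapshot.
  controlPlane: my-control-plane
  # Delete removes the uploaded backup file from the bucket when this
  # object is deleted; the default, Orphan, leaves the file in place.
  deletionPolicy: Delete
```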
@@ -498,10 +513,10 @@ spec: ``` -[group-scoped]: /manuals/spaces/concepts/groups -[group-scoped-1]: /manuals/spaces/concepts/groups -[group-scoped-2]: /manuals/spaces/concepts/groups -[group-scoped-3]: /manuals/spaces/concepts/groups +[group-scoped]: /spaces/concepts/groups +[group-scoped-1]: /spaces/concepts/groups +[group-scoped-2]: /spaces/concepts/groups +[group-scoped-3]: /spaces/concepts/groups [sharedbackupconfig]: /reference/apis/spaces-api/latest [thanos-object-storage]: https://thanos.io/tip/thanos/storage.md/ [sharedbackupschedule]: /reference/apis/spaces-api/latest diff --git a/docs/manuals/spaces/howtos/cloud-spaces/_category_.json b/spaces-docs/howtos/cloud-spaces/_category_.json similarity index 100% rename from docs/manuals/spaces/howtos/cloud-spaces/_category_.json rename to spaces-docs/howtos/cloud-spaces/_category_.json diff --git a/docs/manuals/spaces/howtos/cloud-spaces/dedicated-spaces-deployment.md b/spaces-docs/howtos/cloud-spaces/dedicated-spaces-deployment.md similarity index 93% rename from docs/manuals/spaces/howtos/cloud-spaces/dedicated-spaces-deployment.md rename to spaces-docs/howtos/cloud-spaces/dedicated-spaces-deployment.md index 6d68b1c8a..ebad9493e 100644 --- a/docs/manuals/spaces/howtos/cloud-spaces/dedicated-spaces-deployment.md +++ b/spaces-docs/howtos/cloud-spaces/dedicated-spaces-deployment.md @@ -30,4 +30,4 @@ If you have an interest in Upbound Dedicated Spaces, contact requirements and see if Dedicated Spaces are a good fit for you. [contact-us]: https://www.upbound.io/contact-us -[managed-space]: /manuals/spaces/howtos/self-hosted/managed-spaces-deployment +[managed-space]: /spaces/howtos/self-hosted/managed-spaces-deployment diff --git a/docs/manuals/spaces/howtos/cloud-spaces/gitops.md b/spaces-docs/howtos/cloud-spaces/gitops-on-upbound.md similarity index 96% rename from docs/manuals/spaces/howtos/cloud-spaces/gitops.md rename to spaces-docs/howtos/cloud-spaces/gitops-on-upbound.md index 9de732c14..fa59a8dce 100644 --- a/docs/manuals/spaces/howtos/cloud-spaces/gitops.md +++ b/spaces-docs/howtos/cloud-spaces/gitops-on-upbound.md @@ -1,10 +1,14 @@ --- -title: GitOps with control planes +title: GitOps with Upbound Control Planes sidebar_position: 80 -description: An introduction to doing GitOps with control planes on Upbound +description: An introduction to doing GitOps with control planes on Upbound Cloud Spaces tier: "business" --- +:::info Deployment Model +This guide applies to **Upbound Cloud Spaces** (Dedicated and Managed Spaces). For self-hosted Spaces deployments, see [GitOps with ArgoCD in Self-Hosted Spaces](/spaces/howtos/self-hosted/gitops-with-argocd/). +::: + GitOps is an approach for managing a system by declaratively describing desired resources' configurations in Git and using controllers to realize the desired state. Upbound's control planes are compatible with this pattern and it's strongly recommended you integrate GitOps in the platforms you build on Upbound. @@ -287,12 +291,12 @@ spec: You can now create _Objects_ in the control plane which wrap Space APIs. 
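For example, a hedged sketch of such an `Object` (mirroring the provider-kubernetes pattern shown in the automation overview; the control plane name, group, and upgrade channel below are placeholders):

```yaml
apiVersion: kubernetes.crossplane.io/v1alpha2
kind: Object
metadata:
  name: dev-controlplane
spec:
  forProvider:
    manifest:
      # The wrapped Space API resource: a ControlPlane in the "default" group.
      apiVersion: spaces.upbound.io/v1beta1
      kind: ControlPlane
      metadata:
        name: dev
        namespace: default
      spec:
        crossplane:
          autoUpgrade:
            channel: Stable  # placeholder; see the auto-upgrade guide for channels
  providerConfigRef:
    name: default
```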
[generate-a-kubeconfig]: /manuals/cli/concepts/contexts -[control-plane-groups]: /manuals/spaces/concepts/groups -[control-planes]: /manuals/spaces/concepts/control-planes +[control-plane-groups]: /spaces/concepts/groups +[control-planes]: /spaces/concepts/control-planes [upbound-iam-resources]: /manuals/platform/concepts/identity-management [space-apis]: /reference/apis/spaces-api/v1_9 [space-apis-1]: /reference/apis/spaces-api/v1_9 -[control-plane-groups-2]: /manuals/spaces/concepts/groups +[control-plane-groups-2]: /spaces/concepts/groups [argo-cd]: https://argo-cd.readthedocs.io/en/stable/ diff --git a/docs/manuals/spaces/howtos/control-plane-topologies.md b/spaces-docs/howtos/control-plane-topologies.md similarity index 98% rename from docs/manuals/spaces/howtos/control-plane-topologies.md rename to spaces-docs/howtos/control-plane-topologies.md index 5818d824e..9020e5a41 100644 --- a/docs/manuals/spaces/howtos/control-plane-topologies.md +++ b/spaces-docs/howtos/control-plane-topologies.md @@ -4,6 +4,12 @@ sidebar_position: 15 description: Configure scheduling of composites to remote control planes --- +:::info API Version Information +This guide is for the Control Plane Topology feature, which is in **private preview**. For interested customers with access to this feature, it applies to v1.12+. + +For related API specifications, see the [Spaces API Reference](../../../reference/apis/spaces-api/). For details on feature availability, see the version support policy. +::: + :::important This feature is in private preview for select customers in Upbound Spaces. If you're interested in this deployment mode, please [contact us](https://www.upbound.io/support/contact). ::: diff --git a/docs/manuals/spaces/howtos/ctp-connector.md b/spaces-docs/howtos/ctp-connector.md similarity index 97% rename from docs/manuals/spaces/howtos/ctp-connector.md rename to spaces-docs/howtos/ctp-connector.md index 3c18fdc15..b2cc48c49 100644 --- a/docs/manuals/spaces/howtos/ctp-connector.md +++ b/spaces-docs/howtos/ctp-connector.md @@ -7,6 +7,12 @@ plan: "standard" +:::info API Version Information +This guide applies to all supported versions (v1.9-v1.14+). Control Plane Connector functionality is consistent across versions. + +For related API specifications and resources, see the [Spaces API Reference](../../../reference/apis/spaces-api/). For version compatibility details, see the version support policy. +::: + Control Plane Connector connects arbitrary Kubernetes application clusters outside the Upbound Spaces to your control planes running in Upbound Spaces. This lets you interact with your control plane's API from the app cluster. The claim APIs and the namespaced XR APIs
[kubeconfig]: /manuals/cli/howtos/context-config/#generate-a-kubeconfig-for-a-control-plane-in-a-group -[kubeconfig-1]:/manuals/spaces/concepts/control-planes/#connect-directly-to-your-control-plane +[kubeconfig-1]:/spaces/concepts/control-planes/#connect-directly-to-your-control-plane [these-instructions]:/manuals/console/#create-a-personal-access-token [kubernetes-api-aggregationlayer]: https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation/ [configuration-eks]: https://github.com/upbound/configuration-eks diff --git a/docs/manuals/spaces/howtos/debugging-a-ctp.md b/spaces-docs/howtos/debugging-a-ctp.md similarity index 92% rename from docs/manuals/spaces/howtos/debugging-a-ctp.md rename to spaces-docs/howtos/debugging-a-ctp.md index 673cdf77e..521271e40 100644 --- a/docs/manuals/spaces/howtos/debugging-a-ctp.md +++ b/spaces-docs/howtos/debugging-a-ctp.md @@ -6,6 +6,12 @@ description: A guide for how to debug resources on a control plane running in Up This guide provides troubleshooting guidance for how to identify and fix issues on a control plane. +:::info API Version Information +This guide applies to all supported versions (v1.9-v1.14+). Debugging methods are consistent across versions. + +For related control plane specifications, see the [Spaces API Reference](../../../reference/apis/spaces-api/). For version-specific features, see the version support policy. +::: + ## Start from Upbound Console @@ -75,7 +81,7 @@ The rendered YAML shows this MR is referencing a sibling MR that shares the same ![Use control plane explorer view to view status for a sibling MR](/img/debug-mr-dependency-status.png) -The sibling MR event stream shows the Provider processed the resource create request. Ignore the `CannotInitalizeManagedResrouce` event. EKS clusters can take 15 minutes or more to provision in AWS. The root cause is everything is fine -- all the resources are still provisioning. Waiting longer and then looking at the control plane explorer again, shows all resources are healthy. For reference, below is an example status field for a resource that's healthy and provisioned. +The sibling MR event stream shows the Provider processed the resource create request. Ignore the `CannotInitalizeManagedResrouce` event. EKS clusters can take 15 minutes or more to provision in AWS. The root cause is everything is fine -- all the resources are still provisioning. Waiting longer and then looking at the control plane explorer again, shows all resources are healthy. For reference, below is an example status field for a resource that's healthy and provisioned. ```yaml ... diff --git a/docs/manuals/spaces/howtos/managed-service.md b/spaces-docs/howtos/managed-service.md similarity index 94% rename from docs/manuals/spaces/howtos/managed-service.md rename to spaces-docs/howtos/managed-service.md index 48be6bcd7..40b983a76 100644 --- a/docs/manuals/spaces/howtos/managed-service.md +++ b/spaces-docs/howtos/managed-service.md @@ -20,4 +20,4 @@ server connecting users to their control plane. Read the [concept][ctp-concept] documentation to learn about Upbound control planes.
[uxp]: /manuals/uxp/overview -[ctp-concept]: /manuals/spaces/concepts/control-planes \ No newline at end of file +[ctp-concept]: /spaces/concepts/control-planes \ No newline at end of file diff --git a/docs/manuals/spaces/howtos/mcp-connector-guide.md b/spaces-docs/howtos/mcp-connector-guide.md similarity index 91% rename from docs/manuals/spaces/howtos/mcp-connector-guide.md rename to spaces-docs/howtos/mcp-connector-guide.md index 67a3c6722..8a3866d07 100644 --- a/docs/manuals/spaces/howtos/mcp-connector-guide.md +++ b/spaces-docs/howtos/mcp-connector-guide.md @@ -7,6 +7,12 @@ description: A tutorial to configure a Space with Argo to declaratively create a In this tutorial, you learn how to configure a Kubernetes app cluster to communicate with a control plane in an Upbound self-hosted Space. +:::info API Version Information +This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Control plane connector functionality is consistent across versions. + +For related API specifications and resources, see the [Spaces API Reference](../../../../../reference/apis/spaces-api/). For version compatibility details, see the version support policy. +::: + The [control plane connector][control-plane-connector] bridges your Kubernetes application clusters---running outside of Upbound--to your control planes running in Upbound. This allows you to interact with your control plane's API right from the app cluster. The claim APIs you define via `CompositeResourceDefinitions` are available alongside Kubernetes workload APIs like `Pod`. In effect, control plane connector provides the same experience as a locally installed Crossplane. ## Prerequisites @@ -30,7 +36,7 @@ Once the control plane is ready, connect to it. up ctp connect my-control-plane ``` -For convenience, install a an Upbound [platform reference Configuration][platform-reference-configuration] from the marketplace. For production scenarios, replace this with your own Crossplane Configurations or compositions. +For convenience, install an Upbound [platform reference Configuration][platform-reference-configuration] from the marketplace. For production scenarios, replace this with your own Crossplane Configurations or compositions. ```bash up ctp configuration install xpkg.upbound.io/upbound/platform-ref-aws:v1.4.0 ``` @@ -158,6 +164,6 @@ In this tutorial, you: - You saw how resource requests are actually fulfilled by the control plane. -[control-plane-connector]: /manuals/spaces/howtos/ctp-connector +[control-plane-connector]: /spaces/howtos/ctp-connector [platform-reference-configuration]: https://marketplace.upbound.io/configurations/upbound/platform-ref-aws -[api-connector]: /manuals/spaces/howtos/api-connector +[api-connector]: /spaces/howtos/api-connector diff --git a/docs/manuals/spaces/howtos/migrating-to-mcps.md b/spaces-docs/howtos/migrating-to-mcps.md similarity index 97% rename from docs/manuals/spaces/howtos/migrating-to-mcps.md rename to spaces-docs/howtos/migrating-to-mcps.md index 0e23fc7c9..93b9c5ac2 100644 --- a/docs/manuals/spaces/howtos/migrating-to-mcps.md +++ b/spaces-docs/howtos/migrating-to-mcps.md @@ -6,6 +6,12 @@ description: A guide to how to migrate to a control plane in Upbound The Upbound migration tool is a [CLI command][cli-command] that helps you migrate your existing Crossplane control plane to a control plane in Upbound. This tool works for migrating from self-managed Crossplane installations as well as between Upbound managed control planes (MCPs).
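As a quick sketch of the two-step flow described below (the `export` invocation mirrors the one shown later in this guide; the `import` subcommand name is an assumption, so verify it with `up controlplane migration --help`):

```bash
# Step 1: export the source control plane's state into an archive,
# reading cluster access from an explicit kubeconfig file (placeholder path).
up controlplane migration export --kubeconfig ./source-kubeconfig.yaml

# Step 2 (assumed subcommand): switch your context to the target control
# plane in Upbound, then import the generated archive.
up controlplane migration import
```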
+:::info API Version Information +This guide applies to all supported versions (v1.9-v1.14+). Migration procedures are consistent across versions, though you may want to upgrade to a newer version after migration to get the latest features. + +For version-specific features, migration considerations, and the full compatibility matrix, see the version support policy. +::: + To migrate a control plane to Upbound, you must: 1. Export your existing Crossplane control plane configuration/state into an archive file. @@ -125,7 +131,7 @@ Use the `--include-extra-resources=` CLI option to select other CRD types to inc ### Set the kubecontext -Currently `--context` isn't supported in the migration CLI. You should be able to use the `--kubeconfig` CLI option to use a file that's set to the correct context. For example: +Currently `--context` isn't supported in the migration CLI. You should be able to use the `--kubeconfig` CLI option to use a file that's set to the correct context. For example: ```bash up controlplane migration export --kubeconfig @@ -347,7 +353,7 @@ included in the backup: - Resources directly managed via Helm (ArgoCD's helm implementation, which templates Helm resources and then applies them, get included in the backup). The migration creates the exclusion list by looking for: - Any Resource with the label `"app.kubernetes.io/managed-by" == "Helm"` - - Kubernetes Secrets with the label prefix `helm.sh/release`. For example, `helm.sh/release.v1` + - Kubernetes Secrets with the label prefix `helm.sh/release`. For example, `helm.sh/release.v1` - Resources installed via a Crossplane package. These have an `ownerReference` with a prefix `pkg.crossplane.io`. The expectation is that during import, the Crossplane Package Manager bears responsibility for installing the resources. - Crossplane Locks: Any `Lock.pkg.crossplane.io` resource isn't included in the diff --git a/docs/manuals/spaces/howtos/observability.md b/spaces-docs/howtos/observability.md similarity index 88% rename from docs/manuals/spaces/howtos/observability.md rename to spaces-docs/howtos/observability.md index 531331385..8fc5c3278 100644 --- a/docs/manuals/spaces/howtos/observability.md +++ b/spaces-docs/howtos/observability.md @@ -22,6 +22,27 @@ Upbound Spaces offers two levels of observability: +:::info API Version Information & Version Selector +This guide applies to all supported versions (v1.9-v1.15+). Observability features evolved: + +- **v1.9-v1.12**: SharedTelemetryConfig available (alpha until v1.11) +- **v1.11+**: Observability promoted to stable with logs export support +- **v1.14+**: Both space-level and control-plane observability GA + +**View API Reference for Your Version**: +| Version | Status | Link | +|---------|--------|------| +| v1.15 | Latest | [Spaces API v1.15](../../../next/reference/apis/spaces-api/) | +| v1.14 | Supported | [Spaces API v1.14](../../../1.14/reference/apis/spaces-api/) | +| v1.13 | Supported | [Spaces API v1.13](../../../1.13/reference/apis/spaces-api/) | +| v1.12 | Supported | [Spaces API v1.12](../../../1.12/reference/apis/spaces-api/) | +| v1.11 | Supported | [Spaces API v1.11](../../../1.11/reference/apis/spaces-api/) | +| v1.10 | Limited | [Spaces API v1.10](../../../1.10/reference/apis/spaces-api/) | +| v1.9 | EOL | [Spaces API v1.9](../../../1.9/reference/apis/spaces-api/) | + +For the version support policy and feature availability, see the version table above.
+::: :::important **Space-level observability** (available since v1.6.0, GA in v1.14.0): - Disabled by default @@ -131,7 +152,7 @@ Self-hosted users can include system workloads (`api-server`, `etcd`) by setting :::important Spaces validates `SharedTelemetryConfig` resources before applying them by -sending telemetry to configured exporters. For self-hosted Spaces, ensure that +sending telemetry to configured exporters. For self-hosted Spaces, ensure that `spaces-controller` can reach the exporter endpoints. ::: @@ -346,9 +367,9 @@ kubectl describe stc ## Supported exporters Both Space-level and control plane observability support: -- `datadog` - For Datadog integration +- `datadog` - For Datadog integration - `otlphttp` - General-purpose exporter (used by New Relic, among others) -- `debug` - For troubleshooting +- `debug` - For troubleshooting ## Considerations @@ -367,7 +388,7 @@ documentation][opentelemetry-transformation-language]. [opentelemetry-operator]: https://opentelemetry.io/docs/kubernetes/operator/ [transform-processor]: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/transformprocessor/README.md [opentelemetry-transformation-language]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl -[space-level-o11y]: /manuals/spaces/howtos/self-hosted/space-observability +[space-level-o11y]: /spaces/howtos/self-hosted/space-observability [helm-chart-reference]: /reference/helm-reference [opentelemetry-transformation-language-functions]: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/ottl/ottlfuncs/README.md [opentelemetry-transformation-language-contexts]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl/contexts diff --git a/docs/manuals/spaces/howtos/query-api.md b/spaces-docs/howtos/query-api.md similarity index 97% rename from docs/manuals/spaces/howtos/query-api.md rename to spaces-docs/howtos/query-api.md index 1dcd95d54..78163de2f 100644 --- a/docs/manuals/spaces/howtos/query-api.md +++ b/spaces-docs/howtos/query-api.md @@ -9,6 +9,12 @@ description: Use the `up` CLI to query objects and resources Upbound's Query API allows users to inspect objects and resources within their control planes. The read-only `up alpha query` and `up alpha get` CLI commands allow you to gather information on your control planes in a fast and efficient package. These commands follow the [`kubectl` conventions][kubectl-conventions] for filtering, sorting, and retrieving information from your Space. +:::info API Version Information +This guide applies to all supported versions (v1.9-v1.14+). Query API is available in Cloud Spaces since v1.6 and Self-Hosted since v1.8. + +For detailed deployment procedures, see [Deploy Query API infrastructure - Self-Hosted](./self-hosted/query-api.md). For version compatibility details, see the version support policy.
+::: + ## Using the Query API @@ -302,7 +308,7 @@ The SpaceQuery resource allows you to query objects across all control planes in -[documentation]: /manuals/spaces/howtos/self-hosted/query-api +[documentation]: /spaces/howtos/self-hosted/query-api [up-ctx]: /reference/cli-reference [up-alpha-get-command]: /reference/cli-reference [a-flag]: /reference/cli-reference diff --git a/docs/manuals/spaces/howtos/secrets-management.md b/spaces-docs/howtos/secrets-management.md similarity index 98% rename from docs/manuals/spaces/howtos/secrets-management.md rename to spaces-docs/howtos/secrets-management.md index 491b7f5d8..88e730ae5 100644 --- a/docs/manuals/spaces/howtos/secrets-management.md +++ b/spaces-docs/howtos/secrets-management.md @@ -12,6 +12,12 @@ planes as secrets in an external secret store. This feature is a wrapper around the [External Secrets Operator (ESO)][external-secrets-operator-eso] that pulls secrets from external vaults and distributes them across your platform. +:::info API Version Information +This guide applies to all supported versions (v1.9-v1.14+). The SharedSecretStore and SharedExternalSecret CRDs have been stable since v1.9. + +For API specifications and configuration details, see the [Spaces API Reference](../../../reference/apis/spaces-api/). For version compatibility details, see the version support policy. +::: + ## Benefits The Shared Secrets feature allows you to: diff --git a/docs/manuals/spaces/howtos/self-hosted/_category_.json b/spaces-docs/howtos/self-hosted/_category_.json similarity index 100% rename from docs/manuals/spaces/howtos/self-hosted/_category_.json rename to spaces-docs/howtos/self-hosted/_category_.json diff --git a/docs/manuals/spaces/howtos/self-hosted/administer-features.md b/spaces-docs/howtos/self-hosted/administer-features.md similarity index 90% rename from docs/manuals/spaces/howtos/self-hosted/administer-features.md rename to spaces-docs/howtos/self-hosted/administer-features.md index bcabb5862..ce878014e 100644 --- a/docs/manuals/spaces/howtos/self-hosted/administer-features.md +++ b/spaces-docs/howtos/self-hosted/administer-features.md @@ -1,9 +1,15 @@ --- -title: Administer features +title: Administer features sidebar_position: 12 description: Enable and disable features in Spaces --- +:::info API Version Information +This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Feature flags and their availability may vary by version. + +For detailed feature availability across versions, see the version support policy. +::: + This guide shows how to enable or disable features in your self-hosted Space. ## Shared secrets @@ -112,4 +118,4 @@ Query API is required for self-hosted deployments with connected Spaces. See the related [documentation][documentation] to enable this feature. -[documentation]: /manuals/spaces/howtos/query-api/ +[documentation]: /spaces/howtos/query-api/ diff --git a/docs/manuals/spaces/howtos/self-hosted/attach-detach.md b/spaces-docs/howtos/self-hosted/attach-detach.md similarity index 93% rename from docs/manuals/spaces/howtos/self-hosted/attach-detach.md rename to spaces-docs/howtos/self-hosted/attach-detach.md index 016f7b02a..1465921cf 100644 --- a/docs/manuals/spaces/howtos/self-hosted/attach-detach.md +++ b/spaces-docs/howtos/self-hosted/attach-detach.md @@ -3,10 +3,16 @@ title: Connect or disconnect a Space sidebar_position: 12 description: Enable and connect self-hosted Spaces to the Upbound console --- +:::info API Version Information +This guide applies to **Spaces v1.8.0 and later** (Self-Hosted only).
Connection to Upbound console requires Query API and RBAC to be enabled. + +For version-specific features and requirements, see the version support policy. For Query API setup details, see [Deploy Query API infrastructure](./query-api.md). +::: + :::important This feature is in preview. Starting in Spaces `v1.8.0` and later, you must deploy and [enable the Query API][enable-the-query-api] and [enable Upbound -RBAC][enable-upbound-rbac] to connect a Space to Upbound. +RBAC][enable-upbound-rbac] to connect a Space to Upbound. ::: [Upbound][upbound] allows you to connect self-hosted Spaces and enables a streamlined operations and debugging experience in your Console. @@ -182,11 +188,11 @@ resource in your Space isn't visible at any point. Only users with editor or administrative permissions can make changes using the Console like creating or deleting control planes or groups. -[enable-the-query-api]: /manuals/spaces/howtos/self-hosted/query-api +[enable-the-query-api]: /spaces/howtos/self-hosted/query-api [enable-upbound-rbac]: /manuals/platform/concepts/authorization/upbound-rbac [upbound]: /manuals/console/upbound-console [organization]: /manuals/platform/concepts/identity-management/organizations -[query-api]: /manuals/spaces/howtos/self-hosted/query-api +[query-api]: /spaces/howtos/self-hosted/query-api [jq-install]: https://jqlang.org/download/ [upbound-console]: https://console.upbound.io diff --git a/docs/manuals/spaces/howtos/self-hosted/billing.md b/spaces-docs/howtos/self-hosted/billing.md similarity index 93% rename from docs/manuals/spaces/howtos/self-hosted/billing.md rename to spaces-docs/howtos/self-hosted/billing.md index 54ca327a2..145ff9f03 100644 --- a/docs/manuals/spaces/howtos/self-hosted/billing.md +++ b/spaces-docs/howtos/self-hosted/billing.md @@ -4,12 +4,17 @@ sidebar_position: 50 description: A guide for how billing works in an Upbound Space --- +:::info API Version Information +This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Billing models may evolve between versions; see Capacity Licensing for alternative models. + +For version-specific features and capacity-based licensing details, see the version support policy. For reference specifications, see [Capacity Licensing](/spaces/howtos/self-hosted/capacity-licensing). +::: Spaces are a self-hosting feature of Upbound's [flagship-product][flagship-product] for platform teams to deploy control planes in their self-managed environments. You can install Spaces into any Kubernetes cluster in your own cloud account, on-premises data center, or on the edge. The pricing usage-based and requires an Upbound account and subscription. The billing unit is a `Loop`. :::info -This guide describes the traditional usage-based billing model using object storage. For disconnected or air-gapped environments, consider [Capacity Licensing](/manuals/spaces/howtos/self-hosted/capacity-licensing), which provides a simpler fixed-capacity model with local usage tracking. +This guide describes the traditional usage-based billing model using object storage. For disconnected or air-gapped environments, consider [Capacity Licensing](/spaces/howtos/self-hosted/capacity-licensing), which provides a simpler fixed-capacity model with local usage tracking.
::: ## Billing details diff --git a/docs/manuals/spaces/howtos/self-hosted/capacity-licensing.md b/spaces-docs/howtos/self-hosted/capacity-licensing.md similarity index 99% rename from docs/manuals/spaces/howtos/self-hosted/capacity-licensing.md rename to spaces-docs/howtos/self-hosted/capacity-licensing.md index d13d748a0..a1dc6c101 100644 --- a/docs/manuals/spaces/howtos/self-hosted/capacity-licensing.md +++ b/spaces-docs/howtos/self-hosted/capacity-licensing.md @@ -580,12 +580,12 @@ If your license shows as invalid: - Contact [Upbound Sales][sales] to discuss capacity licensing options -[space-billing]: /manuals/spaces/howtos/self-hosted/billing +[space-billing]: /spaces/howtos/self-hosted/billing [CloudNativePG]: https://cloudnative-pg.io/ [backups]: https://cloudnative-pg.io/documentation/current/backup_recovery/ -[backup-restore]: /manuals/spaces/howtos/backup-and-restore +[backup-restore]: /spaces/howtos/backup-and-restore [sales]: https://www.upbound.io/contact [eso]: https://external-secrets.io/ -[Observability]: /manuals/spaces/howtos/observability +[Observability]: /spaces/howtos/observability diff --git a/docs/manuals/spaces/howtos/self-hosted/certs.md b/spaces-docs/howtos/self-hosted/certs.md similarity index 100% rename from docs/manuals/spaces/howtos/self-hosted/certs.md rename to spaces-docs/howtos/self-hosted/certs.md diff --git a/docs/manuals/spaces/howtos/self-hosted/configure-ha.md b/spaces-docs/howtos/self-hosted/configure-ha.md similarity index 97% rename from docs/manuals/spaces/howtos/self-hosted/configure-ha.md rename to spaces-docs/howtos/self-hosted/configure-ha.md index be31aceac..ddf36c55e 100644 --- a/docs/manuals/spaces/howtos/self-hosted/configure-ha.md +++ b/spaces-docs/howtos/self-hosted/configure-ha.md @@ -12,6 +12,12 @@ production operation at scale. Use this guide when you're ready to deploy production scaling, high availability, and monitoring in your Space. +:::info API Version Information +This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. + +For API specifications on ControlPlane resources and configurations, see the [Spaces API Reference](../../../../../reference/apis/spaces-api/). For version compatibility details, see the version support policy.
+::: + ## Prerequisites Before you begin scaling your Spaces deployment, make sure you have: @@ -437,7 +443,7 @@ Configure alerts for critical scaling and health metrics: [rds]: https://aws.amazon.com/rds/postgresql/ [gke-sql]: https://cloud.google.com/kubernetes-engine/docs/tutorials/stateful-workloads/postgresql [aks-sql]: https://learn.microsoft.com/en-us/azure/aks/deploy-postgresql-ha?tabs=azuredisk -[deployment]: https://docs.upbound.io/manuals/spaces/howtos/self-hosted/deployment-reqs/ +[deployment]: https://docs.upbound.io/spaces/howtos/self-hosted/deployment-reqs/ [karpenter]: https://docs.aws.amazon.com/eks/latest/best-practices/karpenter.html [gke-autoscaling]: https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-autoscaler [aks-autoscaling]: https://learn.microsoft.com/en-us/azure/aks/cluster-autoscaler-overview diff --git a/docs/manuals/spaces/howtos/self-hosted/controllers.md b/spaces-docs/howtos/self-hosted/controllers.md similarity index 100% rename from docs/manuals/spaces/howtos/self-hosted/controllers.md rename to spaces-docs/howtos/self-hosted/controllers.md diff --git a/docs/manuals/spaces/howtos/self-hosted/ctp-audit-logs.md b/spaces-docs/howtos/self-hosted/ctp-audit-logs.md similarity index 96% rename from docs/manuals/spaces/howtos/self-hosted/ctp-audit-logs.md rename to spaces-docs/howtos/self-hosted/ctp-audit-logs.md index 4d83fae85..52f52c776 100644 --- a/docs/manuals/spaces/howtos/self-hosted/ctp-audit-logs.md +++ b/spaces-docs/howtos/self-hosted/ctp-audit-logs.md @@ -7,10 +7,16 @@ in Self-Hosted Upbound Spaces. Starting in Spaces `v1.14.0`, each control plane contains an API server that supports audit log collection. You can use audit logging to track creation, -updates, and deletions of Crossplane resources. Control plane audit logs +updates, and deletions of Crossplane resources. Control plane audit logs use observability features to collect audit logs with `SharedTelemetryConfig` and send logs to an OpenTelemetry (`OTEL`) collector. +:::info API Version Information +This guide applies to **Spaces v1.14.0 and later** (Self-Hosted only). Audit logging is not available in earlier versions. + +For API specifications on observability resources, see the [Spaces API Reference - v1.14](../../../../../1.14/reference/apis/spaces-api/). For details on observability evolution across versions, see the version support policy. +::: + ## Prerequisites Before you begin, make sure you have: @@ -80,7 +86,7 @@ next section. For this guide, you'll use Grafana's `docker-otel-lgtm` bundle to validate audit log -generation. For production environments, configure a dedicated observability +generation. For production environments, configure a dedicated observability backend like Datadog, Splunk, or an enterprise-grade Grafana stack. @@ -264,7 +270,7 @@ This configuration: :::note You can configure the `SharedTelemetryConfig` to select control planes in -several ways. For more information on control plane selection, see the [control +several ways. For more information on control plane selection, see the [control plane selection][ctp-selection] documentation.
@@ -539,5 +545,5 @@ Delete the `SharedTelemetryConfig` to stop audit logging for all control planes kubectl delete sharedtelemetryconfig --namespace ``` -[ctp-selection]: /manuals/spaces/howtos/observability/#control-plane-selection +[ctp-selection]: /spaces/howtos/observability/#control-plane-selection [Auditing]: https://kubernetes.io/docs/tasks/debug/debug-cluster/audit/ diff --git a/docs/manuals/spaces/howtos/self-hosted/declarative-ctps.md b/spaces-docs/howtos/self-hosted/declarative-ctps.md similarity index 91% rename from docs/manuals/spaces/howtos/self-hosted/declarative-ctps.md rename to spaces-docs/howtos/self-hosted/declarative-ctps.md index 0038bf297..2c3e5331b 100644 --- a/docs/manuals/spaces/howtos/self-hosted/declarative-ctps.md +++ b/spaces-docs/howtos/self-hosted/declarative-ctps.md @@ -7,6 +7,12 @@ description: A tutorial to configure a Space with Argo to declaratively create a In this tutorial, you learn how to configure [Argo CD][argo-cd] to communicate with a self-hosted Space. This flow allows you to declaratively create and manage control planes from Git. Argo CD is a continuous delivery tool for Kubernetes that you can use to drive GitOps flows for your control plane infrastructure. +:::info API Version Information +This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. + +For API specifications on ControlPlane resources and their declarative creation, see the [Spaces API Reference](../../../../../reference/apis/spaces-api/). For version compatibility details, see the . +::: + ## Prerequisites To complete this tutorial, you need the following: diff --git a/docs/manuals/spaces/howtos/self-hosted/deployment-reqs.md b/spaces-docs/howtos/self-hosted/deployment-reqs.md similarity index 100% rename from docs/manuals/spaces/howtos/self-hosted/deployment-reqs.md rename to spaces-docs/howtos/self-hosted/deployment-reqs.md diff --git a/docs/manuals/spaces/howtos/self-hosted/dr.md b/spaces-docs/howtos/self-hosted/dr.md similarity index 96% rename from docs/manuals/spaces/howtos/self-hosted/dr.md rename to spaces-docs/howtos/self-hosted/dr.md index 6e9899d26..67ecbfecf 100644 --- a/docs/manuals/spaces/howtos/self-hosted/dr.md +++ b/spaces-docs/howtos/self-hosted/dr.md @@ -4,6 +4,15 @@ sidebar_position: 13 description: Configure Space-wide backups for disaster recovery. --- +:::info API Version Information +This guide applies to **Spaces v1.9.0 and later** (Self-Hosted only). Space Backups is enabled by default starting in v1.14.0. + +- **v1.9.0-v1.13.x**: Alpha feature (requires manual enablement) +- **v1.14.0+**: GA (enabled by default) + +For version-specific features and backup resources, see the . For control-plane backups, see [Backup and Restore](../backup-and-restore.md). +::: + :::important For Connected and Disconnected Spaces, this feature requires Spaces `v1.9.0` and, starting with `v1.14.0`, Spaces enables it by default.
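To make the Space backup resources above concrete, a scheduled Space-wide backup might look like the sketch below. The `SpaceBackupSchedule` kind is taken from the API links in this guide, but the spec fields shown (a cron `schedule` and a `ttl`) are assumptions for illustration only — consult the v1.9 Spaces API reference for the real schema.

```shell
# Sketch only: a daily Space-wide backup schedule. The spec fields are
# assumptions -- check the SpaceBackupSchedule schema in the Spaces API
# reference for the exact shape and scope.
kubectl apply -f - <<EOF
apiVersion: spaces.upbound.io/v1alpha1
kind: SpaceBackupSchedule
metadata:
  name: daily
spec:
  schedule: "0 4 * * *"   # every day at 04:00 UTC
  ttl: 720h               # assumed retention field: keep backups 30 days
EOF
```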
@@ -393,7 +402,7 @@ kubectl exec -ti -n upbound-system deployments/spaces-controller -c spaces ``` -[shared-backups]: /manuals/spaces/howtos/self-hosted/workload-id/backup-restore-config/ +[shared-backups]: /spaces/howtos/self-hosted/workload-id/backup-restore-config/ [spacebackupconfig]: /reference/apis/spaces-api/v1_9 [thanos-object-storage]: https://thanos.io/tip/thanos/storage.md/ [spacebackupschedule]: /reference/apis/spaces-api/v1_9 diff --git a/docs/manuals/spaces/howtos/self-hosted/gitops.md b/spaces-docs/howtos/self-hosted/gitops-with-argocd.md similarity index 88% rename from docs/manuals/spaces/howtos/self-hosted/gitops.md rename to spaces-docs/howtos/self-hosted/gitops-with-argocd.md index 6e3c824ff..004247a10 100644 --- a/docs/manuals/spaces/howtos/self-hosted/gitops.md +++ b/spaces-docs/howtos/self-hosted/gitops-with-argocd.md @@ -1,16 +1,15 @@ --- -title: GitOps with control planes +title: GitOps with ArgoCD in Self-Hosted Spaces sidebar_position: 80 -description: An introduction to doing GitOps with control planes on Upbound +description: Set up GitOps workflows with Argo CD in self-hosted Spaces plan: "business" --- - +:::info Deployment Model +This guide applies to **self-hosted Spaces** deployments. For Upbound Cloud Spaces, see [GitOps with Upbound Control Planes](/spaces/howtos/cloud-spaces/gitops-on-upbound/). +::: -GitOps is an approach for managing a system by declaratively describing desired -resources' configurations in Git and using controllers to realize the desired -state. Upbound's control planes are compatible with this pattern and it's -strongly recommended you integrate GitOps in the platforms you build on Upbound. +GitOps is an approach for managing a system by declaratively describing desired resources' configurations in Git and using controllers to realize the desired state. Upbound's control planes are compatible with this pattern and it's strongly recommended you integrate GitOps in the platforms you build on Upbound. 
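Once Argo CD can reach a control plane (the integration steps follow below), registering workloads is a standard `Application` manifest. In the sketch below, the repository URL, path, and the `ctp1` destination cluster name are hypothetical placeholders for a control plane you've registered with Argo CD:

```shell
# Sketch: sync a Git path onto a control plane registered with Argo CD.
# The repo URL, path, and destination cluster name are hypothetical.
kubectl apply -f - <<EOF
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: ctp1-claims
  namespace: argocd
spec:
  project: default
  source:
    repoURL: https://github.com/acme/platform-config
    targetRevision: main
    path: control-planes/ctp1
  destination:
    name: ctp1          # the cluster created from the control plane's kubeconfig
    namespace: default
  syncPolicy:
    automated:
      prune: true
      selfHeal: true
EOF
```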
## Integrate with Argo CD @@ -116,12 +115,12 @@ EOF ``` [generate-a-kubeconfig]: /manuals/cli/concepts/contexts -[control-plane-groups]: /manuals/spaces/concepts/groups -[control-planes]: /manuals/spaces/concepts/control-planes +[control-plane-groups]: /spaces/concepts/groups +[control-planes]: /spaces/concepts/control-planes [upbound-iam-resources]: /manuals/platform/identity-management [space-apis]: /reference/apis/spaces-api/v1_9 [space-apis-1]: /reference/apis/spaces-api/v1_9 -[control-plane-groups-2]: /manuals/spaces/concepts/groups +[control-plane-groups-2]: /spaces/concepts/groups [argo-cd]: https://argo-cd.readthedocs.io/en/stable/ diff --git a/docs/manuals/spaces/howtos/self-hosted/managed-spaces-deployment.md b/spaces-docs/howtos/self-hosted/managed-spaces-deployment.md similarity index 100% rename from docs/manuals/spaces/howtos/self-hosted/managed-spaces-deployment.md rename to spaces-docs/howtos/self-hosted/managed-spaces-deployment.md diff --git a/docs/manuals/spaces/howtos/self-hosted/oidc-configuration.md b/spaces-docs/howtos/self-hosted/oidc-configuration.md similarity index 96% rename from docs/manuals/spaces/howtos/self-hosted/oidc-configuration.md rename to spaces-docs/howtos/self-hosted/oidc-configuration.md index 32c780e7e..cbef4dc42 100644 --- a/docs/manuals/spaces/howtos/self-hosted/oidc-configuration.md +++ b/spaces-docs/howtos/self-hosted/oidc-configuration.md @@ -4,7 +4,7 @@ sidebar_position: 20 description: Configure OIDC in your Space --- :::important -This guide is only applicable for administrators who've deployed self-hosted Spaces. For general RBAC in Upbound, read [Upbound RBAC][upbound-rbac]. +This guide is only applicable for administrators who've deployed self-hosted Spaces. For general RBAC in Upbound, read [Upbound RBAC][upbound-rbac]. ::: Upbound uses the Kubernetes [Structured Authentication Configuration][structured-auth-config] to validate OIDC tokens sent to the API. Upbound stores this @@ -15,6 +15,12 @@ This guide walks you through how to create and apply an authentication configuration to validate Upbound with an external identity provider. Each section focuses on a specific part of the configuration file. +:::info API Version Information +This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. + +For details on authentication and access control across versions, see the . For related platform authentication features, see the [Platform manual](../../../../platform/). +::: + ## Creating the `AuthenticationConfiguration` file First, create a file called `config.yaml` with an `AuthenticationConfiguration` diff --git a/docs/manuals/spaces/howtos/self-hosted/proxies-config.md b/spaces-docs/howtos/self-hosted/proxies-config.md similarity index 75% rename from docs/manuals/spaces/howtos/self-hosted/proxies-config.md rename to spaces-docs/howtos/self-hosted/proxies-config.md index 81bfe335b..3802e4cb0 100644 --- a/docs/manuals/spaces/howtos/self-hosted/proxies-config.md +++ b/spaces-docs/howtos/self-hosted/proxies-config.md @@ -4,6 +4,12 @@ sidebar_position: 20 description: Configure Upbound within a proxied environment --- +:::info API Version Information +This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments in proxied environments. Proxy configuration procedures are consistent across versions. + +For version-specific deployment considerations, see the . +::: + When you install Upbound with Helm in a proxied environment, please update the specified registry with your internal registry.
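As a sketch of what that looks like in practice, assume you've mirrored the Spaces images to an internal host. The mirror hostname and the `registry` value name below are illustrative — confirm the actual chart values with `helm show values` for the chart version you install:

```shell
# Sketch: install Spaces from an internal mirror in a proxied environment.
# The mirror hostname and the "registry" value name are illustrative.
helm -n upbound-system upgrade --install spaces \
  oci://registry.internal.example.com/spaces-artifacts/spaces \
  --version "${SPACES_VERSION}" \
  --set "registry=registry.internal.example.com"
```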
diff --git a/docs/manuals/spaces/howtos/self-hosted/query-api.md b/spaces-docs/howtos/self-hosted/query-api.md similarity index 97% rename from docs/manuals/spaces/howtos/self-hosted/query-api.md rename to spaces-docs/howtos/self-hosted/query-api.md index 96a029692..c112e9001 100644 --- a/docs/manuals/spaces/howtos/self-hosted/query-api.md +++ b/spaces-docs/howtos/self-hosted/query-api.md @@ -11,6 +11,15 @@ aliases: +:::info API Version Information +This guide applies to all supported versions (v1.9-v1.14+). Query API has evolved across versions: + +- **Cloud Spaces**: Available since v1.6 (enabled by default) +- **Self-Hosted**: Available since v1.8 (requires manual enablement) + +For details on Query API availability across versions, see the . +::: + :::important This feature is in preview. The Query API is available in the Cloud Space offering in `v1.6` and enabled by default. @@ -348,12 +357,12 @@ helm ... \ --set "apollo.apollo.storage.postgres.connection.ca.name=spaces-apollo-pg-ca" \ --set "apollo.apollo.storage.postgres.connection.syncer.url=$PG_POOLED_URL" \ - # For the syncers + # For the syncers --set "apollo.apollo.storage.postgres.connection.syncer.credentials.format=basicauth" \ --set "apollo.apollo.storage.postgres.connection.syncer.credentials.user=$APOLLO_SYNCER_USER" \ --set "apollo.apollo.storage.postgres.connection.syncer.credentials.secret.name=spaces-apollo-pg-syncer" \ - # For the server + # For the server --set "apollo.apollo.storage.postgres.connection.apollo.credentials.format=basicauth" \ --set "apollo.apollo.storage.postgres.connection.apollo.credentials.user=$APOLLO_SERVER_USER" \ --set "apollo.apollo.storage.postgres.connection.apollo.credentials.secret.name=spaces-apollo-pg-apollo" \ @@ -371,7 +380,7 @@ See the [Query API documentation][query-api-documentation] for more information [postgresql-setup]: #self-hosted-postgresql-configuration [up-cli-installed]: /manuals/cli/overview -[query-api-documentation]: /manuals/spaces/howtos/query-api +[query-api-documentation]: /spaces/howtos/query-api [helm-chart-reference]: /reference/helm-reference [kubectl-conventions]: https://kubernetes.io/docs/reference/kubectl/generated/kubectl_get/ diff --git a/docs/manuals/spaces/howtos/self-hosted/scaling-resources.md b/spaces-docs/howtos/self-hosted/scaling-resources.md similarity index 92% rename from docs/manuals/spaces/howtos/self-hosted/scaling-resources.md rename to spaces-docs/howtos/self-hosted/scaling-resources.md index ee429fcaf..7bb04d2c2 100644 --- a/docs/manuals/spaces/howtos/self-hosted/scaling-resources.md +++ b/spaces-docs/howtos/self-hosted/scaling-resources.md @@ -11,6 +11,12 @@ In large workloads or control plane migrations, you may encounter performance-impacting resource constraints. This guide explains how to scale vCluster and `etcd` resources for optimal performance in your self-hosted Space. +:::info API Version Information +This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Scaling procedures are consistent across versions. + +For version-specific resource requirements and capacity planning, see the . +::: + ## Signs of resource constraints You may need to scale your vCluster or `etcd` resources if you observe: @@ -77,8 +83,8 @@ controlPlanes: For AWS: - Use GP3 volumes with adequate IOPS -- For AWS GP3 volumes, IOPS scale with volume size (3000 IOPS baseline) -- For optimal performance, provision at least 32Gi to support up to 16,000 IOPS +- For AWS GP3 volumes, IOPS scale with volume size (3000 IOPS baseline) +- For
optimal performance, provision at least 32Gi to support up to 16,000 IOPS For GCP and Azure: - Use SSD-based persistent disk types for optimal performance @@ -156,7 +162,7 @@ controlPlanes: cpu: "500m" memory: "512Mi" ha: - enabled: true # For production environments + enabled: true # For production environments ``` Apply the configuration using Helm: diff --git a/docs/manuals/spaces/howtos/self-hosted/self-hosted-spaces-deployment.md b/spaces-docs/howtos/self-hosted/self-hosted-spaces-deployment.md similarity index 100% rename from docs/manuals/spaces/howtos/self-hosted/self-hosted-spaces-deployment.md rename to spaces-docs/howtos/self-hosted/self-hosted-spaces-deployment.md diff --git a/docs/manuals/spaces/howtos/self-hosted/space-observability.md b/spaces-docs/howtos/self-hosted/space-observability.md similarity index 94% rename from docs/manuals/spaces/howtos/self-hosted/space-observability.md rename to spaces-docs/howtos/self-hosted/space-observability.md index 56bba9989..52f223f5b 100644 --- a/docs/manuals/spaces/howtos/self-hosted/space-observability.md +++ b/spaces-docs/howtos/self-hosted/space-observability.md @@ -4,6 +4,15 @@ sidebar_position: 30 description: Configure Space-level observability --- +:::info API Version Information +This guide applies to **Spaces v1.6.0 and later** (Self-Hosted only). Space-level observability became GA in v1.14.0. + +- **v1.6.0-v1.13.x**: Available as alpha (flag: `features.alpha.observability.enabled=true`) +- **v1.14.0+**: GA (flag: `observability.enabled=true`) + +For details on observability evolution and related API resources, see the . For control-plane observability (distinct from space-level), see the [main observability guide](../observability.md). +::: + :::important This feature is GA since `v1.14.0`, requires Spaces `v1.6.0`, and is off by default. To enable, set `observability.enabled=true` (`features.alpha.observability.enabled=true` before `v1.14.0`) when installing Spaces: @@ -115,7 +124,7 @@ The sampling behavior depends on whether a parent trace context exists: - **With parent context**: If a `traceparent` header is present, the parent's sampling decision is respected, enabling proper distributed tracing across services. -- **Root spans**: For new traces without a parent, Envoy samples based on +- **Root spans**: For new traces without a parent, Envoy samples based on `x-request-id` hashing. The default sampling rate is 10%. #### TLS configuration for external collectors @@ -298,7 +307,7 @@ lifecycle including status codes and client-perceived latency. | `envoy_http_downstream_rq_time_count` | Count of downstream requests |
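If you need a scratch backend to point the exporter at while validating the pipeline, a bare-bones OpenTelemetry Collector configuration that accepts OTLP and prints whatever it receives works well. This is a generic collector sketch, not a Spaces-specific file:

```shell
# Generic OTEL Collector config for smoke tests: receive OTLP over
# gRPC/HTTP and print everything with the debug exporter.
cat > otel-collector.yaml <<EOF
receivers:
  otlp:
    protocols:
      grpc:
        endpoint: 0.0.0.0:4317
      http:
        endpoint: 0.0.0.0:4318
exporters:
  debug:
    verbosity: detailed
service:
  pipelines:
    metrics:
      receivers: [otlp]
      exporters: [debug]
    traces:
      receivers: [otlp]
      exporters: [debug]
EOF
```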
[router-ref]: #router-ref -[observability-documentation]: /manuals/spaces/howtos/observability +[observability-documentation]: /spaces/howtos/observability [opentelemetry-collector]: https://opentelemetry.io/docs/collector/ [opentelemetry-operator]: https://opentelemetry.io/docs/kubernetes/operator/ [helm-chart-reference]: /reference/helm-reference diff --git a/docs/manuals/spaces/howtos/self-hosted/spaces-management.md b/spaces-docs/howtos/self-hosted/spaces-management.md similarity index 91% rename from docs/manuals/spaces/howtos/self-hosted/spaces-management.md rename to spaces-docs/howtos/self-hosted/spaces-management.md index 18924a192..3df61c306 100644 --- a/docs/manuals/spaces/howtos/self-hosted/spaces-management.md +++ b/spaces-docs/howtos/self-hosted/spaces-management.md @@ -4,6 +4,12 @@ sidebar_position: 10 description: Common operations in Spaces --- +:::info API Version Information +This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Spaces management procedures are consistent across versions. + +For version compatibility details, see the . +::: + ## Spaces management ### Create a Space @@ -200,14 +206,14 @@ kubectl delete controlplane ctp1 [up-space-init]: /reference/cli-reference [quickstart]: / -[aws]: /manuals/spaces/howtos/self-hosted/self-hosted-spaces-deployment -[azure]:/manuals/spaces/howtos/self-hosted/self-hosted-spaces-deployment -[gcp]:/manuals/spaces/howtos/self-hosted/self-hosted-spaces-deployment +[aws]: /spaces/howtos/self-hosted/self-hosted-spaces-deployment +[azure]:/spaces/howtos/self-hosted/self-hosted-spaces-deployment +[gcp]:/spaces/howtos/self-hosted/self-hosted-spaces-deployment [up-space-upgrade]: /reference/cli-reference [spaces-release-notes]: /reference/release-notes/spaces [up-space-upgrade-1]: /reference/cli-reference [release-notes]: /reference/release-notes/spaces [up-space-destroy]: /reference/cli-reference [up-cli]: /reference/cli-reference -[upbound-s-saas-environment]: /manuals/spaces/howtos/self-hosted/spaces-management -[spaces-git-integration]: /manuals/spaces/howtos/self-hosted/gitops +[upbound-s-saas-environment]: /spaces/howtos/self-hosted/spaces-management +[spaces-git-integration]: /spaces/howtos/self-hosted/gitops diff --git a/docs/manuals/spaces/howtos/self-hosted/troubleshooting.md b/spaces-docs/howtos/self-hosted/troubleshooting.md similarity index 99% rename from docs/manuals/spaces/howtos/self-hosted/troubleshooting.md rename to spaces-docs/howtos/self-hosted/troubleshooting.md index 4ddcce5a9..8d1ca6517 100644 --- a/docs/manuals/spaces/howtos/self-hosted/troubleshooting.md +++ b/spaces-docs/howtos/self-hosted/troubleshooting.md @@ -128,5 +128,5 @@ If you try to install a Space on an existing cluster that previously had Crosspl -[observability]: /manuals/spaces/howtos/observability +[observability]: /spaces/howtos/observability [remove-old-crossplane-crds]: https://docs.crossplane.io/latest/guides/uninstall-crossplane/ diff --git a/docs/manuals/spaces/howtos/self-hosted/use-argo.md b/spaces-docs/howtos/self-hosted/use-argo.md similarity index 95% rename from docs/manuals/spaces/howtos/self-hosted/use-argo.md rename to spaces-docs/howtos/self-hosted/use-argo.md index b4b9b3075..0862feb13 100644 --- a/docs/manuals/spaces/howtos/self-hosted/use-argo.md +++ b/spaces-docs/howtos/self-hosted/use-argo.md @@ -10,6 +10,12 @@ aliases: --- +:::info API Version Information +This guide covers the ArgoCD Plugin, a preview feature
available in all supported versions (v1.9-v1.14+) for self-hosted deployments. + +For general GitOps guidance, see [Automation and GitOps Overview](/spaces/howtos/automation-and-gitops/overview/). +::: + :::important This feature is in preview and is off by default. To enable, set `features.alpha.argocdPlugin.enabled=true` when installing Spaces: @@ -147,7 +153,7 @@ Be sure to [configure Argo][configure-argo-1] after it's installed. ## Configure Argo -Argo's default configuration causes it to try to query for resource kinds that don't exist in control planes. You should configure Argo's [general configmap][general-configmap] to include the resource group/kinds which make sense in the context of control planes. For example, the concept of `nodes` isn't exposed in control planes. +Argo's default configuration causes it to try to query for resource kinds that don't exist in control planes. You should configure Argo's [general configmap][general-configmap] to include the resource group/kinds which make sense in the context of control planes. For example, the concept of `nodes` isn't exposed in control planes. To configure Argo CD, connect to the cluster where you've installed it and edit the configmap: @@ -216,7 +222,7 @@ Control plane labels automatically propagate to the connection secret, which all This configuration enables Argo CD to automatically discover and manage resources on your control planes. -[gitops-with-control-planes]: /manuals/spaces/howtos/cloud-spaces/gitops +[gitops-with-control-planes]: /spaces/howtos/cloud-spaces/gitops [configure-argo]: #configure-argo [configure-argo-1]: #configure-argo [general-configmap]: https://argo-cd.readthedocs.io/en/stable/operator-manual/argocd-cm-yaml/ diff --git a/docs/manuals/spaces/howtos/self-hosted/workload-id/_category_.json b/spaces-docs/howtos/self-hosted/workload-id/_category_.json similarity index 100% rename from docs/manuals/spaces/howtos/self-hosted/workload-id/_category_.json rename to spaces-docs/howtos/self-hosted/workload-id/_category_.json diff --git a/docs/manuals/spaces/howtos/self-hosted/workload-id/backup-restore-config.md b/spaces-docs/howtos/self-hosted/workload-id/backup-restore-config.md similarity index 98% rename from docs/manuals/spaces/howtos/self-hosted/workload-id/backup-restore-config.md rename to spaces-docs/howtos/self-hosted/workload-id/backup-restore-config.md index de70dd62b..935ca69ec 100644 --- a/docs/manuals/spaces/howtos/self-hosted/workload-id/backup-restore-config.md +++ b/spaces-docs/howtos/self-hosted/workload-id/backup-restore-config.md @@ -379,6 +379,6 @@ Other workload identity guides are: * [Billing][billing] * [Shared Secrets][secrets] -[backup-restore-guide]: /manuals/spaces/howtos/backup-and-restore -[billing]: /manuals/spaces/howtos/self-hosted/workload-id/billing-config -[secrets]: /manuals/spaces/howtos/self-hosted/workload-id/eso-config +[backup-restore-guide]: /spaces/howtos/backup-and-restore +[billing]: /spaces/howtos/self-hosted/workload-id/billing-config +[secrets]: /spaces/howtos/self-hosted/workload-id/eso-config diff --git a/docs/manuals/spaces/howtos/self-hosted/workload-id/billing-config.md b/spaces-docs/howtos/self-hosted/workload-id/billing-config.md similarity index 98% rename from docs/manuals/spaces/howtos/self-hosted/workload-id/billing-config.md rename to spaces-docs/howtos/self-hosted/workload-id/billing-config.md index 12720908e..323a6122f 100644 --- a/docs/manuals/spaces/howtos/self-hosted/workload-id/billing-config.md +++
b/spaces-docs/howtos/self-hosted/workload-id/billing-config.md @@ -449,6 +449,6 @@ Other workload identity guides are: * [Backup and restore][backuprestore] * [Shared Secrets][secrets] -[billing-guide]: /manuals/spaces/howtos/self-hosted/billing -[backuprestore]: /manuals/spaces/howtos/self-hosted/workload-id/backup-restore-config -[secrets]: /manuals/spaces/howtos/self-hosted/workload-id/eso-config +[billing-guide]: /spaces/howtos/self-hosted/billing +[backuprestore]: /spaces/howtos/self-hosted/workload-id/backup-restore-config +[secrets]: /spaces/howtos/self-hosted/workload-id/eso-config diff --git a/docs/manuals/spaces/howtos/self-hosted/workload-id/eso-config.md b/spaces-docs/howtos/self-hosted/workload-id/eso-config.md similarity index 98% rename from docs/manuals/spaces/howtos/self-hosted/workload-id/eso-config.md rename to spaces-docs/howtos/self-hosted/workload-id/eso-config.md index f74ffbb53..c1418c171 100644 --- a/docs/manuals/spaces/howtos/self-hosted/workload-id/eso-config.md +++ b/spaces-docs/howtos/self-hosted/workload-id/eso-config.md @@ -498,6 +498,6 @@ Other workload identity guides are: * [Backup and restore][backuprestore] * [Billing][billing] -[eso-guide]: /manuals/spaces/howtos/secrets-management -[backuprestore]: /manuals/spaces/howtos/self-hosted/workload-id/backup-restore-config -[billing]: /manuals/spaces/howtos/self-hosted/workload-id/billing-config +[eso-guide]: /spaces/howtos/secrets-management +[backuprestore]: /spaces/howtos/self-hosted/workload-id/backup-restore-config +[billing]: /spaces/howtos/self-hosted/workload-id/billing-config diff --git a/docs/manuals/spaces/howtos/simulations.md b/spaces-docs/howtos/simulations.md similarity index 88% rename from docs/manuals/spaces/howtos/simulations.md rename to spaces-docs/howtos/simulations.md index 38a19d7d4..26cb0e657 100644 --- a/docs/manuals/spaces/howtos/simulations.md +++ b/spaces-docs/howtos/simulations.md @@ -4,8 +4,14 @@ sidebar_position: 100 description: Use the Up CLI to mock operations before deploying to your environments. --- +:::info API Version Information +This guide covers Simulations, available in v1.10+ (GA since v1.13). For version-specific availability and features, see the . + +For API specifications on the Simulations CRD, see the [Spaces API Reference](../../../reference/apis/spaces-api/). +::: + :::important -The Simulations feature is in private preview. For more information, [reach out to Upbound][reach-out-to-upbound]. +The Simulations feature is in private preview. For more information, [reach out to Upbound][reach-out-to-upbound]. ::: Control plane simulations allow you to preview changes to your resources before @@ -35,7 +41,7 @@ packages. ## Requirements Simulations are available to select customers on Upbound Cloud with Team -Tier or higher. For more information, [reach out to Upbound][reach-out-to-upbound-1]. +Tier or higher. For more information, [reach out to Upbound][reach-out-to-upbound-1]. ## How to simulate your control planes diff --git a/spaces-docs/overview/_category_.json b/spaces-docs/overview/_category_.json new file mode 100644 index 000000000..54bb16430 --- /dev/null +++ b/spaces-docs/overview/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "Overview", + "position": 0 +} diff --git a/spaces-docs/overview/index.md b/spaces-docs/overview/index.md new file mode 100644 index 000000000..b199ea0b2 --- /dev/null +++ b/spaces-docs/overview/index.md @@ -0,0 +1,14 @@ +--- +title: Spaces Overview +sidebar_position: 0 +--- + +# Upbound Spaces + +Welcome to the Upbound Spaces documentation.
This section contains comprehensive documentation for Spaces API and Spaces operations across all supported versions (v1.9 through v1.15). + +## Get Started + +- **[Concepts](/spaces/concepts/control-planes/)** - Core concepts for Spaces +- **[How-To Guides](/spaces/howtos/auto-upgrade/)** - Step-by-step guides for operating Spaces +- **[API Reference](/spaces/reference/)** - API specifications and resources diff --git a/spaces-docs/reference/_category_.json b/spaces-docs/reference/_category_.json new file mode 100644 index 000000000..4a6a139c4 --- /dev/null +++ b/spaces-docs/reference/_category_.json @@ -0,0 +1,5 @@ +{ + "label": "Spaces API", + "position": 1, + "collapsed": true +} diff --git a/spaces-docs/reference/index.md b/spaces-docs/reference/index.md new file mode 100644 index 000000000..5e68b0768 --- /dev/null +++ b/spaces-docs/reference/index.md @@ -0,0 +1,72 @@ +--- +title: Spaces API Reference +description: Documentation for the Spaces API resources (v1.15 - Latest) +sidebar_position: 1 +--- +import CrdDocViewer from '@site/src/components/CrdViewer'; + + +This page documents the Custom Resource Definitions (CRDs) for the Spaces API. + + +## Control Planes +### Control Planes + + +## Observability +### Shared Telemetry Configs + + +## `pkg` +### Controller Revisions + + +### Controller Runtime Configs + + +### Controllers + + +### Remote Configuration Revisions + + +### Remote Configurations + + +## Policy +### Shared Upbound Policies + + +## References +### Referenced Objects + + +## Scheduling +### Environments + + +## Secrets +### Shared External Secrets + + +### Shared Secret Stores + + +## Simulations + + +## Spaces Backups
### Backups + + +### Backup Schedules + + +### Shared Backup Configs + + +### Shared Backups + + +### Shared Backup Schedules + diff --git a/spaces_versioned_docs/version-v1.10/concepts/_category_.json b/spaces_versioned_docs/version-v1.10/concepts/_category_.json new file mode 100644 index 000000000..4b8667e29 --- /dev/null +++ b/spaces_versioned_docs/version-v1.10/concepts/_category_.json @@ -0,0 +1,7 @@ +{ + "label": "Concepts", + "position": 2, + "collapsed": true +} + + diff --git a/spaces_versioned_docs/version-v1.10/concepts/control-planes.md b/spaces_versioned_docs/version-v1.10/concepts/control-planes.md new file mode 100644 index 000000000..7066343de --- /dev/null +++ b/spaces_versioned_docs/version-v1.10/concepts/control-planes.md @@ -0,0 +1,227 @@ +--- +title: Control Planes +weight: 1 +description: An overview of control planes in Upbound +--- + + +Control planes in Upbound are fully isolated Crossplane control plane instances that Upbound manages for you. This means Upbound handles: + +- the underlying lifecycle of infrastructure (compute, memory, and storage) required to power your instance. +- scaling of the infrastructure. +- the maintenance of the core Crossplane components that make up a control plane. + +This lets users focus on building their APIs and operating their control planes, while Upbound handles the rest. Each control plane has its own dedicated API server connecting users to their control plane. + +:::info API Version Information +This guide applies to all supported versions (v1.9-v1.14+). + +For detailed information about Control Plane API specifications and CRD fields, see the [Spaces API Reference](../../../reference/apis/spaces-api/). For version compatibility details, see the .
+::: + ## Control plane architecture ![Managed Control Plane Architecture](/img/mcp.png) Along with underlying infrastructure, Upbound manages the Crossplane system components. You don't need to manage the Crossplane API server or core resource controllers because Upbound manages your control plane lifecycle from creation to deletion. ### Crossplane API Each control plane offers a unified endpoint. You interact with your control plane through Kubernetes and Crossplane API calls. Each control plane runs a Kubernetes API server to handle API requests. You can make API calls in the following ways: - Direct calls: HTTP/gRPC - Indirect calls: the up CLI, Kubernetes clients such as kubectl, or the Upbound Console. Like in Kubernetes, the API server is the hub for all communication for the control plane. All internal components such as system processes and provider controllers act as clients of the API server. Your API requests tell Crossplane your desired state for the resources your control plane manages. Crossplane attempts to constantly maintain that state. Crossplane lets you configure objects in the API either imperatively or declaratively. ### Crossplane versions and features Upbound automatically upgrades Crossplane system components on control planes to new Crossplane versions for updated features and improvements in the open source project. With [automatic upgrades][automatic-upgrades], you choose the cadence at which Upbound automatically upgrades the system components in your control plane. You can also choose to manually upgrade your control plane to a different Crossplane version. For detailed information on versions and upgrades, refer to the [release notes][release-notes] and the automatic upgrade documentation. If you don't enroll a control plane in a release channel, Upbound doesn't apply automatic upgrades. Features considered "alpha" in Crossplane are by default not supported in a control plane unless otherwise specified. ### Hosting environments Every control plane in Upbound belongs to a [control plane group][control-plane-group]. Control plane groups are a logical grouping of one or more control planes with shared objects (such as secrets or backup configuration). Every group resides in a [Space][space] in Upbound; Spaces are hosting environments for control planes. Think of a Space as being conceptually the same as an AWS, Azure, or GCP region. Regardless of the Space type you run a control plane in, the core experience is identical. ## Management ### Create a control plane You can create a new control plane from the Upbound Console, [up CLI][up-cli], or with Kubernetes clients such as `kubectl`. To use the CLI, run the following: ```shell up ctp create ``` To learn more about control plane-related commands in `up`, go to the [CLI reference][cli-reference] documentation. You can create and manage control planes declaratively in Upbound. Before you begin, ensure you're logged into Upbound and set the correct context: ```bash up login # Example: acmeco/upbound-gcp-us-west-1/default up ctx ${yourOrganization}/${yourSpace}/${yourGroup} ``` ```yaml #controlplane-a.yaml apiVersion: spaces.upbound.io/v1beta1 kind: ControlPlane metadata: name: controlplane-a spec: crossplane: autoUpgrade: channel: Rapid ``` ```bash kubectl apply -f controlplane-a.yaml ``` ### Connect directly to your control plane Each control plane offers a unified endpoint.
You interact with your control plane through Kubernetes and Crossplane API calls. Each control plane runs a Kubernetes API server to handle API requests. You can connect to a control plane's API server directly via the up CLI. Use the [`up ctx`][up-ctx] command to set your kubeconfig's current context to a control plane: ```shell # Example: acmeco/upbound-gcp-us-west-1/default/ctp1 up ctx ${yourOrganization}/${yourSpace}/${yourGroup}/${yourControlPlane} ``` To disconnect from your control plane and revert your kubeconfig's current context to the previous entry, run the following: ```shell up ctx .. ``` You can also generate a `kubeconfig` file for a control plane with [`up ctx -f`][up-ctx-f]. ```shell up ctx ${yourOrganization}/${yourSpace}/${yourGroup}/${yourControlPlane} -f - > ctp-kubeconfig.yaml ``` :::tip To learn more about how to use `up ctx` to navigate different contexts in Upbound, read the [CLI documentation][cli-documentation]. ::: ## Configuration When you create a new control plane, Upbound provides you with a fully isolated instance of Crossplane. Configure your control plane by installing packages that extend its capabilities, like creating and managing the lifecycle of new types of infrastructure resources. You're encouraged to install any available Crossplane package type (Providers, Configurations, Functions) available in the [Upbound Marketplace][upbound-marketplace] on your control planes. ### Install packages Below are a couple of ways to install Crossplane packages on your control plane. Use the `up` CLI to install Crossplane packages from the [Upbound Marketplace][upbound-marketplace-1] on your control planes. Connect directly to your control plane via `up ctx`. Then, to install a provider: ```shell up ctp provider install xpkg.upbound.io/upbound/provider-family-aws ``` To install a Configuration: ```shell up ctp configuration install xpkg.upbound.io/upbound/platform-ref-aws ``` To install a Function: ```shell up ctp function install xpkg.upbound.io/crossplane-contrib/function-kcl ``` You can use kubectl to directly apply any Crossplane manifest. Below is an example for installing a Crossplane provider: ```yaml cat <<EOF | kubectl apply -f - apiVersion: pkg.crossplane.io/v1 kind: Provider metadata: name: provider-family-aws spec: package: xpkg.upbound.io/upbound/provider-family-aws:<version> # replace <version> with a published version EOF ``` For production-grade scenarios, it's recommended you configure your control plane declaratively via Git plus a Continuous Delivery (CD) Engine such as Argo. For guidance on this topic, read [GitOps with control planes][gitops-with-control-planes]. ### Configure Crossplane ProviderConfigs #### ProviderConfigs with OpenID Connect Use OpenID Connect (`OIDC`) to authenticate to Upbound control planes without credentials. OIDC lets your control plane exchange short-lived tokens directly with your cloud provider. Read how to [connect control planes to external services][connect-control-planes-to-external-services] to learn more. #### Generic ProviderConfigs The Upbound Console doesn't allow direct editing of ProviderConfigs that don't support `Upbound` authentication. To edit these ProviderConfigs on your control plane, connect to the control plane directly by following the instructions in the previous section and using `kubectl`. ### Configure secrets Upbound gives users the ability to configure the synchronization of secrets from external stores into control planes. Configure this capability at the group-level, explained in the [Spaces documentation][spaces-documentation].
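As an illustration of the shape this takes, the sketch below defines a group-level store backed by Vault. The `SharedSecretStore` kind matches the resource listed in the Spaces API reference; the `apiVersion` and provider fields are assumptions modeled on External Secrets Operator conventions, so check the secrets-management guide for the exact schema.

```shell
# Sketch only: a group-scoped secret store for the control planes in this
# group. apiVersion and provider fields are assumptions -- verify against
# the SharedSecretStore schema before use.
kubectl apply -f - <<EOF
apiVersion: spaces.upbound.io/v1alpha1
kind: SharedSecretStore
metadata:
  name: vault
  namespace: default            # the control plane group
spec:
  provider:
    vault:
      server: https://vault.example.com
      path: secret
      version: v2
EOF
```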
+ ### Configure backups Upbound gives users the ability to configure backup schedules, take impromptu backups, and conduct self-service restore operations. Configure this capability at the group-level, explained in the [Spaces documentation][spaces-documentation-1]. ### Configure telemetry Upbound gives users the ability to configure the collection of telemetry (logs, metrics, and traces) in their control planes. Using Upbound's built-in [OTEL][otel] support, you can stream this data out to your preferred observability solution. Configure this capability at the group-level, explained in the [Spaces documentation][spaces-documentation-2]. [automatic-upgrades]: /spaces/howtos/auto-upgrade [release-notes]: https://github.com/upbound/universal-crossplane/releases [control-plane-group]: /spaces/concepts/groups [space]: /spaces/overview [up-cli]: /reference/cli-reference [cli-reference]: /reference/cli-reference [up-ctx]: /reference/cli-reference [up-ctx-f]: /reference/cli-reference [cli-documentation]: /manuals/cli/concepts/contexts [upbound-marketplace]: https://marketplace.upbound.io [upbound-marketplace-1]: https://marketplace.upbound.io [gitops-with-control-planes]: /spaces/howtos/cloud-spaces/gitops [connect-control-planes-to-external-services]: /manuals/platform/howtos/oidc [spaces-documentation]: /spaces/howtos/secrets-management [spaces-documentation-1]: /spaces/howtos/backup-and-restore [otel]: https://opentelemetry.io [spaces-documentation-2]: /spaces/howtos/observability diff --git a/spaces_versioned_docs/version-v1.10/concepts/deployment-modes.md b/spaces_versioned_docs/version-v1.10/concepts/deployment-modes.md new file mode 100644 index 000000000..f5e718f88 --- /dev/null +++ b/spaces_versioned_docs/version-v1.10/concepts/deployment-modes.md @@ -0,0 +1,53 @@ +--- +title: Deployment Modes +sidebar_position: 10 +description: An overview of deployment modes for Spaces +--- + +Upbound Spaces can be deployed and used in a variety of modes: + +- **Cloud Spaces:** Multi-tenant Upbound-hosted, Upbound-managed Space environment. Cloud Spaces provide a typical SaaS experience. +- **[Dedicated Spaces][dedicated-spaces]:** Single-tenant Upbound-hosted, Upbound-managed Space environment. Dedicated Spaces provide a SaaS experience, with additional isolation guarantees that your workloads run in a fully isolated context. +- **[Managed Spaces][managed-spaces]:** Single-tenant customer-hosted, Upbound-managed Space environment. Managed Spaces provide a SaaS-like experience, with additional guarantees of all hosting infrastructure being served from your own cloud account. +- **[Self-Hosted Spaces][self-hosted-spaces]:** Single-tenant customer-hosted, customer-managed Space environment. This is a fully self-hosted, self-managed software experience for using Spaces. Upbound delivers the Spaces software and you run it yourself. + +The Upbound platform uses a federated model to connect each Space back to a +central service called the [Upbound Console][console], which is deployed and +managed by Upbound. + +By default, customers have access to a set of Cloud Spaces. + +## Supported clouds + +You can host Upbound Spaces on Amazon Web Services (AWS), Microsoft Azure, +and Google Cloud Platform (GCP). Regardless of the hosting platform, you can use +Spaces to deploy control planes that manage the lifecycle of your resources. + +## Supported regions + +These tables list the cloud service provider regions supported by Upbound.
+ +### GCP + | Region | Location | | --- | --- | | `us-west-1` | Western US (Oregon) | | `us-central-1` | Central US (Iowa) | | `eu-west-3` | Eastern Europe (Frankfurt) | ### AWS | Region | Location | | --- | --- | | `us-east-1` | Eastern US (Northern Virginia) | ### Azure | Region | Location | | --- | --- | | `us-east-1` | Eastern US (Iowa) | [dedicated-spaces]: /spaces/howtos/cloud-spaces/dedicated-spaces-deployment [managed-spaces]: /spaces/howtos/self-hosted/managed-spaces-deployment [self-hosted-spaces]: /spaces/howtos/self-hosted/self-hosted-spaces-deployment [console]: /manuals/console/upbound-console/ diff --git a/spaces_versioned_docs/version-v1.10/concepts/groups.md b/spaces_versioned_docs/version-v1.10/concepts/groups.md new file mode 100644 index 000000000..d2ccacdb3 --- /dev/null +++ b/spaces_versioned_docs/version-v1.10/concepts/groups.md @@ -0,0 +1,115 @@ +--- +title: Control Plane Groups +sidebar_position: 2 +description: An introduction to the Control Plane Groups in Upbound +plan: "enterprise" +--- + + +In Upbound, Control Plane Groups (or just, 'groups') are a logical grouping of one or more control planes with shared resources like [secrets][secrets] or [backups][backups]. It's a mechanism for isolating these groups of resources within a single [Space][space]. All role-based access control in Upbound happens at the control plane group-level. ## When to use multiple groups You should use groups in environments where there's a need to have Crossplane manage infrastructure across multiple cloud accounts or projects. For users who only need to deploy and manage resources in a couple of cloud accounts, you shouldn't need to think about groups at all. Groups are a way to divide access in Upbound between multiple teams. Think of a group as being analogous to a Kubernetes _namespace_. ## The 'default' group Every Cloud Space in Upbound has a group named _default_ available. ## Working with groups ### View groups You can list groups in a Space using: ```shell up group list ``` If you're operating in a single-tenant Space and have access to the underlying cluster, you can list namespaces that have the group label: ```shell kubectl get namespaces -l spaces.upbound.io/group=true ``` ### Set the group for a request Several commands in _up_ have a group context. To set the group for a request, use the `--group` flag: ```shell up ctp list --group=team1 ``` ```shell up ctp create new-ctp --group=team2 ``` ### Set the group preference The _up_ CLI operates upon a single [Upbound context][upbound-context]. Whatever context gets set is then used as the preference for other commands. An Upbound context is capable of pointing at a variety of altitudes: 1. A Space in Upbound 2. A group within a Space 3. A control plane within a group To set the group preference, use `up ctx` to choose a group as your preferred Upbound context.
For example: ```shell # This sets the context for the up CLI to the default group in an Upbound-managed Cloud Space (gcp-us-west-1) for an organization called 'acmeco' up ctx acmeco/upbound-gcp-us-west-1/default/ ``` ### Create a group To create a group, login to Upbound and set your context to your desired Space: ```shell up login up ctx '/' # Example: up ctx acmeco/upbound-gcp-us-west-1 ``` Create a group: ```shell up group create my-new-group ``` ### Delete a group To delete a group, login to Upbound and set your context to your desired Space: ```shell up login up ctx '/' # Example: up ctx acmeco/upbound-gcp-us-west-1 ``` Delete a group: ```shell up group delete my-new-group ``` ### Protected groups Once a control plane gets created in a group, Upbound enforces a protection policy on the group. Upbound prevents accidental deletion of the group. To delete a group that has control planes in it, you should first delete all control planes in the group. ## Groups in the context of single-tenant Spaces Upbound offers a variety of deployment models to use the product. If you deploy your own single-tenant Upbound Space (whether connected or disconnected), you're self-hosting Upbound software in a Kubernetes cluster. In these environments, a control plane group maps to a corresponding namespace in the cluster which hosts the Space. Most Kubernetes clusters come with some set of predefined namespaces. Because a group maps to a corresponding Kubernetes namespace, whenever a group gets created, a corresponding Kubernetes namespace must exist as well. When the Spaces software is newly installed, no groups exist. You _can_ elevate a Kubernetes namespace to become a group by doing any of the following: 1. Creating a group with the same name as a preexisting Kubernetes namespace 2. Creating a control plane in a preexisting Kubernetes namespace 3. Labeling a Kubernetes namespace with the label `spaces.upbound.io/group=true` [secrets]: /spaces/howtos/secrets-management [backups]: /spaces/howtos/self-hosted/workload-id/backup-restore-config/ [space]: /spaces/overview [upbound-context]: /manuals/cli/concepts/contexts diff --git a/spaces_versioned_docs/version-v1.10/howtos/_category_.json b/spaces_versioned_docs/version-v1.10/howtos/_category_.json new file mode 100644 index 000000000..d3a8547aa --- /dev/null +++ b/spaces_versioned_docs/version-v1.10/howtos/_category_.json @@ -0,0 +1,7 @@ +{ + "label": "How-tos", + "position": 3, + "collapsed": true +} + + diff --git a/spaces_versioned_docs/version-v1.10/howtos/api-connector.md b/spaces_versioned_docs/version-v1.10/howtos/api-connector.md new file mode 100644 index 000000000..a14468f52 --- /dev/null +++ b/spaces_versioned_docs/version-v1.10/howtos/api-connector.md @@ -0,0 +1,413 @@ +--- +title: API Connector +weight: 90 +description: Connect Kubernetes clusters to remote Crossplane control planes for resource synchronization +aliases: + - /api-connector + - /concepts/api-connector +--- +:::info API Version Information +This guide covers API Connector, a preview feature available in all supported versions (v1.9-v1.14+). + +For related API specifications and available resources, see the [Spaces API Reference](../../../reference/apis/spaces-api/). For version compatibility details, see the . +::: + :::warning API Connector is currently in **Preview**. The feature is under active development and subject to breaking changes. Use for testing and evaluation purposes only.
+::: + API Connector enables seamless integration between Kubernetes application clusters consuming APIs and remote Crossplane control planes providing and reconciling APIs. You can use the API Connector to decouple where Crossplane is running (for example in an Upbound control plane), and where APIs are consumed (for example in an existing Kubernetes cluster). This gives you flexibility and consistency in your control plane operations. Unlike the [Control Plane Connector](ctp-connector.md) which offers only coarse-grained connectivity between app clusters and a control plane, API connector offers fine-grained configuration of which APIs get offered along with multi-cluster connectivity. ## Architecture overview ![API Connector Architecture](/img/api-connector.png) API Connector uses a **provider-consumer** model: - **Provider control plane**: The Upbound control plane that provides APIs and manages infrastructure. - **Consumer cluster**: Any Kubernetes cluster where its users want to use APIs provided by the provider control plane, without having to run Crossplane. API connector gets installed in the consumer cluster, and bidirectionally syncs API objects to the provider. ### Key components **Custom Resource Definitions (CRDs)**: - `ClusterConnection`: Establishes a connection from the consumer to the provider cluster. Pulls bindable CRD APIs from the provider into the consumer cluster for use. - `ClusterAPIBinding`: Instructs API connector to sync all API objects cluster-wide with a given API group to a given provider cluster. - `APIBinding`: Namespaced version of `ClusterAPIBinding`. Instructs API connector to sync API objects within a given namespace and with a given API group to a given provider cluster. ## Prerequisites Before using API Connector, ensure: 1. **Consumer cluster** has network access to the provider control plane 1. You have a license to use API connector. If you are unsure, [contact Upbound][contact] or your sales representative. This guide walks through how to automate connecting your cluster to an Upbound control plane. You can also manually configure the API Connector. ## Publishing APIs in the provider cluster First, log in to your provider control plane, and choose which CRD APIs you want to make accessible to the consumer cluster. API connector only syncs these "bindable" CRDs. Use the `up` CLI to login: ```bash up login ``` Connect to your control plane: ```bash up ctx ``` Check what CRDs are available: ```bash kubectl get crds ``` Label all CRDs you want to publish with the bindable label: ```bash kubectl label crd 'connect.upbound.io/bindable'='true' --overwrite ``` Change context to the provider cluster: ```bash kubectl config set-context ``` Check what CRDs are available: ```bash kubectl get crds ``` Label all CRDs you want to publish with the bindable label: ```bash kubectl label crd 'connect.upbound.io/bindable'='true' --overwrite ``` ## Installation The up CLI provides the simplest installation method with automatic configuration: Make sure the current Kubeconfig context is set to the **provider control plane** ```bash up ctx up controlplane api-connector install --consumer-kubeconfig [OPTIONS] ``` The command: 1. Creates a Robot account (named ``) in the Upbound Cloud organization ``,
Gives the created robot account `admin` permissions to the provider control plane `` 1. Generates a JWT token for the robot account, and stores it in a Kubernetes Secret in the consumer cluster. 1. Installs the API connector Helm chart in the consumer cluster. 1. Creates a `ClusterConnection` object in the consumer cluster, referring to the newly generated Secret, so that API connector can authenticate successfully to the provider control plane. 1. API connector pulls all published CRDs from the previous step into the consumer cluster. **Example**: ```bash up controlplane api-connector install \ --consumer-kubeconfig ~/.kube/config \ --consumer-context my-cluster \ --upbound-token ``` This command uses the provided token to authenticate with the **Provider control plane** and create a `ClusterConnection` resource in the **Consumer cluster** to connect to the **Provider control plane**. **Key Options**: - `--consumer-kubeconfig`: Path to consumer cluster kubeconfig (required) - `--consumer-context`: Context name for consumer cluster (required) - `--name`: Custom name for connection resources (optional) - `--upbound-token`: API token for authentication (optional) - `--upgrade`: Upgrade existing installation (optional) - `--version`: Specific version to install (optional) For manual installation or custom configurations: ```bash helm upgrade --install api-connector oci://xpkg.upbound.io/spaces-artifacts/api-connector \ --namespace upbound-system \ --create-namespace \ --version \ --set consumerClusterDisplayName= ``` ### Authentication methods API Connector supports two authentication methods: For Upbound Spaces integration: ```yaml apiVersion: v1 kind: Secret metadata: name: spaces-secret namespace: upbound-system type: Opaque stringData: token: organization: spacesBaseURL: controlPlaneGroupName: controlPlaneName: ``` For direct cluster access: ```yaml apiVersion: v1 kind: Secret metadata: name: provider-kubeconfig namespace: upbound-system type: Opaque data: kubeconfig: ``` ### Connection setup Create a `ClusterConnection` to establish connectivity: ```yaml apiVersion: connect.upbound.io/v1alpha1 kind: ClusterConnection metadata: name: spaces-connection namespace: upbound-system spec: secretRef: kind: UpboundRobotToken name: spaces-secret namespace: upbound-system crdManagement: pullBehavior: Pull ``` ```yaml apiVersion: connect.upbound.io/v1alpha1 kind: ClusterConnection metadata: name: provider-connection namespace: upbound-system spec: secretRef: kind: KubeConfig name: provider-kubeconfig namespace: upbound-system crdManagement: pullBehavior: Pull ``` ### Configuration Bind APIs to make them available in your consumer cluster: ```yaml apiVersion: connect.upbound.io/v1alpha1 kind: ClusterAPIBinding metadata: name: spec: connectionRef: kind: ClusterConnection name: # Or --name value ``` The `ClusterAPIBinding` name must match the **Resource.Group** (name of the CustomResourceDefinition) of the CRD you want to bind.
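Tying that rule to the usage example below: to bind the hypothetical `NopResource` API (group `nop.example.org`), the binding takes the CRD's name:

```shell
# The binding name must equal the CRD name, <plural>.<group>. This binds
# the NopResource API through the "spaces-connection" ClusterConnection
# created earlier.
kubectl apply -f - <<EOF
apiVersion: connect.upbound.io/v1alpha1
kind: ClusterAPIBinding
metadata:
  name: nopresources.nop.example.org
spec:
  connectionRef:
    kind: ClusterConnection
    name: spaces-connection
EOF
```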
+ + + + ## Usage example After configuration, you can create API objects (in the consumer cluster) that will be synchronized to the provider cluster: ```yaml apiVersion: nop.example.org/v1alpha1 kind: NopResource metadata: name: my-resource namespace: default spec: coolField: "Synchronized resource" compositeDeletePolicy: Foreground ``` Verify the resource status: ```bash kubectl get nopresource my-resource -o yaml ``` When the `APIBound=True` condition is present, it means that the API object has been synced to the provider cluster, and is being reconciled there. Whenever the API object in the provider cluster gets status updates (for example `Ready=True`), that status is synced back to the consumer cluster. Switch contexts to the provider cluster to see the API object being created: ```bash up ctx # or kubectl config set-context ``` ```bash kubectl get nopresource my-resource -o yaml ``` Note that in the provider cluster, the API object is labeled with information on where the API object originates from, and `connect.upbound.io/managed=true`. ## Monitoring and troubleshooting ### Check connection status ```bash kubectl get clusterconnection ``` Expected output: ``` NAME STATUS MESSAGE spaces-connection Ready Provider controlplane is available ``` ### View available APIs ```bash kubectl get clusterconnection spaces-connection -o jsonpath='{.status.offeredAPIs[*].name}' ``` ### Check API binding status ```bash kubectl get clusterapibinding ``` ### Debug resource synchronization ```bash kubectl describe ``` ## Removal ### Using the up CLI ```bash up controlplane api-connector uninstall \ --consumer-kubeconfig ~/.kube/config \ --all ``` The `--all` flag removes all resources including connections and secrets. Without the flag, only the runtime resources are removed; connections and secrets are kept. :::note Uninstall doesn't remove any API objects in the provider control plane. If you want to clean up all API objects there, delete all API objects from the consumer cluster before API connector uninstallation, and wait for the objects to get deleted. ::: ### Using Helm ```bash helm uninstall api-connector -n upbound-system ``` ## Limitations - **Preview feature**: Subject to breaking changes. Not yet production grade. - **CRD updates**: CRDs are pulled once but not automatically updated. If multiple Crossplane clusters offer the same CRD API, API changes must be synchronized out of band, for example using a [Crossplane Configuration](https://docs.crossplane.io/latest/packages/). - **Network requirements**: Consumer cluster must have direct network access to provider cluster. - **Wide permissions needed in consumer cluster**: Because the API connector doesn't know up front the names of the APIs it needs to reconcile, it currently runs with full "root" privileges in the consumer cluster. - **Connector polling**: API Connector checks for drift between the consumer and provider cluster periodically through polling. The poll interval can be changed with the `pollInterval` Helm value. ## Advanced configuration ### Multiple connections You can connect to multiple provider clusters simultaneously by creating multiple `ClusterConnection` resources with different names and configurations.
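A sketch of what that looks like, reusing the `ClusterConnection` shape from the connection setup above, with hypothetical names and secrets for two environments:

```shell
# Sketch: two independent provider connections; APIBindings then pick a
# provider per API by referencing the connection name. The connection and
# secret names here are hypothetical.
kubectl apply -f - <<EOF
apiVersion: connect.upbound.io/v1alpha1
kind: ClusterConnection
metadata:
  name: prod-connection
  namespace: upbound-system
spec:
  secretRef:
    kind: UpboundRobotToken
    name: prod-spaces-secret
    namespace: upbound-system
  crdManagement:
    pullBehavior: Pull
---
apiVersion: connect.upbound.io/v1alpha1
kind: ClusterConnection
metadata:
  name: staging-connection
  namespace: upbound-system
spec:
  secretRef:
    kind: UpboundRobotToken
    name: staging-spaces-secret
    namespace: upbound-system
  crdManagement:
    pullBehavior: Pull
EOF
```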
+ +[contact]: https://www.upbound.io/contact-us diff --git a/spaces_versioned_docs/version-v1.10/howtos/auto-upgrade.md b/spaces_versioned_docs/version-v1.10/howtos/auto-upgrade.md new file mode 100644 index 000000000..249056fb4 --- /dev/null +++ b/spaces_versioned_docs/version-v1.10/howtos/auto-upgrade.md @@ -0,0 +1,131 @@ +--- +title: Automatically upgrade control planes +sidebar_position: 50 +description: How to configure automatic upgrades of Crossplane in a control plane +plan: "standard" +--- + + + +Upbound Spaces can automatically upgrade the version of Upbound Crossplane in managed control plane instances. You can edit the `spec.crossplane.autoUpgrade` field in your `ControlPlane` specification with the available release channels below. +:::info API Version Information +This guide applies to all supported versions (v1.9-v1.14+). The `spec.crossplane.autoUpgrade` field has been available since v1.9. + +For ControlPlane API specifications, see the [Spaces API Reference](../../../reference/apis/spaces-api/). For version compatibility details, see the . +::: + | Channel | Description | Example | |------------|-----------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------| | **None** | Disables auto upgrades. | _Uses version specified in `spec.crossplane.version`._ | | **Patch** | Upgrades to the latest supported patch release. | _Control plane version 1.12.2-up.2 auto upgrades to 1.12.3-up.1 upon release._ | | **Stable** | Default setting. Upgrades to the latest supported patch release on minor version _N-1_ where N is the latest supported minor version. | _If latest supported minor version is 1.14, auto upgrades to latest patch - 1.13.2-up.3_ | | **Rapid** | Upgrades to the latest supported patch release on the latest supported minor version. | _If the latest supported minor version is 1.14, auto upgrades to the latest patch of minor version 1.14, e.g. 1.14.5-up.1_ | :::warning The `Rapid` channel is only recommended for users willing to accept the risk of new features and potentially breaking changes. ::: ## Examples The specs below are examples of how to edit the `autoUpgrade` channel in your `ControlPlane` specification. To run a control plane with the `Rapid` auto upgrade channel, your spec should look like this: ```yaml apiVersion: spaces.upbound.io/v1beta1 kind: ControlPlane metadata: name: example-ctp spec: crossplane: autoUpgrade: channel: Rapid writeConnectionSecretToRef: name: kubeconfig-example-ctp ``` To run a control plane with a pinned version of Crossplane, specify it in the `version` field: ```yaml apiVersion: spaces.upbound.io/v1beta1 kind: ControlPlane metadata: name: example-ctp spec: crossplane: version: 1.14.3-up.1 autoUpgrade: channel: None writeConnectionSecretToRef: name: kubeconfig-example-ctp ``` ## Supported Crossplane versions Spaces supports the three [preceding minor versions][preceding-minor-versions] from the last supported minor version. For example, if the last supported minor version is `1.14`, minor versions `1.13` and `1.12` are also supported. Versions older than the three most recent minor versions aren't supported. Only supported Crossplane versions are valid specifications for new control planes.
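If a control plane pinned with `channel: None` has fallen behind, you can move it onto a managed channel without recreating it; a merge patch against the `spec.crossplane.autoUpgrade.channel` field shown earlier does the job:

```shell
# Move an existing control plane onto the Stable channel with a merge
# patch; Spaces then keeps it on a supported version automatically.
kubectl patch controlplane example-ctp --type=merge \
  -p '{"spec":{"crossplane":{"autoUpgrade":{"channel":"Stable"}}}}'
```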
Current Crossplane version support by Spaces version:

| Spaces Version | Crossplane Version Min | Crossplane Version Max |
|:--------------:|:----------------------:|:----------------------:|
| 1.2  | 1.13 | 1.15 |
| 1.3  | 1.13 | 1.15 |
| 1.4  | 1.14 | 1.16 |
| 1.5  | 1.14 | 1.16 |
| 1.6  | 1.14 | 1.16 |
| 1.7  | 1.14 | 1.16 |
| 1.8  | 1.15 | 1.17 |
| 1.9  | 1.16 | 1.18 |
| 1.10 | 1.16 | 1.18 |
| 1.11 | 1.16 | 1.18 |
| 1.12 | 1.17 | 1.19 |

Upbound offers extended support for all installed Crossplane versions released within a 12-month window since the last Spaces release. Contact your Upbound sales representative for more information on version support.

:::warning
If the auto upgrade channel is `Stable` or `Rapid`, the Crossplane version always stays within the support window after auto upgrade. If set to `Patch` or `None`, the minor version may fall outside the support window. You are responsible for upgrading to a supported version.
:::

To view the support status of a control plane instance, use `kubectl get ctp`.

```bash
kubectl get ctp
NAME          CROSSPLANE VERSION   SUPPORTED   READY   MESSAGE   AGE
example-ctp   1.13.2-up.3          True        True              31m
```

Unsupported versions return `SUPPORTED: False`.

```bash
kubectl get ctp
NAME          CROSSPLANE VERSION   SUPPORTED   READY   MESSAGE   AGE
example-ctp   1.11.5-up.1          False       True              31m
```

For more detail, use the `-o yaml` flag:

```bash
kubectl get controlplanes.spaces.upbound.io example-ctp -o yaml
```

```yaml
status:
  conditions:
  ...
  - lastTransitionTime: "2024-01-23T06:36:10Z"
    message: Crossplane version 1.11.5-up.1 is outside of the support window.
      Oldest supported minor version is 1.12.
    reason: UnsupportedCrossplaneVersion
    status: "False"
    type: Supported
```

[preceding-minor-versions]: /reference/usage/lifecycle/#maintenance-and-updates

diff --git a/spaces_versioned_docs/version-v1.10/howtos/automation-and-gitops/_category_.json b/spaces_versioned_docs/version-v1.10/howtos/automation-and-gitops/_category_.json
new file mode 100644
index 000000000..b65481af6
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.10/howtos/automation-and-gitops/_category_.json
@@ -0,0 +1,8 @@
{
  "label": "Automation & GitOps",
  "position": 11,
  "collapsed": true,
  "customProps": {
    "plan": "business"
  }
}
diff --git a/spaces_versioned_docs/version-v1.10/howtos/automation-and-gitops/overview.md b/spaces_versioned_docs/version-v1.10/howtos/automation-and-gitops/overview.md
new file mode 100644
index 000000000..57eeb15fc
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.10/howtos/automation-and-gitops/overview.md
@@ -0,0 +1,138 @@
---
title: Automation and GitOps Overview
sidebar_label: Overview
sidebar_position: 1
description: Guide to automating control plane deployments with GitOps and Argo CD
plan: "business"
---

Automating control plane deployments with GitOps enables declarative, version-controlled infrastructure management. This section covers integrating GitOps workflows with Upbound control planes using Argo CD and related tools.

:::info API Version Information
This guide applies to all supported versions (v1.9-v1.14+). GitOps patterns are compatible across all versions of Upbound Spaces.

For details on deployment models and their differences, see the [Deployment Modes](../../concepts/deployment-modes.md) concept guide.
:::

## What is GitOps?
GitOps is an approach for managing infrastructure by:
- **Declaratively describing** the desired system state in Git
- **Using controllers** to continuously reconcile actual state with desired state
- **Treating Git as the source of truth** for all configuration and deployments

Upbound control planes are fully compatible with GitOps patterns, and we strongly recommend integrating GitOps into the platforms you build on Upbound.

## Key Concepts

### Argo CD
[Argo CD](https://argo-cd.readthedocs.io/) is a popular Kubernetes-native GitOps controller. It continuously monitors Git repositories and automatically applies changes to your infrastructure when commits are detected.

### Deployment Models

The way you configure GitOps depends on your deployment model:

| Aspect | Cloud Spaces | Self-Hosted Spaces |
|--------|--------------|-------------------|
| **Access Method** | Upbound API with tokens | Kubernetes native (secrets/kubeconfig) |
| **Configuration** | Kubeconfig via `up` CLI | Control plane connection secrets |
| **Setup Complexity** | More involved (API integration) | Simpler (native Kubernetes) |
| **Typical Use Case** | Managing Upbound resources | Managing workloads on control planes |

## Getting Started

**Choose your path based on your deployment model:**

### Cloud Spaces
If you're using Upbound Cloud Spaces (Dedicated or Managed):
1. Start with [GitOps with Upbound Control Planes](../cloud-spaces/gitops-on-upbound.md)
2. Learn how to integrate Argo CD with Cloud Spaces
3. Manage both control plane infrastructure and Upbound resources declaratively

### Self-Hosted Spaces
If you're running self-hosted Spaces:
1. Start with [GitOps with ArgoCD in Self-Hosted Spaces](../self-hosted/gitops-with-argocd.md)
2. Learn how to configure control plane connection secrets
3. Manage workloads deployed to your control planes

## Common Workflows

### Workflow 1: Managing Control Planes with GitOps
Create and manage control planes themselves declaratively using provider-kubernetes:

```yaml
apiVersion: kubernetes.crossplane.io/v1alpha2
kind: Object
metadata:
  name: my-controlplane
spec:
  forProvider:
    manifest:
      apiVersion: spaces.upbound.io/v1beta1
      kind: ControlPlane
      # ... control plane configuration
```

### Workflow 2: Managing Workloads on Control Planes
Deploy applications and resources to control planes using standard Kubernetes GitOps patterns:

```yaml
apiVersion: v1
kind: Namespace
metadata:
  name: my-app
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-app
  namespace: my-app
# ... deployment configuration
```

### Workflow 3: Managing Upbound Resources
Use provider-upbound to manage Upbound IAM and repository resources:

- Teams
- Robots and their team memberships
- Repositories and permissions

## Advanced Topics

### Argo CD Plugin for Upbound
Learn more in the [ArgoCD Plugin guide](../self-hosted/use-argo.md) for enhanced integration with self-hosted Spaces.

### Declarative Control Plane Creation
See [Declaratively create control planes](../self-hosted/declarative-ctps.md) for advanced automation patterns.

### Consuming Control Plane APIs
Understand how to [consume control plane APIs in your app cluster](../mcp-connector-guide.md) with Argo CD.
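To make these workflows concrete, here's a minimal Argo CD `Application` sketch that syncs a Git path of claims to a control plane registered as an Argo cluster context. The repository URL, path, and context name are placeholders for your own setup:

```yaml
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: platform-claims
  namespace: argocd
spec:
  project: default
  source:
    repoURL: https://github.com/example-org/platform-config.git # placeholder
    targetRevision: main
    path: claims
  destination:
    name: my-control-plane-context # Argo cluster context for your control plane
    namespace: default
  syncPolicy:
    automated:
      prune: false # avoid pruning Crossplane-managed resources
```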
## Prerequisites

Before implementing GitOps with control planes, ensure you have:

**For Cloud Spaces:**
- Access to Upbound Cloud Spaces
- `up` CLI installed and configured
- An API token with appropriate permissions
- Argo CD or a similar GitOps controller running
- Familiarity with Kubernetes RBAC

**For Self-Hosted Spaces:**
- Self-hosted Spaces deployed and running
- Argo CD deployed in your infrastructure
- Kubectl access to the cluster hosting Spaces
- Understanding of control plane architecture

## Next Steps

1. **Choose your deployment model** above
2. **Review the relevant getting started guide**
3. **Set up your GitOps controller** (Argo CD)
4. **Deploy your first automated control plane**
5. **Explore advanced topics** as needed

:::tip
Start with simple deployments to test your GitOps workflow before moving to production. Use [simulations](../simulations.md) to preview changes before applying them.
:::

diff --git a/spaces_versioned_docs/version-v1.10/howtos/backup-and-restore.md b/spaces_versioned_docs/version-v1.10/howtos/backup-and-restore.md
new file mode 100644
index 000000000..3b8d026cb
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.10/howtos/backup-and-restore.md
@@ -0,0 +1,530 @@
---
title: Backup and restore
sidebar_position: 13
description: Configure and manage backups in your Upbound Space.
plan: "enterprise"
---

Upbound's _Shared Backups_ is a built-in backup and restore feature. Shared Backups lets you configure automatic schedules for taking snapshots of your control planes. You can restore data from these backups by making new control planes. This guide explains how to use Shared Backups for disaster recovery or upgrade scenarios.

:::info API Version Information & Available Versions
This guide applies to **all supported versions** (v1.9-v1.15+).

**Select your API version**:
- [Spaces API v1.15 (Latest)](../../../next/reference/apis/spaces-api/)
- [Spaces API v1.14](../../../1.14/reference/apis/spaces-api/)
- [Spaces API v1.13](../../../1.13/reference/apis/spaces-api/)
- [Spaces API v1.12](../../../1.12/reference/apis/spaces-api/)
- [Spaces API v1.11](../../../1.11/reference/apis/spaces-api/)
- [Spaces API v1.10](../../../1.10/reference/apis/spaces-api/)
- [Spaces API v1.9](../../../1.9/reference/apis/spaces-api/)

For the version support policy and version compatibility details, see the API reference for your version above.
:::

## Benefits

The Shared Backups feature provides the following benefits:

* Automatic backups for control planes without any operational overhead
* Backup schedules for multiple control planes in a group
* Shared Backups are available across all hosting environments of Upbound (Disconnected, Connected, or Cloud Spaces)

## Configure a Shared Backup Config

[SharedBackupConfig][sharedbackupconfig] is a [group-scoped][group-scoped] resource. You should create them in a group containing one or more control planes. This resource configures the storage details and provider. Whenever a backup executes (either on a schedule or manually initiated), it references a SharedBackupConfig to tell it where to store the snapshot.

### Backup config provider

The `spec.objectStorage.provider` and `spec.objectStorage.config` fields configure:

* The object storage provider
* The path to the provider
* The credentials needed to communicate with the provider

You can only set one provider. Upbound currently supports AWS, Azure, and GCP as providers.
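The provider examples below reference credentials stored in a secret named `bucket-creds` with a `creds` key, in the same namespace as the SharedBackupConfig. A minimal sketch of creating that secret; the local file name is a placeholder for your provider's credential format:

```bash
# The file contents depend on the provider (AWS keys, Azure storage
# account key, or a GCP service account key file)
kubectl create secret generic bucket-creds \
  -n default \
  --from-file=creds=./provider-credentials.txt
```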
`spec.objectStorage.config` is a freeform map of configuration options for the object storage provider. See [Thanos object storage][thanos-object-storage] for more information on the formats for each supported cloud provider. `spec.bucket` and `spec.provider` override the required values in the config.

#### AWS as a storage provider

:::important
For Cloud Spaces, static credentials are currently the only supported auth method.
:::

This example demonstrates how to use AWS as a storage provider for your backups:

```yaml
apiVersion: spaces.upbound.io/v1alpha1
kind: SharedBackupConfig
metadata:
  name: default
  namespace: default
spec:
  objectStorage:
    provider: AWS
    bucket: spaces-backup-bucket
    config:
      endpoint: s3.eu-west-2.amazonaws.com
      region: eu-west-2
    credentials:
      source: Secret
      secretRef:
        name: bucket-creds
        key: creds
```

This example assumes you've already created an S3 bucket called "spaces-backup-bucket" in the AWS `eu-west-2` region. The account credentials to access the bucket should exist in a secret in the same namespace as the Shared Backup Config.

#### Azure as a storage provider

:::important
For Cloud Spaces, static credentials are currently the only supported auth method.
:::

This example demonstrates how to use Azure as a storage provider for your backups:

```yaml
apiVersion: spaces.upbound.io/v1alpha1
kind: SharedBackupConfig
metadata:
  name: default
  namespace: default
spec:
  objectStorage:
    provider: Azure
    bucket: upbound-backups
    config:
      storage_account: upbackupstore
      container: upbound-backups
      endpoint: blob.core.windows.net
    credentials:
      source: Secret
      secretRef:
        name: bucket-creds
        key: creds
```

This example assumes you've already created an Azure storage account called `upbackupstore` and blob `upbound-backups`. The storage account key to access the blob should exist in a secret in the same namespace as the Shared Backup Config.

#### GCP as a storage provider

:::important
For Cloud Spaces, static credentials are currently the only supported auth method.
:::

This example demonstrates how to use Google Cloud Storage as a storage provider for your backups:

```yaml
apiVersion: spaces.upbound.io/v1alpha1
kind: SharedBackupConfig
metadata:
  name: default
  namespace: default
spec:
  objectStorage:
    provider: GCP
    bucket: spaces-backup-bucket
    credentials:
      source: Secret
      secretRef:
        name: bucket-creds
        key: creds
```

This example assumes you've already created a Cloud Storage bucket called "spaces-backup-bucket" and a service account with access to this bucket. The key file should exist in a secret in the same namespace as the Shared Backup Config.

## Configure a Shared Backup Schedule

[SharedBackupSchedule][sharedbackupschedule] is a [group-scoped][group-scoped-1] resource. You should create them in a group containing one or more control planes. This resource defines a backup schedule for control planes within its corresponding group.
Below is an example of a Shared Backup Schedule that takes backups every day of all control planes having `environment: production` labels:

```yaml
apiVersion: spaces.upbound.io/v1alpha1
kind: SharedBackupSchedule
metadata:
  name: daily-schedule
  namespace: default
spec:
  schedule: "@daily"
  configRef:
    kind: SharedBackupConfig
    name: default
  controlPlaneSelector:
    labelSelectors:
    - matchLabels:
        environment: production
```

### Define a schedule

The `spec.schedule` field is a [Cron-formatted][cron-formatted] string. Some common examples are below:

| Entry | Description |
| ----------------- | --------------------------------------------------------------------------------------------------- |
| `@hourly`         | Run once an hour. |
| `@daily`          | Run once a day. |
| `@weekly`         | Run once a week. |
| `0 0/4 * * *`     | Run every 4 hours. |
| `0/15 * * * 1-5`  | Run every fifteenth minute on Monday through Friday. |
| `@every 1h30m10s` | Run every 1 hour, 30 minutes, and 10 seconds. Hour is the largest measurement of time for `@every`. |

### Exclude resources from the backup

The `spec.excludedResources` field is an array of resource names to exclude from each backup.

```yaml
apiVersion: spaces.upbound.io/v1alpha1
kind: SharedBackupSchedule
metadata:
  name: daily-schedule
spec:
  excludedResources:
  - "xclusters.aws.platformref.upbound.io"
  - "xdatabase.aws.platformref.upbound.io"
  - "xrolepolicyattachment.iam.aws.crossplane.io"
```

:::warning
You must specify resource names in lowercase "resource.group" format (for example, `xclusters.aws.platformref.upbound.io`). Using only the resource kind (for example, `XCluster`) isn't supported.
:::

### Suspend a schedule

Use the `spec.suspend` field to suspend the schedule. A suspended schedule creates no new backups, but allows running backups to complete.

```yaml
apiVersion: spaces.upbound.io/v1alpha1
kind: SharedBackupSchedule
metadata:
  name: daily-schedule
spec:
  suspend: true
```

### Set the time to live

Set the `spec.ttl` field to define the time to live for the backup. After this time, the backup is eligible for garbage collection. If this field isn't set, the backup isn't garbage collected. The time to live is a duration, for example, `168h` for 7 days.

```yaml
apiVersion: spaces.upbound.io/v1alpha1
kind: SharedBackupSchedule
metadata:
  name: daily-schedule
spec:
  ttl: 168h # Backup is garbage collected after 7 days
```

:::tip
By default, this setting doesn't delete uploaded files. Review the next section to define
the deletion policy.
:::

### Define the deletion policy

Set the `spec.deletionPolicy` field to define backup deletion actions, including the
deletion of the backup file from the bucket. The deletion policy defaults
to `Orphan`. Set it to `Delete` to remove uploaded files from the bucket. For more
information on the backup and restore process, review the [Spaces API
documentation][spaces-api-documentation].

```yaml
apiVersion: spaces.upbound.io/v1alpha1
kind: SharedBackupSchedule
metadata:
  name: daily-schedule
spec:
  ttl: 168h # Backup is garbage collected after 7 days
  deletionPolicy: Delete # Defaults to Orphan
```

### Garbage collect backups when the schedule gets deleted

Set the `spec.useOwnerReferencesInBackup` field to garbage collect associated backups when a shared schedule gets deleted. If set to true, backups are garbage collected when the schedule gets deleted.
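For example, a minimal sketch that ties backup lifetimes to the schedule object:

```yaml
apiVersion: spaces.upbound.io/v1alpha1
kind: SharedBackupSchedule
metadata:
  name: daily-schedule
spec:
  useOwnerReferencesInBackup: true # backups are garbage collected with the schedule
```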
### Control plane selection

To configure which control planes in a group you want to create a backup schedule for, use the `spec.controlPlaneSelector` field. You can either use `labelSelectors` or the `names` of a control plane directly. A control plane matches if any of the label selectors match.

This example matches all control planes in the group that have `environment: production` as a label:

```yaml
apiVersion: spaces.upbound.io/v1alpha1
kind: SharedBackupSchedule
metadata:
  name: my-backup-schedule
spec:
  controlPlaneSelector:
    labelSelectors:
    - matchLabels:
        environment: production
```

You can use the more complex `matchExpressions` to match labels based on an expression. This example matches control planes that have label `environment: production` or `environment: staging`:

```yaml
apiVersion: spaces.upbound.io/v1alpha1
kind: SharedBackupSchedule
metadata:
  name: my-backup-schedule
spec:
  controlPlaneSelector:
    labelSelectors:
    - matchExpressions:
      - { key: environment, operator: In, values: [production,staging] }
```

You can also specify the names of control planes directly:

```yaml
apiVersion: spaces.upbound.io/v1alpha1
kind: SharedBackupSchedule
metadata:
  name: my-backup-schedule
spec:
  controlPlaneSelector:
    names:
    - controlplane-dev
    - controlplane-staging
    - controlplane-prod
```

## Configure a Shared Backup

[SharedBackup][sharedbackup] is a [group-scoped][group-scoped-2] resource. You should create them in a group containing one or more control planes. This resource causes a backup to occur for control planes within its corresponding group.

Below is an example of a Shared Backup that takes a backup of all control planes having `environment: production` labels:

```yaml
apiVersion: spaces.upbound.io/v1alpha1
kind: SharedBackup
metadata:
  name: my-backup
  namespace: default
spec:
  configRef:
    kind: SharedBackupConfig
    name: default
  controlPlaneSelector:
    labelSelectors:
    - matchLabels:
        environment: production
```

### Exclude resources from the backup

The `spec.excludedResources` field is an array of resource names to exclude from each backup.

```yaml
apiVersion: spaces.upbound.io/v1alpha1
kind: SharedBackup
metadata:
  name: my-backup
spec:
  excludedResources:
  - "xclusters.aws.platformref.upbound.io"
  - "xdatabase.aws.platformref.upbound.io"
  - "xrolepolicyattachment.iam.aws.crossplane.io"
```

:::warning
You must specify resource names in lowercase "resource.group" format (for example, `xclusters.aws.platformref.upbound.io`). Using only the resource kind (for example, `XCluster`) isn't supported.
:::

### Set the time to live

Set the `spec.ttl` field to define the time to live for the backup. After this time, the backup is eligible for garbage collection. If this field isn't set, the backup isn't garbage collected. The time to live is a duration, for example, `168h` for 7 days.

```yaml
apiVersion: spaces.upbound.io/v1alpha1
kind: SharedBackup
metadata:
  name: my-backup
spec:
  ttl: 168h # Backup is garbage collected after 7 days
```

### Garbage collect backups on Shared Backup deletion

Set the `spec.useOwnerReferencesInBackup` field to define whether to garbage collect associated backups when a shared backup gets deleted. If set to true, backups are garbage collected when the shared backup gets deleted.
### Control plane selection

To configure which control planes in a group you want to create a backup for, use the `spec.controlPlaneSelector` field. You can either use `labelSelectors` or the `names` of a control plane directly. A control plane matches if any of the label selectors match.

This example matches all control planes in the group that have `environment: production` as a label:

```yaml
apiVersion: spaces.upbound.io/v1alpha1
kind: SharedBackup
metadata:
  name: my-backup
spec:
  controlPlaneSelector:
    labelSelectors:
    - matchLabels:
        environment: production
```

You can use the more complex `matchExpressions` to match labels based on an expression. This example matches control planes that have label `environment: production` or `environment: staging`:

```yaml
apiVersion: spaces.upbound.io/v1alpha1
kind: SharedBackup
metadata:
  name: my-backup
spec:
  controlPlaneSelector:
    labelSelectors:
    - matchExpressions:
      - { key: environment, operator: In, values: [production,staging] }
```

You can also specify the names of control planes directly:

```yaml
apiVersion: spaces.upbound.io/v1alpha1
kind: SharedBackup
metadata:
  name: my-backup
spec:
  controlPlaneSelector:
    names:
    - controlplane-dev
    - controlplane-staging
    - controlplane-prod
```

## Create a manual backup

[Backup][backup] is a [group-scoped][group-scoped-3] resource that causes a single backup to occur for a control plane in its corresponding group.

Below is an example of a manual Backup of a control plane:

```yaml
apiVersion: spaces.upbound.io/v1alpha1
kind: Backup
metadata:
  name: my-backup
  namespace: default
spec:
  configRef:
    kind: SharedBackupConfig
    name: default
  controlPlane: my-awesome-ctp
  deletionPolicy: Delete
```

The backup's `spec.deletionPolicy` field defines backup deletion actions,
including the deletion of the backup file from the bucket. It defaults to
`Orphan`; set it to `Delete` to remove uploaded files from the bucket.
For more information on the backup and restore process, review the [Spaces API documentation][spaces-api-documentation-1].

### Choose a control plane to backup

The `spec.controlPlane` field defines which control plane to execute a backup against.

```yaml
apiVersion: spaces.upbound.io/v1alpha1
kind: Backup
metadata:
  name: my-backup
  namespace: default
spec:
  controlPlane: my-awesome-ctp
```

If the control plane doesn't exist, the backup fails after multiple retry attempts.

### Exclude resources from the backup

The `spec.excludedResources` field is an array of resource names to exclude from the manual backup.

```yaml
apiVersion: spaces.upbound.io/v1alpha1
kind: Backup
metadata:
  name: my-backup
spec:
  excludedResources:
  - "xclusters.aws.platformref.upbound.io"
  - "xdatabase.aws.platformref.upbound.io"
  - "xrolepolicyattachment.iam.aws.crossplane.io"
```

:::warning
You must specify resource names in lowercase "resource.group" format (for example, `xclusters.aws.platformref.upbound.io`). Using only the resource kind (for example, `XCluster`) isn't supported.
:::

### Set the time to live

Set the `spec.ttl` field to define the time to live for the backup. After this time, the backup is eligible for garbage collection. If this field isn't set, the backup isn't garbage collected. The time to live is a duration, for example, `168h` for 7 days.
```yaml
apiVersion: spaces.upbound.io/v1alpha1
kind: Backup
metadata:
  name: my-backup
spec:
  ttl: 168h # Backup is garbage collected after 7 days
```

## Restore a control plane from a backup

You can restore a control plane's state from a backup. Below is an example of creating a new control plane from a previous backup called `restore-me`:

```yaml
apiVersion: spaces.upbound.io/v1beta1
kind: ControlPlane
metadata:
  name: my-awesome-restored-ctp
  namespace: default
spec:
  restore:
    source:
      kind: Backup
      name: restore-me
```

[group-scoped]: /spaces/concepts/groups
[group-scoped-1]: /spaces/concepts/groups
[group-scoped-2]: /spaces/concepts/groups
[group-scoped-3]: /spaces/concepts/groups
[sharedbackupconfig]: /reference/apis/spaces-api/latest
[thanos-object-storage]: https://thanos.io/tip/thanos/storage.md/
[sharedbackupschedule]: /reference/apis/spaces-api/latest
[cron-formatted]: https://en.wikipedia.org/wiki/Cron
[spaces-api-documentation]: /reference/apis/spaces-api/v1_9
[sharedbackup]: /reference/apis/spaces-api/latest
[backup]: /reference/apis/spaces-api/latest
[spaces-api-documentation-1]: /reference/apis/spaces-api/v1_9

diff --git a/docs/manuals/spaces/_category_.json b/spaces_versioned_docs/version-v1.10/howtos/cloud-spaces/_category_.json
similarity index 63%
rename from docs/manuals/spaces/_category_.json
rename to spaces_versioned_docs/version-v1.10/howtos/cloud-spaces/_category_.json
index f0405023f..1e1869a38 100644
--- a/docs/manuals/spaces/_category_.json
+++ b/spaces_versioned_docs/version-v1.10/howtos/cloud-spaces/_category_.json
@@ -1,6 +1,6 @@
 {
-  "label": "Spaces",
-  "position": 2,
+  "label": "Cloud Spaces",
+  "position": 1,
   "collapsed": true,
   "customProps": {
     "plan": "standard"
diff --git a/spaces_versioned_docs/version-v1.10/howtos/cloud-spaces/dedicated-spaces-deployment.md b/spaces_versioned_docs/version-v1.10/howtos/cloud-spaces/dedicated-spaces-deployment.md
new file mode 100644
index 000000000..ebad9493e
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.10/howtos/cloud-spaces/dedicated-spaces-deployment.md
@@ -0,0 +1,33 @@
---
title: Dedicated Spaces
sidebar_position: 4
description: A guide to Upbound Dedicated Spaces
plan: business
---

## Benefits

Dedicated Spaces offer the following benefits:

- **Single-tenancy.** A control plane space where Upbound guarantees you're the only tenant operating in the environment.
- **Connectivity to your private network.** Establish secure network connections between your Dedicated Cloud Space running in Upbound and your own resources behind your private network.
- **Reduced overhead.** Offload day-to-day operational burdens to Upbound while focusing on your job of building your platform.

## Architecture

A Dedicated Space is a deployment of the Upbound Spaces software inside an
Upbound-controlled cloud account and network. The control planes you run in a
Dedicated Space are dedicated to your organization alone.

The diagram below illustrates the high-level architecture of Upbound Dedicated Spaces:

![Upbound Dedicated Spaces arch](/img/managed-arch-gcp.png)

## How to get access to Dedicated Spaces

If you have an interest in Upbound Dedicated Spaces, contact
[Upbound][contact-us]. We can chat more about your
requirements and see if Dedicated Spaces are a good fit for you.
[contact-us]: https://www.upbound.io/contact-us
[managed-space]: /spaces/howtos/self-hosted/managed-spaces-deployment

diff --git a/spaces_versioned_docs/version-v1.10/howtos/cloud-spaces/gitops-on-upbound.md b/spaces_versioned_docs/version-v1.10/howtos/cloud-spaces/gitops-on-upbound.md
new file mode 100644
index 000000000..fa59a8dce
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.10/howtos/cloud-spaces/gitops-on-upbound.md
@@ -0,0 +1,318 @@
---
title: GitOps with Upbound Control Planes
sidebar_position: 80
description: An introduction to doing GitOps with control planes on Upbound Cloud Spaces
plan: "business"
---

:::info Deployment Model
This guide applies to **Upbound Cloud Spaces** (Dedicated and Managed Spaces). For self-hosted Spaces deployments, see [GitOps with ArgoCD in Self-Hosted Spaces](/spaces/howtos/self-hosted/gitops-with-argocd/).
:::

GitOps is an approach for managing a system by declaratively describing the desired configuration of resources in Git and using controllers to realize the desired state. Upbound's control planes are compatible with this pattern, and it's strongly recommended you integrate GitOps in the platforms you build on Upbound.

## Integrate with Argo CD

[Argo CD][argo-cd] is a project in the Kubernetes ecosystem commonly used for GitOps. You can use it in tandem with Upbound control planes to achieve GitOps flows. The sections below explain how to integrate these tools with Upbound.

### Generate a kubeconfig for your control plane

Use the up CLI to [generate a kubeconfig][generate-a-kubeconfig] for your control plane:

```bash
up ctx <org>/<space>/<group>/<control-plane> -f - > context.yaml
```

### Create an API token

You need a personal access token (PAT). You create PATs on a per-user basis in the Upbound Console. Go to [My Account - API tokens][my-account-api-tokens] and select Create New Token. Give the token a name and save the secret value somewhere safe.

### Add the up CLI init container to Argo

Create a new file called `up-plugin-values.yaml` and paste the following YAML:

```yaml
controller:
  volumes:
  - name: up-plugin
    emptyDir: {}
  - name: up-home
    emptyDir: {}

  volumeMounts:
  - name: up-plugin
    mountPath: /usr/local/bin/up
    subPath: up
  - name: up-home
    mountPath: /home/argocd/.up

  initContainers:
  - name: up-plugin
    image: xpkg.upbound.io/upbound/up-cli:v0.39.0
    command: ["cp"]
    args:
    - /usr/local/bin/up
    - /plugin/up
    volumeMounts:
    - name: up-plugin
      mountPath: /plugin

server:
  volumes:
  - name: up-plugin
    emptyDir: {}
  - name: up-home
    emptyDir: {}

  volumeMounts:
  - name: up-plugin
    mountPath: /usr/local/bin/up
    subPath: up
  - name: up-home
    mountPath: /home/argocd/.up

  initContainers:
  - name: up-plugin
    image: xpkg.upbound.io/upbound/up-cli:v0.39.0
    command: ["cp"]
    args:
    - /usr/local/bin/up
    - /plugin/up
    volumeMounts:
    - name: up-plugin
      mountPath: /plugin
```

### Install or upgrade Argo using the values file

Install or upgrade Argo via Helm, including the values from the `up-plugin-values.yaml` file:

```bash
helm upgrade --install -n argocd -f up-plugin-values.yaml --reuse-values argocd argo/argo-cd
```

### Configure Argo CD

To configure Argo CD for annotation-based resource tracking, edit the Argo CD ConfigMap in the Argo CD namespace.
Add `application.resourceTrackingMethod: annotation` to the data section as below.
This configuration turns off Argo CD auto pruning, preventing the deletion of Crossplane resources.
Next, configure [auto respect RBAC for the Argo CD controller][auto-respect-rbac-for-the-argo-cd-controller].
By default, Argo CD attempts to discover some Kubernetes resource types that don't exist in a control plane.
You must configure Argo CD to respect the cluster's RBAC rules so that Argo CD can sync.
Add `resource.respectRBAC: normal` to the data section as below.

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: argocd-cm
data:
  ...
  application.resourceTrackingMethod: annotation
  resource.respectRBAC: normal
```

:::tip
The `resource.respectRBAC` configuration above tells Argo to respect RBAC for _all_ cluster contexts. If you're using an Argo CD instance to manage more than only control planes, you should consider changing the `clusters` string match for the configuration to apply only to control planes. For example, if every control plane context name followed the convention of being named `controlplane-<name>`, you could set the string match to be `controlplane-*`.
:::

### Create a cluster context definition

Replace the variables and run the following script to configure a new Argo cluster context definition.

To configure Argo for a control plane in a Connected Space, replace `stringData.server` with the ingress URL of the control plane. This URL is what's output when using `up ctx`.

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: my-control-plane
  namespace: argocd
  labels:
    argocd.argoproj.io/secret-type: cluster
type: Opaque
stringData:
  name: my-control-plane-context
  server: https://<space-host>.spaces.upbound.io/apis/spaces.upbound.io/v1beta1/namespaces/<group>/controlplanes/<control-plane>/k8s
  config: |
    {
      "execProviderConfig": {
        "apiVersion": "client.authentication.k8s.io/v1",
        "command": "up",
        "args": [ "org", "token" ],
        "env": {
          "ORGANIZATION": "<org-name>",
          "UP_TOKEN": "<api-token>"
        }
      },
      "tlsClientConfig": {
        "insecure": false,
        "caData": "<base64-encoded-ca-data>"
      }
    }
```

## GitOps for Upbound resources

Like any other cloud service, you can drive the lifecycle of Upbound Cloud resources with Crossplane. This lets you establish GitOps flows to declaratively create and manage:

- [control plane groups][control-plane-groups]
- [control planes][control-planes]
- [Upbound IAM resources][upbound-iam-resources]

Use a control plane installed with [provider-upbound][provider-upbound] and [provider-kubernetes][provider-kubernetes] to achieve this.

### Provider-upbound

[Provider-upbound][provider-upbound-2] is a Crossplane provider built by Upbound to interact with Upbound resources. You can use _provider-upbound_ to declaratively create and manage the lifecycle of IAM resources and repositories:

- [Robots][robots] and their membership to teams
- [Teams][teams]
- [Repositories][repositories] and [permissions][permissions] on those repositories.

:::tip
This provider defines managed resources for control planes, their auth, and permissions. These resources are only applicable to customers who run in Upbound's **Legacy Spaces** control plane hosting environments. Other customers should use provider-kubernetes, explained below, to manage the lifecycle of control planes with Crossplane.
:::

### Provider-kubernetes

[Provider-kubernetes][provider-kubernetes-3] is a Crossplane provider that defines an [Object][object] resource. Use _Objects_ as general-purpose resources to wrap _any_ Kubernetes resource for Crossplane to manage.

Upbound [Space APIs][space-apis] are Kube-like APIs and support most Kubernetes-style API concepts.
You can use kubectl or any other Kubernetes-compatible tooling to interact with the API. This means you can use _provider-kubernetes_ to drive interactions with Space APIs.

:::warning
When interacting with a Cloud Space's API, the Kubernetes [watch][watch] feature **isn't implemented.** Argo CD requires _watch_ support to function as expected, meaning you can't point Argo directly at a Cloud Space until it's implemented.
:::

Use _provider-kubernetes_ to declaratively drive interactions with all [Space APIs][space-apis-1]. Wrap the desired API resource in an _Object_. See the example below for a control plane:

```yaml
apiVersion: kubernetes.crossplane.io/v1alpha2
kind: Object
metadata:
  name: my-controlplane
spec:
  forProvider:
    manifest:
      apiVersion: spaces.upbound.io/v1beta1
      kind: ControlPlane
      metadata:
        name: my-controlplane
        namespace: default
      spec:
        crossplane:
          autoUpgrade:
            channel: Rapid
```

[Control plane groups][control-plane-groups-2] are a special case because they technically map to an underlying Kubernetes namespace. You should create a `kind: Namespace` with the `spaces.upbound.io/group` label to create a control plane group in a Space. See the example below:

```yaml
apiVersion: kubernetes.crossplane.io/v1alpha2
kind: Object
metadata:
  name: group1
spec:
  forProvider:
    manifest:
      apiVersion: v1
      kind: Namespace
      metadata:
        name: group1
        labels:
          spaces.upbound.io/group: "true"
      spec: {}
```

### Configure auth for provider-kubernetes

Like any other Crossplane provider, _provider-kubernetes_ requires a valid [ProviderConfig][providerconfig] to authenticate with Upbound before interacting with its APIs. Follow the steps below to configure auth for a ProviderConfig on a control plane that you want to use to interact with Upbound resources.

1. Define an environment variable for the name of your Upbound org account. Use `up org list` to retrieve this value.
```shell
export UPBOUND_ACCOUNT="<your-org>"
```

2. Create a [personal access token][personal-access-token] and store it as an environment variable.
```shell
export UPBOUND_TOKEN="<your-token>"
```

3. Log on to Upbound.
```shell
up login
```

4. Create a kubeconfig for the desired Cloud Space instance you want to interact with.
```shell
export CONTROLPLANE_CONFIG=/tmp/controlplane-kubeconfig
KUBECONFIG=$CONTROLPLANE_CONFIG up ctx $UPBOUND_ACCOUNT/upbound-gcp-us-west-1 # Replace this path with whichever Cloud Space you want to communicate with.
```

5. On the control plane you want to use to interact with Upbound resources, create a secret containing the credentials:
```shell
kubectl -n crossplane-system create secret generic cluster-config --from-file=kubeconfig=$CONTROLPLANE_CONFIG
kubectl -n crossplane-system create secret generic upbound-credentials --from-literal=token=$UPBOUND_TOKEN
```

6. Create a ProviderConfig that references the credentials created in the prior step. Create this resource in your control plane:
```yaml
apiVersion: kubernetes.crossplane.io/v1alpha1
kind: ProviderConfig
metadata:
  name: default
spec:
  credentials:
    source: Secret
    secretRef:
      namespace: crossplane-system
      name: cluster-config
      key: kubeconfig
  identity:
    type: UpboundTokens
    source: Secret
    secretRef:
      name: upbound-credentials
      namespace: crossplane-system
      key: token
```

You can now create _Objects_ in the control plane which wrap Space APIs.
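After you apply an _Object_, you can confirm that provider-kubernetes reconciled it by checking the standard Crossplane `SYNCED` and `READY` conditions. A quick check, using the resource names from the examples above:

```bash
# List all Objects and inspect the one wrapping the control plane
kubectl get objects.kubernetes.crossplane.io
kubectl get objects.kubernetes.crossplane.io my-controlplane -o yaml
```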
[generate-a-kubeconfig]: /manuals/cli/concepts/contexts
[control-plane-groups]: /spaces/concepts/groups
[control-planes]: /spaces/concepts/control-planes
[upbound-iam-resources]: /manuals/platform/concepts/identity-management
[space-apis]: /reference/apis/spaces-api/v1_9
[space-apis-1]: /reference/apis/spaces-api/v1_9
[control-plane-groups-2]: /spaces/concepts/groups

[argo-cd]: https://argo-cd.readthedocs.io/en/stable/
[my-account-api-tokens]: https://accounts.upbound.io/settings/tokens
[auto-respect-rbac-for-the-argo-cd-controller]: https://argo-cd.readthedocs.io/en/stable/operator-manual/declarative-setup/#auto-respect-rbac-for-controller
[spec-writeconnectionsecrettoref]: /reference/apis/spaces-api/latest
[auto-respect-rbac-for-the-argo-cd-controller-1]: https://argo-cd.readthedocs.io/en/stable/operator-manual/declarative-setup/#auto-respect-rbac-for-controller
[provider-upbound]: https://marketplace.upbound.io/providers/upbound/provider-upbound
[provider-kubernetes]: https://marketplace.upbound.io/providers/upbound/provider-kubernetes
[provider-upbound-2]: https://marketplace.upbound.io/providers/upbound/provider-upbound
[robots]: https://marketplace.upbound.io/providers/upbound/provider-upbound/v0.8.0/resources/iam.upbound.io/Robot/v1alpha1
[teams]: https://marketplace.upbound.io/providers/upbound/provider-upbound/v0.8.0/resources/iam.upbound.io/Team/v1alpha1
[repositories]: https://marketplace.upbound.io/providers/upbound/provider-upbound/v0.8.0/resources/repository.upbound.io/Repository/v1alpha1
[permissions]: https://marketplace.upbound.io/providers/upbound/provider-upbound/v0.8.0/resources/repository.upbound.io/Permission/v1alpha1
[provider-kubernetes-3]: https://marketplace.upbound.io/providers/upbound/provider-kubernetes
[object]: https://marketplace.upbound.io/providers/upbound/provider-kubernetes/v0.17.0/resources/kubernetes.crossplane.io/Object/v1alpha2
[watch]: https://kubernetes.io/docs/reference/using-api/api-concepts/#watch-bookmarks
[providerconfig]: https://marketplace.upbound.io/providers/upbound/provider-kubernetes/v0.17.0/resources/kubernetes.crossplane.io/ProviderConfig/v1alpha1
[personal-access-token]: https://accounts.upbound.io/settings/tokens

diff --git a/spaces_versioned_docs/version-v1.10/howtos/control-plane-topologies.md b/spaces_versioned_docs/version-v1.10/howtos/control-plane-topologies.md
new file mode 100644
index 000000000..9020e5a41
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.10/howtos/control-plane-topologies.md
@@ -0,0 +1,566 @@
---
title: Control Plane Topologies
sidebar_position: 15
description: Configure scheduling of composites to remote control planes
---

:::info API Version Information
This guide is for the Control Plane Topology feature, which is in **private preview**. For customers with access to this feature, it applies to v1.12+.

For related API specifications, see the [Spaces API Reference](../../../reference/apis/spaces-api/).
:::

:::important
This feature is in private preview for select customers in Upbound Spaces. If you're interested in this deployment mode, please [contact us](https://www.upbound.io/support/contact).
:::

Upbound's _Control Plane Topology_ feature lets you build and deploy a platform
of multiple control planes. These control planes work together for a unified platform
experience.
With the _Topology_ feature, you can install resource APIs that are
reconciled by other control planes and configure the routing that occurs between
control planes. You can also build compositions that reference other resources
running on your control plane or elsewhere in Upbound.

This guide explains how to use Control Plane Topology APIs to install and
configure remote APIs and build powerful compositions that reference other resources.

## Benefits

The Control Plane Topology feature provides the following benefits:

* Decouple your platform architecture into independent offerings to improve your platform's software development lifecycle.
* Install composite APIs from Configurations as CRDs which are fulfilled and reconciled by other control planes.
* Route APIs to other control planes by configuring an _Environment_ resource, which defines a set of routable dimensions.

## How it works

Imagine the scenario where you want to let a user reference a subnet when creating a database instance. To your control plane, the `kind: Database` and `kind: Subnet` are independent resources. To you as the composition author, these resources have an important relationship. It may be that:

- you don't want your user to ever be able to create a database without specifying a subnet.
- you want to let them create a subnet when they create the database, if it doesn't exist.
- you want to allow them to reuse a subnet that got created elsewhere or gets shared by another user.

In each of these scenarios, you must resort to writing complex composition logic
to handle each case. The problem is compounded when the resource exists in a
context separate from the current control plane's context. Imagine a scenario
where one control plane manages database resources and a second control plane
manages networking resources. With the _Topology_ feature, you can offload these
concerns to Upbound machinery.

![Control Plane Topology feature arch](/img/topology-arch.png)

## Prerequisites

Enable the Control Plane Topology feature in the Space you plan to run your control plane in:

- Cloud Spaces: Not available yet
- Connected Spaces: A Space administrator must enable this feature
- Disconnected Spaces: A Space administrator must enable this feature

## Compose resources with _ReferencedObjects_

_ReferencedObject_ is a resource type available in an Upbound control plane that lets you reference other Kubernetes resources in Upbound.

:::tip
This feature is useful for composing resources that exist in a
remote context, like another control plane. You can also use
_ReferencedObjects_ to resolve references to any other Kubernetes object
in the current control plane context. This could be a secret, another Crossplane
resource, or more.
:::

### Declare the resource reference in your XRD

To compose a _ReferencedObject_, you should start by adding a resource reference
in your Composite Resource Definition (XRD). The convention for the resource
reference follows the shape shown below:

```yaml
<kind>Ref:
  type: object
  properties:
    apiVersion:
      type: string
      default: "<api-version>"
      enum: [ "<api-version>" ]
    kind:
      type: string
      default: "<kind>"
      enum: [ "<kind>" ]
    grants:
      type: array
      default: [ "Observe" ]
      items:
        type: string
        enum: [ "Observe", "Create", "Update", "Delete", "*" ]
    name:
      type: string
    namespace:
      type: string
  required:
  - name
```

The `<kind>Ref` field name should reflect the kind of resource you want to reference.
The `apiVersion` and `kind` should be the associated API version and kind of the resource you want to reference.

The `name` and `namespace` strings are inputs that let your users specify the resource instance.

#### Grants

The `grants` field is a special array that lets you give users the power to influence the behavior of the referenced resource. You can configure which of the available grants you let your user select and which one is the default. Similar in behavior to [Crossplane management policies][crossplane-management-policies], each grant value does the following:

- **Observe:** The composite may observe the state of the referenced resource.
- **Create:** The composite may create the referenced resource if it doesn't exist.
- **Update:** The composite may update the referenced resource.
- **Delete:** The composite may delete the referenced resource.
- **\*:** The composite has full control over the referenced resource.

Here are some examples that show how it looks in practice:
<details>
<summary>Show example for defining the reference to another composite resource</summary>

```yaml
apiVersion: apiextensions.crossplane.io/v1
kind: CompositeResourceDefinition
metadata:
  name: xsqlinstances.database.platform.upbound.io
spec:
  type: object
  properties:
    parameters:
      type: object
      properties:
        networkRef:
          type: object
          properties:
            apiVersion:
              type: string
              default: "networking.platform.upbound.io"
              enum: [ "networking.platform.upbound.io" ]
            grants:
              type: array
              default: [ "Observe" ]
              items:
                type: string
                enum: [ "Observe" ]
            kind:
              type: string
              default: "Network"
              enum: [ "Network" ]
            name:
              type: string
            namespace:
              type: string
          required:
          - name
```
</details>
<details>
<summary>Show example for defining the reference to a secret</summary>

```yaml
apiVersion: apiextensions.crossplane.io/v1
kind: CompositeResourceDefinition
metadata:
  name: xsqlinstances.database.platform.upbound.io
spec:
  type: object
  properties:
    parameters:
      type: object
      properties:
        secretRef:
          type: object
          properties:
            apiVersion:
              type: string
              default: "v1"
              enum: [ "v1" ]
            grants:
              type: array
              default: [ "Observe" ]
              items:
                type: string
                enum: [ "Observe", "Create", "Update", "Delete", "*" ]
            kind:
              type: string
              default: "Secret"
              enum: [ "Secret" ]
            name:
              type: string
            namespace:
              type: string
          required:
          - name
```
</details>
### Manually add the jsonPath

:::important
This step is a known limitation of the preview. We're working on tooling that
removes the need for authors to do this step.
:::

During the preview timeframe of this feature, you must add an annotation by hand
to the XRD. In your XRD's `metadata.annotations`, set the
`references.upbound.io/schema` annotation. It should be a JSON string in the
following format:

```json
{
  "apiVersion": "references.upbound.io/v1alpha1",
  "kind": "ReferenceSchema",
  "references": [
    {
      "jsonPath": ".spec.parameters.secretRef",
      "kinds": [
        {
          "apiVersion": "v1",
          "kind": "Secret"
        }
      ]
    }
  ]
}
```

Flatten this JSON into a string and set the annotation on your XRD. View the
example below for an illustration:
<details>
<summary>Show example setting the references.upbound.io/schema annotation</summary>

```yaml
apiVersion: apiextensions.crossplane.io/v1
kind: CompositeResourceDefinition
metadata:
  name: xthings.networking.acme.com
  annotations:
    references.upbound.io/schema: '{"apiVersion":"references.upbound.io/v1alpha1","kind":"ReferenceSchema","references":[{"jsonPath":".spec.secretRef","kinds":[{"apiVersion":"v1","kind":"Secret"}]},{"jsonPath":".spec.configMapRef","kinds":[{"apiVersion":"v1","kind":"ConfigMap"}]}]}'
```
</details>
<details>
<summary>Show example for setting multiple references in the references.upbound.io/schema annotation</summary>

```yaml
apiVersion: apiextensions.crossplane.io/v1
kind: CompositeResourceDefinition
metadata:
  name: xthings.networking.acme.com
  annotations:
    references.upbound.io/schema: '{"apiVersion":"references.upbound.io/v1alpha1","kind":"ReferenceSchema","references":[{"jsonPath":".spec.parameters.secretRef","kinds":[{"apiVersion":"v1","kind":"Secret"}]},{"jsonPath":".spec.parameters.configMapRef","kinds":[{"apiVersion":"v1","kind":"ConfigMap"}]}]}'
```
</details>

You can use a VSCode extension like [vscode-pretty-json][vscode-pretty-json] to make this task easier.

### Compose a _ReferencedObject_

To pair with the resource reference declared in your XRD, you must compose the referenced resource. Use the _ReferencedObject_ resource type to bring the resource into your composition. _ReferencedObject_ has the following schema:

```yaml
apiVersion: references.upbound.io/v1alpha1
kind: ReferencedObject
spec:
  managementPolicies:
  - Observe
  deletionPolicy: Orphan
  composite:
    apiVersion: <composite-api-version>
    kind: <composite-kind>
    name: <composite-name>
    jsonPath: .spec.parameters.secretRef
```

The `spec.composite.apiVersion` and `spec.composite.kind` should match the API version and kind of the `compositeTypeRef` declared in your composition. The `spec.composite.name` should be the name of the composite resource instance.

The `spec.composite.jsonPath` should be the path to the root of the resource ref you declared in your XRD.
<details>
<summary>Show example for composing a resource reference to a secret</summary>

```yaml
apiVersion: apiextensions.crossplane.io/v1
kind: Composition
metadata:
  name: demo-composition
spec:
  compositeTypeRef:
    apiVersion: networking.acme.com/v1alpha1
    kind: XThing
  mode: Pipeline
  pipeline:
  - step: patch-and-transform
    functionRef:
      name: crossplane-contrib-function-patch-and-transform
    input:
      apiVersion: pt.fn.crossplane.io/v1beta1
      kind: Resources
      resources:
      - name: secret-ref-object
        base:
          apiVersion: references.upbound.io/v1alpha1
          kind: ReferencedObject
          spec:
            managementPolicies:
            - Observe
            deletionPolicy: Orphan
            composite:
              apiVersion: networking.acme.com/v1alpha1
              kind: XThing
              name: TO_BE_PATCHED
              jsonPath: .spec.parameters.secretRef
        patches:
        - type: FromCompositeFieldPath
          fromFieldPath: metadata.name
          toFieldPath: spec.composite.name
```
</details>

By declaring a resource reference in your XRD, Upbound handles resolution of the desired resource.

## Deploy APIs

To configure routing resource requests between control planes, you need to deploy APIs in at least two control planes.

### Deploy into a service-level control plane

Package the APIs you build into a Configuration package and deploy it on a
control plane in an Upbound Space. In Upbound, it's common to refer to the
control plane where the Configuration package is deployed as a **service-level
control plane**. This control plane runs the controllers that process the API
requests and provision underlying resources. In a later section, you learn how
you can use _Topology_ features to [configure routing][configure-routing].

### Deploy as Remote APIs on a platform control plane

You should use the same package source as deployed in the **service-level
control planes**, but this time deploy the Configuration in a separate control
plane as a _RemoteConfiguration_. The _RemoteConfiguration_ installs Kubernetes
CustomResourceDefinitions for the APIs defined in the Configuration package, but
no controllers get deployed.

### Install a _RemoteConfiguration_

_RemoteConfiguration_ is a resource type available in Upbound managed control
planes that acts like a Crossplane [Configuration][configuration]
package. Unlike standard Crossplane Configurations, which install XRDs,
compositions, and functions into a desired control plane, _RemoteConfigurations_
install only the CRDs for claimable composite resource types.

#### Install directly

Install a _RemoteConfiguration_ by defining the following and applying it to
your control plane:

```yaml
apiVersion: pkg.upbound.io/v1alpha1
kind: RemoteConfiguration
metadata:
  name: <name>
spec:
  package: <package-reference>
```

#### Declare as a project dependency

You can declare _RemoteConfigurations_ as dependencies in your control plane's
[project file][project-file]. Use the up CLI to add the dependency, providing
the `--remote` flag:

```bash
up dep add <package-reference> --remote
```

This command adds a declaration in the `spec.apiDependencies` stanza of your
project's `upbound.yaml` as demonstrated below:

```yaml
apiVersion: meta.dev.upbound.io/v1alpha1
kind: Project
metadata:
  name: service-controlplane
spec:
  apiDependencies:
  - configuration: xpkg.upbound.io/upbound/remote-configuration
    version: '>=v0.0.0'
  dependsOn:
  - provider: xpkg.upbound.io/upbound/provider-kubernetes
    version: '>=v0.0.0'
```

Like a Configuration, a _RemoteConfigurationRevision_ gets created when the
package gets installed on a control plane. Unlike Configurations, XRDs and
compositions **don't** get installed by a _RemoteConfiguration_. Only the CRDs
for claimable composite types get installed, and Crossplane thereafter manages
their lifecycle. You can tell when a CRD got installed by a
_RemoteConfiguration_ because it has the `internal.scheduling.upbound.io/remote:
true` label:

```yaml
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: things.networking.acme.com
  labels:
    internal.scheduling.upbound.io/remote: "true"
```

## Use an _Environment_ to route resources

_Environment_ is a resource type available in Upbound control planes that works
in tandem with resources installed by _RemoteConfigurations_. _Environment_ is a
namespace-scoped resource that lets you configure how to route remote resources
to other control planes by a set of user-defined dimensions.
### Define a routing dimension

To establish a routing dimension between two control planes, you must do two
things:

1. Label the service control plane with the name and value of a dimension.
2. Configure an environment on another control plane with a dimension matching the field and value of the service control plane.

The example below demonstrates the creation of a service control plane with a
`region` dimension:

```yaml
apiVersion: spaces.upbound.io/v1beta1
kind: ControlPlane
metadata:
  labels:
    dimension.scheduling.upbound.io/region: "us-east-1"
  name: prod-1
  namespace: default
spec:
```

Upbound's Spaces controller keeps an inventory of all declared dimensions and
listens for control planes to route to them.

### Create an _Environment_

Next, create an _Environment_ on a separate control plane, referencing the
dimension from before. The example below demonstrates routing all remote
resource requests in the `default` namespace of the control plane based on a
single `region` dimension:

```yaml
apiVersion: scheduling.upbound.io/v1alpha1
kind: Environment
metadata:
  name: default
  namespace: default
spec:
  dimensions:
    region: us-east-1
```

You can specify whichever dimensions you want. The example below demonstrates
multiple dimensions:

```yaml
apiVersion: scheduling.upbound.io/v1alpha1
kind: Environment
metadata:
  name: default
  namespace: default
spec:
  dimensions:
    region: us-east-1
    env: prod
    offering: databases
```

In order for the routing controller to match, _all_ dimensions must match for a
given service control plane.

You can specify dimension overrides on a per-resource-group basis. This lets you
configure default routing rules for a given _Environment_ and override routing
on a per-offering basis.

```yaml
apiVersion: scheduling.upbound.io/v1alpha1
kind: Environment
metadata:
  name: default
  namespace: default
spec:
  dimensions:
    region: us-east-1
  resourceGroups:
  - name: database.platform.upbound.io # database
    dimensions:
      region: "us-east-1"
      env: "prod"
      offering: "databases"
  - name: networking.platform.upbound.io # networks
    dimensions:
      region: "us-east-1"
      env: "prod"
      offering: "networks"
```

### Confirm the configured route

After you create an _Environment_ on a control plane, the selected routes get
reported in the _Environment's_ `.status.resourceGroups`. This is illustrated
below:

```yaml
apiVersion: scheduling.upbound.io/v1alpha1
kind: Environment
metadata:
  name: default
...
status:
  resourceGroups:
  - name: database.platform.upbound.io # database
    proposed:
      controlPlane: ctp-1
      group: default
      space: upbound-gcp-us-central1
    dimensions:
      region: "us-east-1"
      env: "prod"
      offering: "databases"
```

If you don't see a response in `.status.resourceGroups`, this indicates that no
match was found or that an error occurred while establishing routing.

:::tip
There's no limit to the number of control planes you can route to. You can also
stack routing and form your own topology of control planes, with multiple layers
of routing.
:::

### Limitations

Routing from one control plane to another is currently scoped to control planes
that exist in a single Space. You can't route resource requests to control
planes that exist on a cross-Space boundary.
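To spot-check routing from the command line, you can read the proposed targets straight from the status fields shown above. A minimal sketch, assuming the _Environment_ named `default` in the `default` namespace:

```bash
# Print "<resource group> -> <proposed control plane>" for each route
kubectl get environments.scheduling.upbound.io default -n default \
  -o jsonpath='{range .status.resourceGroups[*]}{.name}{" -> "}{.proposed.controlPlane}{"\n"}{end}'
```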
+
+
+[project-file]: /manuals/cli/howtos/project
+[contact-us]: https://www.upbound.io/usage/support/contact
+[crossplane-management-policies]: https://docs.crossplane.io/latest/managed-resources/managed-resources/#managementpolicies
+[vscode-pretty-json]: https://marketplace.visualstudio.com/items?itemName=chrismeyers.vscode-pretty-json
+[configure-routing]: #use-an-environment-to-route-resources
+[configuration]: https://docs.crossplane.io/latest/packages/providers
diff --git a/spaces_versioned_docs/version-v1.10/howtos/ctp-connector.md b/spaces_versioned_docs/version-v1.10/howtos/ctp-connector.md
new file mode 100644
index 000000000..b2cc48c49
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.10/howtos/ctp-connector.md
@@ -0,0 +1,508 @@
+---
+title: Control Plane Connector
+weight: 80
+description: A guide for how to connect a Kubernetes app cluster to a control plane in Upbound using the Control Plane connector feature
+plan: "standard"
+---
+
+
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+). Control Plane Connector functionality is consistent across versions.
+
+For related API specifications and resources, see the [Spaces API Reference](../../../reference/apis/spaces-api/).
+:::
+
+Control Plane Connector connects arbitrary Kubernetes application clusters outside of
+Upbound Spaces to your control planes running in Upbound Spaces.
+This lets you interact with your control plane's API from the app cluster. The claim APIs and the namespaced XR APIs
+you define via CompositeResourceDefinitions (XRDs) in the control plane are available in
+your app cluster alongside Kubernetes workload APIs like Pod. Control Plane Connector
+enables the same experience as a locally installed Crossplane.
+
+![control plane connector operations flow](/img/ConnectorFlow.png)
+
+### Control Plane Connector operations
+
+Control Plane Connector leverages the [Kubernetes API AggregationLayer][kubernetes-api-aggregationlayer]
+to create an extension API server and serve the claim APIs and the namespaced XR APIs in the control plane. It
+discovers the claim APIs and the namespaced XR APIs available in the control plane and registers corresponding
+APIService resources on the app cluster. Those APIService resources refer to the
+extension API server of Control Plane Connector.
+
+The claim APIs and the namespaced XR APIs are available in your Kubernetes cluster, just like all native
+Kubernetes APIs.
+
+The Control Plane Connector processes every request targeting the claim APIs and the namespaced XR APIs and makes the
+relevant requests to the connected control plane.
+
+Only the connected control plane stores and processes all claims and namespaced XRs created in the app
+cluster, eliminating storage use on the application cluster. The control plane
+connector provisions a target namespace in the control plane for the app cluster and stores
+all claims and namespaced XRs in this target namespace.
+
+For managing the claims and namespaced XRs, the Control Plane Connector creates a unique identifier for a
+resource by combining input parameters from claims, including:
+- `metadata.name`
+- `metadata.namespace`
+- the cluster name
+
+
+It employs SHA-256 hashing to generate a hash value and then extracts the first
+16 characters of that hash. This ensures the resulting identifier remains within
+the 64-character limit in Kubernetes. 
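+
+As a rough sketch of this scheme (the exact join format is an assumption
+inferred from the example that follows):
+
+```bash
+# Hash the combined claim name, namespace, and cluster name, then keep the
+# first 16 hex characters; the connector prefixes the result with "claim-"
+# or "nxr-" to form the control plane-side name.
+echo -n "my-bucket-x-test-x-00000000-0000-0000-0000-000000000000" \
+  | sha256sum | cut -c1-16
+```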
+
+
+
+For instance, if a claim named `my-bucket` exists in the `test` namespace in
+`cluster-dev`, the system calculates the SHA-256 hash from
+`my-bucket-x-test-x-00000000-0000-0000-0000-000000000000` and takes the first 16
+characters. The control plane side then names the claim `claim-c603e518969b413e`.
+
+For namespaced XRs, the process is similar, only the prefix is different.
+The name becomes `nxr-c603e518969b413e`.
+
+
+### Installation
+
+
+
+
+
+Log in with the up CLI:
+
+```bash
+up login
+```
+
+Connect your app cluster to a namespace in an Upbound control plane with `up controlplane connector install `. This command creates a user token and installs the Control Plane Connector to your cluster. It's recommended you create a values file called `connector-values.yaml` and provide the following values. Select the tab according to which environment your control plane is running in.
+
+
+
+
+
+
+```yaml
+upbound:
+  # This is your org account in Upbound e.g. the name displayed after executing `up org list`
+  account: 
+  # This is a personal access token generated in the Upbound Console
+  token: 
+
+spaces:
+  # The host of the Space your control plane runs in. For Upbound's AWS Cloud Space, use upbound-aws-us-east-1.spaces.upbound.io
+  host: "upbound-gcp-us-west-1.spaces.upbound.io"
+  insecureSkipTLSVerify: true
+  controlPlane:
+    # The name of the control plane you want the Connector to attach to
+    name: 
+    # The control plane group the control plane resides in
+    group: 
+    # The namespace within the control plane to sync claims from the app cluster to. NOTE: This must be created before you install the connector.
+    claimNamespace: 
+```
+
+
+
+
+
+1. Create a [kubeconfig][kubeconfig] for the control plane. Update your Upbound context to the path for your desired control plane.
+```bash
+up login
+up ctx /upbound-gcp-us-central-1/default/your-control-plane
+up ctx . -f - > context.yaml
+```
+
+2. Write it to a secret in the cluster where you plan to
+install the Control Plane Connector.
+```bash
+kubectl create secret generic my-controlplane-kubeconfig --from-file=kubeconfig=context.yaml
+```
+
+3. Reference this secret in the
+`spaces.controlPlane.kubeconfigSecret` field below.
+
+```yaml
+spaces:
+  controlPlane:
+    # The namespace within the control plane to sync claims from the app cluster to. NOTE: This must be created before you install the connector.
+    claimNamespace: 
+    kubeconfigSecret:
+      name: my-controlplane-kubeconfig
+      key: kubeconfig
+```
+
+
+
+
+
+
+Provide the values file above when you run the CLI command:
+
+
+```bash
+up controlplane connector install my-control-plane my-app-ns-1 --file=connector-values.yaml
+```
+
+The Claim APIs and the namespaced XR APIs from your control plane are now visible in the cluster.
+You can verify this with `kubectl api-resources`.
+
+```bash
+kubectl api-resources
+```
+
+### Uninstall
+
+Disconnect an app cluster where you previously installed the Control Plane Connector by
+running the following:
+
+```bash
+up ctp connector uninstall 
+```
+
+This command uninstalls the Helm chart for the Control Plane Connector from an app
+cluster. It moves any claims in the app cluster into the control plane
+at the specified namespace.
+
+:::tip
+Make sure your kubeconfig's current context is pointed at the app cluster
+you want to uninstall Control Plane Connector from.
+:::
+
+
+
+
+It's recommended you create a values file called `connector-values.yaml` and
+provide the following values. 
Select the tab according to which environment your
+control plane is running in.
+
+
+
+
+
+
+```yaml
+upbound:
+  # This is your org account in Upbound e.g. the name displayed after executing `up org list`
+  account: 
+  # This is a personal access token generated in the Upbound Console
+  token: 
+
+spaces:
+  # Upbound GCP US-West-1     upbound-gcp-us-west-1.spaces.upbound.io
+  # Upbound AWS US-East-1     upbound-aws-us-east-1.spaces.upbound.io
+  # Upbound GCP US-Central-1  upbound-gcp-us-central-1.spaces.upbound.io
+  host: ""
+  insecureSkipTLSVerify: true
+  controlPlane:
+    # The name of the control plane you want the Connector to attach to
+    name: 
+    # The control plane group the control plane resides in
+    group: 
+    # The namespace within the control plane to sync claims from the app cluster to.
+    # NOTE: This must be created before you install the connector.
+    claimNamespace: 
+```
+
+
+
+
+Create a [kubeconfig][kubeconfig-1] for the
+control plane. Write it to a secret in the cluster where you plan to
+install the Control Plane Connector. Reference this secret in the
+`spaces.controlPlane.kubeconfigSecret` field below.
+
+```yaml
+spaces:
+  controlPlane:
+    # The namespace within the control plane to sync claims from the app cluster to. NOTE: This must be created before you install the connector.
+    claimNamespace: 
+    kubeconfigSecret:
+      name: my-controlplane-kubeconfig
+      key: kubeconfig
+```
+
+
+
+
+
+
+Provide the values file above when you `helm install` the Control Plane Connector:
+
+
+```bash
+helm install --wait mcp-connector oci://xpkg.upbound.io/spaces-artifacts/mcp-connector -n kube-system -f connector-values.yaml
+```
+:::tip
+Create an API token from the Upbound user account settings page in the console by following [these instructions][these-instructions].
+:::
+
+### Uninstall
+
+You can uninstall Control Plane Connector with Helm by running the following:
+
+```bash
+helm uninstall mcp-connector -n kube-system
+```
+
+
+
+
+
+### Example usage
+
+This example creates a control plane using [Configuration
+EKS][configuration-eks]. `KubernetesCluster` is
+available as a claim API in your control plane. The following is [an
+example][an-example]
+object you can create in your control plane.
+
+```yaml
+apiVersion: k8s.starter.org/v1alpha1
+kind: KubernetesCluster
+metadata:
+  name: my-cluster
+  namespace: default
+spec:
+  id: my-cluster
+  parameters:
+    nodes:
+      count: 3
+      size: small
+    services:
+      operators:
+        prometheus:
+          version: "34.5.1"
+  writeConnectionSecretToRef:
+    name: my-cluster-kubeconfig
+```
+
+After connecting your Kubernetes app cluster to the control plane, you
+can create the `KubernetesCluster` object in your app cluster. Although your
+local cluster has the object, the actual resource lives in your managed control
+plane inside Upbound.
+
+```bash {copy-lines="3"}
+# Applying the claim YAML above.
+# kubectl is set up to talk with your Kubernetes cluster.
+kubectl apply -f claim.yaml
+
+
+kubectl get claim -A
+NAME         SYNCED   READY   CONNECTION-SECRET       AGE
+my-cluster   True     True    my-cluster-kubeconfig   2m
+```
+
+Once Kubernetes creates the object, open the Upbound Console to see your object.
+
+![Claim by connector in console](/img/ClaimInConsole.png)
+
+You can interact with the object through your cluster just as if it
+lived in your cluster.
+
+### Migration to control planes
+
+This guide details the migration of a Crossplane installation to Upbound-managed
+control planes using the Control Plane Connector to manage claims on an application
+cluster. 
+
+![migration flow application cluster to control plane](/img/ConnectorMigration.png)
+
+#### Export all resources
+
+Before proceeding, ensure that you have set the correct kubecontext for your application
+cluster.
+
+```bash
+up controlplane migration export --pause-before-export --output=my-export.tar.gz --yes
+```
+
+This command performs the following:
+- Pauses all claim, composite, and managed resources before export.
+- Scans the control plane for resource types.
+- Exports Crossplane and native resources.
+- Archives the exported state into `my-export.tar.gz`.
+
+Example output:
+```bash
+Exporting control plane state...
+  ✓ Pausing all claim resources before export... 1 resources paused! ⏸️
+  ✓ Pausing all composite resources before export... 7 resources paused! ⏸️
+  ✓ Pausing all managed resources before export... 34 resources paused! ⏸️
+  ✓ Scanning control plane for types to export... 231 types found! 👀
+  ✓ Exporting 231 Crossplane resources...125 resources exported! 📤
+  ✓ Exporting 3 native resources...19 resources exported! 📤
+  ✓ Archiving exported state... archived to "my-export.tar.gz"! 📦
+
+Successfully exported control plane state!
+```
+
+#### Import all resources
+
+The target control plane, which serves as the destination for the Control Plane
+Connector, gets restored with the exported resources.
+
+
+Log into Upbound and select the correct context:
+
+```bash
+up login
+up ctx 
+up ctp create ctp-a
+```
+
+Output:
+```bash
+ctp-a created
+```
+
+Verify that the core Crossplane version on the new managed control plane matches
+the Crossplane version on the application cluster.
+
+Use the following command to import the resources:
+```bash
+up controlplane migration import -i my-export.tar.gz \
+  --unpause-after-import \
+  --mcp-connector-cluster-id=my-appcluster \
+  --mcp-connector-claim-namespace=my-appcluster
+```
+
+This command:
+- Restores base resources
+- Waits for XRDs and packages to establish
+- Imports claim and XR resources
+- Finalizes the import and resumes managed resources
+
+Note that `--mcp-connector-cluster-id` needs to be unique per application
+cluster, and `--mcp-connector-claim-namespace` is the namespace the system
+creates during the import.
+
+Example output:
+```bash
+Importing control plane state...
+  ✓ Reading state from the archive... Done! 👀
+  ✓ Importing base resources... 56 resources imported!📥
+  ✓ Waiting for XRDs... Established! ⏳
+  ✓ Waiting for Packages... Installed and Healthy! ⏳
+  ✓ Importing remaining resources... 88 resources imported! 📥
+  ✓ Finalizing import... Done! 🎉
+  ✓ Unpausing managed resources ... Done! ▶️
+
+Successfully imported control plane state!
+```
+
+#### Verify imported claims
+
+
+The Control Plane Connector renames all claims and adds additional labels to them. 
+
+```bash
+kubectl get claim -A
+```
+
+Example output:
+```bash
+NAMESPACE       NAME                                                         SYNCED   READY   CONNECTION-SECRET             AGE
+my-appcluster   cluster.aws.platformref.upbound.io/claim-e708ff592b974f51   True     True    platform-ref-aws-kubeconfig   3m17s
+```
+
+Inspect the labels:
+```bash
+kubectl get -n my-appcluster cluster.aws.platformref.upbound.io/claim-e708ff592b974f51 -o yaml | yq .metadata.labels
+```
+
+Example output:
+```bash
+mcp-connector.upbound.io/app-cluster: my-appcluster
+mcp-connector.upbound.io/app-namespace: default
+mcp-connector.upbound.io/app-resource-name: example
+```
+
+#### Cleanup the app cluster
+
+Remove all Crossplane-related resources from the application cluster, including:
+
+- Managed Resources
+- Claims
+- Compositions
+- XRDs
+- Packages (Functions, Configurations, Providers)
+- Crossplane and all associated CRDs
+
+
+#### Install Control Plane Connector
+
+
+Follow the preceding installation guide and configure the `connector-values.yaml`:
+
+```yaml
+# NOTE: clusterID needs to match --mcp-connector-cluster-id used in the import on the managed control plane
+clusterID: my-appcluster
+upbound:
+  account: 
+  token: 
+
+spaces:
+  host: ""
+  insecureSkipTLSVerify: true
+  controlPlane:
+    name: 
+    group: 
+    # NOTE: This is the --mcp-connector-claim-namespace used during the import to the control plane
+    claimNamespace: 
+```
+Once the Control Plane Connector installs, verify that resources exist in the application
+cluster:
+
+```bash
+kubectl api-resources | grep platform
+```
+
+Example output:
+```bash
+awslbcontrollers   aws.platform.upbound.io/v1alpha1       true   AWSLBController
+podidentities      aws.platform.upbound.io/v1alpha1       true   PodIdentity
+sqlinstances       aws.platform.upbound.io/v1alpha1       true   SQLInstance
+clusters           aws.platformref.upbound.io/v1alpha1    true   Cluster
+osss               observe.platform.upbound.io/v1alpha1   true   Oss
+apps               platform.upbound.io/v1alpha1           true   App
+```
+
+Confirm the claims from the control plane appear in the application cluster:
+
+```bash
+kubectl get claim -A
+```
+
+Example output:
+```bash
+NAMESPACE   NAME                                         SYNCED   READY   CONNECTION-SECRET             AGE
+default     cluster.aws.platformref.upbound.io/example   True     True    platform-ref-aws-kubeconfig   127m
+```
+
+With this guide, you migrated your Crossplane installation to
+Upbound managed control planes, with seamless integration with your
+application cluster through the Control Plane Connector.
+
+### Connect multiple app clusters to a control plane
+
+Claims are stored in a unique namespace in the Upbound control plane.
+Each connected cluster gets its own namespace in the control plane.
+
+![Multi-cluster architecture with control plane connector](/img/ConnectorMulticlusterArch.png)
+
+There's no limit on the number of clusters connected to a single control plane.
+Control plane operators can see all their infrastructure in a central control
+plane.
+
+Without using control planes and Control Plane Connector, users have to install
+Crossplane and providers on each cluster. Each cluster requires configuration for
+providers with necessary credentials. With a single control plane that multiple
+clusters connect to through Upbound tokens, you don't need to hand out any cloud
+credentials to the clusters. 
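+
+When connecting additional clusters, give each one its own identity. For
+example, a second cluster's values might differ only in these fields (the
+values below are illustrative):
+
+```yaml
+# Unique per app cluster; also matches --mcp-connector-cluster-id when
+# importing migrated state for that cluster.
+clusterID: cluster-dev-2
+spaces:
+  controlPlane:
+    name: my-control-plane        # the shared control plane
+    claimNamespace: cluster-dev-2 # one claim namespace per app cluster
+```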
+
+
+[kubeconfig]: /manuals/cli/howtos/context-config/#generate-a-kubeconfig-for-a-control-plane-in-a-group
+[kubeconfig-1]:/spaces/concepts/control-planes/#connect-directly-to-your-control-plane
+[these-instructions]:/manuals/console/#create-a-personal-access-token
+[kubernetes-api-aggregationlayer]: https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation/
+[configuration-eks]: https://github.com/upbound/configuration-eks
+[an-example]: https://github.com/upbound/configuration-eks/blob/9f86b6d/.up/examples/cluster.yaml
diff --git a/spaces_versioned_docs/version-v1.10/howtos/debugging-a-ctp.md b/spaces_versioned_docs/version-v1.10/howtos/debugging-a-ctp.md
new file mode 100644
index 000000000..521271e40
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.10/howtos/debugging-a-ctp.md
@@ -0,0 +1,128 @@
+---
+title: Debugging issues on a control plane
+sidebar_position: 70
+description: A guide for how to debug resources on a control plane running in Upbound.
+---
+
+This guide provides troubleshooting guidance for how to identify and fix issues on a control plane.
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+). Debugging methods are consistent across versions.
+
+For related control plane specifications, see the [Spaces API Reference](../../../reference/apis/spaces-api/).
+:::
+
+## Start from Upbound Console
+
+
+The Upbound [Console][console] has a built-in control plane explorer experience
+that surfaces status and events for the resources on your control plane. The
+explorer is claim-based. Resources in this view exist only if they exist in the
+reference chain originating from a claim. This view is a helpful starting point
+if you are attempting to debug an issue originating from a claim.
+
+:::tip
+If you directly create Crossplane Managed Resources (`MR`s) or Composite
+Resources (`XR`s), they won't render in the explorer.
+:::
+
+### Example
+
+The example below uses the control plane explorer view to inspect why a claim for an EKS Cluster isn't healthy.
+
+#### Check the health status of claims
+
+From the API type card, two claims branch from it: one shows a healthy green icon, while the other shows an unhealthy red icon.
+
+![Use control plane explorer view to see status of claims](/img/debug-overview.png)
+
+Select `More details` on the unhealthy claim card and Upbound shows details for the claim.
+
+![Use control plane explorer view to see details of claims](/img/debug-claim-more-details.png)
+
+Looking at the three events for this claim:
+
+- **ConfigureCompositeResource**: this event indicates Upbound created the claimed Composite Resource (`XR`).
+
+- **BindCompositeResource**: this indicates the Composite Resource (`XR`) that's being "claimed" isn't ready yet. A claim doesn't show `HEALTHY` until the XR it references is ready.
+
+- **ConfigureCompositeResource**: the error saying, `cannot apply composite resource...the object has been modified; please apply your changes to the latest version and try again` is a generic event from Crossplane resources. It's safe to ignore this error.
+
+Next, look at the `status` field of the rendered YAML for the resource.
+
+![Use control plane explorer view to see status details of claims](/img/debug-claim-status.png)
+
+The status reports a similar message as the event stream: this claim is waiting for a Composite Resource to be ready. Based on this, investigate the Composite Resource referenced by this claim next. 
+
+#### Check the health status of the Composite Resource
+
+
+The control plane explorer only shows the claim cards by default. Selecting the claim card renders the rest of the Crossplane resource tree associated with the selected claim.
+
+
+The previous claim expands into this screenshot:
+
+![Use control plane explorer view to expand tree of claim](/img/debug-claim-expansion.png)
+
+This renders the XR referenced by the claim (along with all its references). You can see the XR is showing the same unhealthy status icon in its card. Notice the XR itself has two nested XRs. One of the nested XRs shows a healthy green icon on its card, while the other shows an unhealthy red icon. Like the claim, a Composite Resource doesn't show healthy until all referenced resources also show healthy.
+
+#### Inspecting Managed Resources
+
+Selecting `more details` on one of the unhealthy Managed Resources shows the following:
+
+![Use control plane explorer view to view events for an MR](/img/debug-mr-event.png)
+
+This event reveals it's unhealthy because it's waiting on a reference to another Managed Resource. Searching the rendered YAML of the MR for this resource shows the following:
+
+![Use control plane explorer view to view status for an MR](/img/debug-mr-status.png)
+
+The rendered YAML shows this MR is referencing a sibling MR that shares the same controller. The same parent XR created both of these managed resources. Inspect the sibling MR to see what its status is.
+
+![Use control plane explorer view to view status for a sibling MR](/img/debug-mr-dependency-status.png)
+
+The sibling MR event stream shows the Provider processed the resource create request. Ignore the `CannotInitializeManagedResource` event. EKS clusters can take 15 minutes or more to provision in AWS. In this case there's nothing to fix: all the resources are still provisioning. Waiting longer and then looking at the control plane explorer again shows all resources as healthy. For reference, below is an example status field for a resource that's healthy and provisioned.
+
+```yaml
+...
+status:
+  atProvider:
+    id: team-b-app-cluster-bhwfb-hwtgs-20230403135452772300000008
+  conditions:
+  - lastTransitionTime: '2023-04-03T13:56:35Z'
+    reason: Available
+    status: 'True'
+    type: Ready
+  - lastTransitionTime: '2023-04-03T13:54:02Z'
+    reason: ReconcileSuccess
+    status: 'True'
+    type: Synced
+  - lastTransitionTime: '2023-04-03T13:54:53Z'
+    reason: Success
+    status: 'True'
+    type: LastAsyncOperation
+  - lastTransitionTime: '2023-04-03T13:54:53Z'
+    reason: Finished
+    status: 'True'
+    type: AsyncOperation
+```
+
+### Control plane explorer limitations
+
+The control plane explorer view is currently designed around claims. The control plane explorer doesn't inspect other Crossplane resources. To inspect other Crossplane resources, use the `up` CLI.
+
+Some examples of Crossplane resources that require the `up` CLI:
+
+- Managed Resources that aren't associated with a claim
+- Composite Resources that aren't associated with a claim
+- The status of _deleting_ resources
+- ProviderConfigs
+- Provider events
+
+## Use direct CLI access
+
+If you prefer to use a terminal instead of a GUI, Upbound supports direct access to the API server of the control plane. Use [`up ctx`][up-ctx] to connect directly to your control plane. 
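+
+A typical terminal flow looks like the following (the context path and
+resource names are illustrative):
+
+```bash
+# Point kubectl at the control plane.
+up ctx acme/upbound-gcp-us-west-1/default/my-control-plane
+
+# Inspect resources the explorer doesn't surface.
+kubectl get managed
+kubectl get providerconfigs
+kubectl get events --field-selector type=Warning
+```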
+
+
+[console]: /manuals/console/upbound-console
+[up-ctx]: /reference/cli-reference
diff --git a/spaces_versioned_docs/version-v1.10/howtos/managed-service.md b/spaces_versioned_docs/version-v1.10/howtos/managed-service.md
new file mode 100644
index 000000000..40b983a76
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.10/howtos/managed-service.md
@@ -0,0 +1,23 @@
+---
+title: Managed Upbound control planes
+description: "Learn about the managed service capabilities of a Space"
+sidebar_position: 10
+---
+
+Control planes in Upbound are fully isolated [Upbound Crossplane][uxp] instances
+that Upbound manages for you. This means Upbound handles:
+
+- the lifecycle of the underlying infrastructure (compute, memory, and storage) required to power your instance.
+- scaling of the infrastructure.
+- the maintenance of the core Upbound Crossplane components that make up a control plane.
+
+This lets users focus on building their APIs and operating their control planes,
+while Upbound handles the rest. Each control plane has its own dedicated API
+server connecting users to their control plane.
+
+## Learn about Upbound control planes
+
+Read the [concept][ctp-concept] documentation to learn about Upbound control planes.
+
+[uxp]: /manuals/uxp/overview
+[ctp-concept]: /spaces/concepts/control-planes
\ No newline at end of file
diff --git a/spaces_versioned_docs/version-v1.10/howtos/mcp-connector-guide.md b/spaces_versioned_docs/version-v1.10/howtos/mcp-connector-guide.md
new file mode 100644
index 000000000..8a3866d07
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.10/howtos/mcp-connector-guide.md
@@ -0,0 +1,169 @@
+---
+title: Consume control plane APIs in an app cluster with control plane connector
+sidebar_position: 99
+description: A tutorial to connect a Kubernetes app cluster to a control plane in an Upbound self-hosted Space using the control plane connector
+---
+
+In this tutorial, you learn how to configure a Kubernetes app cluster to communicate with a control plane in an Upbound self-hosted Space.
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Control plane connector functionality is consistent across versions.
+
+For related API specifications and resources, see the [Spaces API Reference](../../../../../reference/apis/spaces-api/).
+:::
+
+The [control plane connector][control-plane-connector] bridges your Kubernetes application clusters, running outside of Upbound, to your control planes running in Upbound. This allows you to interact with your control plane's API right from the app cluster. The claim APIs you define via `CompositeResourceDefinitions` are available alongside Kubernetes workload APIs like `Pod`. In effect, control plane connector provides the same experience as a locally installed Crossplane.
+
+## Prerequisites
+
+To complete this tutorial, you need the following:
+
+- Have already deployed an Upbound Space.
+- Have already deployed a Kubernetes cluster (referred to as `app cluster`).
+
+## Create a control plane
+
+Create a new control plane in your self-hosted Space. Run the following command in a terminal:
+
+```bash
+up ctp create my-control-plane
+```
+
+Once the control plane is ready, connect to it.
+
+```bash
+up ctp connect my-control-plane
+```
+
+For convenience, install an Upbound [platform reference Configuration][platform-reference-configuration] from the Marketplace. For production scenarios, replace this with your own Crossplane Configurations or compositions. 
+
+```bash
+up ctp configuration install xpkg.upbound.io/upbound/platform-ref-aws:v1.4.0
+```
+
+## Fetch the control plane's connection details
+
+Run the following command in a terminal:
+
+```shell
+kubectl get secret kubeconfig-my-control-plane -n default -o jsonpath='{.data.kubeconfig}' | base64 -d > kubeconfig-my-control-plane.yaml
+```
+
+This command saves the kubeconfig for the control plane to a file in your working directory.
+
+## Install control plane connector in your app cluster
+
+Switch contexts to your Kubernetes app cluster. To install the control plane connector in your app cluster, you must first provide a secret containing your control plane's kubeconfig at install-time. Run the following command in a terminal:
+
+:::important
+Make sure the following commands are executed against your **app cluster**, not your control plane.
+:::
+
+```bash
+kubectl create secret generic kubeconfig-my-control-plane -n kube-system --from-file=kubeconfig=./kubeconfig-my-control-plane.yaml
+```
+
+Set the environment variable below to configure which namespace _in your control plane_ you wish to sync the app cluster's claims to.
+
+```shell
+export CONNECTOR_CTP_NAMESPACE=app-cluster-1
+```
+
+Install the Control Plane Connector in the app cluster and point it to your control plane.
+
+```bash
+up ctp connector install my-control-plane $CONNECTOR_CTP_NAMESPACE --control-plane-secret=kubeconfig-my-control-plane
+```
+
+## Inspect your app cluster
+
+After you install Control Plane Connector in the app cluster, you can see the APIs that live on the control plane. You can confirm this is the case by running the following command on your app cluster:
+
+```bash {copy-lines="1"}
+kubectl api-resources | grep upbound
+
+# The output should look like this:
+sqlinstances   aws.platform.upbound.io/v1alpha1       true   SQLInstance
+clusters       aws.platformref.upbound.io/v1alpha1    true   Cluster
+osss           observe.platform.upbound.io/v1alpha1   true   Oss
+apps           platform.upbound.io/v1alpha1           true   App
+```
+
+## Claim a database instance on your app cluster
+
+Create a database claim against the `SQLInstance` API and observe resources get created by your control plane. Apply a claim like the following to your app cluster (the parameter values below are illustrative; consult your Configuration's XRD for the full schema):
+
+```yaml
+cat <<EOF | kubectl apply -f -
+apiVersion: aws.platform.upbound.io/v1alpha1
+kind: SQLInstance
+metadata:
+  name: my-db
+  namespace: default
+spec:
+  parameters:
+    engine: postgres
+    engineVersion: "13"
+    storageGB: 20
+EOF
+```
+
+1. Use the [export command][cli-command] to export the state of your existing Crossplane control plane:
+
+   ```bash
+   up controlplane migration export --output 
+   ```
+
+   The command exports your existing Crossplane control plane configuration/state into an archive file.
+
+:::note
+By default, the export command doesn't make any changes to your existing Crossplane control plane state, leaving it intact. Use the `--pause-before-export` flag to pause the reconciliation on managed resources before exporting the archive file.
+
+This safety mechanism ensures the control plane you migrate state to doesn't assume ownership of resources before you're ready.
+:::
+
+2. Use the control plane [create command][create-command] to create a managed
+control plane in Upbound:
+
+   ```bash
+   up controlplane create my-controlplane
+   ```
+
+3. Use [`up ctx`][up-ctx] to connect to the control plane created in the previous step:
+
+   ```bash
+   up ctx "///my-controlplane"
+   ```
+
+   The command configures your local `kubeconfig` to connect to the control plane.
+
+4. Run the following command to import the archive file into the control plane:
+
+   ```bash
+   up controlplane migration import --input 
+   ```
+
+:::note
+By default, the import command leaves the control plane in an inactive state by pausing the reconciliation on managed
+resources. This pause gives you an opportunity to review the imported configuration/state before activating the control plane. 
+Use the `--unpause-after-import` flag to change the default behavior and activate the control plane immediately after
+importing the archive file.
+:::
+
+
+
+5. Review and validate the imported configuration/state. When you are ready, activate your managed
+   control plane by running the following command:
+
+   ```bash
+   kubectl annotate managed --all crossplane.io/paused-
+   ```
+
+   At this point, you can delete the source Crossplane control plane.
+
+## CLI options
+
+### Filtering
+
+The migration tool captures the state of a control plane. The only filtering
+supported is by Kubernetes namespace and Kubernetes resource type.
+
+You can exclude namespaces using the `--exclude-namespaces` CLI option. This prevents the CLI from including unwanted resources in the export.
+
+```bash
+--exclude-namespaces=kube-system,kube-public,kube-node-lease,local-path-storage,...
+
+# A list of specific namespaces to exclude from the export. Defaults to 'kube-system', 'kube-public','kube-node-lease', and 'local-path-storage'.
+```
+
+You can exclude Kubernetes resource types by using the `--exclude-resources` CLI option:
+
+```bash
+--exclude-resources=EXCLUDE-RESOURCES,...
+
+# A list of resource types to exclude from the export in "resource.group" format. No resources are excluded by default.
+```
+
+For example, here's how to exclude the CRDs installed by Crossplane functions (since they aren't needed):
+
+```bash
+up controlplane migration export \
+  --exclude-resources=gotemplates.gotemplating.fn.crossplane.io,kclinputs.template.fn.crossplane.io
+```
+
+:::warning
+You must specify resource names in lowercase "resource.group" format (for example, `gotemplates.gotemplating.fn.crossplane.io`). Using only the resource kind (for example, `GoTemplate`) isn't supported.
+:::
+
+
+:::tip Function Input CRDs
+
+Exclude function input CRDs (`inputs.template.fn.crossplane.io`, `resources.pt.fn.crossplane.io`, `gotemplates.gotemplating.fn.crossplane.io`, `kclinputs.template.fn.crossplane.io`) from migration exports. Upbound automatically recreates these resources during import. Function input CRDs typically have owner references to function packages and may have restricted RBAC access. Upbound installs these CRDs during the import when function packages are restored.
+
+:::
+
+
+After export, you can also edit the archive file to include only the necessary resources.
+
+### Export non-Crossplane resources
+
+Use the `--include-extra-resources=` CLI option to select other CRD types to include in the export.
+
+### Set the kubecontext
+
+Currently `--context` isn't supported in the migration CLI. You can use the `--kubeconfig` CLI option to point at a kubeconfig file that's set to the correct context. For example:
+
+```bash
+up controlplane migration export --kubeconfig 
+```
+
+Use this in tandem with `up ctx` to export a control plane's kubeconfig:
+
+```bash
+up ctx --kubeconfig ~/.kube/config
+
+# To list the current context
+up ctx . --kubeconfig ~/.kube/config
+```
+
+## Export archive
+
+The migration CLI exports an archive upon successful completion. Below is an example export of a control plane that excludes several CRD types and skips the confirmation prompt. A file gets written to the working directory, unless you select another output file:
+
+
+<details>
+<summary>View the example export</summary>
+
+```bash
+$ up controlplane migration export --exclude-resources=gotemplates.gotemplating.fn.crossplane.io,kclinputs.template.fn.crossplane.io --yes
+Exporting control plane state...
+✓ Scanning control plane for types to export... 121 types found! 👀
+✓ Exporting 121 Crossplane resources...60 resources exported! 📤
+✓ Exporting 3 native resources...8 resources exported! 📤
+✓ Archiving exported state... archived to "xp-state.tar.gz"! 📦
+```
+
+</details>
+
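+
+To inspect the archive before importing it anywhere, standard tar commands
+work:
+
+```bash
+# List the archived manifests without extracting.
+tar -tzf xp-state.tar.gz | head
+
+# Extract into a scratch directory for review.
+mkdir -p xp-state && tar -xzf xp-state.tar.gz -C xp-state
+```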
+
+
+When an export occurs, a file named `xp-state.tar.gz` gets created in the working directory by default. Unzip the file and you'll find the contents of the export are all plain-text YAML files.
+
+- Each CRD (for example `vpcs.ec2.aws.upbound.io`) gets its own directory, which contains:
+  - A `metadata.yaml` file that contains the Kubernetes object metadata and a list of the Kubernetes categories the resource belongs to
+  - A `cluster` directory that contains YAML manifests for all resources provisioned using the CRD
+
+Sample contents for a cluster with a single `XNetwork` Composite from
+[configuration-aws-network][configuration-aws-network] are shown below:
+
+
+
+<details>
+<summary>View the example cluster content</summary>
+
+```bash
+├── compositionrevisions.apiextensions.crossplane.io
+│   ├── cluster
+│   │   ├── kcl.xnetworks.aws.platform.upbound.io-4ca6a8a.yaml
+│   │   └── xnetworks.aws.platform.upbound.io-9859a34.yaml
+│   └── metadata.yaml
+├── configurations.pkg.crossplane.io
+│   ├── cluster
+│   │   └── configuration-aws-network.yaml
+│   └── metadata.yaml
+├── deploymentruntimeconfigs.pkg.crossplane.io
+│   ├── cluster
+│   │   └── default.yaml
+│   └── metadata.yaml
+├── export.yaml
+├── functions.pkg.crossplane.io
+│   ├── cluster
+│   │   ├── crossplane-contrib-function-auto-ready.yaml
+│   │   ├── crossplane-contrib-function-go-templating.yaml
+│   │   └── crossplane-contrib-function-kcl.yaml
+│   └── metadata.yaml
+├── internetgateways.ec2.aws.upbound.io
+│   ├── cluster
+│   │   └── borrelli-backup-test-xgl4q.yaml
+│   └── metadata.yaml
+├── mainroutetableassociations.ec2.aws.upbound.io
+│   ├── cluster
+│   │   └── borrelli-backup-test-t2qh7.yaml
+│   └── metadata.yaml
+├── namespaces
+│   └── cluster
+│       ├── crossplane-system.yaml
+│       ├── default.yaml
+│       └── upbound-system.yaml
+├── providerconfigs.aws.upbound.io
+│   ├── cluster
+│   │   └── default.yaml
+│   └── metadata.yaml
+├── providerconfigusages.aws.upbound.io
+│   ├── cluster
+│   │   ├── 0a2a3ec6-ef13-45f9-9cf0-63af7f4a6b6b.yaml
+...redacted
+│   │   └── f7092b0f-3a78-4bfe-82c8-57e5085a9b11.yaml
+│   └── metadata.yaml
+├── providers.pkg.crossplane.io
+│   ├── cluster
+│   │   ├── upbound-provider-aws-ec2.yaml
+│   │   └── upbound-provider-family-aws.yaml
+│   └── metadata.yaml
+├── routes.ec2.aws.upbound.io
+│   ├── cluster
+│   │   └── borrelli-backup-test-dt9cj.yaml
+│   └── metadata.yaml
+├── routetableassociations.ec2.aws.upbound.io
+│   ├── cluster
+│   │   ├── borrelli-backup-test-mr2sd.yaml
+│   │   ├── borrelli-backup-test-ngq5h.yaml
+│   │   ├── borrelli-backup-test-nrkgg.yaml
+│   │   └── borrelli-backup-test-wq752.yaml
+│   └── metadata.yaml
+├── routetables.ec2.aws.upbound.io
+│   ├── cluster
+│   │   └── borrelli-backup-test-dv4mb.yaml
+│   └── metadata.yaml
+├── secrets
+│   └── namespaces
+│       ├── crossplane-system
+│       │   ├── cert-token-signing-gateway-pub.yaml
+│       │   ├── mxp-hostcluster-certs.yaml
+│       │   ├── package-pull-secret.yaml
+│       │   └── xgql-tls.yaml
+│       └── upbound-system
+│           └── aws-creds.yaml
+├── securitygrouprules.ec2.aws.upbound.io
+│   ├── cluster
+│   │   ├── borrelli-backup-test-472f4.yaml
+│   │   └── borrelli-backup-test-qftmw.yaml
+│   └── metadata.yaml
+├── securitygroups.ec2.aws.upbound.io
+│   ├── cluster
+│   │   └── borrelli-backup-test-w5jch.yaml
+│   └── metadata.yaml
+├── storeconfigs.secrets.crossplane.io
+│   ├── cluster
+│   │   └── default.yaml
+│   └── metadata.yaml
+├── subnets.ec2.aws.upbound.io
+│   ├── cluster
+│   │   ├── borrelli-backup-test-8btj6.yaml
+│   │   ├── borrelli-backup-test-gbmrm.yaml
+│   │   ├── borrelli-backup-test-m7kh7.yaml
+│   │   └── borrelli-backup-test-nttt5.yaml
+│   └── metadata.yaml
+├── vpcs.ec2.aws.upbound.io
+│   ├── cluster
+│   │   └── borrelli-backup-test-7hwgh.yaml
+│   └── metadata.yaml
+└── xnetworks.aws.platform.upbound.io
+    ├── cluster
+    │   └── borrelli-backup-test.yaml
+    └── metadata.yaml
+
+43 directories, 87 files
+```
+
+</details>
+
+ + +The `export.yaml` file contains metadata about the export, including the configuration of the export, Crossplane information, and what's included in the export bundle. + +
+
+<details>
+<summary>View the export</summary>
+
+```yaml
+version: v1alpha1
+exportedAt: 2025-01-06T17:39:53.173222Z
+options:
+  excludedNamespaces:
+  - kube-system
+  - kube-public
+  - kube-node-lease
+  - local-path-storage
+  includedResources:
+  - namespaces
+  - configmaps
+  - secrets
+  excludedResources:
+  - gotemplates.gotemplating.fn.crossplane.io
+  - kclinputs.template.fn.crossplane.io
+crossplane:
+  distribution: universal-crossplane
+  namespace: crossplane-system
+  version: 1.17.3-up.1
+  featureFlags:
+  - --enable-provider-identity
+  - --enable-environment-configs
+  - --enable-composition-functions
+  - --enable-usages
+stats:
+  total: 68
+  nativeResources:
+    configmaps: 0
+    namespaces: 3
+    secrets: 5
+  customResources:
+    amicopies.ec2.aws.upbound.io: 0
+    amilaunchpermissions.ec2.aws.upbound.io: 0
+    amis.ec2.aws.upbound.io: 0
+    availabilityzonegroups.ec2.aws.upbound.io: 0
+    capacityreservations.ec2.aws.upbound.io: 0
+    carriergateways.ec2.aws.upbound.io: 0
+    compositeresourcedefinitions.apiextensions.crossplane.io: 0
+    compositionrevisions.apiextensions.crossplane.io: 2
+    compositions.apiextensions.crossplane.io: 0
+    configurationrevisions.pkg.crossplane.io: 0
+    configurations.pkg.crossplane.io: 1
+...redacted
+```
+
+</details>
+
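+
+Since `export.yaml` is plain YAML, you can check what got exported straight
+from the archive, for example with `yq` (assuming it's installed):
+
+```bash
+# Extract only the export manifest and read the resource counts.
+tar -xzf xp-state.tar.gz export.yaml
+yq '.stats' export.yaml
+```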
+
+### Skipped resources
+
+In addition to the resources excluded via CLI options, the following resources aren't
+included in the backup:
+
+- The `kube-root-ca.crt` ConfigMap, since this is cluster-specific
+- Resources directly managed via Helm (resources from ArgoCD's Helm implementation, which templates
+Helm resources and then applies them, do get included in the backup). The migration creates the exclusion list by looking for:
+  - Any Resource with the label `"app.kubernetes.io/managed-by" == "Helm"`
+  - Kubernetes Secrets with the label prefix `helm.sh/release`. For example, `helm.sh/release.v1`
+- Resources installed via a Crossplane package. These have an `ownerReference` with
+a prefix `pkg.crossplane.io`. The expectation is that during import, the Crossplane Package Manager bears responsibility for installing the resources.
+- Crossplane Locks: `Lock.pkg.crossplane.io` resources aren't included in the
+export.
+
+## Restore
+
+The following is an example of a successful import run. At the end of the import, all Managed Resources are in a paused state.
+
+
+<details>
+<summary>View the migration import</summary>
+
+```bash
+$ up controlplane migration import
+Importing control plane state...
+✓ Reading state from the archive... Done! 👀
+✓ Importing base resources... 18 resources imported! 📥
+✓ Waiting for XRDs... Established! ⏳
+✓ Waiting for Packages... Installed and Healthy! ⏳
+✓ Importing remaining resources... 50 resources imported! 📥
+✓ Finalizing import... Done! 🎉
+```
+
+</details>
+
+
+Your scenario may involve migrating resources that already exist on the platform through other automation. When executing an import in these circumstances, the importer applies the new manifests to the cluster. If a resource already exists, the restore sets its fields to the values in the backup.
+
+The importer restores all resources in the export archive. Managed Resources get imported with the `crossplane.io/paused: "true"` annotation set. Use the `--unpause-after-import` CLI argument to automatically un-pause resources that got
+paused during the export, or remove the annotation manually.
+
+### Restore order
+
+The importer restores resources based on their Kubernetes types. The restore order doesn't account for parent/child relationships.
+
+Because Crossplane Composites create new Managed Resources if not present on the cluster, all
+Claims, Composites and Managed Resources get imported in a paused state. You can un-pause them after the restore completes.
+
+The first step of import is installing Base Resources into the cluster. These resources (such as
+packages and XRDs) must be ready before proceeding with the import.
+Base Resources are:
+
+- Kubernetes Resources
+  - ConfigMaps
+  - Namespaces
+  - Secrets
+- Crossplane Resources
+  - ControllerConfigs: `controllerconfigs.pkg.crossplane.io`
+  - DeploymentRuntimeConfigs: `deploymentruntimeconfigs.pkg.crossplane.io`
+  - StoreConfigs: `storeconfigs.secrets.crossplane.io`
+- Crossplane Packages
+  - Providers: `providers.pkg.crossplane.io`
+  - Functions: `functions.pkg.crossplane.io`
+  - Configurations: `configurations.pkg.crossplane.io`
+
+Restore waits for the base resources to be `Ready` before moving on to the next step. Next, restore walks through the archive and restores all the manifests present.
+
+During import, the `crossplane.io/paused` annotation gets added to Managed Resources, Claims
+and Composites.
+
+To manually un-pause managed resources after an import, remove the annotation by running:
+
+```bash
+kubectl annotate managed --all crossplane.io/paused-
+```
+
+You can also run import again with the `--unpause-after-import` flag to remove the annotations.
+
+```bash
+up controlplane migration import --unpause-after-import
+```
+
+### Restoring resource status
+
+The importer applies the status of all resources during import, determining whether a status field is defined based on the stored CRD version.
+
+
+[cli-command]: /reference/cli-reference
+[up-cli]: /reference/cli-reference
+[up-cli-1]: /manuals/cli/overview
+[create-command]: /reference/cli-reference
+[up-ctx]: /reference/cli-reference
+[configuration-aws-network]: https://marketplace.upbound.io/configurations/upbound/configuration-aws-network
diff --git a/spaces_versioned_docs/version-v1.10/howtos/observability.md b/spaces_versioned_docs/version-v1.10/howtos/observability.md
new file mode 100644
index 000000000..8fc5c3278
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.10/howtos/observability.md
@@ -0,0 +1,395 @@
+---
+title: Observability
+sidebar_position: 50
+description: A guide for how to use the integrated observability pipeline feature
+  in a Space.
+plan: "enterprise"
+---
+
+
+
+This guide explains how to configure observability in Upbound Spaces. Upbound
+provides integrated observability features built on
+[OpenTelemetry][opentelemetry] to collect, process, and export logs, metrics,
+and traces.
+
+Upbound Spaces offers two levels of observability:
+
+1. 
**Space-level observability** - Observes the cluster infrastructure where Spaces software is installed (Self-Hosted only)
+2. **Control plane observability** - Observes workloads running within individual control planes
+
+
+
+
+
+:::info API Version Information & Version Selector
+This guide applies to all supported versions (v1.9-v1.15+). Observability features evolved:
+
+- **v1.9-v1.12**: SharedTelemetryConfig available (alpha until v1.11)
+- **v1.11+**: Observability promoted to stable with logs export support
+- **v1.14+**: Both space-level and control-plane observability GA
+
+**View API Reference for Your Version**:
+| Version | Status | Link |
+|---------|--------|------|
+| v1.15 | Latest | [Spaces API v1.15](../../../next/reference/apis/spaces-api/) |
+| v1.14 | Supported | [Spaces API v1.14](../../../1.14/reference/apis/spaces-api/) |
+| v1.13 | Supported | [Spaces API v1.13](../../../1.13/reference/apis/spaces-api/) |
+| v1.12 | Supported | [Spaces API v1.12](../../../1.12/reference/apis/spaces-api/) |
+| v1.11 | Supported | [Spaces API v1.11](../../../1.11/reference/apis/spaces-api/) |
+| v1.10 | Limited | [Spaces API v1.10](../../../1.10/reference/apis/spaces-api/) |
+| v1.9 | EOL | [Spaces API v1.9](../../../1.9/reference/apis/spaces-api/) |
+
+Feature availability by version is summarized in the table above.
+:::
+
+:::important
+**Space-level observability** (available since v1.6.0, GA in v1.14.0):
+- Disabled by default
+- Requires manual enablement and configuration
+- Self-Hosted Spaces only
+
+**Control plane observability** (available since v1.13.0, GA in v1.14.0):
+- Enabled by default
+- No additional configuration required
+:::
+
+
+
+
+## Prerequisites
+
+
+**Control plane observability** is enabled by default. No additional setup is
+required.
+
+
+
+### Self-hosted Spaces
+
+1. **Enable the observability feature** when installing Spaces:
+   ```bash
+   up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
+     ...
+     --set "observability.enabled=true"
+   ```
+
+   Set `features.alpha.observability.enabled=true` instead if using a Spaces version
+   before `v1.14.0`.
+
+2. **Install OpenTelemetry Operator** (required for Space-level observability):
+   ```bash
+   kubectl apply -f https://github.com/open-telemetry/opentelemetry-operator/releases/download/v0.116.0/opentelemetry-operator.yaml
+   ```
+
+   :::important
+   If running Spaces `v1.11` or later, use OpenTelemetry Operator `v0.110.0` or later due to breaking changes.
+   :::
+
+
+## Space-level Observability
+
+Space-level observability is only available for self-hosted Spaces and allows
+administrators to observe the cluster infrastructure.
+
+### Configuration
+
+Configure Space-level observability using the `spacesCollector` value in your
+Spaces Helm chart:
+
+```yaml
+observability:
+  spacesCollector:
+    config:
+      exporters:
+        otlphttp:
+          endpoint: ""
+          headers:
+            api-key: YOUR_API_KEY
+      exportPipeline:
+        logs:
+        - otlphttp
+        metrics:
+        - otlphttp
+```
+
+This configuration exports metrics and logs from:
+
+- Crossplane installation
+- Spaces infrastructure (controller, API, router, etc.)
+
+### Router metrics
+
+The Spaces router uses Envoy as a reverse proxy and automatically exposes
+metrics when you enable Space-level observability. 
These metrics provide
+visibility into:
+
+- Traffic routing to control planes and services
+- Request status codes, timeouts, and retries
+- Circuit breaker state preventing cascading failures
+- Client connection patterns and request volume
+- Request latency (P50, P95, P99)
+
+For more information about available metrics, example queries, and how to enable
+this feature, see the [Space-level observability guide][space-level-o11y].
+
+## Control plane observability
+
+Control plane observability collects telemetry data from workloads running
+within individual control planes using `SharedTelemetryConfig` resources.
+
+The pipeline deploys [OpenTelemetry Collectors][opentelemetry-collectors] per
+control plane, defined by a `SharedTelemetryConfig` at the group level.
+Collectors pass data to external observability backends.
+
+:::important
+From Spaces `v1.13` and beyond, telemetry only includes user-facing control
+plane workloads (Crossplane, providers, functions).
+
+Self-hosted users can include system workloads (`api-server`, `etcd`) by setting
+`observability.collectors.includeSystemTelemetry=true` in Helm.
+:::
+
+:::important
+Spaces validates `SharedTelemetryConfig` resources before applying them by
+sending telemetry to configured exporters. For self-hosted Spaces, ensure that
+`spaces-controller` can reach the exporter endpoints.
+:::
+
+### `SharedTelemetryConfig`
+
+`SharedTelemetryConfig` is a group-scoped custom resource that defines telemetry
+configuration for control planes.
+
+#### New Relic example
+
+```yaml
+apiVersion: observability.spaces.upbound.io/v1alpha1
+kind: SharedTelemetryConfig
+metadata:
+  name: newrelic
+  namespace: default
+spec:
+  controlPlaneSelector:
+    labelSelectors:
+    - matchLabels:
+        org: foo
+  exporters:
+    otlphttp:
+      endpoint: https://otlp.nr-data.net
+      headers:
+        api-key: YOUR_API_KEY
+  exportPipeline:
+    metrics: [otlphttp]
+    traces: [otlphttp]
+    logs: [otlphttp]
+```
+
+#### Datadog example
+
+```yaml
+apiVersion: observability.spaces.upbound.io/v1alpha1
+kind: SharedTelemetryConfig
+metadata:
+  name: datadog
+  namespace: default
+spec:
+  controlPlaneSelector:
+    labelSelectors:
+    - matchLabels:
+        org: foo
+  exporters:
+    datadog:
+      api:
+        site: ${DATADOG_SITE}
+        key: ${DATADOG_API_KEY}
+  exportPipeline:
+    metrics: [datadog]
+    traces: [datadog]
+    logs: [datadog]
+```
+
+### Control plane selection
+
+Use `spec.controlPlaneSelector` to specify which control planes should use the
+telemetry configuration.
+
+#### Label-based selection
+
+```yaml
+spec:
+  controlPlaneSelector:
+    labelSelectors:
+    - matchLabels:
+        environment: production
+```
+
+#### Expression-based selection
+
+```yaml
+spec:
+  controlPlaneSelector:
+    labelSelectors:
+    - matchExpressions:
+      - { key: environment, operator: In, values: [production,staging] }
+```
+
+#### Name-based selection
+
+```yaml
+spec:
+  controlPlaneSelector:
+    names:
+    - controlplane-dev
+    - controlplane-staging
+    - controlplane-prod
+```
+
+### Manage sensitive data
+
+:::important
+Available from Spaces `v1.10`
+:::
+
+Store sensitive data in Kubernetes secrets and reference them in your
+`SharedTelemetryConfig`:
+
+1. **Create the secret:**
+   ```bash
+   kubectl create secret generic sensitive -n  \
+     --from-literal=apiKey='YOUR_API_KEY'
+   ```
+
+2. 
**Reference in SharedTelemetryConfig:**
+   ```yaml
+   apiVersion: observability.spaces.upbound.io/v1alpha1
+   kind: SharedTelemetryConfig
+   metadata:
+     name: newrelic
+   spec:
+     configPatchSecretRefs:
+     - name: sensitive
+       key: apiKey
+       path: exporters.otlphttp.headers.api-key
+     controlPlaneSelector:
+       labelSelectors:
+       - matchLabels:
+           org: foo
+     exporters:
+       otlphttp:
+         endpoint: https://otlp.nr-data.net
+         headers:
+           api-key: dummy # Replaced by secret value
+     exportPipeline:
+       metrics: [otlphttp]
+       traces: [otlphttp]
+       logs: [otlphttp]
+   ```
+
+### Telemetry processing
+
+:::important
+Available from Spaces `v1.11`
+:::
+
+Configure processing pipelines to transform telemetry data using the [transform
+processor][transform-processor].
+
+#### Add labels to metrics
+
+```yaml
+spec:
+  processors:
+    transform:
+      error_mode: ignore
+      metric_statements:
+      - context: datapoint
+        statements:
+        - set(attributes["newLabel"], "someLabel")
+  processorPipeline:
+    metrics: [transform]
+```
+
+#### Remove labels
+
+From metrics:
+```yaml
+processors:
+  transform:
+    metric_statements:
+    - context: datapoint
+      statements:
+      - delete_key(attributes, "kubernetes_namespace")
+```
+
+From logs:
+```yaml
+processors:
+  transform:
+    log_statements:
+    - context: log
+      statements:
+      - delete_key(attributes, "log.file.name")
+```
+
+#### Modify log messages
+
+```yaml
+processors:
+  transform:
+    log_statements:
+    - context: log
+      statements:
+      - set(attributes["original"], body)
+      - set(body, Concat(["log message:", body], " "))
+```
+
+### Monitor status
+
+Check the status of your `SharedTelemetryConfig`:
+
+```bash
+kubectl get stc
+NAME      SELECTED   FAILED   PROVISIONED   AGE
+datadog   1          0        1             63s
+```
+
+- `SELECTED`: Number of control planes selected
+- `FAILED`: Number of control planes that failed provisioning
+- `PROVISIONED`: Number of successfully running collectors
+
+For detailed status information:
+
+```bash
+kubectl describe stc 
+```
+
+## Supported exporters
+
+Both Space-level and control plane observability support:
+- `datadog` - Datadog integration
+- `otlphttp` - General-purpose exporter (used by New Relic, among others)
+- `debug` - For troubleshooting
+
+## Considerations
+
+- **Control plane conflicts**: Each control plane can only use one `SharedTelemetryConfig`. Multiple configs selecting the same control plane conflict.
+- **Custom collector image**: Both Space-level and control plane observability use the same custom OpenTelemetry Collector image with supported exporters.
+- **Resource scope**: `SharedTelemetryConfig` resources are group-scoped, allowing different telemetry configurations per group.
+
+For more advanced configuration options, review the [Helm chart
+reference][helm-chart-reference] and [OpenTelemetry Transformation Language
+documentation][opentelemetry-transformation-language]. 
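+
+For a quick end-to-end check before wiring up a real backend, a minimal config
+using the `debug` exporter might look like the following (a sketch following
+the schema in the examples above; the control plane name is illustrative):
+
+```yaml
+apiVersion: observability.spaces.upbound.io/v1alpha1
+kind: SharedTelemetryConfig
+metadata:
+  name: debug
+  namespace: default
+spec:
+  controlPlaneSelector:
+    names:
+    - controlplane-dev
+  exporters:
+    debug: {}
+  exportPipeline:
+    logs: [debug]
+    metrics: [debug]
+```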
+
+
+[opentelemetry]: https://opentelemetry.io/
+[opentelemetry-collectors]: https://opentelemetry.io/docs/collector/
+[opentelemetry-collector-configuration]: https://opentelemetry.io/docs/collector/configuration/#exporters
+[opentelemetry-operator]: https://opentelemetry.io/docs/kubernetes/operator/
+[transform-processor]: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/transformprocessor/README.md
+[opentelemetry-transformation-language]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl
+[space-level-o11y]: /spaces/howtos/self-hosted/space-observability
+[helm-chart-reference]: /reference/helm-reference
+[opentelemetry-transformation-language-functions]: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/ottl/ottlfuncs/README.md
+[opentelemetry-transformation-language-contexts]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl/contexts
+[guide-on-ottl]: https://betterstack.com/community/guides/observability/ottl/#a-brief-overview-of-the-ottl-grammar
diff --git a/spaces_versioned_docs/version-v1.10/howtos/query-api.md b/spaces_versioned_docs/version-v1.10/howtos/query-api.md
new file mode 100644
index 000000000..78163de2f
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.10/howtos/query-api.md
@@ -0,0 +1,320 @@
+---
+title: Query API
+sidebar_position: 40
+description: Use the `up` CLI to query objects and resources
+---
+
+
+
+
+Upbound's Query API allows users to inspect objects and resources within their control planes. The read-only `up alpha query` and `up alpha get` CLI commands let you gather information on your control planes quickly and efficiently. These commands follow the [`kubectl` conventions][kubectl-conventions] for filtering, sorting, and retrieving information from your Space.
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+). Query API is available in Cloud Spaces since v1.6 and Self-Hosted since v1.8.
+
+For detailed deployment procedures, see [Deploy Query API infrastructure - Self-Hosted](./self-hosted/query-api.md).
+:::
+
+
+
+## Using the Query API
+
+
+The Query API allows you to retrieve control plane information faster than traditional `kubectl` commands. This feature lets you debug your Crossplane resources with the CLI or within the Upbound Console's enhanced management views.
+
+### Query within a single control plane
+
+Use the `up alpha get` command to retrieve information about objects within the current control plane context. This command uses the **Query** endpoint and targets the current control plane.
+
+To switch between control plane groups, use the [`up ctx`][up-ctx] command and change to your desired context with an interactive prompt, or specify your control plane path:
+
+```shell
+up ctx ///
+```
+
+You can query within a single control plane with the [`up alpha get` command][up-alpha-get-command] to return more information about a given object within the current kubeconfig context.
+
+The `up alpha get` command can query resource types and aliases to return objects in your control plane. 
+
+```shell
+up alpha get managed
+NAME                             READY   SYNCED   AGE
+custom-account1-5bv5j-sa         True    True     15m
+custom-cluster1-bq6dk-net        True    True     15m
+custom-account1-5bv5j-subnet     True    True     15m
+custom-cluster1-bq6dk-nodepool   True    True     15m
+custom-cluster1-bq6dk-cluster    True    True     15m
+custom-account1-5bv5j-net        True    True     15m
+custom-cluster1-bq6dk-subnet     True    True     15m
+custom-cluster1-bq6dk-sa         True    True     15m
+```
+
+The [`-A` flag][a-flag] queries for objects across all namespaces.
+
+```shell
+up alpha get configmaps -A
+NAMESPACE           NAME                                                   AGE
+crossplane-system   uxp-versions-config                                    18m
+crossplane-system   universal-crossplane-config                            18m
+crossplane-system   kube-root-ca.crt                                       18m
+upbound-system      kube-root-ca.crt                                       18m
+kube-system         kube-root-ca.crt                                       18m
+kube-system         coredns                                                18m
+default             kube-root-ca.crt                                       18m
+kube-node-lease     kube-root-ca.crt                                       18m
+kube-public         kube-root-ca.crt                                       18m
+kube-system         kube-apiserver-legacy-service-account-token-tracking   18m
+kube-system         extension-apiserver-authentication                     18m
+```
+
+To query for [multiple resource types][multiple-resource-types], you can add the name or alias for the resource as a comma separated string.
+
+```shell
+up alpha get providers,providerrevisions
+
+NAME                                                                              HEALTHY   REVISION   IMAGE                                                     STATE    DEP-FOUND   DEP-INSTALLED   AGE
+providerrevision.pkg.crossplane.io/crossplane-contrib-provider-nop-ecc25c121431   True      1          xpkg.upbound.io/crossplane-contrib/provider-nop:v0.2.1    Active                               18m
+
+NAME                                                          INSTALLED   HEALTHY   PACKAGE                                                  AGE
+provider.pkg.crossplane.io/crossplane-contrib-provider-nop    True        True      xpkg.upbound.io/crossplane-contrib/provider-nop:v0.2.1   18m
+```
+
+### Query multiple control planes
+
+The [`up alpha query` command][up-alpha-query-command] returns a list of objects of any kind within all the control planes in your Space. This command uses either the **SpaceQuery** or **GroupQuery** endpoints depending on your query scope. The `-A` flag switches the query context from the group level to the entire Space.
+
+The `up alpha query` command accepts resources and aliases to return objects across your group or Space.
+
+```shell
+up alpha query crossplane
+
+NAME                                                                                         ESTABLISHED   OFFERED   AGE
+compositeresourcedefinition.apiextensions.crossplane.io/xnetworks.platform.acme.co           True          True      20m
+compositeresourcedefinition.apiextensions.crossplane.io/xaccountscaffolds.platform.acme.co   True          True      20m
+
+
+NAME                                                                          XR-KIND            XR-APIVERSION               AGE
+composition.apiextensions.crossplane.io/xaccountscaffolds.platform.acme.co    XAccountScaffold   platform.acme.co/v1alpha1   20m
+composition.apiextensions.crossplane.io/xnetworks.platform.acme.co            XNetwork           platform.acme.co/v1alpha1   20m
+
+
+NAME                                                                                          REVISION   XR-KIND            XR-APIVERSION               AGE
+compositionrevision.apiextensions.crossplane.io/xaccountscaffolds.platform.acme.co-5ae9da5    1          XAccountScaffold   platform.acme.co/v1alpha1   20m
+compositionrevision.apiextensions.crossplane.io/xnetworks.platform.acme.co-414ce80            1          XNetwork           platform.acme.co/v1alpha1   20m
+
+NAME                                                          READY   SYNCED   AGE
+nopresource.nop.crossplane.io/custom-cluster1-bq6dk-subnet    True    True     19m
+nopresource.nop.crossplane.io/custom-account1-5bv5j-net       True    True     19m
+
+## Output truncated...
+
+```
+
+
+The [`--sort-by` flag][sort-by-flag] lets you control the order of the returned objects. You can specify the sort key as a JSONPath expression that evaluates to a string or integer.
+ + +```shell +up alpha query crossplane -A --sort-by="{.metadata.name}" + +CONTROLPLANE NAME AGE +default/test deploymentruntimeconfig.pkg.crossplane.io/default 10m + +CONTROLPLANE NAME AGE TYPE DEFAULT-SCOPE +default/test storeconfig.secrets.crossplane.io/default 10m Kubernetes crossplane-system +``` + +To query for multiple resource types, you can add the name or alias for the resource as a comma separated string. + +```shell +up alpha query namespaces,configmaps -A + +CONTROLPLANE NAME AGE +default/test namespace/upbound-system 15m +default/test namespace/crossplane-system 15m +default/test namespace/kube-system 16m +default/test namespace/default 16m + +CONTROLPLANE NAMESPACE NAME AGE +default/test crossplane-system configmap/uxp-versions-config 15m +default/test crossplane-system configmap/universal-crossplane-config 15m +default/test crossplane-system configmap/kube-root-ca.crt 15m +default/test upbound-system configmap/kube-root-ca.crt 15m +default/test kube-system configmap/coredns 16m +default/test default configmap/kube-root-ca.crt 16m + +## Output truncated... + +``` + +The Query API also allows you to return resource types with specific [label columns][label-columns]. + +```shell +up alpha query composite -A --label-columns=crossplane.io/claim-namespace + +CONTROLPLANE NAME SYNCED READY COMPOSITION AGE CLAIM-NAMESPACE +query-api-test/test xeks.argo.discover.upbound.io/test-k7xbk False xeks.argo.discover.upbound.io 51d default + +CONTROLPLANE NAME EXTERNALDNS SYNCED READY COMPOSITION AGE CLAIM-NAMESPACE +spaces-clusters/controlplane-query-api-test-spaces-playground xexternaldns.externaldns.platform.upbound.io/spaces-cluster-0-xd8v2-lhnl7 6.34.2 True True xexternaldns.externaldns.platform.upbound.io 19d default +default/query-api-test xexternaldns.externaldns.platform.upbound.io/space-awg-kine-f7dxq-nkk2q 6.34.2 True True xexternaldns.externaldns.platform.upbound.io 55d default + +## Output truncated... + +``` + +### Query API request format + +The CLI can also return a version of your query request with the [`--debug` flag][debug-flag]. This flag returns the API spec request for your query. + +```shell +up alpha query composite -A -d + +apiVersion: query.spaces.upbound.io/v1alpha1 +kind: SpaceQuery +metadata: + creationTimestamp: null +spec: + cursor: true + filter: + categories: + - composite + controlPlane: {} + limit: 500 + objects: + controlPlane: true + table: {} + page: {} +``` + +For more complex queries, you can interact with the Query API like a Kubernetes-style API by creating a query and applying it with `kubectl`. + +The example below is a query for `claim` resources in every control plane from oldest to newest and returns specific information about those claims. + + +```yaml +apiVersion: query.spaces.upbound.io/v1alpha1 +kind: SpaceQuery +spec: + filter: + categories: + - claim + order: + - creationTimestamp: Asc + cursor: true + count: true + objects: + id: true + controlPlane: true + object: + kind: true + apiVersion: true + metadata: + name: true + uid: true + spec: + containers: + image: true +``` + + +The Query API is served by the Spaces API endpoint. You can use `up ctx` to +switch the kubectl context to the Spaces API ingress. After that, you can use +`kubectl create` and receive the `response` for your query parameters. 
+ + +```shell +kubectl create -f spaces-query.yaml -o yaml +``` + +Your `response` should look similar to this example: + +```yaml {copy-lines="none"} +apiVersion: query.spaces.upbound.io/v1alpha1 +kind: SpaceQuery +metadata: + creationTimestamp: "2024-08-08T14:41:46Z" + name: default +response: + count: 3 + cursor: + next: "" + page: 0 + pageSize: 100 + position: 0 + objects: + - controlPlane: + name: query-api-test + namespace: default + id: default/query-api-test/823b2781-7e70-4d91-a6f0-ee8f455d67dc + object: + apiVersion: spaces.platform.upbound.io/v1alpha1 + kind: Space + metadata: + name: space-awg-kine + resourceVersion: "803868" + uid: 823b2781-7e70-4d91-a6f0-ee8f455d67dc + spec: {} + - controlPlane: + name: test-1 + namespace: test + id: test/test-1/08a573dd-851a-42cc-a600-b6f6ed37ee8d + object: + apiVersion: argo.discover.upbound.io/v1alpha1 + kind: EKS + metadata: + name: test-1 + resourceVersion: "4270320" + uid: 08a573dd-851a-42cc-a600-b6f6ed37ee8d + spec: {} + - controlPlane: + name: controlplane-query-api-test-spaces-playground + namespace: spaces-clusters + id: spaces-clusters/controlplane-query-api-test-spaces-playground/b5a6770f-1f85-4d09-8990-997c84bd4159 + object: + apiVersion: spaces.platform.upbound.io/v1alpha1 + kind: Space + metadata: + name: spaces-cluster-0 + resourceVersion: "1408337" + uid: b5a6770f-1f85-4d09-8990-997c84bd4159 + spec: {} +``` + + +## Query API Explorer + + + +import CrdDocViewer from '@site/src/components/CrdViewer'; + +### Query + +The Query resource allows you to query objects in a single control plane. + + + +### GroupQuery + +The GroupQuery resource allows you to query objects across a group of control planes. + + + +### SpaceQuery + +The SpaceQuery resource allows you to query objects across all control planes in a space. + + + + + + +[documentation]: /spaces/howtos/self-hosted/query-api +[up-ctx]: /reference/cli-reference +[up-alpha-get-command]: /reference/cli-reference +[a-flag]: /reference/cli-reference +[multiple-resource-types]: /reference/cli-reference +[up-alpha-query-command]: /reference/cli-reference +[sort-by-flag]: /reference/cli-reference +[label-columns]: /reference/cli-reference +[debug-flag]: /reference/cli-reference +[kubectl-conventions]: https://kubernetes.io/docs/reference/kubectl/generated/kubectl_get/ diff --git a/spaces_versioned_docs/version-v1.10/howtos/secrets-management.md b/spaces_versioned_docs/version-v1.10/howtos/secrets-management.md new file mode 100644 index 000000000..88e730ae5 --- /dev/null +++ b/spaces_versioned_docs/version-v1.10/howtos/secrets-management.md @@ -0,0 +1,719 @@ +--- +title: Secrets Management +sidebar_position: 20 +description: A guide for how to configure synchronizing external secrets into control + planes in a Space. +--- + +Upbound's _Shared Secrets_ is a built in secrets management feature that +provides an integrated way to manage secrets across your platform. It allows you +to store sensitive data like passwords and certificates for your managed control +planes as secrets in an external secret store. + +This feature is a wrapper around the [External Secrets Operator (ESO)][external-secrets-operator-eso] that pulls secrets from external vaults and distributes them across your platform. + +:::info API Version Information +This guide applies to all supported versions (v1.9-v1.14+). The SharedSecretStore and SharedExternalSecret CRDs have been stable since v1.9. + +For API specifications and configuration details, see the [Spaces API Reference](../../../reference/apis/spaces-api/). 
For version compatibility details, see the [Spaces API Reference](../../../reference/apis/spaces-api/).
+:::
+
+## Benefits
+
+The Shared Secrets feature allows you to:
+
+* Access secrets from a variety of external secret stores without operational overhead
+* Configure synchronization for multiple control planes in a group
+* Store and manage all your secrets centrally
+* Use Shared Secrets across all Upbound environments (Cloud and Disconnected Spaces)
+* Synchronize secrets across groups of control planes while maintaining clear security boundaries
+* Manage secrets at scale programmatically while ensuring proper isolation and access control
+
+## Understanding the Architecture
+
+The Shared Secrets feature uses a hierarchical approach to centrally manage
+secrets and effectively control their distribution.
+
+![Shared Secrets workflow diagram](/img/shared-secrets-workflow.png)
+
+1. The flow begins at the group level, where you define your secret sources and distribution rules
+2. These rules automatically create corresponding resources in your control planes
+3. In each control plane, specific namespaces receive the secrets
+4. Changes at the group level automatically propagate through this chain
+
+## Component configuration
+
+Upbound Shared Secrets consists of two components:
+
+1. **SharedSecretStore**: Defines connections to external secret providers
+2. **SharedExternalSecret**: Specifies which secrets to synchronize and where
+
+
+### Connect to an External Vault
+
+
+The `SharedSecretStore` component is the connection point to your external
+secret vaults. It provisions ClusterSecretStore resources into control planes
+within the group.
+
+
+#### AWS Secrets Manager
+
+
+
+In this example, you'll create a `SharedSecretStore` to connect to AWS
+Secrets Manager in `us-west-2`. You'll then grant access to all control planes
+labeled with `environment: production` and make these secrets available in the
+`default` and `crossplane-system` namespaces.
+
+
+You can configure access to AWS Secrets Manager using static credentials or
+workload identity.
+
+:::important
+While the underlying ESO API supports more auth methods, static credentials are currently the only supported auth method in Cloud Spaces.
+:::
+
+##### Static credentials
+
+1. Use the AWS CLI to create access credentials.
+
+2. Store the access credentials in a secret in the namespace you want to have access to the `SharedSecretStore`. The keys must match the `secretRef` names used in the next step:
+```shell
+kubectl create secret \
+  generic aws-credentials \
+  -n default \
+  --from-literal=access-key-id=<aws_access_key_id> \
+  --from-literal=secret-access-key=<aws_secret_access_key>
+```
+
+3. Create a `SharedSecretStore` custom resource file called `secretstore.yaml`.
+   Paste the following configuration:
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: aws-secrets
+spec:
+  # Define which control planes should receive this configuration
+  controlPlaneSelector:
+    labelSelectors:
+    - matchLabels:
+        environment: production
+
+  # Define which namespaces within those control planes can access secrets
+  namespaceSelector:
+    names:
+    - default
+    - crossplane-system
+
+  # Configure the connection to AWS Secrets Manager
+  provider:
+    aws:
+      service: SecretsManager
+      region: us-west-2
+      auth:
+        secretRef:
+          accessKeyIDSecretRef:
+            name: aws-credentials
+            key: access-key-id
+          secretAccessKeySecretRef:
+            name: aws-credentials
+            key: secret-access-key
+```
+
+
+
+##### Workload Identity with IRSA
+
+
+
+You can also use AWS IAM Roles for Service Accounts (IRSA) depending on your
+organization's needs:
+
+1. Ensure you have deployed the Spaces software into an IRSA-enabled EKS cluster.
+2. Follow the AWS instructions to create an IAM OIDC provider with your EKS OIDC
+   provider URL.
+3. Determine the Spaces-generated `controlPlaneID` of your control plane:
+```shell
+kubectl get controlplane -o jsonpath='{.status.controlPlaneID}'
+```
+
+4. Create an IAM trust policy in your AWS account to match the control plane.
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Principal": {
+        "Federated": "arn:aws:iam:::oidc-provider/"
+      },
+      "Action": "sts:AssumeRoleWithWebIdentity",
+      "Condition": {
+        "StringEquals": {
+          ":aud": "sts.amazonaws.com",
+          ":sub": [
+            "system:serviceaccount:mxp--system:external-secrets-controller"]
+        }
+      }
+    }
+  ]
+}
+```
+
+5. Update your Spaces deployment to annotate the SharedSecrets service account
+   with the role ARN.
+```shell
+up space upgrade ... \
+  --set controlPlanes.sharedSecrets.serviceAccount.customAnnotations."eks\.amazonaws\.com/role-arn"=""
+```
+
+6. Create a SharedSecretStore and reference the SharedSecrets service account:
+```yaml {copy-lines="all"}
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: aws-sm
+  namespace: default
+spec:
+  provider:
+    aws:
+      service: SecretsManager
+      region:
+      auth:
+        jwt:
+          serviceAccountRef:
+            name: external-secrets-controller
+  controlPlaneSelector:
+    names:
+    -
+  namespaceSelector:
+    names:
+    - default
+```
+
+When you create a `SharedSecretStore`, the underlying mechanism:
+
+1. Applies at the group level
+2. Determines which control planes should receive this configuration by the `controlPlaneSelector`
+3. Automatically creates a ClusterSecretStore inside each identified control plane
+4. Maintains a connection in each control plane with the ClusterSecretStore
+   credentials and configuration from the parent SharedSecretStore
+
+Upbound automatically generates a ClusterSecretStore in each matching control
+plane when you create a SharedSecretStore.
+
+```yaml {copy-lines="none"}
+# Automatically created in each matching control plane
+apiVersion: external-secrets.io/v1beta1
+kind: ClusterSecretStore
+metadata:
+  name: aws-secrets # Name matches the parent SharedSecretStore
+spec:
+  provider:
+    upboundspaces:
+      storeRef:
+        name: aws-secrets
+```
+
+When you create the SharedSecretStore, the controller replaces the provider with
+a special provider called `upboundspaces`. This provider references the
+SharedSecretStore object in the Spaces API. This avoids copying the actual cloud
+credentials from Spaces to each control plane.
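+
+To confirm the projection, you can switch your kubeconfig into one of the selected control planes and check that the generated store exists. This is a sketch; the store name matches the `SharedSecretStore` example above:
+
+```shell
+# Switch into a matching control plane interactively
+up ctx
+
+# The projected store is a regular ESO ClusterSecretStore
+kubectl get clustersecretstore aws-secrets
+```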
+
+This workflow lets you configure the store connection once at the group level
+and automatically propagate it to each control plane. Individual control planes
+can use the store without exposure to the group-level configuration, and
+updating the parent SharedSecretStore updates all child ClusterSecretStores.
+
+
+#### Azure Key Vault
+
+
+:::important
+While the underlying ESO API supports more auth methods, static credentials are currently the only supported auth method in Cloud Spaces.
+:::
+
+##### Static credentials
+
+1. Use the Azure CLI to create a service principal and authentication file.
+2. Create a service principal and save credentials in a file:
+```json
+{
+  "appId": "myAppId",
+  "displayName": "myServicePrincipalName",
+  "password": "myServicePrincipalPassword",
+  "tenant": "myTenantId"
+}
+```
+
+3. Store the credentials as a Kubernetes secret. The keys must match the `authSecretRef` names used in the next step:
+```shell
+kubectl create secret \
+  generic azure-secret-sp \
+  -n default \
+  --from-literal=ClientID=<appId> \
+  --from-literal=ClientSecret=<password>
+```
+
+4. Create a SharedSecretStore referencing these credentials:
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: azure-kv
+spec:
+  provider:
+    azurekv:
+      tenantId: ""
+      vaultUrl: ""
+      authSecretRef:
+        clientId:
+          name: azure-secret-sp
+          key: ClientID
+        clientSecret:
+          name: azure-secret-sp
+          key: ClientSecret
+  controlPlaneSelector:
+    names:
+    -
+  namespaceSelector:
+    names:
+    - default
+```
+
+##### Workload Identity
+
+
+You can also use Entra Workload Identity Federation to access Azure Key Vault
+without needing to manage secrets.
+
+To use Entra Workload ID with AKS:
+
+
+1. Deploy the Spaces software into a [workload identity-enabled AKS cluster][workload-identity-enabled-aks-cluster].
+2. Retrieve the OIDC issuer URL of the AKS cluster:
+```shell
+az aks show --name "" \
+  --resource-group "" \
+  --query "oidcIssuerProfile.issuerUrl" \
+  --output tsv
+```
+
+3. Use the Azure CLI to make a managed identity:
+```shell
+az identity create \
+  --name "" \
+  --resource-group "" \
+  --location "" \
+  --subscription ""
+```
+
+4. Look up the managed identity's client ID:
+```shell
+az identity show \
+  --resource-group "" \
+  --name "" \
+  --query 'clientId' \
+  --output tsv
+```
+
+5. Update your Spaces deployment to annotate the SharedSecrets service account with the associated Entra application client ID from the previous step:
+```shell
+up space upgrade ... \
+  --set controlPlanes.sharedSecrets.serviceAccount.customAnnotations."azure\.workload\.identity/client-id"="" \
+  --set-string controlPlanes.sharedSecrets.pod.customLabels."azure\.workload\.identity/use"="true"
+```
+
+6. Determine the Spaces-generated `controlPlaneID` of your control plane. When you deploy a `kind: controlplane` in a Space, the Spaces software deploys a set of pods in a new namespace following the format `mxp--system`.
+```shell
+kubectl get controlplane -o jsonpath='{.status.controlPlaneID}'
+```
+
+7. Create a federated identity credential.
+```shell
+FEDERATED_IDENTITY_CREDENTIAL_NAME=
+USER_ASSIGNED_IDENTITY_NAME=
+RESOURCE_GROUP=
+AKS_OIDC_ISSUER=
+CONTROLPLANE_ID=
+az identity federated-credential create --name ${FEDERATED_IDENTITY_CREDENTIAL_NAME} --identity-name "${USER_ASSIGNED_IDENTITY_NAME}" --resource-group "${RESOURCE_GROUP}" --issuer "${AKS_OIDC_ISSUER}" --subject system:serviceaccount:"mxp-${CONTROLPLANE_ID}-system:external-secrets-controller" --audience api://AzureADTokenExchange
+```
+
+8. Assign the `Key Vault Secrets User` role to the user-assigned managed identity that you created earlier.
This step gives the managed identity permission to read secrets from the key vault:
+```shell
+az role assignment create \
+  --assignee-object-id "${IDENTITY_PRINCIPAL_ID}" \
+  --role "Key Vault Secrets User" \
+  --scope "${KEYVAULT_RESOURCE_ID}" \
+  --assignee-principal-type ServicePrincipal
+```
+
+:::important
+You must manually restart a workload's pod when you add the annotation to the running pod's service account. The Entra workload identity mutating admission webhook requires a restart to inject the necessary environment.
+:::
+
+9. Create a `SharedSecretStore`. Replace `vaultUrl` with the URL of your Azure Key Vault instance. Replace `identityId` with the client ID of the managed identity created earlier:
+```yaml {copy-lines="all"}
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: azure-kv
+spec:
+  provider:
+    azurekv:
+      authType: WorkloadIdentity
+      identityId: "<client-id>" # client ID of the managed identity
+      vaultUrl: ""
+  controlPlaneSelector:
+    names:
+    -
+  namespaceSelector:
+    names:
+    - default
+```
+
+
+
+#### Google Cloud Secret Manager
+
+
+
+You can configure access to Google Cloud Secret Manager using static credentials or workload identity. Below are instructions for configuring either. See the [ESO provider API][eso-provider-api] for more information.
+
+:::important
+While the underlying ESO API supports more auth methods, static credentials are currently the only supported auth method in Cloud Spaces.
+:::
+
+##### Static credentials
+
+1. Use the [GCP CLI][gcp-cli] to create access credentials.
+2. Save the output in a file called `gcp-credentials.json`.
+3. Store the access credentials in a secret in the same namespace as the `SharedSecretStore`.
+   ```shell {label="kube-create-secret",copy-lines="all"}
+   kubectl create secret \
+     generic gcpsm-secret \
+     -n default \
+     --from-file=creds=./gcp-credentials.json
+   ```
+
+4. Create a `SharedSecretStore`, referencing the secret created earlier. Replace `projectID` with your GCP Project ID:
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: gcp-sm
+spec:
+  provider:
+    gcpsm:
+      auth:
+        secretRef:
+          secretAccessKeySecretRef:
+            name: gcpsm-secret
+            key: creds
+      projectID:
+  controlPlaneSelector:
+    names:
+    -
+  namespaceSelector:
+    names:
+    - default
+```
+
+:::tip
+The example above maps a Shared Secret Store into a single namespace of a single control plane. Read [control plane selection][control-plane-selection] and [namespace selection][namespace-selection] to learn how to map into one or more namespaces of one or more control planes.
+:::
+
+
+##### Workload identity with Service Accounts to IAM Roles
+
+
+To configure, grant the `roles/iam.workloadIdentityUser` role to the Kubernetes
+service account in the control plane namespace to impersonate the IAM service
+account.
+
+1. Ensure you've deployed Spaces on a [Workload Identity Federation-enabled][workload-identity-federation-enabled] GKE cluster.
+2. Determine the Spaces-generated `controlPlaneID` of your control plane. When you deploy a `kind: controlplane` in a Space, the Spaces software deploys a set of pods in a new namespace following the format `mxp--system`.
+```shell
+kubectl get controlplane -o jsonpath='{.status.controlPlaneID}'
+```
+
+3. Create a GCP IAM service account with the [GCP CLI][gcp-cli-1]:
+```shell
+gcloud iam service-accounts create \
+  --project=
+```
+
+4. 
Grant the IAM service account the role to access GCP Secret Manager: +```ini +SA_NAME= +IAM_SA_PROJECT_ID= +gcloud projects add-iam-policy-binding IAM_SA_PROJECT_ID \ + --member "serviceAccount:SA_NAME@IAM_SA_PROJECT_ID.iam.gserviceaccount.com" \ + --role roles/secretmanager.secretAccessor +``` + +5. When you enable the Shared Secrets feature, a service account gets created in each control plane for the External Secrets Operator. Apply a [GCP IAM policy binding][gcp-iam-policy-binding] to associate this service account with the desired GCP IAM role. +```ini +PROJECT_ID= +PROJECT_NUMBER= +CONTROLPLANE_ID= +gcloud projects add-iam-policy-binding projects/${PROJECT_ID} \ + --role "roles/iam.workloadIdentityUser" \ + --member=principal://iam.googleapis.com/projects/${PROJECT_NUMBER}/locations/global/workloadIdentityPools/${PROJECT_ID}.svc.id.goog/subject/ns/mxp-${CONTROLPLANE_ID}-system/sa/external-secrets-controller +``` + +6. Update your Spaces deployment to annotate the SharedSecrets service account with GCP IAM service account's identifier: +```ini +up space upgrade ... \ + --set controlPlanes.sharedSecrets.serviceAccount.customAnnotations."iam\.gke\.io/gcp-service-account"="" +``` + +7. Create a `SharedSecretStore`. Replace `projectID` with your GCP Project ID: +```yaml +apiVersion: spaces.upbound.io/v1alpha1 +kind: SharedSecretStore +metadata: + name: gcp-sm +spec: + provider: + gcpsm: + projectID: + controlPlaneSelector: + names: + - + namespaceSelector: + names: + - default +``` + +:::tip +The example above maps a Shared Secret Store into a single namespace of a single control plane. Read [control plane selection][control-plane-selection-1] and [namespace selection][namespace-selection-2] to learn how to map into one or more namespaces of one or more control planes. +::: + +### Manage your secret distribution + +After you create your SharedSecretStore, you can define which secrets to +distribute using SharedExternalSecret: + +```yaml +apiVersion: spaces.upbound.io/v1alpha1 +kind: SharedExternalSecret +metadata: + name: database-credentials + namespace: default +spec: + # Select the same control planes as your SharedSecretStore + controlPlaneSelector: + labelSelectors: + - matchLabels: + environment: production + + externalSecretSpec: + refreshInterval: 1h + secretStoreRef: + name: aws-secrets # References the SharedSecretStore name + kind: ClusterSecretStore + target: + name: db-credentials + data: + - secretKey: username + remoteRef: + key: prod/database/credentials + property: username + - secretKey: password + remoteRef: + key: prod/database/credentials + property: password +``` + +This configuration: + +* Pulls database credentials from your external secret provider +* Creates secrets in all production control planes +* Refreshes the secrets every hour +* Creates a secret called `db-credentials` in each control plane + +When you create a SharedExternalSecret at the group level, Upbound's system +creates a template for the corresponding ClusterExternalSecrets in each selected +control plane. + +The example below simulates the ClusterExternalSecret that Upbound creates: + +```yaml +# Inside each matching control plane: +apiVersion: external-secrets.io/v1beta1 +kind: ClusterExternalSecret +metadata: + name: database-credentials +spec: + refreshInterval: 1h + secretStoreRef: + name: aws-secrets + kind: ClusterSecretStore + data: + - secretKey: username + remoteRef: + key: prod/database/credentials + property: username +``` + +The hierarchy in this configuration is: + +1. 
SharedExternalSecret (group level) defines what secrets to distribute
+2. ClusterExternalSecret (control plane level) manages the distribution within
+   each control plane
+3. Kubernetes Secrets (namespace level) are created in specified namespaces
+
+
+#### Control plane selection
+
+To configure which control planes in a group you want to project a SecretStore into, use the `spec.controlPlaneSelector` field. You can either use `labelSelectors` or the `names` of a control plane directly. A control plane matches if any of the label selectors match.
+
+This example matches all control planes in the group that have `environment: production` as a label:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: my-secret-store
+spec:
+  controlPlaneSelector:
+    labelSelectors:
+    - matchLabels:
+        environment: production
+```
+
+You can use the more complex `matchExpressions` to match labels based on an expression. This example matches control planes that have label `environment: production` or `environment: staging`:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: my-secret-store
+spec:
+  controlPlaneSelector:
+    labelSelectors:
+    - matchExpressions:
+      - { key: environment, operator: In, values: [production,staging] }
+```
+
+You can also specify the names of control planes directly:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: my-secret-store
+spec:
+  controlPlaneSelector:
+    names:
+    - controlplane-dev
+    - controlplane-staging
+    - controlplane-prod
+```
+
+
+#### Namespace selection
+
+To configure which namespaces **within each matched control plane** to project the secret store into, use the `spec.namespaceSelector` field. The projected secret store only appears in the namespaces matching the provided selector. You can either use `labelSelectors` or the `names` of namespaces directly. A namespace matches if any of the label selectors match.
+
+**For all control planes matched by `spec.controlPlaneSelector`**, this example matches all namespaces in each selected control plane that have `team: team1` as a label:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: my-secret-store
+spec:
+  namespaceSelector:
+    labelSelectors:
+    - matchLabels:
+        team: team1
+```
+
+You can use the more complex `matchExpressions` to match labels based on an expression. This example matches namespaces that have label `team: team1` or `team: team2`:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: my-secret-store
+spec:
+  namespaceSelector:
+    labelSelectors:
+    - matchExpressions:
+      - { key: team, operator: In, values: [team1,team2] }
+```
+
+You can also specify the names of namespaces directly:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: my-secret-store
+spec:
+  namespaceSelector:
+    names:
+    - team1-namespace
+    - team2-namespace
+```
+
+## Configure secrets directly in a control plane
+
+
+The above explains using group-scoped resources to project secrets into multiple control planes. You can also use ESO API types directly in a control plane as you would in standalone Crossplane or Kubernetes.
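+
+For example, a control-plane-local `ExternalSecret` that consumes an existing store looks the same as it would in vanilla ESO. This is a sketch that reuses the `aws-secrets` store from earlier; the remote secret path and keys are hypothetical:
+
+```yaml
+apiVersion: external-secrets.io/v1beta1
+kind: ExternalSecret
+metadata:
+  name: api-token
+  namespace: default
+spec:
+  refreshInterval: 1h
+  secretStoreRef:
+    name: aws-secrets
+    kind: ClusterSecretStore
+  target:
+    name: api-token # Kubernetes Secret created in this namespace
+  data:
+  - secretKey: token
+    remoteRef:
+      key: prod/api/credentials
+      property: token
+```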
+
+See the [ESO documentation][eso-documentation] for a full guide on using the API types.
+
+## Best practices
+
+When you configure secrets management in your Upbound environment, keep the
+following best practices in mind:
+
+**Use consistent labeling schemes** across your control planes for predictable
+and manageable secret distribution.
+
+**Organize your secrets** in your external provider using a hierarchical
+structure that mirrors your control plane organization.
+
+**Set appropriate refresh intervals** based on your security requirements and the
+nature of the secrets.
+
+**Use namespace selection sparingly** to limit secret distribution to only the
+namespaces that need them.
+
+**Use separate tokens for each environment.** Keep them in distinct
+SharedSecretStores. Users could bypass SharedExternalSecret selectors by
+creating ClusterExternalSecrets directly in control planes, which grants access
+to all secrets available to that token.
+
+**Document your secret management architecture**, including which control planes
+should receive which secrets.
+
+[control-plane-selection]: #control-plane-selection
+[namespace-selection]: #namespace-selection
+[control-plane-selection-1]: #control-plane-selection
+[namespace-selection-2]: #namespace-selection
+
+[external-secrets-operator-eso]: https://external-secrets.io
+[workload-identity-enabled-aks-cluster]: https://learn.microsoft.com/en-us/azure/aks/workload-identity-deploy-cluster
+[eso-provider-api]: https://external-secrets.io/latest/provider/google-secrets-manager/
+[gcp-cli]: https://cloud.google.com/iam/docs/creating-managing-service-account-keys
+[workload-identity-federation-enabled]: https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity#enable_on_clusters_and_node_pools
+[gcp-cli-1]: https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity#kubernetes-sa-to-iam
+[gcp-iam-policy-binding]: https://cloud.google.com/sdk/gcloud/reference/iam/service-accounts/add-iam-policy-binding
+[eso-documentation]: https://external-secrets.io/latest/introduction/getting-started/
diff --git a/spaces_versioned_docs/version-v1.10/howtos/self-hosted/_category_.json b/spaces_versioned_docs/version-v1.10/howtos/self-hosted/_category_.json
new file mode 100644
index 000000000..5bf23bb0a
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.10/howtos/self-hosted/_category_.json
@@ -0,0 +1,11 @@
+{
+  "label": "Self-Hosted Spaces",
+  "position": 2,
+  "collapsed": true,
+  "customProps": {
+    "plan": "business"
+  }
+
+}
+
+
diff --git a/spaces_versioned_docs/version-v1.10/howtos/self-hosted/administer-features.md b/spaces_versioned_docs/version-v1.10/howtos/self-hosted/administer-features.md
new file mode 100644
index 000000000..ce878014e
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.10/howtos/self-hosted/administer-features.md
@@ -0,0 +1,121 @@
+---
+title: Administer features
+sidebar_position: 12
+description: Enable and disable features in Spaces
+---
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Feature flags and their availability may vary by version.
+:::
+
+This guide shows how to enable or disable features in your self-hosted Space.
+
+## Shared secrets
+
+**Status:** Preview
+
+This feature is enabled by default in Cloud Spaces.
+
+To enable this feature in a self-hosted Space, set
+`features.alpha.sharedSecrets.enabled=true` when installing the Space:
+
+```bash
+up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
+  ...
+  --set "features.alpha.sharedSecrets.enabled=true" \
+```
+
+
+## Observability
+
+**Status:** GA
+**Available from:** Spaces v1.13+
+
+This feature is enabled by default in Cloud Spaces.
+
+
+
+To enable this feature in a self-hosted Space, set
+`observability.enabled=true` (`features.alpha.observability.enabled=true` before `v1.14.0`) when installing the Space:
+
+```bash
+up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
+  ...
+  --set "observability.enabled=true" \
+```
+
+The observability feature collects telemetry data from user-facing control
+plane workloads like:
+
+* Crossplane
+* Providers
+* Functions
+
+Self-hosted Spaces users can add control plane system workloads such as the
+`api-server` and `etcd` by setting the
+`observability.collectors.includeSystemTelemetry` Helm flag to true.
+
+### Sensitive data
+
+To avoid exposing sensitive data in the `SharedTelemetryConfig` resource, use
+Kubernetes secrets to store the sensitive data and reference the secret in the
+`SharedTelemetryConfig` resource.
+
+Create the secret in the same namespace/group as the `SharedTelemetryConfig`
+resource. The example below uses `kubectl create secret` to create a new secret:
+
+```bash
+kubectl create secret generic sensitive -n \
+  --from-literal=apiKey='YOUR_API_KEY'
+```
+
+Next, reference the secret in the `SharedTelemetryConfig` resource:
+
+```yaml
+apiVersion: observability.spaces.upbound.io/v1alpha1
+kind: SharedTelemetryConfig
+metadata:
+  name: newrelic
+spec:
+  configPatchSecretRefs:
+  - name: sensitive
+    key: apiKey
+    path: exporters.otlphttp.headers.api-key
+  controlPlaneSelector:
+    labelSelectors:
+    - matchLabels:
+        org: foo
+  exporters:
+    otlphttp:
+      endpoint: https://otlp.nr-data.net
+      headers:
+        api-key: dummy # This value is replaced by the secret value and can be omitted
+  exportPipeline:
+    metrics: [otlphttp]
+    traces: [otlphttp]
+    logs: [otlphttp]
+```
+
+The `configPatchSecretRefs` field in the `spec` specifies the secret `name`,
+`key`, and `path` values to inject the secret value in the
+`SharedTelemetryConfig` resource.
+
+## Shared backups
+
+As of Spaces `v1.12.0`, this feature is enabled by default.
+
+To disable it in a self-hosted Space, pass `features.alpha.sharedBackup.enabled=false` as a Helm chart value:
+`--set "features.alpha.sharedBackup.enabled=false"`
+
+## Query API
+
+**Status:** Preview
+
+The Query API is available in the Cloud Space offering and enabled by default.
+
+Query API is required for self-hosted deployments with connected Spaces. See the
+related [documentation][documentation]
+to enable this feature.
+
+[documentation]: /spaces/howtos/query-api/
diff --git a/spaces_versioned_docs/version-v1.10/howtos/self-hosted/attach-detach.md b/spaces_versioned_docs/version-v1.10/howtos/self-hosted/attach-detach.md
new file mode 100644
index 000000000..1465921cf
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.10/howtos/self-hosted/attach-detach.md
@@ -0,0 +1,198 @@
+---
+title: Connect or disconnect a Space
+sidebar_position: 12
+description: Enable and connect self-hosted Spaces to the Upbound console
+---
+:::info API Version Information
+This guide applies to **Spaces v1.8.0 and later** (Self-Hosted only). Connection to Upbound console requires Query API and RBAC to be enabled.
+
+For Query API setup details, see [Deploy Query API infrastructure](./query-api.md).
+:::
+
+:::important
+This feature is in preview.
Starting in Spaces `v1.8.0` and later, you must +deploy and [enable the Query API][enable-the-query-api] and [enable Upbound +RBAC][enable-upbound-rbac] to connect a Space to Upbound. +::: + +[Upbound][upbound] allows you to connect self-hosted Spaces and enables a streamlined operations and debugging experience in your Console. + +## Usage + +### Connect + +Before you begin, make sure you have: + +- An existing Upbound [organization][organization] in Upbound SaaS. +- The `up` CLI installed and logged into your organization +- `kubectl` installed with the kubecontext of your self-hosted Space cluster. +- A `token.json` license, provided by your Upbound account representative. +- You enabled the [Query API][query-api] in the self-hosted Space. + +Create a new `UPBOUND_SPACE_NAME`. If you don't create a name, `up` automatically generates one for you: + +```ini +export UPBOUND_SPACE_NAME=your-self-hosted-space +``` + +#### With up CLI + +:::tip +The command tries to connect the Space to the org account context pointed at by your `up` CLI profile. Make sure you've logged into Upbound SaaS with `up login -a ` before trying to connect the Space. +::: + +Connect the Space to the Console: + +```bash +up space connect "${UPBOUND_SPACE_NAME}" +``` + +This command installs a Connect agent, creates a service account, and configures permissions in your Upbound cloud organization in the `upbound-system` namespace of your Space. + +#### With Helm + +Export your Upbound org account name to an environment variable called `UPBOUND_ORG_NAME`. You can see this value by running `up org list` after logging on to Upbound. + +```ini +export UPBOUND_ORG_NAME=your-org-name +``` + +Create a new robot token and export it to an environment variable called `UPBOUND_TOKEN`: + +```bash +up robot create "${UPBOUND_SPACE_NAME}" --description="Robot used for authenticating Space '${UPBOUND_SPACE_NAME}' with Upbound Connect" +export UPBOUND_TOKEN=$(up robot token create "$UPBOUND_SPACE_NAME" "$UPBOUND_SPACE_NAME" --file - | jq -r '.token') +``` + +:::note +Follow the [`jq` installation guide][jq-install] if your machine doesn't include +it by default. +::: + +Create a secret containing the robot token: + +```bash +kubectl create secret -n upbound-system generic connect-token --from-literal=token=${UPBOUND_TOKEN} +``` + +Specify your username and password for the helm OCI registry: + +```bash +jq -r .token $SPACES_TOKEN_PATH | helm registry login xpkg.upbound.io -u $(jq -r .accessId $SPACES_TOKEN_PATH) --password-stdin +``` + +In the same cluster where you installed the Spaces software, install the Upbound connect agent with your token secret. + +```bash +helm -n upbound-system upgrade --install agent \ + oci://xpkg.upbound.io/spaces-artifacts/agent \ + --version "0.0.0-441.g68777b9" \ + --set "image.repository=xpkg.upbound.io/spaces-artifacts/agent" \ + --set "registration.image.repository=xpkg.upbound.io/spaces-artifacts/register-init" \ + --set "imagePullSecrets[0].name=upbound-pull-secret" \ + --set "registration.enabled=true" \ + --set "space=${UPBOUND_SPACE_NAME}" \ + --set "organization=${UPBOUND_ORG_NAME}" \ + --set "tokenSecret=connect-token" \ + --wait +``` + + +#### View your Space in the Console + + +Go to the [Upbound Console][upbound-console], log in, and choose the newly connected Space from the Space selector dropdown. + +![A screenshot of the Upbound Console space selector dropdown](/img/attached-space.png) + +:::note +You can only connect a self-hosted Space to a single organization at a time. 
+
+:::
+
+### Disconnect
+
+#### With up CLI
+
+To disconnect a self-hosted Space or a deleted self-hosted Space, run the following command:
+
+```bash
+up space disconnect "${UPBOUND_SPACE_NAME}"
+```
+
+If the Space still exists, this command uninstalls the Connect agent and deletes the associated service account and permissions.
+
+#### With Helm
+
+To disconnect a self-hosted Space or a deleted self-hosted Space, run the following command:
+
+```bash
+helm delete -n upbound-system agent
+```
+
+Clean up the robot token you created for this self-hosted Space:
+
+```bash
+up robot delete "${UPBOUND_SPACE_NAME}" --force
+```
+
+## Security model
+
+### Architecture
+
+![An architectural diagram of a self-hosted Space attached to Upbound](/img/console-attach-architecture.jpg)
+
+:::note
+This diagram illustrates a self-hosted Space running in AWS connected to the global Upbound Console. The same model applies to a Space running in AKS, GKE, or other Kubernetes environments.
+:::
+
+### Data path
+
+Upbound uses a Pub/Sub model over TLS to communicate between Upbound's global
+console and your self-hosted Space. Self-hosted Spaces establish a secure
+connection with `connect.upbound.io`, `api.upbound.io`, and `auth.upbound.io` and subscribe to an
+endpoint.
+
+:::important
+Add `connect.upbound.io`, `api.upbound.io`, and `auth.upbound.io` to your organization's list of
+allowed endpoints.
+:::
+
+The
+Upbound Console communicates with the Space through that endpoint. The data flow
+is:
+
+1. Users sign in to the Upbound Console, redirecting to authenticate with an organization's configured Identity Provider via SSO.
+2. Once authenticated, actions in the Console, like listing control planes or specific resource types from a control plane, generate requests. These requests post as messages to the Upbound Connect service.
+3. A user's self-hosted Space polls the Upbound Connect service periodically for new messages, verifies the authenticity of each message, and fulfills the request it contains.
+4. A user's self-hosted Space returns the results of the request to the Upbound Connect service and the Console renders the results in the user's browser session.
+
+**Upbound never stores data originated from a self-hosted Space.** The data is transient and only exposed in the user's browser session. The Console needs this data to render your resources and control planes in the UI.
+
+### Data transmitted
+
+Users interact with the Upbound Console to generate request queries to the Upbound Connect Service while exploring, managing, or debugging a self-hosted Space. These requests send data back to the user's browser session in the Console, including:
+
+* Metadata for the Space
+* Metadata for control planes in the Space
+* Configuration manifests for various resource types within your Space: Crossplane managed resources, composite resources, composite resource claims, Upbound shared secrets, Upbound shared backups, Crossplane providers, ProviderConfigs, Configurations, and Crossplane Composite Functions.
+
+:::important
+This data only concerns resource configuration. The data _inside_ the managed
+resource in your Space isn't visible at any point.
+:::
+
+**Upbound can't see your data.** Upbound doesn't have access to session-based data rendered for your users in the Upbound Console. Upbound has no information about your self-hosted Space, other than that you've connected a self-hosted Space.
+
+### Threat vectors
+
+Only users with editor or administrative permissions can make changes using the Console, like creating or deleting control planes or groups.
+
+
+[enable-the-query-api]: /spaces/howtos/self-hosted/query-api
+[enable-upbound-rbac]: /manuals/platform/concepts/authorization/upbound-rbac
+[upbound]: /manuals/console/upbound-console
+[organization]: /manuals/platform/concepts/identity-management/organizations
+[query-api]: /spaces/howtos/self-hosted/query-api
+[jq-install]: https://jqlang.org/download/
+
+[upbound-console]: https://console.upbound.io
diff --git a/spaces_versioned_docs/version-v1.10/howtos/self-hosted/billing.md b/spaces_versioned_docs/version-v1.10/howtos/self-hosted/billing.md
new file mode 100644
index 000000000..145ff9f03
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.10/howtos/self-hosted/billing.md
@@ -0,0 +1,307 @@
+---
+title: Self-Hosted Space Billing
+sidebar_position: 50
+description: A guide for how billing works in an Upbound Space
+---
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Billing models may evolve between versions. For capacity-based licensing details and reference specifications, see [Capacity Licensing](/spaces/howtos/self-hosted/capacity-licensing).
+:::
+
+Spaces are a self-hosting feature of Upbound's [flagship product][flagship-product] for platform teams to deploy control planes in their self-managed environments. You can install Spaces into any Kubernetes cluster in your own cloud account, on-premises data center, or on the edge. The pricing is usage-based and requires an Upbound account and subscription. The billing unit is a `Loop`.
+
+
+:::info
+This guide describes the traditional usage-based billing model using object storage. For disconnected or air-gapped environments, consider [Capacity Licensing](/spaces/howtos/self-hosted/capacity-licensing), which provides a simpler fixed-capacity model with local usage tracking.
+:::
+
+## Billing details
+
+Spaces **aren't connected** to Upbound's global service. To enable proper billing, the Spaces software ships a controller whose responsibility is to collect billing data from your Spaces deployment. The collection and storage of your billing data happens entirely locally within your environment; no data is automatically emitted back to Upbound's global service. This data gets written to object storage of your choice. AWS, Azure, and GCP are currently supported. The Spaces software exports billing usage data every ~15 seconds.
+
+Spaces customers must periodically provide the billing data to Upbound. Contact your Upbound sales representative to learn more.
+
+
+
+## AWS S3
+
+
+
+Configure billing to write to an S3 bucket by providing the following values at install-time. Create an S3 bucket if you don't already have one.
+
+### IAM policy
+
+You must create an IAM policy and attach it to the IAM user (for static credentials) or IAM role (for assumed
+roles).
+ +The policy example below enables the necessary S3 permissions: + +```json +{ + "Sid":"EnableS3Permissions", + "Effect":"Allow", + "Action": [ + "s3:PutObject", + "s3:GetObject", + "s3:ListBucket", + "s3:DeleteObject" + ], + "Resource": [ + "arn:aws:s3:::your-bucket-name/*", + "arn:aws:s3:::your-bucket-name" + ] +}, +{ + "Sid": "ListBuckets", + "Effect": "Allow", + "Action": "s3:ListAllMyBuckets", + "Resource": "*" +} +``` + +### Authentication with static credentials + +In your Spaces install cluster, create a secret in the `upbound-system` +namespace. This secret must contain keys `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`. + +```bash +kubectl create secret generic billing-credentials -n upbound-system \ + --from-literal=AWS_ACCESS_KEY_ID= \ + --from-literal=AWS_SECRET_ACCESS_KEY= +``` + +Install the Space software, providing the billing details to the other required values. + + + + + + +```bash {hl_lines="2-6"} +helm -n upbound-system upgrade --install spaces ... \ + --set "billing.enabled=true" \ + --set "billing.storage.provider=aws" \ + --set "billing.storage.aws.region=" \ + --set "billing.storage.aws.bucket=" \ + --set "billing.storage.secretRef.name=billing-credentials" + ... +``` + + + + + +```bash {hl_lines="2-6"} +up space init ... \ + --set "billing.enabled=true" \ + --set "billing.storage.provider=aws" \ + --set "billing.storage.aws.region=" \ + --set "billing.storage.aws.bucket=" \ + --set "billing.storage.secretRef.name=billing-credentials" + ... +``` + + + + + + + +### Authentication with an IAM role + + +To use short-lived credentials with an assumed IAM role, create an IAM role with +established trust to the `vector`-serviceaccount in all `mxp-*-system` +namespaces. + +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "Federated": "arn:aws:iam::12345678912:oidc-provider/oidc.eks.eu-west-2.amazonaws.com/id/YOUROIDCPROVIDERID" + }, + "Action": "sts:AssumeRoleWithWebIdentity", + "Condition": { + "StringLike": { + "oidc.eks.eu-west-2.amazonaws.com/id/YOUROIDCPROVIDERID:sub": "system:serviceaccount:mxp-*-system:vector" + } + } + } + ] +} +``` + +For more information about workload identities, review the [Workload-identity +Configuration documentation][workload-identity-configuration-documentation] + + + + + + +```bash {hl_lines="2-7"} +helm -n upbound-system upgrade --install spaces ... \ + --set "billing.enabled=true" \ + --set "billing.storage.provider=aws" \ + --set "billing.storage.aws.region=" \ + --set "billing.storage.aws.bucket=" \ + --set "billing.storage.secretRef.name=" \ + --set "controlPlanes.vector.serviceAccount.customAnnotations[eks.amazonaws.com/role-arn]=" + ... +``` + + + + + +```bash {hl_lines="2-7"} +up space init ... \ + --set "billing.enabled=true" \ + --set "billing.storage.provider=aws" \ + --set "billing.storage.aws.region=" \ + --set "billing.storage.aws.bucket=" \ + --set "billing.storage.secretRef.name=" \ + --set "controlPlanes.vector.serviceAccount.customAnnotations[eks.amazonaws.com/role-arn]=" + ... +``` + + + + + + +*Note*: You must set `billing.storage.secretRef.name` to an empty string when using an assumed role. + + +## Azure blob storage + +Configure billing to write to a blob in Azure by providing the following values at install-time. Create a storage account and container if you don't already have one. + +Then, on the cluster where you installed the Spaces software, create a secret in `upbound-system`. 
This secret must contain keys `AZURE_TENANT_ID`, `AZURE_CLIENT_ID`, and `AZURE_CLIENT_SECRET`. Make sure to replace the values with details generated from your Azure account. + +```bash +kubectl create secret generic billing-credentials -n upbound-system \ + --from-literal=AZURE_TENANT_ID= \ + --from-literal=AZURE_CLIENT_ID= \ + --from-literal=AZURE_CLIENT_SECRET= +``` + +Install the Space software, providing the billing details to the other required values. + + + + + + +```bash {hl_lines="2-6"} +helm -n upbound-system upgrade --install spaces ... \ + --set "billing.enabled=true" \ + --set "billing.storage.provider=azure" \ + --set "billing.storage.azure.storageAccount=" \ + --set "billing.storage.azure.container=" \ + --set "billing.storage.secretRef.name=billing-credentials" + ... +``` + + + + + +```bash {hl_lines="2-6"} +up space init ... \ + --set "billing.enabled=true" \ + --set "billing.storage.provider=azure" \ + --set "billing.storage.azure.storageAccount=" \ + --set "billing.storage.azure.container=" \ + --set "billing.storage.secretRef.name=billing-credentials" + ... +``` + + + + + + + +## GCP Cloud Storage Buckets + + +Configure billing to write to a Cloud Storage bucket in GCP by providing the following values at install-time. Create a bucket if you don't already have one. + +Then, on the cluster where you installed the Spaces software, create a secret in `upbound-system`. This secret must contain the key `google_application_credentials`. Make sure to replace the value with a GCP service account key JSON generated from your GCP account. + +```bash +kubectl create secret generic billing-credentials -n upbound-system \ + --from-literal=google_application_credentials= +``` + +Install the Space software, providing the billing details to the other required values. + + + + + + +```bash {hl_lines="2-5"} +helm -n upbound-system upgrade --install spaces ... \ + --set "billing.enabled=true" \ + --set "billing.storage.provider=gcp" \ + --set "billing.storage.gcp.bucket=" \ + --set "billing.storage.secretRef.name=billing-credentials" + ... +``` + + + + + +```bash {hl_lines="2-5"} +up space init ... \ + --set "billing.enabled=true" \ + --set "billing.storage.provider=gcp" \ + --set "billing.storage.gcp.bucket=" \ + --set "billing.storage.secretRef.name=billing-credentials" + ... +``` + + + + + +## Export billing data to send to Upbound + +To prepare the billing data to send to Upbound, do the following: + +Ensure the current context of your kubeconfig points at the Spaces cluster. Then, run the [export][export] command. + + +:::important +Your current CLI must have read access to the bucket to run this command. +::: + + +The example below exports billing data stored in AWS: + +```bash +up space billing export --provider=aws \ + --bucket=spaces-billing-bucket \ + --account=your-upbound-org \ + --billing-month=2024-07 \ + --force-incomplete +``` + +The command creates a billing report that's zipped up in your current working directory. Send the output to your Upbound sales representative. + + +You can find full instructions and command options in the up [CLI reference][cli-reference] docs. 
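+
+Before exporting, you can sanity-check that the billing controller is writing usage data by listing recent objects in the bucket. This is an AWS sketch; the bucket name matches the export example above:
+
+```bash
+aws s3 ls s3://spaces-billing-bucket/ --recursive | tail -n 5
+```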
+ + +[export]: /reference/cli-reference +[cli-reference]: /reference/cli-reference +[flagship-product]: https://www.upbound.io/platform +[workload-identity-configuration-documentation]: https://docs.upbound.io/operate/accounts/authentication/oidc-configuration diff --git a/spaces_versioned_docs/version-v1.10/howtos/self-hosted/capacity-licensing.md b/spaces_versioned_docs/version-v1.10/howtos/self-hosted/capacity-licensing.md new file mode 100644 index 000000000..a1dc6c101 --- /dev/null +++ b/spaces_versioned_docs/version-v1.10/howtos/self-hosted/capacity-licensing.md @@ -0,0 +1,591 @@ +--- +title: Capacity Licensing +sidebar_position: 60 +description: A guide for capacity-based licensing in self-hosted Spaces +plan: "enterprise" +--- + + + + + +This guide explains how to configure and monitor capacity-based licensing in +self-hosted Upbound Spaces. Capacity licensing provides a simplified billing +model for disconnected or air-gapped environments where automated usage +reporting isn't possible. + +:::info +Spaces `v1.15` and later support Capacity Licensing as an +alternative to the traditional usage-based billing model described in the +[Self-Hosted Space Billing][space-billing] guide. +::: + +## Overview + +Capacity licensing allows organizations to purchase a fixed capacity of +resources upfront. The Spaces software tracks usage locally and provides +visibility into consumption against your purchased capacity, all without +requiring external connectivity to Upbound's services. + +### Key concepts + +- **Resource Hours**: The primary billing unit representing all resources + managed by Crossplane over time. This includes managed resources, + composites (XRs), claims (XRCs), and all composed resources - essentially + everything Crossplane manages. The system aggregates resource counts over each + hour using trapezoidal integration to accurately account for changes in + resource count throughout the hour. +- **Operations**: The number of Operations invoked by Crossplane. +- **License Capacity**: The total amount of resource hours and operations included in your license. +- **Usage Tracking**: Continuous monitoring of consumption with real-time utilization percentages. + +### How it works + +1. Upbound provides you with a license file containing your purchased capacity +2. You configure a `SpaceLicense` in your Spaces cluster +3. The metering system automatically: + - Collects measurements from all control planes every minute + - Aggregates usage data into hourly intervals + - Stores usage data in a local PostgreSQL database + - Updates the `SpaceLicense` status with current consumption + +## Prerequisites + +### PostgreSQL database + +Capacity licensing requires a PostgreSQL database to store usage measurements. You can use: + +- An existing PostgreSQL instance +- A managed PostgreSQL service (AWS RDS, Azure Database, Google Cloud SQL) +- A PostgreSQL instance deployed in your cluster + +The database must be: + +- Accessible from the Spaces cluster +- Configured with a dedicated database and credentials + +#### Example: Deploy PostgreSQL with CloudNativePG + +If you don't have an existing PostgreSQL instance, you can deploy one in your +cluster using [CloudNativePG] (CNPG). CNPG is a Kubernetes operator that +manages PostgreSQL clusters. + +1. Install the CloudNativePG operator: + +```bash +kubectl apply -f https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/release-1.24/releases/cnpg-1.24.1.yaml +``` + +2. 
Create a PostgreSQL cluster for metering: + +```yaml +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: metering-postgres + namespace: upbound-system +spec: + instances: 1 + imageName: ghcr.io/cloudnative-pg/postgresql:16 + bootstrap: + initdb: + database: metering + owner: metering + postInitApplicationSQL: + - ALTER ROLE "metering" CREATEROLE; + storage: + size: 5Gi + # Optional: Configure resources for production use + # resources: + # requests: + # memory: "512Mi" + # cpu: "500m" + # limits: + # memory: "1Gi" + # cpu: "1000m" +--- +apiVersion: v1 +kind: Secret +metadata: + name: metering-postgres-app + namespace: upbound-system + labels: + cnpg.io/reload: "true" +stringData: + username: metering + password: "your-secure-password-here" +type: kubernetes.io/basic-auth +``` + +```bash +kubectl apply -f metering-postgres.yaml +``` + +3. Wait for the cluster to be ready: + +```bash +kubectl wait --for=condition=ready cluster/metering-postgres -n upbound-system --timeout=5m +``` + +4. You can access the PostgreSQL cluster at `metering-postgres-rw.upbound-system.svc.cluster.local:5432`. + +:::tip +For production deployments, consider: +- Increasing `instances` to 3 for high availability +- Configuring [backups] to object storage +- Setting appropriate resource requests and limits +- Using a dedicated storage class with good I/O performance +::: + +### License file + +Contact your Upbound sales representative to obtain a license file for your organization. The license file contains: +- Your unique license ID +- Purchased capacity (resource hours and operations) +- License validity period +- Any usage restrictions (such as cluster UUID pinning) + +## Configuration + +### Step 1: Create database credentials secret + +Create a Kubernetes secret containing your PostgreSQL password using the pgpass format: + +```bash +# Create a pgpass file with format: hostname:port:database:username:password +# Note: The database name and username must be 'metering' +# For CNPG clusters, use the read-write service endpoint: -rw..svc.cluster.local +echo "metering-postgres-rw.upbound-system.svc.cluster.local:5432:metering:metering:your-secure-password-here" > pgpass + +# Create the secret +kubectl create secret generic metering-postgres-credentials \ + -n upbound-system \ + --from-file=pgpass=pgpass + +# Clean up the pgpass file +rm pgpass +``` + +The secret must contain a single key: +- **`pgpass`**: PostgreSQL password file in the format `hostname:port:metering:metering:password` + +:::note +The database name and username are fixed as `metering`. Ensure your PostgreSQL instance has a database named `metering` with a user `metering` that has appropriate permissions. + +If you deployed PostgreSQL using CNPG as shown in the example above, the password should match what you set in the `metering-postgres-app` secret. +::: + +:::tip +For production environments, consider using external secret management solutions: +- [External Secrets Operator][eso] +- Cloud-specific secret managers (AWS Secrets Manager, Azure Key Vault, GCP Secret Manager) +::: + +### Step 2: Enable metering in Spaces + +Enable the metering feature when installing or upgrading Spaces: + + + + + +```bash {hl_lines="2-7"} +helm -n upbound-system upgrade --install spaces ... 
\ + --set "metering.enabled=true" \ + --set "metering.storage.postgres.connection.url=metering-postgres-rw.upbound-system.svc.cluster.local:5432" \ + --set "metering.storage.postgres.connection.credentials.secret.name=metering-postgres-credentials" \ + --set "metering.interval=1m" \ + --set "metering.workerCount=10" \ + --set "metering.aggregationInterval=1h" \ + --set "metering.measurementRetentionDays=30" + ... +``` + + + + + +```bash {hl_lines="2-7"} +up space init ... \ + --set "metering.enabled=true" \ + --set "metering.storage.postgres.connection.url=metering-postgres-rw.upbound-system.svc.cluster.local:5432" \ + --set "metering.storage.postgres.connection.credentials.secret.name=metering-postgres-credentials" \ + --set "metering.interval=1m" \ + --set "metering.workerCount=10" \ + --set "metering.aggregationInterval=1h" \ + --set "metering.measurementRetentionDays=30" + ... +``` + + + + + +#### Configuration options + +| Option | Default | Description | +|--------|---------|-------------| +| `metering.enabled` | `false` | Enable the metering feature | +| `metering.storage.postgres.connection.url` | - | PostgreSQL host and port (format: `host:port`, required) | +| `metering.storage.postgres.connection.credentials.secret.name` | - | Name of the secret containing PostgreSQL credentials (required) | +| `metering.storage.postgres.connection.sslmode` | `require` | SSL mode for PostgreSQL connection (`disable`, `allow`, `prefer`, `require`, `verify-ca`, `verify-full`) | +| `metering.storage.postgres.connection.ca.name` | - | Name of the secret containing CA certificate for TLS connections (optional) | +| `metering.interval` | `1m` | How often to collect measurements from control planes | +| `metering.workerCount` | `10` | Number of parallel workers for measurement collection | +| `metering.aggregationInterval` | `1h` | How often to aggregate measurements into hourly usage data | +| `metering.measurementRetentionDays` | `30` | Days to retain raw measurements (0 = indefinite) | + + +#### Database sizing and retention + +The metering system uses two PostgreSQL tables to track usage: + +**Raw measurements table** (`measurements`): +- Stores point-in-time snapshots collected every measurement interval (default: 1 minute) +- One row per control plane per interval +- Affected by the `measurementRetentionDays` setting +- Used for detailed auditing and troubleshooting + +**Aggregated usage table** (`hourly_usage`): +- Stores hourly aggregated resource hours and operations per license +- One row per hour per license +- Never deleted (required for accurate license tracking) +- Grows much slower than raw measurements + +##### Storage sizing guidelines + +Estimate your PostgreSQL storage needs based on these factors: + + +| Deployment Size | Control Planes | Measurement Interval | Retention Days | Raw Measurements | Indexes & Overhead | Total Storage | +|----------------|----------------|---------------------|----------------|------------------|-------------------|---------------| +| Small | 10 | 1m | 30 | ~85 MB | ~40 MB | **~125 MB** | +| Medium | 50 | 1m | 30 | ~430 MB | ~215 MB | **~645 MB** | +| Large | 200 | 1m | 30 | ~1.7 GB | ~850 MB | **~2.5 GB** | +| Large (90-day retention) | 200 | 1m | 90 | ~5.2 GB | ~2.6 GB | **~7.8 GB** | + +The aggregated hourly usage table adds minimal overhead (~50 KB per year per license). 
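+
+As a rough sanity check, the sketch below applies the sizing formula from the
+next subsection to the medium deployment row (assuming ~200 bytes per row and
+a 1.5x index factor, as in that formula):
+
+```bash
+# Medium deployment: 50 control planes, 1-minute interval, 30-day retention
+control_planes=50; interval_minutes=1; retention_days=30
+daily=$(( 24 * 60 / interval_minutes ))              # 1440 measurements per control plane per day
+rows=$(( control_planes * daily * retention_days ))  # 2,160,000 rows
+awk -v r="$rows" 'BEGIN { printf "~%.0f MB including indexes\n", r * 200 / 1048576 * 1.5 }'
+# Prints ~618 MB, in line with the ~645 MB estimate in the table above
+```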
+ +**Formula for custom calculations**: +``` +Daily measurements per control plane = (24 * 60) / interval_minutes +Total rows = control_planes × daily_measurements × retention_days +Storage (MB) ≈ (total_rows × 200 bytes) / 1,048,576 × 1.5 (with indexes) +``` + +##### Retention behavior + +The `measurementRetentionDays` setting controls retention of raw measurement data: + +- **Default: 30 days** - Balances audit capabilities with storage efficiency +- **Set to 0**: Disables cleanup, retains all raw measurements indefinitely +- **Cleanup runs**: Every aggregation interval (default: hourly) +- **What's kept forever**: Aggregated hourly usage data (needed for license tracking) +- **What's cleaned up**: Raw point-in-time measurements older than retention period + +**Recommendations**: +- **30 days**: For most troubleshooting and short-term auditing +- **60 to 90 days**: For environments requiring extended audit trails +- **Unlimited (0)**: Only for environments with ample storage or specific compliance requirements + +:::note +Increasing retention period linearly increases storage requirements for raw measurements. The aggregated hourly data is always retained regardless of this setting. +::: + +### Step 3: Apply your license + +Use the `up` CLI to apply your license file: + +```bash +up space license apply /path/to/license.json +``` + +This command automatically: +- Creates a secret containing your license file in the `upbound-system` namespace +- Creates the `SpaceLicense` resource configured to use that secret + +:::tip +You can specify a different namespace for the license secret using the `--namespace` flag: +```bash +up space license apply /path/to/license.json --namespace my-namespace +``` +::: + +
+Alternative: Manual kubectl approach + +If you prefer not to use the `up` CLI, you can manually create the resources: + +1. Create the license secret: + +```bash +kubectl create secret generic space-license \ + -n upbound-system \ + --from-file=license.json=/path/to/license.json +``` + +2. Create the SpaceLicense resource: + +```yaml +apiVersion: admin.spaces.upbound.io/v1alpha1 +kind: SpaceLicense +metadata: + name: space +spec: + secretRef: + name: space-license + namespace: upbound-system + key: license.json +``` + +```bash +kubectl apply -f spacelicense.yaml +``` + +:::important +You **must** name the `SpaceLicense` resource `space`. This resource is a singleton and only one can exist in the cluster. +::: + +
+ +## Monitoring usage + +### Check license status + +Use the `up` CLI to view your license details and current usage: + +```bash +up space license show +``` + +Example output: + +``` +Spaces License Status: Valid (License is valid) + +Created: 2024-01-01T00:00:00Z +Expires: 2025-01-01T00:00:00Z + +Plan: enterprise + +Resource Hour Limit: 1000000 +Operation Limit: 500000 + +Enabled Features: +- spaces +- query-api +- backup-restore +``` + +The output shows: +- License validity status and any validation messages +- Creation and expiration dates +- Your commercial plan tier +- Capacity limits for resource hours and operations +- Enabled features in your license +- Any restrictions (such as cluster UUID pinning) + +
+Alternative: View detailed status with kubectl + +For detailed information including usage statistics, use kubectl: + +```bash +kubectl get spacelicense space -o yaml +``` + +Example output showing usage data: + +```yaml +apiVersion: admin.spaces.upbound.io/v1alpha1 +kind: SpaceLicense +metadata: + name: space +spec: + secretRef: + name: space-license + namespace: upbound-system +status: + conditions: + - type: LicenseValid + status: "True" + reason: Valid + message: "License is valid" + id: "lic_abc123xyz" + plan: "enterprise" + capacity: + resourceHours: 1000000 + operations: 500000 + usage: + resourceHours: 245680 + operations: 12543 + resourceHoursUtilization: "24.57%" + operationsUtilization: "2.51%" + firstMeasurement: "2024-01-15T10:00:00Z" + lastMeasurement: "2024-02-10T14:30:00Z" + createdAt: "2024-01-01T00:00:00Z" + expiresAt: "2025-01-01T00:00:00Z" + enabledFeatures: + - "spaces" + - "query-api" + - "backup-restore" +``` + +
+ +### Understanding the status fields + +| Field | Description | +|-------|-------------| +| `status.id` | Unique license identifier | +| `status.plan` | Your commercial plan (community, standard, enterprise) | +| `status.capacity` | Total capacity included in your license | +| `status.usage.resourceHours` | Total resource hours consumed | +| `status.usage.operations` | Total operations performed | +| `status.usage.resourceHoursUtilization` | Percentage of resource hours capacity used | +| `status.usage.operationsUtilization` | Percentage of operations capacity used | +| `status.usage.firstMeasurement` | When usage tracking began | +| `status.usage.lastMeasurement` | Most recent usage update | +| `status.expiresAt` | License expiration date | + +### Monitor with kubectl + +Watch your license utilization in real-time: + +```bash +kubectl get spacelicense space -w +``` + +Short output format: + +``` +NAME PLAN VALID REASON AGE +space enterprise True Valid 45d +``` + +## Managing licenses + +### Updating your license + +To update your license with a new license file (for example, when renewing or upgrading capacity), apply the new license: + +```bash +up space license apply /path/to/new-license.json +``` + +This command replaces the existing license secret and updates the SpaceLicense resource. + +### Removing a license + +To remove a license: + +```bash +up space license remove +``` + +This command: +- Prompts for confirmation before proceeding +- Removes the license secret + +To skip the confirmation prompt, use the `--force` flag: + +```bash +up space license remove --force +``` + +## Troubleshooting + +### License not updating + +If the license status doesn't update with usage data: + +1. **Check metering controller logs**: + ```bash + kubectl logs -n upbound-system deployment/spaces-controller -c metering + ``` + +2**Check if the system captures your measurements**: + + ```bash + # Connect to PostgreSQL and query the measurements table + kubectl exec -it -- psql -U -d \ + -c "SELECT COUNT(*) FROM measurements WHERE timestamp > NOW() - INTERVAL '1 hour';" + ``` + +### High utilization warnings + +If you're approaching your capacity limits: + +1. **Review resource usage** by control plane to identify high consumers +2. **Contact your Upbound sales representative** to discuss capacity expansion +3. **Optimize managed resources** by cleaning up unused resources + +### License validation failures + +If your license shows as invalid: + +1. **Check expiration date**: `kubectl get spacelicense space -o jsonpath='{.status.expiresAt}'` +2. **Verify license file integrity**: Ensure the secret contains valid JSON +3. **Check for cluster UUID restrictions**: Upbound pins some licenses to + specific clusters +4. **Review controller logs** for detailed error messages + +## Differences from traditional billing + +### Capacity licensing + +- ✅ Works in disconnected environments +- ✅ Provides real-time usage visibility +- ✅ No manual data export required +- ✅ Requires PostgreSQL database +- ✅ Fixed capacity model + +### Traditional billing (object storage) + + +- ❌ Requires periodic manual export +- ❌ Delayed visibility into usage +- ✅ Works with S3/Azure Blob/GCS +- ❌ Requires cloud storage access +- ✅ Pay-as-you-go model + +## Best practices + +### Database management + +1. **Regular backups**: Back up your metering database regularly to preserve usage history +2. **Monitor database size**: Set appropriate retention periods to manage storage growth +3. 
**Use managed databases**: Consider managed PostgreSQL services for production +4. **Connection pooling**: Use connection pooling for better performance at scale + +### License management + +1. **Monitor utilization**: Set up alerts before reaching 80% capacity +2. **Plan renewals early**: Start renewal discussions 60 days before expiration +3. **Track grace periods**: Note the `gracePeriodEndsAt` date for planning +4. **Secure license files**: Treat license files as sensitive credentials + +### Operational monitoring + +1. **Set up dashboards**: Create Grafana dashboards for usage trends +2. **Enable alerting**: Configure alerts for high utilization and expiration +3. **Regular audits**: Periodically review usage patterns across control planes +4. **Capacity planning**: Use historical data to predict future capacity needs + +## Next steps + +- Learn about [Observability] to monitor your Spaces deployment +- Explore [Backup and Restore][backup-restore] to protect your control plane data +- Review [Self-Hosted Space Billing][space-billing] for the traditional billing model +- Contact [Upbound Sales][sales] to discuss capacity licensing options + + +[space-billing]: /spaces/howtos/self-hosted/billing +[CloudNativePG]: https://cloudnative-pg.io/ +[backups]: https://cloudnative-pg.io/documentation/current/backup_recovery/ +[backup-restore]: /spaces/howtos/backup-and-restore +[sales]: https://www.upbound.io/contact +[eso]: https://external-secrets.io/ +[Observability]: /spaces/howtos/observability + + diff --git a/spaces_versioned_docs/version-v1.10/howtos/self-hosted/certs.md b/spaces_versioned_docs/version-v1.10/howtos/self-hosted/certs.md new file mode 100644 index 000000000..e517c250e --- /dev/null +++ b/spaces_versioned_docs/version-v1.10/howtos/self-hosted/certs.md @@ -0,0 +1,274 @@ +--- +title: Istio Ingress Gateway With Custom Certificates +sidebar_position: 20 +description: Install self hosted spaces using istio ingress gateway in a Kind cluster +--- + +:::important +Prerequisites + +- Spaces Token available in a file +- `docker login xpkg.upbound.io -u -p ` +- [`istioctl`][istioctl] installation +- `jq` installation +::: + +This document describes the installation of a self hosted space on an example `kind` +cluster along with Istio Ingress Gateway and certificates. The service mesh and certificates +installation is transferable to self hosted spaces in arbitrary clouds. + +## Create a kind cluster + +```shell +cat < +## Install Istio + + + +:::important +This is an example and not recommended for use in production. +::: + + +1. Create the `istio-values.yaml` file + +```shell +cat > istio-values.yaml << 'EOF' +apiVersion: install.istio.io/v1alpha1 +kind: IstioOperator +spec: + hub: gcr.io/istio-release + components: + ingressGateways: + - enabled: true + name: istio-ingressgateway + k8s: + nodeSelector: + ingress-ready: "true" + overlays: + - apiVersion: apps/v1 + kind: Deployment + name: istio-ingressgateway + patches: + - path: spec.template.spec.containers.[name:istio-proxy].ports + value: + - containerPort: 8080 + hostPort: 80 + - containerPort: 8443 + hostPort: 443 +EOF +``` + +2. Install istio via `istioctl` + +```shell +istioctl install -f istio-values.yaml +``` + +## Create a self-signed Certificate via cert-manager + +:::important +This Certificate manifest creates a self-signed certificate for a proof of concept +environment and isn't recommended for production use cases. +::: + +1. 
Create the upbound-system namespace + +```shell +kubectl create namespace upbound-system +``` + +2. Create a self-signed certificate + +```shell +cat < +## Create an Istio Gateway and VirtualService + + + + +Configure an Istio Gateway and VirtualService to use TLS passthrough. + + +```shell +cat < spaces-values.yaml << 'EOF' +# Configure spaces-router to use the TLS secret created by cert-manager. +externalTLS: + tlsSecret: + name: example-tls-secret + caBundleSecret: + name: example-tls-secret + key: ca.crt +ingress: + provision: false + # Allow Istio Ingress Gateway to communicate to the spaces-router + namespaceLabels: + kubernetes.io/metadata.name: istio-system + podLabels: + app: istio-ingressgateway + istio: ingressgateway +EOF +``` + +2. Set the required environment variables + +```shell +# Update these according to your account/token file +export SPACES_TOKEN_PATH= +export UPBOUND_ACCOUNT= +# Replace SPACES_ROUTER_HOST with your Spaces ingress hostname +export SPACES_ROUTER_HOST="proxy.upbound-127.0.0.1.nip.io" +export SPACES_VERSION="1.14.1" +``` + +3. Create an image pull secret for Spaces + +```shell +kubectl -n upbound-system create secret docker-registry upbound-pull-secret \ + --docker-server=https://xpkg.upbound.io \ + --docker-username="$(jq -r .accessId $SPACES_TOKEN_PATH)" \ + --docker-password="$(jq -r .token $SPACES_TOKEN_PATH)" +``` + +4. Install the Spaces helm chart + +```shell +# Login to xpkg.upbound.io +jq -r .token $SPACES_TOKEN_PATH | helm registry login xpkg.upbound.io -u $(jq -r .accessId $SPACES_TOKEN_PATH) --password-stdin + +# Install spaces helm chart +helm -n upbound-system upgrade --install spaces \ + oci://xpkg.upbound.io/spaces-artifacts/spaces \ + --version "${SPACES_VERSION}" \ + --set "ingress.host=${SPACES_ROUTER_HOST}" \ + --set "account=${UPBOUND_ACCOUNT}" \ + --set "authentication.hubIdentities=true" \ + --set "authorization.hubRBAC=true" \ + --wait -f spaces-values.yaml +``` + +## Validate the installation + +Successful access of the `up` command to interact with your self hosted space validates the +certificate installation. + +- `up ctx .` + +You can also issue control plane creation, list and deletion commands. + +- `up ctp create cert-test` +- `up ctp list` +- `up ctx disconnected/kind-kind/default/cert-test && kubectl get namespace` +- `up ctp delete cert-test` + +:::note +If `up` can't connect to your control plane, follow [this guide to create a new profile][up-profile]. +::: + +## Troubleshooting + +Examine your certificate with `openssl`: + +```shell +openssl s_client -connect proxy.upbound-127.0.0.1.nip.io:443 -showcerts +``` + +[istioctl]: https://istio.io/latest/docs/ops/diagnostic-tools/istioctl/ +[up-profile]: /manuals/cli/howtos/profile-config/ diff --git a/spaces_versioned_docs/version-v1.10/howtos/self-hosted/configure-ha.md b/spaces_versioned_docs/version-v1.10/howtos/self-hosted/configure-ha.md new file mode 100644 index 000000000..ddf36c55e --- /dev/null +++ b/spaces_versioned_docs/version-v1.10/howtos/self-hosted/configure-ha.md @@ -0,0 +1,450 @@ +--- +title: Production Scaling and High Availability +description: Configure your Self-Hosted Space for production +sidebar_position: 5 +--- + + + +This guide explains how to configure an existing Upbound Space deployment for +production operation at scale. + +Use this guide when you're ready to deploy production scaling, high availability, +and monitoring in your Space. 
:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments.
+
+For API specifications on ControlPlane resources and configurations, see the [Spaces API Reference](../../../../../reference/apis/spaces-api/).
+:::
+
+## Prerequisites
+
+Before you begin scaling your Spaces deployment, make sure you have:
+
+* A working Space deployment
+* Cluster administrator access
+* An understanding of load patterns and growth in your organization
+* Familiarity with node affinity, tainting, and Horizontal Pod Autoscaling
+  (HPA)
+
+## Production scaling strategy
+
+In this guide, you will:
+
+* Create dedicated node pools for different component types
+* Configure high availability to ensure there are no single points of failure
+* Set dynamic scaling for variable workloads
+* Optimize your storage and component operations
+* Monitor your deployment health and performance
+
+## Spaces architecture
+
+The basic Spaces workflow follows the pattern below:
+
+![Spaces workflow][spaces-workflow]
+
+## Node architecture
+
+You can mitigate resource contention and improve reliability by separating system
+components into dedicated node pools.
+
+### `etcd` dedicated nodes
+
+`etcd` performance directly impacts your entire Space, so isolate it for
+consistent performance.
+
+1. Create a dedicated `etcd` node pool
+
+   **Requirements:**
+   - **Minimum**: 3 nodes for HA
+   - **Instance type**: General purpose with high network throughput/low latency
+   - **Storage**: High performance storage (`etcd` is I/O sensitive)
+
+2. Taint `etcd` nodes to reserve them
+
+   ```bash
+   kubectl taint nodes <node-name> target=etcd:NoSchedule
+   ```
+
+3. Configure `etcd` storage
+
+   `etcd` is sensitive to storage I/O performance. Review the [`etcd` scaling
+   documentation][scaling] for specific storage guidance.
+
+### API server dedicated nodes
+
+API servers handle all control plane requests and should run on dedicated
+infrastructure.
+
+1. Create dedicated API server nodes
+
+   **Requirements:**
+   - **Minimum**: 2 nodes for HA
+   - **Instance type**: Compute-optimized, memory-optimized, or general-purpose
+   - **Scaling**: Scale vertically based on API server load patterns
+
+2. Taint API server nodes
+
+   ```bash
+   kubectl taint nodes <node-name> target=apiserver:NoSchedule
+   ```
+
+### Configure cluster autoscaling
+
+Enable cluster autoscaling for all node pools.
+
+For AWS EKS clusters, Upbound recommends using [`Karpenter`][karpenter] for
+improved bin-packing and instance type selection.
+
+For GCP GKE clusters, follow the [GKE autoscaling][gke-autoscaling] guide.
+
+For Azure AKS clusters, follow the [AKS autoscaling][aks-autoscaling] guide.
+
+## Configure high availability
+
+Ensure control plane components can survive node and zone failures.
+
+### Enable high availability mode
+
+1. Configure control planes for high availability
+
+   ```yaml
+   controlPlanes:
+     ha:
+       enabled: true
+   ```
+
+   This configures control plane pods to run with multiple replicas and
+   associated pod disruption budgets.
+
+### Configure component distribution
+
+1. 
Set up API server pod distribution + + ```yaml + controlPlanes: + vcluster: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: target + operator: In + values: + - apiserver + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: app + operator: In + values: + - vcluster + topologyKey: "kubernetes.io/hostname" + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - vcluster + topologyKey: topology.kubernetes.io/zone + weight: 100 + ``` + +2. Configure `etcd` pod distribution + + ```yaml + controlPlanes: + etcd: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: target + operator: In + values: + - etcd + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: app + operator: In + values: + - vcluster-etcd + topologyKey: "kubernetes.io/hostname" + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - vcluster-etcd + topologyKey: topology.kubernetes.io/zone + weight: 100 + ``` + +### Configure tolerations + +Allow control plane pods to schedule on the tainted dedicated nodes (available +in Spaces v1.14+). + +1. Add tolerations for `etcd` pods + + ```yaml + controlPlanes: + etcd: + tolerations: + - key: "target" + operator: "Equal" + value: "etcd" + effect: "NoSchedule" + ``` + +2. Add tolerations for API server pods + + ```yaml + controlPlanes: + vcluster: + tolerations: + - key: "target" + operator: "Equal" + value: "apiserver" + effect: "NoSchedule" + ``` + + +## Configure autoscaling for Spaces components + + +Set up the Spaces system components to handle variable load automatically. + +### Scale API and `apollo` services + +1. Configure minimum replicas for availability + + ```yaml + api: + replicaCount: 2 + + features: + alpha: + apollo: + enabled: true + replicaCount: 2 + ``` + + Both services support horizontal and vertical scaling based on load patterns. + +### Configure router autoscaling + +The `spaces-router` is the entry point for all traffic and needs intelligent +scaling. + + +1. Enable Horizontal Pod Autoscaler + + ```yaml + router: + hpa: + enabled: true + minReplicas: 2 + maxReplicas: 8 + targetCPUUtilizationPercentage: 80 + targetMemoryUtilizationPercentage: 80 + ``` + +2. Monitor scaling factors + + **Router scaling behavior:** + - **Vertical scaling**: Scales based on number of control planes + - **Horizontal scaling**: Scales based on request volume + - **Resource monitoring**: Monitor CPU and memory usage + + + +### Configure controller scaling + +The `spaces-controller` manages Space-level resources and requires vertical +scaling. + +1. Configure adequate resources with headroom + + ```yaml + controller: + resources: + requests: + cpu: "500m" + memory: "1Gi" + limits: + cpu: "2000m" + memory: "4Gi" + ``` + + **Important**: The controller can spike when reconciling large numbers of + control planes, so provide adequate headroom for resource spikes. + +## Set up production storage + + +### Configure Query API database + + +1. 
Use a managed PostgreSQL database + + **Recommended services:** + - [AWS RDS][rds] + - [Google Cloud SQL][gke-sql] + - [Azure Database for PostgreSQL][aks-sql] + + **Requirements:** + - Minimum 400 IOPS performance + + +## Monitoring + + + +Monitor key metrics to ensure healthy scaling and identify issues quickly. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +### Control plane health + +Track these `spaces-controller` metrics: + +1. **Total control planes** + + ``` + spaces_control_plane_exists + ``` + + Tracks the total number of control planes in the system. + +2. **Degraded control planes** + + ``` + spaces_control_plane_degraded + ``` + + Returns control planes that don't have a `Synced`, `Ready`, and + `Healthy` state. + +3. **Stuck control planes** + + ``` + spaces_control_plane_stuck + ``` + + Control planes stuck in a provisioning state. + +4. **Deletion issues** + + ``` + spaces_control_plane_deletion_stuck + ``` + + Control planes stuck during deletion. + +### Alerting + +Configure alerts for critical scaling and health metrics: + +- **High error rates**: Alert when 4xx/5xx response rates exceed thresholds +- **Control plane health**: Alert when degraded or stuck control planes exceed acceptable counts + +## Architecture overview + +**Spaces System Components:** + +- **`spaces-router`**: Entry point for all endpoints, dynamically builds routes to control plane API servers +- **`spaces-controller`**: Reconciles Space-level resources, serves webhooks, works with `mxp-controller` for provisioning +- **`spaces-api`**: API for managing groups, control planes, shared secrets, and telemetry objects (accessed only through spaces-router) +- **`spaces-apollo`**: Hosts the Query API, connects to PostgreSQL database populated by `apollo-syncer` pods + + +**Control Plane Components (per control plane):** +- **`mxp-controller`**: Handles provisioning tasks, serves webhooks, installs UXP and `XGQL` +- **`XGQL`**: GraphQL API powering console views +- **`kube-state-metrics`**: Collects usage metrics for billing (updated by `mxp-controller` when CRDs change) +- **`vector`**: Works with `kube-state-metrics` to send usage data to external storage for billing +- **`apollo syncer`**: Syncs `etcd` data into PostgreSQL for the Query API + + +### `up ctx` workflow + + + up ctx workflow diagram + + +### Access a control plane API server via kubectl + + + kubectl workflow diagram + + +### Query API/Apollo + + + query API workflow diagram + + +## See also + +* [Upbound Spaces deployment requirements][deployment] +* [Upbound `etcd` scaling resources][scaling] + +[up-ctx-workflow]: /img/up-ctx-workflow.png +[kubectl]: /img/kubectl-workflow.png +[query-api]: /img/query-api-workflow.png +[spaces-workflow]: /img/up-basic-flow.png +[rds]: https://aws.amazon.com/rds/postgresql/ +[gke-sql]: https://cloud.google.com/kubernetes-engine/docs/tutorials/stateful-workloads/postgresql +[aks-sql]: https://learn.microsoft.com/en-us/azure/aks/deploy-postgresql-ha?tabs=azuredisk +[deployment]: https://docs.upbound.io/spaces/howtos/self-hosted/deployment-reqs/ +[karpenter]: https://docs.aws.amazon.com/eks/latest/best-practices/karpenter.html +[gke-autoscaling]: https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-autoscaler +[aks-autoscaling]: https://learn.microsoft.com/en-us/azure/aks/cluster-autoscaler-overview +[scaling]: https://docs.upbound.io/deploy/self-hosted-spaces/scaling-resources#scaling-etcd-storage diff --git a/spaces_versioned_docs/version-v1.10/howtos/self-hosted/controllers.md 
b/spaces_versioned_docs/version-v1.10/howtos/self-hosted/controllers.md
new file mode 100644
index 000000000..692740638
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.10/howtos/self-hosted/controllers.md
@@ -0,0 +1,389 @@
+---
+title: Controllers
+weight: 250
+description: A guide to wrapping and deploying an Upbound controller into control planes on Upbound.
+---
+
+:::important
+This feature is in private preview for select customers in Upbound Spaces. If you're interested in this feature, please [contact us](https://www.upbound.io/contact-us).
+:::
+
+Upbound's _Controllers_ feature lets you build and deploy control plane software from the Kubernetes ecosystem. With the _Controllers_ feature, you're not limited to managing only the resource types defined by Crossplane. You can also create resources from _CustomResourceDefinitions_ defined by other Kubernetes ecosystem tooling.
+
+This guide explains how to bundle and deploy control plane software from the Kubernetes ecosystem on a control plane in Upbound.
+
+## Benefits
+
+The Controllers feature provides the following benefits:
+
+* Deploy control plane software from the Kubernetes ecosystem.
+* Use your control plane's package manager to handle the lifecycle of the control plane software and define dependencies between packages.
+* Build powerful compositions that combine both Crossplane and Kubernetes _CustomResources_.
+
+## How it works
+
+A _Controller_ is a package type that bundles control plane software from the Kubernetes ecosystem. Examples of such software include:
+
+- Kubernetes policy engines
+- CI/CD tooling
+- Your own private custom controllers defined by your organization
+
+You build a _Controller_ package by wrapping a Helm chart along with its requisite _CustomResourceDefinitions_. Your _Controller_ package gets pushed to an OCI registry, and from there you can apply it to a control plane like you would any other Crossplane package. Your control plane's package manager is responsible for managing the lifecycle of the software once applied.
+
+## Prerequisites
+
+Enable the Controllers feature in the Space you plan to run your control plane in:
+
+- Cloud Spaces: Not available yet
+- Connected Spaces: Space administrator must enable this feature
+- Disconnected Spaces: Space administrator must enable this feature
+
+Packaging a _Controller_ requires [up CLI][cli] `v0.39.0` or later.
+
+## Build a _Controller_ package
+
+_Controllers_ are a package type administered by your control plane's package manager.
+
+### Prepare the package
+
+To define a _Controller_, you need a Helm chart. This guide assumes the control plane software you want to build into a _Controller_ already has a Helm chart available.
+
+Start by making a working directory to assemble the necessary parts:
+
+```ini
+mkdir controller-package
+cd controller-package
+```
+
+Inside the working directory, pull the Helm chart:
+
+```shell
+export CHART_REPOSITORY=<chart-repository-url>
+export CHART_NAME=<chart-name>
+export CHART_VERSION=<chart-version>
+
+helm pull $CHART_NAME --repo $CHART_REPOSITORY --version $CHART_VERSION
+```
+
+Be sure to update the Helm chart repository, name, and version with your own.
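+
+For example, to wrap the Argo CD chart used later in this guide, the values
+might look like this (the repository and chart name are the upstream Argo CD
+Helm chart coordinates; confirm the version against the chart's own releases):
+
+```shell
+export CHART_REPOSITORY=https://argoproj.github.io/argo-helm
+export CHART_NAME=argo-cd
+export CHART_VERSION=7.8.8
+
+helm pull $CHART_NAME --repo $CHART_REPOSITORY --version $CHART_VERSION
+```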
+ +Move the Helm chart into its own folder: + +```ini +mkdir helm +mv $CHART_NAME-$CHART_VERSION.tgz helm/chart.tgz +``` + +Unpack the CRDs from the Helm chart into their own directory: + +```shell +export RELEASE_NAME= +export RELEASE_NAMESPACE= + +mkdir crds +helm template $RELEASE_NAME helm/chart.tgz -n $RELEASE_NAMESPACE --include-crds | \ + yq e 'select(.kind == "CustomResourceDefinition")' - | \ + yq -s '("crds/" + .metadata.name + ".yaml")' - +``` +Be sure to update the Helm release name, and namespace with your own. + +:::info +The instructions above assume your CRDs get deployed as part of your Helm chart. If they're deployed another way, you need to manually copy your CRDs instead. +::: + +Create a `crossplane.yaml` with your controller metadata: + +```yaml +cat < crossplane.yaml +apiVersion: meta.pkg.upbound.io/v1alpha1 +kind: Controller +metadata: + annotations: + friendly-name.meta.crossplane.io: Controller + meta.crossplane.io/description: | + A brief description of what the controller does. + meta.crossplane.io/license: Apache-2.0 + meta.crossplane.io/maintainer: + meta.crossplane.io/readme: | + An explanation of your controller. + meta.crossplane.io/source: + name: +spec: + packagingType: Helm + helm: + releaseName: + releaseNamespace: + # Value overrides for the helm release can be provided below. + # values: + # foo: bar +EOF +``` + +Your controller's file structure should look like this: + +```ini +. +├── crds +│ ├── your-crd.yaml +│ ├── second-crd.yaml +│ └── another-crd.yaml +├── crossplane.yaml +└── helm + └── chart.tgz +``` + +### Package and push the _Controller_ + +At the root of your controller's working directory, build the contents into an xpkg: + +```ini +up xpkg build +``` + +This causes an xpkg to get saved to your current directory with a name like `controller-f7091386b4c0.xpkg`. + +Push the package to your desired OCI registry: + +```shell +export UPBOUND_ACCOUNT= +export CONTROLLER_NAME= +export CONTROLLER_VERSION= +export XPKG_FILENAME= + +up xpkg push xpkg.upbound.io/$UPBOUND_ACCOUNT/$CONTROLLER_NAME:$CONTROLLER_VERSION -f $XPKG_FILENAME +``` + + + +## Deploy a _Controller_ package + + + +:::important +_Controllers_ are only installable on control planes running Crossplane `v1.19.0` or later. +::: + +Set your kubecontext to the desired control plane in Upbound. Change the package path to the OCI registry you pushed it to. Then, deploy the _Controller_ directly: + +```shell +export CONTROLLER_NAME= +export CONTROLLER_VERSION= + +cat < crossplane.yaml +apiVersion: meta.pkg.upbound.io/v1alpha1 +kind: Controller +metadata: + annotations: + friendly-name.meta.crossplane.io: Controller ArgoCD + meta.crossplane.io/description: | + The ArgoCD Controller enables continuous delivery and declarative configuration + management for Kubernetes applications using GitOps principles. + meta.crossplane.io/license: Apache-2.0 + meta.crossplane.io/maintainer: Upbound Maintainers + meta.crossplane.io/readme: | + ArgoCD is a declarative GitOps continuous delivery tool for Kubernetes that + follows the GitOps methodology to manage infrastructure and application + configurations. + meta.crossplane.io/source: https://github.com/argoproj/argo-cd + name: argocd +spec: + packagingType: Helm + helm: + releaseName: argo-cd + releaseNamespace: argo-system + # values: + # foo: bar +EOF +``` + +Your controller's file structure should look like this: + +```ini +. 
+├── crds +│ ├── applications.argoproj.io.yaml +│ ├── applicationsets.argoproj.io.yaml +│ └── appprojects.argoproj.io.yaml +├── crossplane.yaml +└── helm + └── chart.tgz +``` + +### Package and push controller-argocd + +At the root of your controller's working directory, build the contents into an xpkg: + +```ini +up xpkg build +``` + +This causes an xpkg to get saved to your current directory with a name like `argocd-f7091386b4c0.xpkg`. + +Push the package to your desired OCI registry: + +```shell +export UPBOUND_ACCOUNT= +export CONTROLLER_NAME=controller-argocd +export CONTROLLER_VERSION=v7.8.8 +export XPKG_FILENAME= + +up xpkg push --create xpkg.upbound.io/$UPBOUND_ACCOUNT/$CONTROLLER_NAME:$CONTROLLER_VERSION -f $XPKG_FILENAME +``` + +### Deploy controller-argocd to a control plane + +Set your kubecontext to the desired control plane in Upbound. Change the package path to the OCI registry you pushed it to. Then, deploy the _Controller_ directly: + +```ini +cat < + +## Frequently asked questions + +
+Can I package any software or are there any prerequisites to be a Controller? + +We define a *Controller* as a software that has at least one Custom Resource Definition (CRD) and a Kubernetes controller for that CRD. This is the minimum requirement to be a *Controller*. We have some checks to enforce this at packaging time. + +
+ +
+How can I package my software as a Controller? + +Currently, we support Helm charts as the underlying package format for *Controllers*. As long as you have a Helm chart, you can package it as a *Controller*. + +If you don't have a Helm chart, you can't deploy the software. We only support Helm charts as the underlying package format for *Controllers*. We may extend this to support other packaging formats like Kustomize in the future. + +
+ +
+Can I package Crossplane XRDs/Compositions as a Helm chart to deploy as a Controller? + +This is not recommended. For packaging Crossplane XRDs/ and Compositions, we recommend using the `Configuration` package format. A helm chart only with Crossplane XRDs/Compositions does not qualify as a *Controller*. + +
+ +
+How can I override the Helm values when deploying a Controller? + +Overriding the Helm values is possible at two levels: +- During packaging time, in the package manifest file. +- At runtime, using a `ControllerRuntimeConfig` resource (similar to Crossplane `DeploymentRuntimeConfig`). + +
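+
+For example, a packaging-time override lives under `spec.helm.values` in the
+package's `crossplane.yaml` manifest. A minimal sketch follows; the
+`replicaCount` key is illustrative -- use whatever keys your chart's own
+`values.yaml` defines:
+
+```yaml
+spec:
+  packagingType: Helm
+  helm:
+    releaseName: my-release
+    releaseNamespace: my-namespace
+    # Overrides merged into the chart's default values
+    values:
+      replicaCount: 2
+```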
+ +
+How can I configure the helm release name and namespace for the controller? + +Right now, it is not possible to configure this at runtime. The package author configures release name and namespace during packaging, so it is hardcoded inside the package. Unlike a regular application that is deployed by a Helm chart, *Controllers* can only be deployed once in a given control plane, so, we hope it should be ok to rely on predefined release names and namespaces. We may consider exposing these in `ControllerRuntimeConfig` later, but, we would like to keep it opinionated unless there are strong reasons to do so. + +
+ +
+Can I deploy more than one instance of a Controller package? + +No, this is not possible. Remember, a *Controller* package introduces CRDs which are cluster-scoped objects. Just like one cannot deploy more than one instance of the same Crossplane Provider package today, it is not possible to deploy more than one instance of a *Controller*. + +
+ +
+Do I need a specific Crossplane version to run Controllers? + +Yes, you need to use Crossplane v1.19.0 or later to use *Controllers*. This is because of the changes in the Crossplane codebase to support third-party package formats in dependencies. + +Spaces `v1.12.0` supports Crossplane `v1.19` in the *Rapid* release channel. + +
+ +
+Can I deploy Controllers outside of an Upbound control plane? With UXP? + +No, *Controllers* are a proprietary package format and are only available for control planes running in Spaces hosting environments in Upbound. + +
+</details>
+
+
+[cli]: /manuals/uxp/overview
+
diff --git a/spaces_versioned_docs/version-v1.10/howtos/self-hosted/ctp-audit-logs.md b/spaces_versioned_docs/version-v1.10/howtos/self-hosted/ctp-audit-logs.md
new file mode 100644
index 000000000..52f52c776
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.10/howtos/self-hosted/ctp-audit-logs.md
@@ -0,0 +1,549 @@
+---
+title: Control plane audit logging
+---
+
+This guide explains how to enable and configure audit logging for control planes
+in Self-Hosted Upbound Spaces.
+
+Starting in Spaces `v1.14.0`, each control plane contains an API server that
+supports audit log collection. You can use audit logging to track creation,
+updates, and deletions of Crossplane resources. Control plane audit logging
+builds on the observability features: you enable collection with a
+`SharedTelemetryConfig` and send the logs to an OpenTelemetry (`OTEL`) collector.
+
+:::info API Version Information
+This guide applies to **Spaces v1.14.0 and later** (Self-Hosted only). Audit logging is not available in earlier versions.
+
+For API specifications on observability resources, see the [Spaces API Reference - v1.14](../../../../../1.14/reference/apis/spaces-api/).
+:::
+
+## Prerequisites
+
+Before you begin, make sure you have:
+
+* Spaces `v1.14.0` or greater
+* Admin access to your Spaces host cluster
+* `kubectl` configured to access the host cluster
+* `helm` installed
+* `yq` installed
+* `up` CLI installed and logged in to your organization
+
+## Enable observability
+
+Observability graduated to General Availability (GA) in `v1.14.0` but is
+disabled by default.
+
+
+### Before `v1.14`
+
+To enable the GA Observability feature, upgrade your Spaces installation to `v1.14.0`
+or later and update your installation setting to the new flag:
+
+```diff
+helm upgrade spaces upbound/spaces -n upbound-system \
+- --set "features.alpha.observability.enabled=true"
++ --set "observability.enabled=true"
+```
+
+
+### After `v1.14`
+
+To enable the GA Observability feature for `v1.14.0` and later, pass the feature
+flag:
+
+```sh
+helm upgrade spaces upbound/spaces -n upbound-system \
+  --set "observability.enabled=true"
+```
+
+
+To confirm Observability is enabled, run the `helm get values` command:
+
+```shell
+helm get values --namespace upbound-system spaces | yq .observability
+```
+
+Your output should return:
+
+```shell-noCopy
+enabled: true
+```
+
+## Install an observability backend
+
+:::note
+If you already have an observability backend in your environment, skip to the
+next section.
+:::
+
+For this guide, you'll use Grafana's `docker-otel-lgtm` bundle to validate audit
+log generation. For production environments, configure a dedicated observability
+backend like Datadog, Splunk, or an enterprise-grade Grafana stack.
+
+First, make sure your `kubectl` context points to your Spaces host cluster:
+
+```shell
+kubectl config current-context
+```
+
+The output should return your cluster name.
+
+Next, install `docker-otel-lgtm` as a deployment using port-forwarding to
+connect to Grafana.
Create a manifest file and paste the +following configuration: + +```yaml title="otel-lgtm.yaml" +apiVersion: v1 +kind: Namespace +metadata: + name: observability +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: otel-lgtm + name: otel-lgtm + namespace: observability +spec: + ports: + - name: grpc + port: 4317 + protocol: TCP + targetPort: 4317 + - name: http + port: 4318 + protocol: TCP + targetPort: 4318 + - name: grafana + port: 3000 + protocol: TCP + targetPort: 3000 + selector: + app: otel-lgtm +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: otel-lgtm + labels: + app: otel-lgtm + namespace: observability +spec: + replicas: 1 + selector: + matchLabels: + app: otel-lgtm + template: + metadata: + labels: + app: otel-lgtm + spec: + containers: + - name: otel-lgtm + image: grafana/otel-lgtm + ports: + - containerPort: 4317 + - containerPort: 4318 + - containerPort: 3000 +``` + +Next, apply the manifest: + +```shell +kubectl apply --filename otel-lgtm.yaml +``` + +Your output should return the resources: + +```shell +namespace/observability created + service/otel-lgtm created + deployment.apps/otel-lgtm created +``` + +To verify your resources deployed, use `kubectl get` to display resources with +an `ACTIVE` or `READY` status. + +Next, forward the Grafana port: + +```shell +kubectl port-forward svc/otel-lgtm --namespace observability 3000:3000 +``` + +Now you can access the Grafana UI at http://localhost:3000. + + +## Create an audit-enabled control plane + +To enable audit logging for a control plane, you need to label it so the +`SharedTelemetryConfig` can identify and apply audit settings. This section +creates a new control plane with the `audit-enabled: "true"` label. The +`audit-enabled: "true"` label marks this control plane for audit logging. The +`SharedTelemetryConfig` (created in the next section) finds control planes with +this label and enables audit logging on them. + +Create a new manifest file and paste the configuration below: + +
+```yaml title="ctp-audit.yaml" +apiVersion: v1 +kind: Namespace +metadata: + name: audit-test +--- +apiVersion: spaces.upbound.io/v1beta1 +kind: ControlPlane +metadata: + labels: + audit-enabled: "true" + name: ctp1 + namespace: audit-test +spec: + writeConnectionSecretToRef: + name: kubeconfig-ctp1 + namespace: audit-test +``` +
+ +The `metadata.labels` section contains the `audit-enabled` setting. + +Apply the manifest: + +```shell +kubectl apply --filename ctp-audit.yaml +``` + +Confirm your control plane reaches the `READY` status: + +```shell +kubectl get --filename ctp-audit.yaml +``` + +## Create a `SharedTelemetryConfig` + +The `SharedTelemetryConfig` applies to all control plane objects in a namespace +and enables audit logging and routes logs to your `OTEL` endpoint. + +Create a `SharedTelemetryConfig` manifest file and paste the configuration +below: + +
+```yaml title="sharedtelemetryconfig.yaml" +apiVersion: observability.spaces.upbound.io/v1alpha1 +kind: SharedTelemetryConfig +metadata: + name: apiserver-audit + namespace: audit-test +spec: + apiServer: + audit: + enabled: true + exporters: + otlphttp: + endpoint: http://otel-lgtm.observability:4318 + exportPipeline: + logs: [otlphttp] + controlPlaneSelector: + labelSelectors: + - matchLabels: + audit-enabled: "true" +``` +
+
+This configuration:
+
+* Sets `apiServer.audit.enabled` to `true`
+* Configures the `otlphttp` exporter to point to the `docker-otel-lgtm` service
+* Uses `controlPlaneSelector` to match any control plane in the namespace with the `audit-enabled` label set to `true`
+
+:::note
+You can configure the `SharedTelemetryConfig` to select control planes in
+several ways. For more information on control plane selection, see the [control
+plane selection][ctp-selection] documentation.
+:::
+
+Apply the `SharedTelemetryConfig`:
+
+```shell
+kubectl apply --filename sharedtelemetryconfig.yaml
+```
+
+Confirm the configuration selected the control plane:
+
+```shell
+kubectl get --filename sharedtelemetryconfig.yaml
+```
+
+The output should return `SELECTED` as `1` and `VALIDATED` as `TRUE`.
+
+For more detailed status information, use `kubectl get`:
+
+```shell
+kubectl get --filename sharedtelemetryconfig.yaml --output yaml | yq .status
+```
+
+## Generate and monitor audit events
+
+You enabled telemetry on your new control plane and can now generate events to
+test the audit logging. This guide uses the `nop-provider` to simulate resource
+operations.
+
+Switch your `up` context to the new control plane:
+
+```shell
+up ctx <organization>/<space>/<group>/<control-plane>
+```
+
+Create a new Provider manifest:
+
+```yaml title="provider-nop.yaml"
+apiVersion: pkg.crossplane.io/v1
+kind: Provider
+metadata:
+  name: crossplane-contrib-provider-nop
+spec:
+  package: xpkg.upbound.io/crossplane-contrib/provider-nop:v0.4.0
+```
+
+Apply the provider manifest:
+
+```shell
+kubectl apply --filename provider-nop.yaml
+```
+
+Verify the provider installed and returns `HEALTHY` status as `TRUE`.
+
+Apply an example resource to kick off event generation:
+
+```shell
+kubectl apply --filename https://raw.githubusercontent.com/crossplane-contrib/provider-nop/refs/heads/main/examples/nopresource.yaml
+```
+
+In your Grafana dashboard, navigate to **Drilldown** > **Logs** under the
+Grafana menu.
+
+Filter for `controlplane-audit` log messages.
+
+Create a query to find `create` events on `nopresources` by filtering:
+
+* The `verb` field for `create` events
+* The `objectRef_resource` field to match the Kind `nopresources`
+
+Review the audit log results. The log stream displays:
+
+* The client applying the create operation
+* The resource kind
+* Client details
+* The response code
+
+Expand the example below for an audit log entry:
+ Audit log entry + +```json +{ + "level": "Metadata", + "auditID": "51bbe609-14ad-4874-be78-1289c10d506a", + "stage": "ResponseComplete", + "requestURI": "/apis/nop.crossplane.io/v1alpha1/nopresources?fieldManager=kubectl-client-side-apply&fieldValidation=Strict", + "verb": "create", + "user": { + "username": "kubernetes-admin", + "groups": ["system:masters", "system:authenticated"] + }, + "impersonatedUser": { + "username": "upbound:spaces:host:masterclient", + "groups": [ + "system:authenticated", + "upbound:controlplane:admin", + "upbound:spaces:host:system:masters" + ] + }, + "sourceIPs": ["10.244.0.135", "127.0.0.1"], + "userAgent": "kubectl/v1.32.2 (darwin/arm64) kubernetes/67a30c0", + "objectRef": { + "resource": "nopresources", + "name": "example", + "apiGroup": "nop.crossplane.io", + "apiVersion": "v1alpha1" + }, + "responseStatus": { "metadata": {}, "code": 201 }, + "requestReceivedTimestamp": "2025-09-19T23:03:24.540067Z", + "stageTimestamp": "2025-09-19T23:03:24.557583Z", + "annotations": { + "authorization.k8s.io/decision": "allow", + "authorization.k8s.io/reason": "RBAC: allowed by ClusterRoleBinding \"controlplane-admin\" of ClusterRole \"controlplane-admin\" to Group \"upbound:controlplane:admin\"" + } + } +``` +
+ +## Customize the audit policy + +Spaces `v1.14.0` includes a default audit policy. You can customize this policy +by creating a configuration file and passing the values to +`observability.collectors.apiServer.auditPolicy` in the helm values file. + +An example custom audit policy: + +```yaml +observability: + controlPlanes: + apiServer: + auditPolicy: | + apiVersion: audit.k8s.io/v1 + kind: Policy + rules: + # ============================================================================ + # RULE 1: Exclude health check and version endpoints + # ============================================================================ + - level: None + nonResourceURLs: + - '/healthz*' + - '/readyz*' + - /version + # ============================================================================ + # RULE 2: ConfigMaps - Write operations only + # ============================================================================ + - level: Metadata + resources: + - group: "" + resources: + - configmaps + verbs: + - create + - update + - patch + - delete + omitStages: + - RequestReceived + - ResponseStarted + # ============================================================================ + # RULE 3: Secrets - ALL operations + # ============================================================================ + - level: Metadata + resources: + - group: "" + resources: + - secrets + verbs: + - get + - list + - watch + - create + - update + - patch + - delete + omitStages: + - RequestReceived + - ResponseStarted + # ============================================================================ + # RULE 4: Global exclusion of read-only operations + # ============================================================================ + - level: None + verbs: + - get + - list + - watch + # ========================================================================== + # RULE 5: Exclude standard Kubernetes resources from write operation logging + # ========================================================================== + - level: None + resources: + - group: "" + - group: "apps" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "storage.k8s.io" + - group: "batch" + - group: "autoscaling" + - group: "metrics.k8s.io" + - group: "node.k8s.io" + - group: "scheduling.k8s.io" + - group: "coordination.k8s.io" + - group: "discovery.k8s.io" + - group: "events.k8s.io" + - group: "flowcontrol.apiserver.k8s.io" + - group: "internal.apiserver.k8s.io" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "admissionregistration.k8s.io" + verbs: + - create + - update + - patch + - delete + # ============================================================================ + # RULE 6: Catch-all for ALL custom resources and any missed resources + # ============================================================================ + - level: Metadata + verbs: + - create + - update + - patch + - delete + omitStages: + - RequestReceived + - ResponseStarted + # ============================================================================ + # RULE 7: Final catch-all - exclude everything else + # ============================================================================ + - level: None + omitStages: + - RequestReceived + - ResponseStarted +``` +You can apply this policy during Spaces installation or upgrade using the helm values file. + +Audit policies use rules evaluated in order from top to bottom where the first +matching rule applies. 
Control plane audit policies follow Kubernetes conventions and use the
+following logging levels:
+
+* **None** - Don't log events matching this rule
+* **Metadata** - Log request metadata (user, timestamp, resource, verb) but not request or response bodies
+* **Request** - Log metadata and request body but not response body
+* **RequestResponse** - Log metadata, request body, and response body
+
+For more information, review the Kubernetes [Auditing] documentation.
+
+## Disable audit logging
+
+You can disable audit logging on a control plane by removing it from the
+`SharedTelemetryConfig` selector or by deleting the `SharedTelemetryConfig`.
+
+### Disable for specific control planes
+
+Remove the `audit-enabled` label from control planes that should stop sending audit logs:
+
+```bash
+kubectl label controlplane <controlplane-name> --namespace <namespace> audit-enabled-
+```
+
+The `SharedTelemetryConfig` no longer selects this control plane, and audit log collection stops.
+
+### Disable for all control planes
+
+Delete the `SharedTelemetryConfig` to stop audit logging for all control planes it manages:
+
+```bash
+kubectl delete sharedtelemetryconfig <name> --namespace <namespace>
+```
+
+[ctp-selection]: /spaces/howtos/observability/#control-plane-selection
+[Auditing]: https://kubernetes.io/docs/tasks/debug/debug-cluster/audit/
diff --git a/spaces_versioned_docs/version-v1.10/howtos/self-hosted/declarative-ctps.md b/spaces_versioned_docs/version-v1.10/howtos/self-hosted/declarative-ctps.md
new file mode 100644
index 000000000..2c3e5331b
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.10/howtos/self-hosted/declarative-ctps.md
@@ -0,0 +1,110 @@
+---
+title: Declaratively create control planes
+sidebar_position: 99
+description: A tutorial to configure a Space with Argo to declaratively create and
+  manage control planes
+---
+
+In this tutorial, you learn how to configure [Argo CD][argo-cd] to communicate with a self-hosted Space. This flow allows you to declaratively create and manage control planes from Git. Argo CD is a continuous delivery tool for Kubernetes that you can use to drive GitOps flows for your control plane infrastructure.
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments.
+
+For API specifications on ControlPlane resources and their declarative creation, see the [Spaces API Reference](../../../../../reference/apis/spaces-api/).
+:::
+
+## Prerequisites
+
+To complete this tutorial, you need the following:
+
+- A deployed Upbound Space.
+- A deployed instance of Argo CD on a Kubernetes cluster.
+
+## Connect your Space to Argo CD
+
+Fetch the kubeconfig for the Space cluster, the Kubernetes cluster where you installed the Upbound Spaces software. You must add the Space cluster as a context to Argo.
+
+```ini
+export SPACES_CLUSTER_SERVER="https://url"
+export SPACES_CLUSTER_NAME="cluster"
+```
+
+Switch contexts to the Kubernetes cluster where you've installed Argo. Create a secret on the Argo cluster whose data contains the connection details of the Space cluster.
+
+:::important
+Make sure the following commands are executed against your **Argo** cluster, not your Space cluster.
+:::
+
+Run the following command in a terminal:
+
+```yaml
+cat <
+When you install a Crossplane provider on a control plane, memory gets consumed
+according to the number of custom resources it defines.
+Upbound [Official Provider families][official-provider-families] give platform
+teams finer-grained control to install providers for only the resources they
+need, reducing the bloat of needlessly installing unused custom resources.
+Still, you must factor provider memory usage into your calculations to ensure
+you've rightsized the memory available in your Spaces cluster.
+
+:::important
+Be careful not to conflate `managed resource` with `custom resource definition`.
+The former is an "instance" of an external resource in Crossplane, while the
+latter defines the API schema of that resource.
+:::
+
+It's estimated that each custom resource definition consumes ~3 MB of memory.
+The calculation is:
+
+```bash
+number_of_crds_defined_by_provider x 3 MB = memory_required
+```
+
+For example, if you plan to use [provider-aws-ec2][provider-aws-ec2], [provider-aws-s3][provider-aws-s3], and [provider-aws-iam][provider-aws-iam], the resulting calculation is:
+
+```bash
+provider-aws-ec2: 98 x 3 MB = 294 MB
+provider-aws-s3:  23 x 3 MB = 69 MB
+provider-aws-iam: 22 x 3 MB = 66 MB
+---
+total memory: 429 MB
+```
+
+In this scenario, you should budget ~430 MB of memory for provider usage on this control plane.
+
+:::tip
+Do this calculation for each provider you plan to install on your control plane.
+Then do this calculation for each control plane you plan to run in your Space.
+:::
+
+#### Total memory usage
+
+Add the memory usage from the previous sections. Given the preceding examples,
+they result in a recommendation to budget ~1 GB memory for each control plane
+you plan to run in the Space.
+
+:::important
+
+The 1 GB recommendation is an example. You should input your own provider
+requirements to arrive at a final number for your own deployment.
+
+:::
+
+### CPU considerations
+
+#### Managed resource CPU usage
+
+The number of managed resources under management by a control plane is the largest contributing factor for CPU usage in a Space. CPU usage scales linearly with the number of managed resources under management by your control plane. In Upbound's testing, CPU usage requirements _do_ vary from provider to provider. Using the Upbound Official Provider families as a baseline:
+
+| Provider | MR create operation (CPU core seconds) | MR update or reconciliation operation (CPU core seconds) |
+| ---- | ---- | ---- |
+| provider-family-aws | 10 | 2 to 3 |
+| provider-family-gcp | 7 | 1.5 |
+| provider-family-azure | 7 to 10 | 1.5 to 3 |
+
+When resources are in a non-ready state, Crossplane providers reconcile often (as fast as every 15 seconds). Once a resource reaches `READY`, each Crossplane provider defaults to a 10 minute poll interval. Given this, a 16-core machine has `16x10x60 = 9600` CPU core seconds available per 10-minute interval. Interpreting this table:
+
+- A single control plane that needs to create 100 AWS MRs concurrently would consume 1000 CPU core seconds, or about 1.5 cores.
+- A single control plane that continuously reconciles 100 AWS MRs once they've reached a `READY` state would consume 300 CPU core seconds, or a little under half a core.
+
+Since `provider-family-aws` has the highest recorded numbers for CPU time required, you can use that as an upper limit in your calculations.
+
+Using these calculations and extrapolating values, given a 16 core machine, it's recommended you don't exceed a single control plane managing 1000 MRs. Suppose you plan to run 10 control planes, each managing 1000 MRs.
+You want to make sure your node pool has capacity for 160 cores. If you are using a machine type that has 16 cores per machine, that would mean having a node pool of size 10. If you are using a machine type that has 32 cores per machine, that would mean having a node pool of size 5.
+
+#### Cloud API latency
+
+You often use Crossplane providers to talk to external cloud APIs. Those external cloud APIs often have global API rate limits (examples: [Azure limits][azure-limits], [AWS EC2 limits][aws-ec2-limits]).
+
+Crossplane providers built on [Upjet][upjet] (such as Upbound Official Provider families) use Terraform under the covers. They expose knobs (such as `--max-reconcile-rate`) you can use to tune reconciliation rates.
+
+### Resource buffers
+
+The guidance in the preceding sections explains how to calculate CPU and memory usage requirements for:
+
+- a set of control planes in a Space
+- tuned to the number of providers you plan to use
+- according to the number of managed resource instances you plan to have managed by your control planes
+
+Upbound recommends adding an extra 20% buffer to your resource capacity calculations. The numbers in the preceding sections are based on average measurements and don't account for peaks or surges; the buffer absorbs these.
+
+## Deploying more than one Space
+
+You can deploy more than one Space; you just need a 1:1 mapping of Spaces to Kubernetes clusters. A Space is by nature constrained to a single Kubernetes cluster, and clusters are regional entities. If you want to offer control planes in multiple cloud environments or multiple public clouds entirely, these are justifications for deploying more than one Space.
+
+## Cert-manager
+
+A Spaces deployment uses the [Certificate Custom Resource] from cert-manager to
+provision certificates within the Space. This establishes a clean API boundary
+between what your platform may need and the Certificate requirements of a
+Space.
+
+If you'd like more control over the issuing Certificate Authority for your
+deployment, or over the deployment of cert-manager itself, this section is for
+you.
+
+### Deploying
+
+An Upbound Space deployment doesn't have any special requirements for the
+cert-manager deployment itself. The only expectation is that cert-manager and
+the corresponding Custom Resources exist in the cluster.
+
+You're free to install cert-manager in the cluster in any way that makes
+sense for your organization. You can find some [installation ideas] in the
+cert-manager docs.
+
+### Issuers
+
+A default Upbound Space install includes a [ClusterIssuer]. This `ClusterIssuer`
+is a `selfSigned` issuer that other certificates are minted from. You have a
+couple of options for changing the default deployment of the Issuer:
+
+1. Changing the issuer name.
+2. Providing your own ClusterIssuer.
+
+#### Changing the issuer name
+
+The `ClusterIssuer` name is controlled by the `certificates.space.clusterIssuer`
+Helm property. You can adjust this during installation by providing the
+following parameter (assuming your new name is 'SpaceClusterIssuer'):
+
+```shell
+--set "certificates.space.clusterIssuer=SpaceClusterIssuer"
+```
+
+#### Providing your own ClusterIssuer
+
+To provide your own `ClusterIssuer`, you first need to set up your own
+`ClusterIssuer` in the cluster.
+The cert-manager docs have a variety of options
+for providing your own. See the [Issuer Configuration] docs for more details.
+
+Once you have your own `ClusterIssuer` set up in the cluster, you need to turn
+off the deployment of the `ClusterIssuer` included in the Spaces deployment.
+To do that, provide the following parameter during installation:
+
+```shell
+--set "certificates.provision=false"
+```
+
+##### Considerations
+
+If your `ClusterIssuer` has a name that's different from the default name that
+the Spaces installation expects ('spaces-selfsigned'), you need to also specify
+your `ClusterIssuer` name during install using:
+
+```shell
+--set "certificates.space.clusterIssuer=<your-issuer-name>"
+```
+
+## Ingress
+
+To route requests from an external client (kubectl, Argo CD, and so on) to a
+control plane, a Spaces deployment includes a default [Ingress] manifest. To
+ease getting-started scenarios, the current `Ingress` includes configuration
+(properties and annotations) that assumes you installed the commonly used
+[ingress-nginx ingress controller] in the cluster. This section walks you
+through using a different `Ingress` if that's something your organization
+needs.
+
+### Default manifest
+
+An example of the current `Ingress` manifest included in a Spaces install
+appears below:
+
+```yaml
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: mxe-router-ingress
+  namespace: upbound-system
+  annotations:
+    nginx.ingress.kubernetes.io/use-regex: "true"
+    nginx.ingress.kubernetes.io/ssl-redirect: "false"
+    nginx.ingress.kubernetes.io/proxy-send-timeout: "3600"
+    nginx.ingress.kubernetes.io/proxy-read-timeout: "3600"
+    nginx.ingress.kubernetes.io/proxy-request-buffering: "off"
+    nginx.ingress.kubernetes.io/proxy-body-size: "0"
+    nginx.ingress.kubernetes.io/proxy-http-version: "1.1"
+    nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
+    nginx.ingress.kubernetes.io/proxy-ssl-verify: "on"
+    nginx.ingress.kubernetes.io/proxy-ssl-secret: "upbound-system/mxp-hostcluster-certs"
+    nginx.ingress.kubernetes.io/proxy-ssl-name: spaces-router
+    nginx.ingress.kubernetes.io/configuration-snippet: |
+      more_set_headers "X-Request-Id: $req_id";
+      more_set_headers "Request-Id: $req_id";
+      more_set_headers "Audit-Id: $req_id";
+spec:
+  ingressClassName: nginx
+  tls:
+    - hosts:
+        - {{ .Values.ingress.host }}
+      secretName: mxe-router-tls
+  rules:
+    - host: {{ .Values.ingress.host }}
+      http:
+        paths:
+          - path: "/v1/controlPlanes"
+            pathType: Prefix
+            backend:
+              service:
+                name: spaces-router
+                port:
+                  name: http
+```
+
+The notable pieces are:
+
+1. Namespace
+
+This property represents the namespace that the `spaces-router` is deployed to.
+In most cases this is `upbound-system`.
+
+2. proxy-ssl-* annotations
+
+The `spaces-router` pod terminates TLS using certificates located in the
+`mxp-hostcluster-certs` `Secret` in the `upbound-system` `Namespace`.
+
+3. proxy-* annotations
+
+Requests coming into the ingress-controller can vary depending on what the
+client requests. For example, `kubectl get crds` has different connection
+requirements compared to a watch, such as `kubectl get pods -w`. The
+ingress-controller is configured to account for either scenario.
+
+4. configuration-snippets
+
+These commands add headers to the incoming requests that help with telemetry
+and diagnosing problems within the system.
+
+5. Rules
+
+Requests coming into the control planes use a `/v1/controlPlanes` prefix and
+need to be routed to the `spaces-router`.
+
+### Using a different ingress manifest
+
+Operators can choose to use an `Ingress` manifest and ingress controller that
+makes the most sense for their organization. To turn off deploying the default
+`Ingress` manifest, provide the following parameter during installation:
+
+```shell
+--set "ingress.provision=false"
+```
+
+#### Considerations
+
+Operators need to account for the following considerations when disabling the
+default `Ingress` deployment:
+
+1. Ensure the custom `Ingress` manifest is placed in the same namespace as the
+`spaces-router` pod.
+2. Ensure that the ingress is configured to use the `spaces-router` as a secure
+backend and that it uses the `mxp-hostcluster-certs` secret.
+3. Ensure that the ingress is configured to handle long-lived connections.
+4. Ensure that the routing rule sends requests prefixed with
+`/v1/controlPlanes` to the `spaces-router` using the `http` port.
+
+[cert-manager]: https://cert-manager.io/
+[Certificate Custom Resource]: https://cert-manager.io/docs/usage/certificate/
+[ClusterIssuer]: https://cert-manager.io/docs/concepts/issuer/
+[ingress-nginx ingress controller]: https://kubernetes.github.io/ingress-nginx/deploy/
+[installation ideas]: https://cert-manager.io/docs/installation/
+[Ingress]: https://kubernetes.io/docs/concepts/services-networking/ingress/
+[Issuer Configuration]: https://cert-manager.io/docs/configuration/
+[official-provider-families]: /manuals/packages/providers/provider-families
+[aws-eks]: https://aws.amazon.com/eks/
+[google-cloud-gke]: https://cloud.google.com/kubernetes-engine
+[microsoft-aks]: https://azure.microsoft.com/en-us/products/kubernetes-service
+[upbound-account]: https://www.upbound.io/register/?utm_source=docs&utm_medium=cta&utm_campaign=docs_spaces
+[provider-aws-ec2]: https://marketplace.upbound.io/providers/upbound/provider-aws-ec2
+[provider-aws-s3]: https://marketplace.upbound.io/providers/upbound/provider-aws-s3
+[provider-aws-iam]: https://marketplace.upbound.io/providers/upbound/provider-aws-iam
+[azure-limits]: https://learn.microsoft.com/en-us/azure/azure-resource-manager/management/request-limits-and-throttling
+[aws-ec2-limits]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/throttling.html#throttling-limits-rate-based
+[upjet]: https://github.com/upbound/upjet
diff --git a/spaces_versioned_docs/version-v1.10/howtos/self-hosted/dr.md b/spaces_versioned_docs/version-v1.10/howtos/self-hosted/dr.md
new file mode 100644
index 000000000..67ecbfecf
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.10/howtos/self-hosted/dr.md
@@ -0,0 +1,412 @@
+---
+title: Disaster Recovery
+sidebar_position: 13
+description: Configure Space-wide backups for disaster recovery.
+---
+
+:::info API Version Information
+This guide applies to **Spaces v1.9.0 and later** (Self-Hosted only). Space Backups is an alpha feature, enabled by default starting in v1.14.0.
+
+- **v1.9.0-v1.13.x**: Alpha feature (requires manual enablement)
+- **v1.14.0+**: Alpha feature (enabled by default)
+
+For control-plane backups, see [Backup and Restore](../backup-and-restore.md).
+:::
+
+:::important
+For Connected and Disconnected Spaces, this feature requires Spaces `v1.9.0` and, starting with `v1.14.0`, Spaces enables it by default.
+
+To enable it on versions earlier than `v1.14.0`, set `features.alpha.spaceBackup.enabled=true` when you install Spaces.
+
+```bash
+up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
+  ...
+  --set "features.alpha.spaceBackup.enabled=true"
+```
+:::
+
+Upbound's _Space Backups_ is a built-in Space-wide backup and restore feature. This guide explains how to configure Space Backups and how to restore from one of them in case of disaster recovery.
+
+This feature is meant for Space administrators. Group or Control Plane users can leverage [Shared Backups][shared-backups] to back up and restore their ControlPlanes.
+
+## Benefits
+The Space Backups feature provides the following benefits:
+
+* Automatic backups for all resources in a Space and all resources in control planes, without any operational overhead.
+* Backup schedules.
+* Selectors to specify resources to backup.
+
+## Prerequisites
+
+Enable the Space Backups feature in the Space:
+
+- Cloud Spaces: Not accessible to users.
+- Connected Spaces: Space administrator must enable this feature.
+- Disconnected Spaces: Space administrator must enable this feature.
+
+## Configure a Space Backup Config
+
+[SpaceBackupConfig][spacebackupconfig] is a cluster-scoped resource. This resource configures the storage details and provider. Whenever a backup executes (either by schedule or manually initiated), it references a SpaceBackupConfig to tell it where to store the snapshot.
+
+### Backup config provider
+
+The `spec.objectStorage.provider` and `spec.objectStorage.config` fields configure:
+
+* The object storage provider
+* The path to the provider
+* The credentials needed to communicate with the provider
+
+You can only set one provider. Upbound currently supports AWS, Azure, and GCP as providers.
+
+`spec.objectStorage.config` is a freeform map of configuration options for the object storage provider. See [Thanos object storage][thanos-object-storage] for more information on the formats for each supported cloud provider. `spec.objectStorage.bucket` and `spec.objectStorage.provider` override the corresponding values in the config.
+
+#### AWS as a storage provider
+
+This example demonstrates how to use AWS as a storage provider for your backups:
+
+```yaml
+apiVersion: admin.spaces.upbound.io/v1alpha1
+kind: SpaceBackupConfig
+metadata:
+  name: default
+spec:
+  objectStorage:
+    provider: AWS
+    bucket: spaces-backup-bucket
+    config:
+      endpoint: s3.eu-west-2.amazonaws.com
+      region: eu-west-2
+    credentials:
+      source: Secret
+      secretRef:
+        name: bucket-creds
+        namespace: upbound-system
+        key: creds
+```
+
+This example assumes you've already created an S3 bucket called
+`spaces-backup-bucket` in the `eu-west-2` AWS region. To access the bucket,
+define the account credentials as a Secret in the specified Namespace
+(`upbound-system` in this example).
+
+#### Azure as a storage provider
+
+This example demonstrates how to use Azure as a storage provider for your backups:
+
+```yaml
+apiVersion: admin.spaces.upbound.io/v1alpha1
+kind: SpaceBackupConfig
+metadata:
+  name: default
+spec:
+  objectStorage:
+    provider: Azure
+    bucket: upbound-backups
+    config:
+      storage_account: upbackupstore
+      container: upbound-backups
+      endpoint: blob.core.windows.net
+    credentials:
+      source: Secret
+      secretRef:
+        name: bucket-creds
+        namespace: upbound-system
+        key: creds
+```
+
+This example assumes you've already created an Azure storage account called
+`upbackupstore` and a blob container called `upbound-backups`.
+To access the blob, define the account credentials as a Secret in the specified
+Namespace (`upbound-system` in this example).
+
+#### GCP as a storage provider
+
+This example demonstrates how to use Google Cloud Storage as a storage provider for your backups:
+
+```yaml
+apiVersion: admin.spaces.upbound.io/v1alpha1
+kind: SpaceBackupConfig
+metadata:
+  name: default
+spec:
+  objectStorage:
+    provider: GCP
+    bucket: spaces-backup-bucket
+    credentials:
+      source: Secret
+      secretRef:
+        name: bucket-creds
+        namespace: upbound-system
+        key: creds
+```
+
+This example assumes you've already created a Cloud Storage bucket called
+`spaces-backup-bucket` and a service account with access to this bucket. Define
+the key file as a Secret in the specified Namespace (`upbound-system` in this
+example).
+
+## Configure a Space Backup Schedule
+
+[SpaceBackupSchedule][spacebackupschedule] is a cluster-scoped resource. This resource defines a backup schedule for the whole Space.
+
+Below is an example of a Space Backup Schedule running every day. It backs up all groups that have the `environment: production` label and, in those groups, all control planes that have the `backup: please` label.
+
+```yaml
+apiVersion: admin.spaces.upbound.io/v1alpha1
+kind: SpaceBackupSchedule
+metadata:
+  name: daily-schedule
+spec:
+  schedule: "@daily"
+  configRef:
+    kind: SpaceBackupConfig
+    name: default
+  match:
+    groups:
+      labelSelectors:
+        - matchLabels:
+            environment: production
+    controlPlanes:
+      labelSelectors:
+        - matchLabels:
+            backup: please
+```
+
+### Define a schedule
+
+The `spec.schedule` field is a [Cron-formatted][cron-formatted] string. Some common examples are below:
+
+| Entry | Description |
+| ----------------- | ------------------------------------------------------------------------------------------------- |
+| `@hourly` | Run once an hour. |
+| `@daily` | Run once a day. |
+| `@weekly` | Run once a week. |
+| `0 0/4 * * *` | Run every 4 hours. |
+| `0/15 * * * 1-5` | Run every 15 minutes, Monday through Friday. |
+| `@every 1h30m10s` | Run every 1 hour, 30 minutes, and 10 seconds. Hours are the largest unit of time for `@every`. |
+
+### Suspend a schedule
+
+Use the `spec.suspend` field to suspend the schedule. While suspended, the schedule creates no new backups but allows running backups to complete.
+
+```yaml
+apiVersion: admin.spaces.upbound.io/v1alpha1
+kind: SpaceBackupSchedule
+metadata:
+  name: daily-schedule
+spec:
+  suspend: true
+...
+```
+
+### Garbage collect backups when the schedule gets deleted
+
+Set `spec.useOwnerReferencesInBackup` to `true` to garbage collect the associated `SpaceBackup` resources when the `SpaceBackupSchedule` gets deleted.
+
+### Set the time to live
+
+Set the `spec.ttl` field to define the time to live for the backup. After this time, the backup is eligible for garbage collection. If this field isn't set, the backup isn't garbage collected.
+
+The time to live is a duration, for example, `168h` for 7 days.
+
+```yaml
+apiVersion: admin.spaces.upbound.io/v1alpha1
+kind: SpaceBackupSchedule
+metadata:
+  name: daily-schedule
+spec:
+  ttl: 168h # Backup is garbage collected after 7 days
+...
+```
+
+## Selecting space resources to backup
+
+By default, a SpaceBackup selects all groups and, for each of them, all control planes, secrets, and any other group-scoped resources.
+
+By setting `spec.match`, you can include only specific groups, control planes, secrets, or other Space resources in the backup.
+ +By setting `spec.exclude`, you can filter out some matched Space API resources from the backup. + +### Including space resources in a backup + +Different fields are available to include resources based on labels or names: +- `spec.match.groups` to include only some groups in the backup. +- `spec.match.controlPlanes` to include only some control planes in the backup. +- `spec.match.secrets` to include only some secrets in the backup. +- `spec.match.extras` to include only some extra resources in the backup. + +```yaml +apiVersion: admin.spaces.upbound.io/v1alpha1 +kind: SpaceBackup +metadata: + name: my-backup +spec: + configRef: + kind: SpaceBackupConfig + name: default + match: + groups: + labelSelectors: + - matchLabels: + environment: production + controlPlanes: + labelSelectors: + - matchLabels: + backup: please + secrets: + names: + - my-secret + extras: + - apiGroup: "spaces.upbound.io" + kind: "SharedBackupConfig" + names: + - my-shared-backup +``` + +### Excluding Space resources from the backup + +Use the `spec.exclude` field to exclude matched Space API resources from the backup. + +Different fields are available to exclude resources based on labels or names: +- `spec.exclude.groups` to exclude some groups from the backup. +- `spec.exclude.controlPlanes` to exclude some control planes from the backup. +- `spec.exclude.secrets` to exclude some secrets from the backup. +- `spec.exclude.extras` to exclude some extra resources from the backup. + +```yaml +apiVersion: admin.spaces.upbound.io/v1alpha1 +kind: SpaceBackup +metadata: + name: my-backup +spec: + ttl: 168h # Backup is garbage collected after 7 days + configRef: + kind: SpaceBackupConfig + name: default + match: + groups: + labelSelectors: + - matchLabels: + environment: production + exclude: + groups: + names: + - not-this-one-please +``` + +### Exclude resources in control planes' backups + +By default, it backs up all resources in a selected control plane. + +Use the `spec.controlPlaneBackups.excludedResources` field to exclude resources from control planes' backups. + +```yaml +apiVersion: admin.spaces.upbound.io/v1alpha1 +kind: SpaceBackup +metadata: + name: my-backup +spec: + ttl: 168h # Backup is garbage collected after 7 days + configRef: + kind: SpaceBackupConfig + name: default + controlPlaneBackups: + excludedResources: + - secrets + - buckets.s3.aws.upbound.io +``` + +## Create a manual backup + +[SpaceBackup][spacebackup] is a cluster-scoped resource that causes a single backup to occur for the whole Space. + +Below is an example of a manual SpaceBackup: + +```yaml +apiVersion: admin.spaces.upbound.io/v1alpha1 +kind: SpaceBackup +metadata: + name: my-backup +spec: + configRef: + kind: SpaceBackupConfig + name: default + deletionPolicy: Delete +``` + + +The backup specification `DeletionPolicy` defines backup deletion actions, +including the deletion of the backup file from the bucket. The `Deletion Policy` +value defaults to `Orphan`. Set it to `Delete` to remove uploaded files +in the bucket. +For more information on the backup and restore process, review the [Spaces API documentation][spaces-api-documentation]. + +### Set the time to live + +Set the `spec.ttl` field to define the time to live for the backup. After this time, the backup is eligible for garbage collection. If this field isn't set, the backup isn't garbage collected. The time to live is a duration, for example, `168h` for 7 days. 
+
+```yaml
+apiVersion: admin.spaces.upbound.io/v1alpha1
+kind: SpaceBackup
+metadata:
+  name: my-backup
+spec:
+  ttl: 168h # Backup is garbage collected after 7 days
+...
+```
+
+## Restore from a space backup
+
+Space Backup and Restore focuses only on disaster recovery. The restore procedure assumes a new Space installation with no existing resources. The restore procedure is idempotent, so you can run it multiple times without any side effects in case of failures.
+
+To restore a Space from an existing Space Backup, follow these steps:
+
+1. Install Spaces from scratch as needed.
+2. Create a `SpaceBackupConfig` as needed to access the SpaceBackup from the object storage, for example named `my-backup-config`.
+3. Select the backup you want to restore from, for example `my-backup`.
+4. Run the following command to restore the Space:
+
+```shell
+export SPACE_BACKUP_CONFIG=my-backup-config
+export SPACE_BACKUP=my-backup
+kubectl exec -ti -n upbound-system deployments/spaces-controller -c spaces -- hyperspace restore $SPACE_BACKUP $SPACE_BACKUP_CONFIG
+```
+
+### Restore specific control planes
+
+:::important
+This feature is available from Spaces v1.11.
+:::
+
+Instead of restoring the whole Space, you can choose to restore specific control planes
+from a backup using the `--controlplanes` flag. You can also use
+the `--skip-space-restore` flag to skip restoring Space objects.
+This allows Spaces admins to restore individual control planes without
+needing to restore the entire Space.
+
+```shell
+export SPACE_BACKUP_CONFIG=my-backup-config
+export SPACE_BACKUP=my-backup
+kubectl exec -ti -n upbound-system deployments/spaces-controller -c spaces \
+  -- hyperspace restore $SPACE_BACKUP $SPACE_BACKUP_CONFIG --controlplanes default/ctp1,default/ctp2 --skip-space-restore
+```
+
+[shared-backups]: /spaces/howtos/self-hosted/workload-id/backup-restore-config/
+[spacebackupconfig]: /reference/apis/spaces-api/v1_9
+[thanos-object-storage]: https://thanos.io/tip/thanos/storage.md/
+[spacebackupschedule]: /reference/apis/spaces-api/v1_9
+[cron-formatted]: https://en.wikipedia.org/wiki/Cron
+[spacebackup]: /reference/apis/spaces-api/v1_9
+[spaces-api-documentation]: /reference/apis/spaces-api/v1_9
+
diff --git a/spaces_versioned_docs/version-v1.10/howtos/self-hosted/gitops-with-argocd.md b/spaces_versioned_docs/version-v1.10/howtos/self-hosted/gitops-with-argocd.md
new file mode 100644
index 000000000..004247a10
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.10/howtos/self-hosted/gitops-with-argocd.md
@@ -0,0 +1,142 @@
+---
+title: GitOps with ArgoCD in Self-Hosted Spaces
+sidebar_position: 80
+description: Set up GitOps workflows with Argo CD in self-hosted Spaces
+plan: "business"
+---
+
+:::info Deployment Model
+This guide applies to **self-hosted Spaces** deployments. For Upbound Cloud Spaces, see [GitOps with Upbound Control Planes](/spaces/howtos/cloud-spaces/gitops-on-upbound/).
+:::
+
+GitOps is an approach for managing a system by declaratively describing desired resources' configurations in Git and using controllers to realize the desired state. Upbound's control planes are compatible with this pattern, and it's strongly recommended that you integrate GitOps into the platforms you build on Upbound.
+
+## Integrate with Argo CD
+
+[Argo CD][argo-cd] is a project in the Kubernetes ecosystem commonly used for
+GitOps. You can use it in tandem with Upbound control planes to achieve GitOps
+flows. The sections below explain how to integrate these tools with Upbound.
+
+### Configure connection secrets for control planes
+
+You can configure control planes to write their connection details to a secret.
+Do this by setting the
+[`spec.writeConnectionSecretToRef`][spec-writeconnectionsecrettoref] field in a
+control plane manifest. For example:
+
+```yaml
+apiVersion: spaces.upbound.io/v1beta1
+kind: ControlPlane
+metadata:
+  name: ctp1
+  namespace: default
+spec:
+  writeConnectionSecretToRef:
+    name: kubeconfig-ctp1
+    namespace: default
+```
+
+### Configure Argo CD
+
+To configure Argo CD for Annotation resource tracking, edit the Argo CD
+ConfigMap in the Argo CD namespace. Add `application.resourceTrackingMethod:
+annotation` to the data section as below.
+
+Next, configure the [auto respect RBAC for the Argo CD
+controller][auto-respect-rbac-for-the-argo-cd-controller-1]. By default, Argo CD
+attempts to discover some Kubernetes resource types that don't exist in a
+control plane. You must configure Argo CD to respect the cluster's RBAC rules so
+that Argo CD can sync. Add `resource.respectRBAC: normal` to the data section as
+below.
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: argocd-cm
+data:
+  ...
+  application.resourceTrackingMethod: annotation
+  resource.respectRBAC: normal
+```
+
+:::tip
+The `resource.respectRBAC` configuration above tells Argo to respect RBAC for
+_all_ cluster contexts. If you're using an Argo CD instance to manage more than
+only control planes, you should consider changing the `clusters` string match
+for the configuration to apply only to control planes. For example, if every
+control plane context name followed the convention of being named
+`controlplane-<name>`, you could set the string match to `controlplane-*`.
+:::
+
+### Create a cluster context definition
+
+Once the control plane is ready, extract the following values from the secret
+containing the kubeconfig:
+
+```bash
+kubeconfig_content=$(kubectl get secrets kubeconfig-ctp1 -n default -o jsonpath='{.data.kubeconfig}' | base64 -d)
+server=$(echo "$kubeconfig_content" | grep 'server:' | awk '{print $2}')
+bearer_token=$(echo "$kubeconfig_content" | grep 'token:' | awk '{print $2}')
+ca_data=$(echo "$kubeconfig_content" | grep 'certificate-authority-data:' | awk '{print $2}')
+```
+
+Generate a new secret in the cluster where you installed Argo, using the prior
+values extracted:
+
+```yaml
+cat <
+
+import GlobalLanguageSelector, { CodeBlock } from '@site/src/components/GlobalLanguageSelector';
+
+
+
+:::important
+This feature is only available for select Business Critical customers. You can't
+set up your own Managed Space without the assistance of Upbound. If you're
+interested in this deployment mode, please [contact us][contact].
+:::
+
+
+
+A Managed Space deployed on AWS is a single-tenant deployment of a control plane
+space in your AWS organization in an isolated sub-account. With Managed Spaces,
+you can use the same API, CLI, and Console that Upbound offers, with the benefit
+of running entirely in a cloud account that you own and Upbound manages for you.
+
+The following guide walks you through setting up a Managed Space in your AWS
+organization. If you have any questions while working through this guide,
+contact your Upbound Account Representative for help.
+
+
+
+
+
+A Managed Space deployed on GCP is a single-tenant deployment of a control plane
+space in your GCP organization in an isolated project.
+With Managed Spaces, you
+can use the same API, CLI, and Console that Upbound offers, with the benefit of
+running entirely in a cloud account that you own and Upbound manages for you.
+
+The following guide walks you through setting up a Managed Space in your GCP
+organization. If you have any questions while working through this guide,
+contact your Upbound Account Representative for help.
+
+
+
+## Managed Space on your cloud architecture
+
+
+
+A Managed Space is a deployment of the Upbound Spaces software inside an
+Upbound-controlled sub-account in your AWS cloud environment. The Spaces
+software runs in this sub-account, orchestrated by Kubernetes. Backups and
+billing data get stored inside bucket or blob storage in the same sub-account.
+The control planes deployed and controlled by the Spaces software run on the
+Kubernetes cluster deployed into the sub-account.
+
+The diagram below illustrates the high-level architecture of Upbound Managed Spaces:
+
+![Upbound Managed Spaces arch](/img/managed-arch-aws.png)
+
+The Spaces software gets deployed on an EKS Cluster in the region of your
+choice. This EKS cluster is where your control planes ultimately run.
+Upbound also deploys two buckets: one for the collection of billing data and
+one for control plane backups.
+
+Upbound doesn't have access to other sub-accounts or your organization-level
+settings in your cloud environment. Outside of your cloud organization, Upbound
+runs the Upbound Console, which includes the Upbound API and web application,
+including the dashboard you see at `console.upbound.io`. By default, all
+connections are encrypted, but public. Optionally, you can use private network
+connectivity through [AWS PrivateLink][aws-privatelink].
+
+
+
+
+
+A Managed Space is a deployment of the Upbound Spaces software inside an
+Upbound-controlled project in your GCP cloud environment. The Spaces software
+runs in this project, orchestrated by Kubernetes. Backups and billing data get
+stored inside bucket or blob storage in the same project. The control planes
+deployed and controlled by the Spaces software run on the Kubernetes cluster
+deployed into the project.
+
+The diagram below illustrates the high-level architecture of Upbound Managed Spaces:
+
+![Upbound Managed Spaces arch](/img/managed-arch-gcp.png)
+
+The Spaces software gets deployed on a GKE Cluster in the region of your choice.
+This GKE cluster is where your control planes ultimately run. Upbound also
+deploys two Cloud Storage buckets: one for the collection of billing data and
+one for control plane backups.
+
+Upbound doesn't have access to other projects or your organization-level
+settings in your cloud environment. Outside of your cloud organization, Upbound
+runs the Upbound Console, which includes the Upbound API and web application,
+including the dashboard you see at `console.upbound.io`. By default, all
+connections are encrypted, but public. Optionally, you can use private network
+connectivity through [GCP Private Service
+Connect][gcp-private-service-connect].
+
+
+
+## Prerequisites
+
+- An organization created on Upbound
+
+
+
+- You should have a preexisting AWS organization to complete this guide.
+- You must create a new AWS sub-account. Read the [AWS documentation][aws-documentation] to learn how to create a new sub-account in an existing organization on AWS.
+ +After the sub-account information gets provided to Upbound, **don't change it +any further.** Any changes made to the sub-account or the resources created by +Upbound for the purposes of the Managed Space deployments voids the SLA you have +with Upbound. If you want to make configuration changes, contact your Upbound +Solutions Architect. + + + + + +- You should have a preexisting GCP organization with an active Cloud Billing account to complete this guide. +- You must create a new GCP project. Read the [GCP documentation][gcp-documentation] to learn how to create a new project in an existing organization on GCP. + +After the project information gets provided to Upbound, **don't change it any +further.** Any changes made to the project or the resources created by Upbound +for the purposes of the Managed Space deployments voids the SLA you have with +Upbound. If you want to make configuration changes, contact your Upbound +Solutions Architect. + + + + + +## Set up cross-account management + +Upbound supports using AWS Key Management Service with cross-account IAM +permissions. This enables the isolation of keys so the infrastructure operated +by Upbound has limited access to symmetric keys. + +In the KMS key's account, apply the baseline key policy: + +```json +{ + "Sid": "Allow Upbound to use this key", + "Effect": "Allow", + "Principal": { + "AWS": ["[Managed Space sub-account ID]"] + }, + "Action": ["kms:Encrypt", "kms:Decrypt", "kms:ReEncrypt*", "kms:GenerateDataKey*", "kms:DescribeKey"], + "Resource": "*" +} +``` + +You need another key policy to let the sub-account create persistent resources +with the KMS key: + +```json +{ + "Sid": "Allow attachment of persistent resources for an Upbound Managed Space", + "Effect": "Allow", + "Principal": { + "AWS": "[Managed Space sub-account ID]" + }, + "Action": ["kms:CreateGrant", "kms:ListGrants", "kms:RevokeGrant"], + "Resource": "*", + "Condition": { + "Bool": { + "kms:GrantIsForAWSResource": "true" + } + } +} +``` + +### Configure PrivateLink + +By default, all connections to the Upbound Console are encrypted, but public. +AWS PrivateLink is a feature that allows VPC peering whereby your traffic +doesn't traverse the public internet. To have this configured, contact your +Upbound Account Representative. + + + + + +## Enable APIs + +Enable the following APIs in the new project: + +- Kubernetes Engine API +- Cloud Resource Manager API +- Compute Engine API +- Cloud DNS API + +:::tip +Read how to enable APIs in a GCP project [here][here]. +::: + +## Create a service account + +Create a service account in the new project. Name the service account, +upbound-sa. Give the service account the following roles: + +- Compute Admin +- Project IAM Admin +- Service Account Admin +- DNS Administrator +- Editor + +Select the service account you just created. Select keys. Add a new key and +select JSON. The key gets downloaded to your machine. Save this for later. + +## Create a DNS Zone + +Create a DNS Zone, set the **Zone type** to `Public`. + +### Configure Private Service Connect + +By default, all connections to the Upbound Console are encrypted, but public. +GCP Private Service Connect is a feature that allows VPC peering whereby your +traffic doesn't traverse the public internet. To have this configured, contact +your Upbound Account Representative. + + + +## Provide information to Upbound + +Once these policies get attached to the key, tell your Upbound Account +Representative, providing them the following: + + + +- the full ARN of the KMS key. 
+- the name of the organization that you created in Upbound. Use the up CLI command, `up org list`, to see this information.
+- Confirmation of which region in AWS you want the deployment to target.
+
+
+
+
+
+- The service account JSON key
+- The NS records associated with the DNS name created in the last step.
+- The name of the organization that you created in Upbound. Use the up CLI command, `up org list`, to see this information.
+- Confirmation of which region in GCP you want the deployment to target.
+
+
+
+Once Upbound has this information, the request gets processed within a business day.
+
+## Use your Managed Space
+
+Once the Managed Space gets deployed, you can see it in the Space selector when browsing your environment on [`console.upbound.io`][console-upbound-io].
+
+
+
+
+[contact]: https://www.upbound.io/contact-us
+[aws-privatelink]: #configure-privatelink
+[aws-documentation]: https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_create.html#orgs_manage_accounts_create-new
+[gcp-private-service-connect]: #configure-private-service-connect
+[gcp-documentation]: https://cloud.google.com/resource-manager/docs/creating-managing-organization
+[here]: https://cloud.google.com/apis/docs/getting-started#enabling_apis
+[console-upbound-io]: https://console.upbound.io/
diff --git a/spaces_versioned_docs/version-v1.10/howtos/self-hosted/oidc-configuration.md b/spaces_versioned_docs/version-v1.10/howtos/self-hosted/oidc-configuration.md
new file mode 100644
index 000000000..cbef4dc42
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.10/howtos/self-hosted/oidc-configuration.md
@@ -0,0 +1,289 @@
+---
+title: Configure OIDC
+sidebar_position: 20
+description: Configure OIDC in your Space
+---
+:::important
+This guide is only applicable for administrators who've deployed self-hosted Spaces. For general RBAC in Upbound, read [Upbound RBAC][upbound-rbac].
+:::
+
+Upbound uses the Kubernetes [Structured Authentication Configuration][structured-auth-config] to validate OIDC tokens sent to the API. Upbound stores this
+configuration as a `ConfigMap` and references it on the Upbound router
+component during installation with Helm.
+
+This guide walks you through how to create and apply an authentication
+configuration to validate Upbound with an external identity provider. Each
+section focuses on a specific part of the configuration file.
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments.
+
+For related platform authentication features, see the [Platform manual](../../../../platform/).
+:::
+
+## Creating the `AuthenticationConfiguration` file
+
+First, create a file called `config.yaml` with an `AuthenticationConfiguration`
+kind. The `AuthenticationConfiguration` is the initial authentication structure
+necessary for Upbound to communicate with your chosen identity provider.
+
+```yaml
+apiVersion: apiserver.config.k8s.io/v1beta1
+kind: AuthenticationConfiguration
+jwt:
+- issuer:
+    url: oidc-issuer-url
+    audiences:
+    - oidc-client-id
+  claimMappings: # optional
+    username:
+      claim: oidc-username-claim
+      prefix: oidc-username-prefix
+    groups:
+      claim: oidc-groups-claim
+      prefix: oidc-groups-prefix
+```
+
+For detailed configuration options, including the CEL-based token validation,
+review the feature [documentation][structured-auth-config].
+
+The `AuthenticationConfiguration` allows you to configure multiple JWT
+authenticators as separate issuers.
+
+### Configure an issuer
+
+The `jwt` array requires an `issuer` specification and typically contains:
+
+- A `username` claim mapping
+- A `groups` claim mapping
+
+Optionally, the configuration may also include:
+
+- A set of claim validation rules
+- A set of user validation rules
+
+The `issuer` URL must be unique across all configured authenticators.
+
+```yaml
+issuer:
+  url: https://example.com
+  discoveryUrl: https://discovery.example.com/.well-known/openid-configuration
+  certificateAuthority: |-
+    <PEM-encoded CA bundle>
+  audiences:
+  - client-id-a
+  - client-id-b
+  audienceMatchPolicy: MatchAny
+```
+
+By default, the authenticator assumes the OIDC Discovery URL is
+`{issuer.url}/.well-known/openid-configuration`. Most identity providers follow
+this structure, and you can omit the `discoveryUrl` field. To use a separate
+discovery service, specify the full path to the discovery endpoint in this
+field.
+
+If the CA for the Issuer isn't public, provide the PEM encoded CA for the Discovery URL.
+
+At least one of the `audiences` entries must match the `aud` claim in the JWT.
+For OIDC tokens, this is the Client ID of the application attempting to access
+the Upbound API. Having multiple values set allows the same configuration to
+apply to multiple client applications, for example the `kubectl` CLI and an
+Internal Developer Portal.
+
+If you specify multiple `audiences`, `audienceMatchPolicy` must equal `MatchAny`.
+
+### Configure `claimMappings`
+
+#### Username claim mapping
+
+By default, the authenticator uses the `sub` claim as the user name. To override this, either:
+
+- specify *both* `claim` and `prefix`. `prefix` may be explicitly set to the empty string.
+
+or
+
+- specify a CEL `expression` to calculate the user name.
+
+```yaml
+claimMappings:
+  username:
+    claim: "sub"
+    prefix: "keycloak"
+    # Alternatively, instead of claim and prefix:
+    # expression: 'claims.username + ":external-user"'
+```
+
+#### Groups claim mapping
+
+By default, this configuration doesn't map groups, unless you either:
+
+- specify both `claim` and `prefix`. `prefix` may be explicitly set to the empty string.
+
+or
+
+- specify a CEL `expression` that returns a string or list of strings.
+
+```yaml
+claimMappings:
+  groups:
+    claim: "groups"
+    prefix: ""
+    # Alternatively, instead of claim and prefix:
+    # expression: 'claims.roles.split(",")'
+```
+
+### Validation rules
+
+Validation rules are outside the scope of this document. Review the
+[documentation][structured-auth-config] for more information. Examples include
+using CEL expressions to validate authentication such as:
+
+- Validating that a token claim has a specific value
+- Validating that a token has a limited lifetime
+- Ensuring usernames and groups don't contain reserved prefixes
+
+## Required claims
+
+To interact with Space and ControlPlane APIs, users must have the `upbound.io/aud` claim set to one of the following:
+
+| Upbound.io Audience | Notes |
+| -------------------------------------------------------- | -------------------------------------------------------------------- |
+| `[]` | No Access to Space-level or ControlPlane APIs |
+| `['upbound:spaces:api']` | This Identity is only for Space-level APIs |
+| `['upbound:spaces:controlplanes']` | This Identity is only for ControlPlane APIs |
+| `['upbound:spaces:api', 'upbound:spaces:controlplanes']` | This Identity is for both Space-level and ControlPlane APIs |
+
+You can set this claim in two ways:
+
+- In the identity provider, mapped into the ID token.
+- Injected by the authenticator, with the `jwt.claimMappings.extra` array.
+
+For example:
+
+```yaml
+apiVersion: apiserver.config.k8s.io/v1beta1
+kind: AuthenticationConfiguration
+jwt:
+- issuer:
+    url: https://keycloak:8443/realms/master
+    certificateAuthority: |-
+      <PEM-encoded CA bundle>
+    audiences:
+    - master-realm
+    audienceMatchPolicy: MatchAny
+  claimMappings:
+    username:
+      claim: "preferred_username"
+      prefix: "keycloak:"
+    groups:
+      claim: "groups"
+      prefix: ""
+    extra:
+    - key: 'upbound.io/aud'
+      valueExpression: "['upbound:spaces:controlplanes', 'upbound:spaces:api']"
+```
+
+## Install the `AuthenticationConfiguration`
+
+Once you create an `AuthenticationConfiguration` file, specify this file as a
+`ConfigMap` in the host cluster for the Upbound Space.
+
+```sh
+kubectl create configmap <configmap-name> -n upbound-system --from-file=config.yaml=./path/to/config.yaml
+```
+
+To enable OIDC authentication and disable Upbound IAM when installing the Space,
+reference the configuration and pass an empty value to the Upbound IAM issuer
+parameter:
+
+```sh
+up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
+  ...
+  --set "authentication.structuredConfig=<configmap-name>" \
+  --set "router.controlPlane.extraArgs[0]=--upbound-iam-issuer-url="
+```
+
+## Configure RBAC
+
+In this scenario, the external identity provider handles authentication, but
+permissions for Spaces and ControlPlane APIs use standard RBAC objects.
+
+### Spaces APIs
+
+The Spaces APIs include:
+
+```yaml
+- apiGroups:
+  - spaces.upbound.io
+  resources:
+  - controlplanes
+  - sharedexternalsecrets
+  - sharedsecretstores
+  - backups
+  - backupschedules
+  - sharedbackups
+  - sharedbackupconfigs
+  - sharedbackupschedules
+- apiGroups:
+  - observability.spaces.upbound.io
+  resources:
+  - sharedtelemetryconfigs
+```
+
+### ControlPlane APIs
+
+Crossplane specifies three [roles][crossplane-managed-clusterroles] for a
+ControlPlane: admin, editor, and viewer. These map to the verbs `admin`, `edit`,
+and `view` on the `controlplanes/k8s` resource in the `spaces.upbound.io` API
+group.
+
+### Control access
+
+The `groups` claim in the `AuthenticationConfiguration` allows you to control
+resource access when you create a `ClusterRoleBinding`. A `ClusterRole` defines
+the role parameters, and a `ClusterRoleBinding` grants it to a subject.
+
+The example below allows `admin` permissions for all ControlPlanes to members of
+the `ctp-admins` group:
+
+```yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: allow-ctp-admin
+rules:
+- apiGroups:
+  - spaces.upbound.io
+  resources:
+  - controlplanes/k8s
+  verbs:
+  - admin
+```
+
+The corresponding `ClusterRoleBinding` grants the role to the `ctp-admins` group:
+
+```yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: allow-ctp-admin
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: allow-ctp-admin
+subjects:
+- apiGroup: rbac.authorization.k8s.io
+  kind: Group
+  name: ctp-admins
+```
+
+[structured-auth-config]: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#using-authentication-configuration
+[crossplane-managed-clusterroles]: https://github.com/crossplane/crossplane/blob/master/design/design-doc-rbac-manager.md#managed-rbac-clusterroles
+[upbound-rbac]: /manuals/platform/concepts/authorization/upbound-rbac
diff --git a/spaces_versioned_docs/version-v1.10/howtos/self-hosted/proxies-config.md b/spaces_versioned_docs/version-v1.10/howtos/self-hosted/proxies-config.md
new file mode 100644
index 000000000..3802e4cb0
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.10/howtos/self-hosted/proxies-config.md
@@ -0,0 +1,31 @@
+---
+title: Proxied configuration
+sidebar_position: 20
+description: Configure Upbound within a proxied environment
+---
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments in proxied environments. Proxy configuration procedures are consistent across versions.
+:::
+
+When you install Upbound with Helm in a proxied environment, replace the registry values shown below with your internal registry.
+
+```bash
+helm -n upbound-system upgrade --install spaces \
+  oci://xpkg.upbound.io/spaces-artifacts/spaces \
+  --version "${SPACES_VERSION}" \
+  --set "ingress.host=${SPACES_ROUTER_HOST}" \
+  --set "account=${UPBOUND_ACCOUNT}" \
+  --set "authentication.hubIdentities=true" \
+  --set "authorization.hubRBAC=true" \
+  --set "registry=registry.company.corp/spaces" \
+  --set "controlPlanes.uxp.registryOverride=registry.company.corp/xpkg.upbound.io" \
+  --set "controlPlanes.uxp.repository=registry.company.corp/spaces" \
+  --wait
+```
diff --git a/spaces_versioned_docs/version-v1.10/howtos/self-hosted/query-api.md b/spaces_versioned_docs/version-v1.10/howtos/self-hosted/query-api.md
new file mode 100644
index 000000000..c112e9001
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.10/howtos/self-hosted/query-api.md
@@ -0,0 +1,396 @@
+---
+title: Deploy Query API infrastructure
+weight: 130
+description: Query API
+aliases:
+  - /all-spaces/self-hosted-spaces/query-api
+  - /self-hosted-spaces/query-api
+  - all-spaces/self-hosted-spaces/query-api
+---
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+). Query API has evolved across versions:
+
+- **Cloud Spaces**: Available since v1.6 (enabled by default)
+- **Self-Hosted**: Available since v1.8 (requires manual enablement)
+:::
+
+:::important
+
+This feature is in preview. The Query API is available in the Cloud Space offering in `v1.6` and enabled by default.
+
+Since `v1.8.0`, the Query API is a requirement for connecting a Space. It's off by default in self-hosted Spaces; see below to enable it.
+
+:::
+
+Upbound's Query API allows users to inspect objects and resources within their control planes. The read-only `up alpha query` and `up alpha get` CLI commands let you gather information about your control planes quickly and efficiently. These commands follow the [`kubectl` conventions][kubectl-conventions] for filtering, sorting, and retrieving information from your Space.
+
+The Query API requires a PostgreSQL database to store the data. You can use the default PostgreSQL instance provided by Upbound or bring your own PostgreSQL instance.
+
+## Managed setup
+
+:::tip
+If you don't have specific requirements for your setup, Upbound recommends following this approach.
+:::
+
+To enable this feature, set `features.alpha.apollo.enabled=true` and `apollo.apollo.storage.postgres.create=true` when installing Spaces.
+
+You also need to install CloudNativePG (`CNPG`) to provide the PostgreSQL instance. You can let the `up` CLI do this for you, or install it manually.
+
+For more customization, see the [Helm chart reference][helm-chart-reference]. You can modify the number
+of PostgreSQL instances, pooling instances, storage size, and more.
+
+If you have specific requirements not addressed in the Helm chart, see below for more information on how to bring your own [PostgreSQL setup][postgresql-setup].
+
+### Using the up CLI
+
+Before you begin, make sure you have the most recent version of the [`up` CLI installed][up-cli-installed].
+
+To enable this feature, set `features.alpha.apollo.enabled=true` and `apollo.apollo.storage.postgres.create=true` when installing Spaces:
+
+```bash
+up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
+  ...
+  --set "features.alpha.apollo.enabled=true" \
+  --set "apollo.apollo.storage.postgres.create=true"
+```
+
+`up space init` and `up space upgrade` install CloudNativePG automatically, if needed.
+
+### Helm chart
+
+If you are installing the Helm chart in some other way, you can manually install CloudNativePG in one of the [supported ways][supported-ways], for example:
+
+```shell
+kubectl apply --server-side -f \
+  https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.24.1/cnpg-1.24.1.yaml
+kubectl rollout status -n cnpg-system deployment cnpg-controller-manager -w --timeout 120s
+```
+
+Next, install the Spaces Helm chart with the necessary values, for example:
+
+```shell
+helm -n upbound-system upgrade --install spaces \
+  oci://xpkg.upbound.io/spaces-artifacts/spaces \
+  --version "${SPACES_VERSION}" \
+  ...
+  --set "features.alpha.apollo.enabled=true" \
+  --set "apollo.apollo.storage.postgres.create=true" \
+  --wait
+```
+
+## Self-hosted PostgreSQL configuration
+
+If your workflow requires more customization, you can provide your own
+PostgreSQL instance and configure credentials manually.
+
+Using your own PostgreSQL instance requires careful architecture consideration.
+Review the architecture and requirements guidelines.
+
+### Architecture
+
+The Query API architecture adds the following components alongside a PostgreSQL database:
+
+* **Apollo Syncers**: Watch `etcd` for changes and sync them to PostgreSQL. One or more per control plane.
+* **Apollo Server**: Serves the Query API from the data in PostgreSQL. One or more per Space.
+
+The default setup also uses the `PgBouncer` connection pooler to manage connections from the syncers.
+
+```mermaid +graph LR + User[User] + + subgraph Cluster["Cluster (Spaces)"] + direction TB + Apollo[apollo] + + subgraph ControlPlanes["Control Planes"] + APIServer[API Server] + Syncer[apollo-syncer] + end + end + + PostgreSQL[(PostgreSQL)] + + User -->|requests| Apollo + + Apollo -->|connects| PostgreSQL + Apollo -->|creates schemas & users| PostgreSQL + + Syncer -->|watches| APIServer + Syncer -->|writes| PostgreSQL + + PostgreSQL -->|data| Apollo + + style PostgreSQL fill:#e1f5ff,stroke:#333,stroke-width:2px,color:#000 + style Apollo fill:#ffe1e1,stroke:#333,stroke-width:2px,color:#000 + style Cluster fill:#f0f0f0,stroke:#333,stroke-width:2px,color:#000 + style ControlPlanes fill:#fff,stroke:#666,stroke-width:1px,stroke-dasharray: 5 5,color:#000 +``` + + +Each component needs to connect to the PostgreSQL database. + +In the event of database issues, you can provide a new database and the syncers +automatically repopulate the data. + +### Requirements + +* A PostgreSQL 16 instance or cluster. +* A database, for example named `upbound`. +* **Optional**: A dedicated user for the Apollo Syncers, otherwise the Spaces Controller generates a dedicated set of credentials per syncer with the necessary permissions, for example named `syncer`. +* A dedicated **superuser or admin account** for the Apollo Server. +* **Optional**: A connection pooler, like PgBouncer, to manage connections from the Apollo Syncers. If you didn't provide the optional users, you might have to configure the pooler to allow users to connect using the same credentials as PostgreSQL. +* **Optional**: A read replica for the Apollo Syncers to connect to, to reduce load on the primary database, this might cause a slight delay in the data being available through the Query API. + +Below you can find examples of setups to get you started, you can mix and match the examples to suit your needs. + +### In-cluster setup + +:::tip + +If you don't have strong opinions on your setup, but still want full control on +the resources created for some unsupported customizations, Upbound recommends +the in-cluster setup. + +::: + +For more customization than the managed setup, you can use CloudNativePG for +PostgreSQL in the same cluster. + +For in-cluster setup, manually deploy the operator in one of the [supported ways][supported-ways-1], for example: + +```shell +kubectl apply --server-side -f \ + https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.24.1/cnpg-1.24.1.yaml +kubectl rollout status -n cnpg-system deployment cnpg-controller-manager -w --timeout 120s +``` + +Then create a `Cluster` and `Pooler` in the `upbound-system` namespace, for example: + +```shell +kubectl create ns upbound-system + +kubectl apply -f - < + +### External setup + + +:::tip + +If you want to run your PostgreSQL instance outside the cluster, but are fine with credentials being managed by the `apollo` user, this is the suggested way to proceed. + +::: + +When using this setup, you must manually create the required Secrets in the +`upbound-system` namespace. The `apollo` user must have permissions to create +schemas and users. 
+
+```shell
+kubectl create ns upbound-system
+
+# A Secret containing the necessary credentials to connect to the PostgreSQL instance
+kubectl create secret generic spaces-apollo-pg-app -n upbound-system \
+  --from-literal=password=supersecret
+
+# A Secret containing the necessary CA certificate to verify the connection to the PostgreSQL instance
+kubectl create secret generic spaces-apollo-pg-ca -n upbound-system \
+  --from-file=ca.crt=/path/to/ca.crt
+```
+
+Next, install Spaces with the necessary settings:
+
+```shell
+export PG_URL=your-postgres-host:5432
+export PG_POOLED_URL=your-pgbouncer-host:5432 # this could be the same as above
+
+helm upgrade --install ... \
+  --set "features.alpha.apollo.enabled=true" \
+  --set "apollo.apollo.storage.postgres.create=false" \
+  --set "apollo.apollo.storage.postgres.connection.url=$PG_URL" \
+  --set "apollo.apollo.storage.postgres.connection.credentials.secret.name=spaces-apollo-pg-app" \
+  --set "apollo.apollo.storage.postgres.connection.credentials.format=basicauth" \
+  --set "apollo.apollo.storage.postgres.connection.ca.name=spaces-apollo-pg-ca" \
+  --set "apollo.apollo.storage.postgres.connection.syncer.url=$PG_POOLED_URL"
+```
+
+### External setup with all custom credentials
+
+To use custom credentials for the Apollo Syncers or Server, create additional Secrets in the
+`upbound-system` namespace:
+
+```shell
+export APOLLO_SYNCER_USER=syncer
+export APOLLO_SERVER_USER=apollo
+
+kubectl create ns upbound-system
+
+# A Secret containing the necessary credentials to connect to the PostgreSQL instance
+kubectl create secret generic spaces-apollo-pg-app -n upbound-system \
+  --from-literal=password=supersecret
+
+# A Secret containing the necessary CA certificate to verify the connection to the PostgreSQL instance
+kubectl create secret generic spaces-apollo-pg-ca -n upbound-system \
+  --from-file=ca.crt=/path/to/ca.crt
+
+# A Secret containing the necessary credentials for the Apollo Syncers to connect to the PostgreSQL instance.
+# These will be used by all Syncers in the Space.
+kubectl create secret generic spaces-apollo-pg-syncer -n upbound-system \
+  --from-literal=username=$APOLLO_SYNCER_USER \
+  --from-literal=password=supersecret
+
+# A Secret containing the necessary credentials for the Apollo Server to connect to the PostgreSQL instance.
+kubectl create secret generic spaces-apollo-pg-apollo -n upbound-system \
+  --from-literal=username=$APOLLO_SERVER_USER \
+  --from-literal=password=supersecret
+```
+
+Next, install Spaces with the necessary settings:
+
+```shell
+export PG_URL=your-postgres-host:5432
+export PG_POOLED_URL=your-pgbouncer-host:5432 # this could be the same as above
+
+helm ... \
+  --set "features.alpha.apollo.enabled=true" \
+  --set "apollo.apollo.storage.postgres.create=false" \
+  --set "apollo.apollo.storage.postgres.connection.url=$PG_URL" \
+  --set "apollo.apollo.storage.postgres.connection.credentials.secret.name=spaces-apollo-pg-app" \
+  --set "apollo.apollo.storage.postgres.connection.credentials.format=basicauth" \
+  --set "apollo.apollo.storage.postgres.connection.ca.name=spaces-apollo-pg-ca" \
+  --set "apollo.apollo.storage.postgres.connection.syncer.url=$PG_POOLED_URL" \
+  --set "apollo.apollo.storage.postgres.connection.syncer.credentials.format=basicauth" \
+  --set "apollo.apollo.storage.postgres.connection.syncer.credentials.user=$APOLLO_SYNCER_USER" \
+  --set "apollo.apollo.storage.postgres.connection.syncer.credentials.secret.name=spaces-apollo-pg-syncer" \
+  --set "apollo.apollo.storage.postgres.connection.apollo.credentials.format=basicauth" \
+  --set "apollo.apollo.storage.postgres.connection.apollo.credentials.user=$APOLLO_SERVER_USER" \
+  --set "apollo.apollo.storage.postgres.connection.apollo.credentials.secret.name=spaces-apollo-pg-apollo" \
+  --set "apollo.apollo.storage.postgres.connection.apollo.url=$PG_POOLED_URL"
+```
+
+The `connection.syncer.*` flags configure the credentials used by the Apollo Syncers, and the `connection.apollo.*` flags configure the credentials used by the Apollo Server.
+
+
+## Using the Query API
+
+
+See the [Query API documentation][query-api-documentation] for more information on how to use the Query API.
+
+
+
+
+[postgresql-setup]: #self-hosted-postgresql-configuration
+[up-cli-installed]: /manuals/cli/overview
+[query-api-documentation]: /spaces/howtos/query-api
+
+[helm-chart-reference]: /reference/helm-reference
+[kubectl-conventions]: https://kubernetes.io/docs/reference/kubectl/generated/kubectl_get/
+[supported-ways]: https://cloudnative-pg.io/documentation/current/installation_upgrade/
+[supported-ways-1]: https://cloudnative-pg.io/documentation/current/installation_upgrade/
+[cloudnativepg-documentation]: https://cloudnative-pg.io/documentation/1.24/storage/#configuration-via-a-pvc-template
+[postgresql-cluster]: https://cloudnative-pg.io/documentation/1.24/resource_management/
+[pooler]: https://cloudnative-pg.io/documentation/1.24/connection_pooling/#pod-templates
+[postgresql-cluster-2]: https://cloudnative-pg.io/documentation/1.24/replication/
+[pooler-3]: https://cloudnative-pg.io/documentation/1.24/connection_pooling/#high-availability-ha
+[postgresql-cluster-4]: https://cloudnative-pg.io/documentation/1.24/operator_capability_levels/#override-of-operand-images-through-the-crd
+[pooler-5]: https://cloudnative-pg.io/documentation/1.24/connection_pooling/#pod-templates
+[cloudnativepg-documentation-6]: https://cloudnative-pg.io/documentation/1.24/postgresql_conf/
diff --git a/spaces_versioned_docs/version-v1.10/howtos/self-hosted/scaling-resources.md b/spaces_versioned_docs/version-v1.10/howtos/self-hosted/scaling-resources.md
new file mode 100644
index 000000000..7bb04d2c2
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.10/howtos/self-hosted/scaling-resources.md
@@ -0,0 +1,184 @@
+---
+title: Scaling vCluster and etcd Resources
+weight: 950
+description: A guide for scaling vCluster and etcd resources in self-hosted Spaces
+aliases:
+  - /all-spaces/self-hosted-spaces/scaling-resources
+  - /spaces/scaling-resources
+---
+
+With large workloads or during control plane migration, you may encounter
+performance-impacting resource constraints. This guide explains how to scale
+vCluster and `etcd` resources for optimal performance in your self-hosted Space.
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Scaling procedures are consistent across versions.
+
+For version-specific resource requirements and capacity planning, see the .
+:::
+
+## Signs of resource constraints
+
+You may need to scale your vCluster or `etcd` resources if you observe:
+
+- API server timeout errors such as `http: Handler timeout`
+- Error messages about `too many requests` and requests to `try again later`
+- Operations like provider installation failing with errors like `cannot apply provider package secret`
+- vCluster pods experiencing continuous restarts
+- API performance degrading with high resource volume
+
+
+## Scaling vCluster resources
+
+
+The vCluster component handles Kubernetes API requests for your control planes.
+
+Deployments with multiple control planes or providers may exceed default resource allocations.
+
+```yaml
+# Default settings
+controlPlanes.vcluster.resources.limits.cpu: "3000m"
+controlPlanes.vcluster.resources.limits.memory: "3960Mi"
+controlPlanes.vcluster.resources.requests.cpu: "170m"
+controlPlanes.vcluster.resources.requests.memory: "1320Mi"
+```
+
+For larger workloads, like migrating from an existing control plane with several
+providers, increase these resource limits in your Spaces `values.yaml` file.
+
+```yaml
+controlPlanes:
+  vcluster:
+    resources:
+      limits:
+        cpu: "4000m" # Increase to 4 cores
+        memory: "6Gi" # Increase to 6GB memory
+      requests:
+        cpu: "500m" # Increase baseline CPU request
+        memory: "2Gi" # Increase baseline memory request
+```
+
+## Scaling `etcd` storage
+
+Kubernetes API performance depends heavily on `etcd`, and `etcd` storage can
+become an IOPS (input/output operations per second) bottleneck. Upbound
+allocates `50Gi` volumes for `etcd` in cloud environments to ensure adequate
+IOPS performance.
+
+```yaml
+# Default setting
+controlPlanes.etcd.persistence.size: "5Gi"
+```
+
+For production environments or when migrating large control planes, increase
+`etcd` volume size and specify an appropriate storage class:
+
+```yaml
+controlPlanes:
+  etcd:
+    persistence:
+      size: "50Gi" # Recommended for production
+      storageClassName: "fast-ssd" # Use a high-performance storage class
+```
+
+### Storage class considerations
+
+For AWS:
+- Use GP3 volumes with adequate IOPS
+- For AWS GP3 volumes, the baseline is 3000 IOPS, and you can provision more IOPS on larger volumes
+- For optimal performance, provision at least 32Gi to support up to 16,000 IOPS
+
+For GCP and Azure:
+- Use SSD-based persistent disk types for optimal performance
+- Consider premium storage options for high-throughput workloads
+
+## Scaling Crossplane resources
+
+Crossplane manages provider resources in your control planes. You may need to increase provider resources for larger deployments:
+
+```yaml
+# Default settings
+controlPlanes.uxp.resourcesCrossplane.requests.cpu: "370m"
+controlPlanes.uxp.resourcesCrossplane.requests.memory: "400Mi"
+```
+
+
+For environments with many providers or managed resources:
+
+
+```yaml
+controlPlanes:
+  uxp:
+    resourcesCrossplane:
+      limits:
+        cpu: "1000m" # Add CPU limit
+        memory: "1Gi" # Add memory limit
+      requests:
+        cpu: "500m" # Increase CPU request
+        memory: "512Mi" # Increase memory request
+```
+
+## High availability configuration
+
+For production environments, enable High Availability mode to ensure resilience:
+
+```yaml
+controlPlanes:
+  ha:
+    enabled: true
+```
+
+## Best practices for migration scenarios
+
+When migrating from existing control planes into a self-hosted Space:
+
+1. **Pre-scale resources**: Scale up resources before performing the migration
+2. **Monitor resource usage**: Watch resource consumption during and after migration with `kubectl top pods`
+3. **Scale incrementally**: If issues persist, increase resources incrementally until performance stabilizes
+4. **Consider storage performance**: `etcd` is sensitive to storage I/O performance
+
+## Helm values configuration
+
+Apply these settings through your Spaces Helm values file:
+
+```yaml
+controlPlanes:
+  vcluster:
+    resources:
+      limits:
+        cpu: "4000m"
+        memory: "6Gi"
+      requests:
+        cpu: "500m"
+        memory: "2Gi"
+  etcd:
+    persistence:
+      size: "50Gi"
+      storageClassName: "gp3" # Use your cloud provider's fast storage class
+  uxp:
+    resourcesCrossplane:
+      limits:
+        cpu: "1000m"
+        memory: "1Gi"
+      requests:
+        cpu: "500m"
+        memory: "512Mi"
+  ha:
+    enabled: true # For production environments
+```
+
+Apply the configuration using Helm:
+
+```bash
+helm upgrade --install spaces oci://xpkg.upbound.io/spaces-artifacts/spaces \
+  -f values.yaml \
+  -n upbound-system
+```
+
+## Considerations
+
+- **Provider count**: Each provider adds resource overhead; consider using provider families to optimize resource usage
+- **Managed resources**: The number of managed resources impacts CPU usage more than memory
+- **Vertical pod autoscaling**: Consider using vertical pod autoscaling in Kubernetes to automatically adjust resources based on usage
+- **Storage performance**: Storage performance is as important as capacity for `etcd`
+- **Network latency**: Low-latency connections between components improve performance
+
+
diff --git a/spaces_versioned_docs/version-v1.10/howtos/self-hosted/self-hosted-spaces-deployment.md b/spaces_versioned_docs/version-v1.10/howtos/self-hosted/self-hosted-spaces-deployment.md
new file mode 100644
index 000000000..e549e3939
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.10/howtos/self-hosted/self-hosted-spaces-deployment.md
@@ -0,0 +1,461 @@
+---
+title: Deployment Workflow
+sidebar_position: 3
+description: A quickstart guide for Upbound Spaces
+tier: "business"
+---
+import GlobalLanguageSelector, { CodeBlock } from '@site/src/components/GlobalLanguageSelector';
+
+
+
+
+
+This guide deploys a self-hosted Upbound cluster in AWS.
+
+
+
+
+
+This guide deploys a self-hosted Upbound cluster in Azure.
+
+
+
+
+
+This guide deploys a self-hosted Upbound cluster in GCP.
+
+
+
+Disconnected Spaces allows you to host control planes in your preferred environment.
+
+## Prerequisites
+
+To get started deploying your own Disconnected Space, you need:
+
+- An Upbound organization account string, provided by your Upbound account representative
+- A `token.json` license, provided by your Upbound account representative
+
+
+
+- An AWS account and the AWS CLI
+
+
+
+
+
+- An Azure account and the Azure CLI
+
+
+
+
+
+- A GCP account and the GCP CLI
+
+
+
+:::important
+Disconnected Spaces is a business-critical feature of Upbound and requires a license token to successfully complete the installation. [Contact Upbound][contact-upbound] if you want to try out Upbound with Disconnected Spaces.
+:::
+
+## Provision the hosting environment
+
+### Create a cluster
+
+
+
+Configure the name and target region you want the EKS cluster deployed to.
+
+```ini
+export SPACES_CLUSTER_NAME=upbound-space-quickstart
+export SPACES_REGION=us-east-1
+```
+
+Provision a 3-node cluster using eksctl.
+
+```bash
+# The cluster configuration below is an illustrative minimal example; the
+# node instance type is an assumption to adjust for your workload.
+cat <<EOF | eksctl create cluster -f -
+apiVersion: eksctl.io/v1alpha5
+kind: ClusterConfig
+metadata:
+  name: ${SPACES_CLUSTER_NAME}
+  region: ${SPACES_REGION}
+managedNodeGroups:
+  - name: ng-1
+    instanceType: m5.xlarge
+    desiredCapacity: 3
+EOF
+```
+
+
+
+
+
+Configure the name and target region you want the AKS cluster deployed to.
+
+```ini
+export SPACES_RESOURCE_GROUP_NAME=upbound-space-quickstart
+export SPACES_CLUSTER_NAME=upbound-space-quickstart
+export SPACES_LOCATION=westus
+```
+
+Provision a new Azure resource group.
+
+```bash
+az group create --name ${SPACES_RESOURCE_GROUP_NAME} --location ${SPACES_LOCATION}
+```
+
+Provision a 3-node cluster.
+
+```bash
+az aks create -g ${SPACES_RESOURCE_GROUP_NAME} -n ${SPACES_CLUSTER_NAME} \
+  --enable-managed-identity \
+  --node-count 3 \
+  --node-vm-size Standard_D4s_v4 \
+  --enable-addons monitoring \
+  --enable-msi-auth-for-monitoring \
+  --generate-ssh-keys \
+  --network-plugin kubenet \
+  --network-policy calico
+```
+
+Get the kubeconfig of your AKS cluster.
+
+```bash
+az aks get-credentials --resource-group ${SPACES_RESOURCE_GROUP_NAME} --name ${SPACES_CLUSTER_NAME}
+```
+
+
+
+
+
+Configure the name and target region you want the GKE cluster deployed to.
+
+```ini
+export SPACES_PROJECT_NAME=upbound-spaces-project
+export SPACES_CLUSTER_NAME=upbound-spaces-quickstart
+export SPACES_LOCATION=us-west1-a
+```
+
+Create a new project and set it as the current project.
+
+```bash
+gcloud projects create ${SPACES_PROJECT_NAME}
+gcloud config set project ${SPACES_PROJECT_NAME}
+```
+
+Provision a 3-node cluster.
+
+```bash
+gcloud container clusters create ${SPACES_CLUSTER_NAME} \
+  --enable-network-policy \
+  --num-nodes=3 \
+  --zone=${SPACES_LOCATION} \
+  --machine-type=e2-standard-4
+```
+
+Get the kubeconfig of your GKE cluster.
+
+```bash
+gcloud container clusters get-credentials ${SPACES_CLUSTER_NAME} --zone=${SPACES_LOCATION}
+```
+
+
+
+## Configure the pre-install
+
+### Set your Upbound organization account details
+
+Set your Upbound organization account string as an environment variable for use in future steps.
+
+```ini
+export UPBOUND_ACCOUNT=
+```
+
+### Set up pre-install configurations
+
+Export the path of the license token JSON file provided by your Upbound account representative.
+
+```ini {copy-lines="2"}
+# Change the path to where you saved the token.
+export SPACES_TOKEN_PATH="/path/to/token.json"
+```
+
+Set the version of Spaces software you want to install.
+
+```ini
+export SPACES_VERSION=
+```
+
+Set the router host. The `SPACES_ROUTER_HOST` is the domain name that's used to access the control plane instances. It's used by the ingress controller to route requests.
+
+```ini
+export SPACES_ROUTER_HOST="proxy.upbound-127.0.0.1.nip.io"
+```
+
+:::important
+Make sure to replace the placeholder text in `SPACES_ROUTER_HOST` and provide a real domain that you own.
+:::
+
+
+## Install the Spaces software
+
+
+### Install cert-manager
+
+Install cert-manager.
+
+```bash
+kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.11.0/cert-manager.yaml
+kubectl wait deployment -n cert-manager cert-manager-webhook --for condition=Available=True --timeout=360s
+```
+
+
+
+### Install the AWS Load Balancer Controller
+
+```bash
+helm install aws-load-balancer-controller aws-load-balancer-controller --namespace kube-system \
+  --repo https://aws.github.io/eks-charts \
+  --set clusterName=${SPACES_CLUSTER_NAME} \
+  --set serviceAccount.create=false \
+  --set serviceAccount.name=aws-load-balancer-controller \
+  --wait
+```
+
+
+
+### Install ingress-nginx
+
+Starting with Spaces v1.10.0, you need to configure the ingress-nginx
+controller to allow SSL-passthrough mode. You can do so by passing the
+`--enable-ssl-passthrough=true` command-line option to the controller.
+The following Helm install command enables this with the `controller.extraArgs` +parameter: + + + +```bash +helm upgrade --install ingress-nginx ingress-nginx \ + --create-namespace --namespace ingress-nginx \ + --repo https://kubernetes.github.io/ingress-nginx \ + --version 4.12.1 \ + --set 'controller.service.type=LoadBalancer' \ + --set 'controller.extraArgs.enable-ssl-passthrough=true' \ + --set 'controller.service.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-type=external' \ + --set 'controller.service.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-scheme=internet-facing' \ + --set 'controller.service.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-nlb-target-type=ip' \ + --set 'controller.service.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-healthcheck-protocol=http' \ + --set 'controller.service.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-healthcheck-path=/healthz' \ + --set 'controller.service.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-healthcheck-port=10254' \ + --wait +``` + + + + + +```bash +helm upgrade --install ingress-nginx ingress-nginx \ + --create-namespace --namespace ingress-nginx \ + --repo https://kubernetes.github.io/ingress-nginx \ + --version 4.12.1 \ + --set 'controller.service.type=LoadBalancer' \ + --set 'controller.extraArgs.enable-ssl-passthrough=true' \ + --set 'controller.service.annotations.service\.beta\.kubernetes\.io/azure-load-balancer-health-probe-request-path=/healthz' \ + --wait +``` + + + + + +```bash +helm upgrade --install ingress-nginx ingress-nginx \ + --create-namespace --namespace ingress-nginx \ + --repo https://kubernetes.github.io/ingress-nginx \ + --version 4.12.1 \ + --set 'controller.service.type=LoadBalancer' \ + --set 'controller.extraArgs.enable-ssl-passthrough=true' \ + --wait +``` + + + +### Install Upbound Spaces software + +Create an image pull secret so that the cluster can pull Upbound Spaces images. + +```bash +kubectl create ns upbound-system +kubectl -n upbound-system create secret docker-registry upbound-pull-secret \ + --docker-server=https://xpkg.upbound.io \ + --docker-username="$(jq -r .accessId $SPACES_TOKEN_PATH)" \ + --docker-password="$(jq -r .token $SPACES_TOKEN_PATH)" +``` + +Log in with Helm to be able to pull chart images for the installation commands. + +```bash +jq -r .token $SPACES_TOKEN_PATH | helm registry login xpkg.upbound.io -u $(jq -r .accessId $SPACES_TOKEN_PATH) --password-stdin +``` + +Install the Spaces software. + +```bash +helm -n upbound-system upgrade --install spaces \ + oci://xpkg.upbound.io/spaces-artifacts/spaces \ + --version "${SPACES_VERSION}" \ + --set "ingress.host=${SPACES_ROUTER_HOST}" \ + --set "account=${UPBOUND_ACCOUNT}" \ + --set "authentication.hubIdentities=true" \ + --set "authorization.hubRBAC=true" \ + --wait +``` + +### Create a DNS record + +:::important +If you chose to create a public ingress, you also need to create a DNS record for the load balancer of the public facing ingress. Do this before you create your first control plane. +::: + +Create a DNS record for the load balancer of the public facing ingress. 
To get the address for the Ingress, run the following:
+
+
+
+```bash
+kubectl get ingress \
+  -n upbound-system mxe-router-ingress \
+  -o jsonpath='{.status.loadBalancer.ingress[0].hostname}'
+```
+
+
+
+
+
+```bash
+kubectl get ingress \
+  -n upbound-system mxe-router-ingress \
+  -o jsonpath='{.status.loadBalancer.ingress[0].ip}'
+```
+
+
+
+
+
+```bash
+kubectl get ingress \
+  -n upbound-system mxe-router-ingress \
+  -o jsonpath='{.status.loadBalancer.ingress[0].ip}'
+```
+
+
+
+If the preceding command doesn't return a load balancer address, then your provider may not have allocated it yet. Once it's available, add a DNS record for the `SPACES_ROUTER_HOST` to point to the given load balancer address. If it's an IPv4 address, add an A record. If it's a domain name, add a CNAME record.
+
+## Configure the up CLI
+
+With your kubeconfig pointed at the Kubernetes cluster where you installed
+Upbound Spaces, create a new profile in the `up` CLI. This profile interacts
+with your Space:
+
+```bash
+up profile create --use ${SPACES_CLUSTER_NAME} --type=disconnected --organization ${UPBOUND_ACCOUNT}
+```
+
+Optionally, log in to your Upbound account using the new profile so you can use the Upbound Marketplace with this profile as well:
+
+```bash
+up login
+```
+
+
+## Connect to your Space
+
+
+Use `up ctx` to create a kubeconfig context pointed at your new Space:
+
+```bash
+up ctx disconnected/$(kubectl config current-context)
+```
+
+## Create your first control plane
+
+You can now create a control plane with the `up` CLI:
+
+```bash
+up ctp create ctp1
+```
+
+You can also create a control plane with kubectl:
+
+```bash
+# Illustrative manifest; it mirrors the ControlPlane example used elsewhere in these docs.
+cat <<EOF | kubectl apply -f -
+apiVersion: spaces.upbound.io/v1beta1
+kind: ControlPlane
+metadata:
+  name: ctp1
+  namespace: default
+EOF
+```
+
+```yaml
+observability:
+  spacesCollector:
+    env:
+      - name: API_KEY
+        valueFrom:
+          secretKeyRef:
+            name: my-secret
+            key: api-key
+    config:
+      exporters:
+        otlphttp:
+          endpoint: ""
+          headers:
+            api-key: ${env:API_KEY}
+      exportPipeline:
+        logs:
+          - otlphttp
+        metrics:
+          - otlphttp
+        traces:
+          - otlphttp
+```
+
+
+You can export metrics, logs, and traces from your Crossplane installation, Spaces
+infrastructure (controller, API, router, etc.), provider-helm, and
+provider-kubernetes.
+
+### Router metrics
+
+The Spaces router component uses Envoy as a reverse proxy and exposes detailed
+metrics about request handling, circuit breakers, and connection pooling.
+Upbound collects these metrics in your Space after you enable Space-level
+observability.
+
+Envoy metrics in Upbound include:
+
+- **Upstream cluster metrics** - Request status codes, timeouts, retries, and latency for traffic to control planes and services
+- **Circuit breaker metrics** - Connection and request circuit breaker state for both `DEFAULT` and `HIGH` priority levels
+- **Downstream listener metrics** - Client connections and requests received
+- **HTTP connection manager metrics** - End-to-end HTTP request processing and latency
+
+For a complete list of available router metrics and example PromQL queries, see the [Router metrics reference][router-ref].
+
+### Router tracing
+
+The Spaces router generates distributed traces through OpenTelemetry integration,
+providing end-to-end visibility into request flow across the system. Use these
+traces to debug latency issues, understand request paths, and correlate errors
+across services.
+
+The router uses:
+
+- **Protocol**: OTLP (OpenTelemetry Protocol) over gRPC
+- **Service name**: `spaces-router`
+- **Transport**: TLS-encrypted connection to telemetry collector
+
+#### Trace configuration
+
+Enable tracing and configure the sampling rate with the following Helm values:
+
+```yaml
+observability:
+  enabled: true
+  tracing:
+    enabled: true
+    sampling:
+      rate: 0.1 # Sample 10% of new traces (0.0-1.0)
+```
+
+The sampling behavior depends on whether a parent trace context exists:
+
+- **With parent context**: If a `traceparent` header is present, the parent's
+  sampling decision is respected, enabling proper distributed tracing across services.
+- **Root spans**: For new traces without a parent, Envoy samples based on
+  `x-request-id` hashing. The default sampling rate is 10%.
+
+#### TLS configuration for external collectors
+
+To send traces to an external OTLP collector, configure the endpoint and TLS settings:
+
+```yaml
+observability:
+  enabled: true
+  tracing:
+    enabled: true
+    endpoint: "otlp-gateway.example.com"
+    port: 443
+    tls:
+      caBundleSecretRef: "custom-ca-secret"
+```
+
+If `caBundleSecretRef` is set, the router uses the CA bundle from the referenced
+Kubernetes secret. The secret must contain a key named `ca.crt` with the
+PEM-encoded CA bundle. If not set, the router uses the Spaces CA for the
+in-cluster collector.
+
+#### Custom trace tags
+
+The router adds custom tags to every span to enable filtering and grouping by
+control plane:
+
+| Tag | Source | Description |
+|-----|--------|-------------|
+| `controlplane.id` | `x-upbound-mxp-id` header | Control plane UUID |
+| `controlplane.name` | `x-upbound-mxp-host` header | Internal vcluster hostname |
+| `hostcluster.id` | `x-upbound-hostcluster-id` header | Host cluster identifier |
+
+These tags enable queries like "show all slow requests to control plane X" or
+"find errors for control planes in host cluster Y."
+
+#### Example trace
+
+The following example shows the attributes from a successful GET request:
+
+```text
+Span: ingress
+├─ Service: spaces-router
+├─ Duration: 8.025ms
+├─ Attributes:
+│  ├─ http.method: GET
+│  ├─ http.status_code: 200
+│  ├─ upstream_cluster: ctp-b2b37aaa-ee55-492c-ba0c-4d561a6325fa-api-cluster
+│  ├─ controlplane.id: b2b37aaa-ee55-492c-ba0c-4d561a6325fa
+│  ├─ controlplane.name: vcluster.mxp-b2b37aaa-ee55-492c-ba0c-4d561a6325fa-system
+│  └─ response_size: 1827
+```
+
+## Available metrics
+
+Space-level observability collects metrics from multiple infrastructure components:
+
+### Infrastructure component metrics
+
+- Crossplane controller metrics
+- Spaces controller, API, and router metrics
+- Provider metrics (provider-helm, provider-kubernetes)
+
+### Router metrics
+
+The router component exposes Envoy proxy metrics for monitoring traffic flow and
+service health.
Key metric categories include:
+
+- `envoy_cluster_upstream_rq_*` - Upstream request metrics (status codes, timeouts, retries, latency)
+- `envoy_cluster_circuit_breakers_*` - Circuit breaker state and capacity
+- `envoy_listener_downstream_*` - Client connection and request metrics
+- `envoy_http_downstream_*` - HTTP request processing metrics
+
+Example query to monitor total request rate:
+
+```promql
+sum(rate(envoy_cluster_upstream_rq_total{job="spaces-router-envoy"}[5m]))
+```
+
+Example query for P95 latency:
+
+```promql
+histogram_quantile(
+  0.95,
+  sum by (le) (
+    rate(envoy_cluster_upstream_rq_time_bucket{job="spaces-router-envoy"}[5m])
+  )
+)
+```
+
+For detailed router metrics documentation and more query examples, see the [Router metrics reference][router-ref].
+
+
+## OpenTelemetryCollector image
+
+
+Control plane (`SharedTelemetry`) and Space observability deploy the same custom
+OpenTelemetry Collector image. The OpenTelemetry Collector image supports the
+`otlphttp`, `datadog`, and `debug` exporters.
+
+For more information on observability configuration, review the [Helm chart reference][helm-chart-reference].
+
+## Observability in control planes
+
+Read the [observability documentation][observability-documentation] to learn
+about the features Upbound offers for collecting telemetry from control planes.
+
+
+## Router metrics reference {#router-ref}
+
+To avoid overwhelming observability tools with hundreds of Envoy metrics, an
+allow-list filters metrics to only the following metric families.
+
+### Upstream cluster metrics
+
+Metrics tracking requests sent from Envoy to configured upstream clusters.
+Individual control planes, spaces-api, and other services are each considered
+an upstream cluster. Use these metrics to monitor service health, identify
+upstream errors, and measure backend latency.
+
+| Metric | Description |
+|--------|-------------|
+| `envoy_cluster_upstream_rq_xx_total` | HTTP status codes (2xx, 3xx, 4xx, 5xx) with label `envoy_response_code_class` |
+| `envoy_cluster_upstream_rq_timeout_total` | Requests that timed out waiting for upstream |
+| `envoy_cluster_upstream_rq_retry_limit_exceeded_total` | Requests that exhausted retry attempts |
+| `envoy_cluster_upstream_rq_total` | Total upstream requests |
+| `envoy_cluster_upstream_rq_time_bucket` | Latency histogram (for P50/P95/P99 calculations) |
+| `envoy_cluster_upstream_rq_time_sum` | Sum of request durations |
+| `envoy_cluster_upstream_rq_time_count` | Count of requests |
+
+### Circuit breaker metrics
+
+
+
+Metrics tracking circuit breaker state and remaining capacity. Circuit breakers
+prevent cascading failures by limiting connections and concurrent requests to
+unhealthy upstreams. Two priority levels exist: `DEFAULT` for watch requests and
+`HIGH` for API requests.
+
+
+| Name | Description |
+|--------|-------------|
+| `envoy_cluster_circuit_breakers_default_cx_open` | `DEFAULT` priority connection circuit breaker open (gauge) |
+| `envoy_cluster_circuit_breakers_default_rq_open` | `DEFAULT` priority request circuit breaker open (gauge) |
+| `envoy_cluster_circuit_breakers_default_remaining_cx` | Available `DEFAULT` priority connections (gauge) |
+| `envoy_cluster_circuit_breakers_default_remaining_rq` | Available `DEFAULT` priority request slots (gauge) |
+| `envoy_cluster_circuit_breakers_high_cx_open` | `HIGH` priority connection circuit breaker open (gauge) |
+| `envoy_cluster_circuit_breakers_high_rq_open` | `HIGH` priority request circuit breaker open (gauge) |
+| `envoy_cluster_circuit_breakers_high_remaining_cx` | Available `HIGH` priority connections (gauge) |
+| `envoy_cluster_circuit_breakers_high_remaining_rq` | Available `HIGH` priority request slots (gauge) |
+
+### Downstream listener metrics
+
+Metrics tracking requests received from clients such as kubectl and API consumers.
+Use these metrics to monitor client connection patterns, overall request volume,
+and responses sent to external users.
+
+| Name | Description |
+|--------|-------------|
+| `envoy_listener_downstream_rq_xx_total` | HTTP status codes for responses sent to clients |
+| `envoy_listener_downstream_rq_total` | Total requests received from clients |
+| `envoy_listener_downstream_cx_total` | Total connections from clients |
+| `envoy_listener_downstream_cx_active` | Currently active client connections (gauge) |
+
+
+
+### HTTP connection manager metrics
+
+
+Metrics from Envoy's HTTP connection manager tracking end-to-end request
+processing. These metrics provide a comprehensive view of the HTTP request
+lifecycle including status codes and client-perceived latency.
+
+| Name | Description |
+|--------|-------------|
+| `envoy_http_downstream_rq_xx` | HTTP status codes (note: no `_total` suffix for this metric family) |
+| `envoy_http_downstream_rq_total` | Total HTTP requests received |
+| `envoy_http_downstream_rq_time_bucket` | Downstream request latency histogram |
+| `envoy_http_downstream_rq_time_sum` | Sum of downstream request durations |
+| `envoy_http_downstream_rq_time_count` | Count of downstream requests |
+
+[router-ref]: #router-ref
+[observability-documentation]: /spaces/howtos/observability
+[opentelemetry-collector]: https://opentelemetry.io/docs/collector/
+[opentelemetry-operator]: https://opentelemetry.io/docs/kubernetes/operator/
+[helm-chart-reference]: /reference/helm-reference
diff --git a/spaces_versioned_docs/version-v1.10/howtos/self-hosted/spaces-management.md b/spaces_versioned_docs/version-v1.10/howtos/self-hosted/spaces-management.md
new file mode 100644
index 000000000..3df61c306
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.10/howtos/self-hosted/spaces-management.md
@@ -0,0 +1,219 @@
+---
+title: Interacting with Disconnected Spaces
+sidebar_position: 10
+description: Common operations in Spaces
+---
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Spaces management procedures are consistent across versions.
+
+For version compatibility details, see the .
+:::
+
+## Spaces management
+
+### Create a Space
+
+To install an Upbound Space into a cluster, it's recommended you dedicate an entire Kubernetes cluster to the Space. You can use [up space init][up-space-init] to install an Upbound Space.
Below is an example:
+
+```bash
+up space init "v1.9.0"
+```
+
+:::tip
+For a full guide to get started with Spaces, read the [quickstart][quickstart] guide.
+:::
+
+You can also install the helm chart for Spaces directly. For a Spaces install to succeed, you must first install and configure some prerequisites. These include:
+
+- UXP
+- provider-helm and provider-kubernetes
+- cert-manager
+
+Furthermore, the Spaces chart requires a pull secret, which Upbound must provide to you.
+
+```bash
+helm -n upbound-system upgrade --install spaces \
+  oci://xpkg.upbound.io/spaces-artifacts/spaces \
+  --version "v1.9.0" \
+  --set "ingress.host=your-host.com" \
+  --set "clusterType=eks" \
+  --set "account=your-upbound-account" \
+  --wait
+```
+
+For a complete tutorial of the helm install, read one of the deployment guides for [AWS][aws], [Azure][azure], or [GCP][gcp], which cover the step-by-step process.
+
+### Upgrade a Space
+
+To upgrade a Space from one version to the next, use [up space upgrade][up-space-upgrade]. Spaces supports upgrading from version `x.N.*` to version `x.N+1.*`.
+
+```bash
+up space upgrade "v1.9.0"
+```
+
+You can also upgrade a Space by manually bumping the Helm chart version. Before
+upgrading, review the release notes for any breaking changes or
+special requirements:
+
+1. Review the release notes for the target version in the [Spaces Release Notes][spaces-release-notes]
+2. Upgrade the Space by updating the helm chart version:
+
+```bash
+helm -n upbound-system upgrade spaces \
+  oci://xpkg.upbound.io/spaces-artifacts/spaces \
+  --version "v1.9.0" \
+  --reuse-values \
+  --wait
+```
+
+For major version upgrades or configuration changes, extract your current values
+and adjust:
+
+```bash
+# Extract current values to a file
+helm -n upbound-system get values spaces > spaces-values.yaml
+
+# Upgrade with modified values
+helm -n upbound-system upgrade spaces \
+  oci://xpkg.upbound.io/spaces-artifacts/spaces \
+  --version "v1.9.0" \
+  -f spaces-values.yaml \
+  --wait
+```
+
+### Downgrade a Space
+
+To roll back a Space from one version to the previous, use [up space upgrade][up-space-upgrade-1]. Spaces supports downgrading from version `x.N.*` to version `x.N-1.*`.
+
+```bash
+up space upgrade --rollback
+```
+
+You can also downgrade a Space manually using Helm by specifying an earlier version:
+
+```bash
+helm -n upbound-system upgrade spaces \
+  oci://xpkg.upbound.io/spaces-artifacts/spaces \
+  --version "v1.8.0" \
+  --reuse-values \
+  --wait
+```
+
+When downgrading, make sure to:
+
+1. Check the [release notes][release-notes] for specific downgrade instructions
+2. Verify compatibility between the downgraded Space and any control planes
+3. Back up any critical data before proceeding
+
+### Uninstall a Space
+
+To uninstall a Space from a Kubernetes cluster, use [up space destroy][up-space-destroy]. A destroy operation uninstalls core components and orphans control planes and their associated resources.
+
+```bash
+up space destroy
+```
+
+## Control plane management
+
+You can manage control planes in a Space via the [up CLI][up-cli] or the Spaces-local Kubernetes API. When you install a Space, it defines a new API type, `kind: ControlPlane`, that you can use to create and manage control planes in the Space.
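+
+As a quick sanity check (assuming your kubeconfig points at the Spaces cluster), you can list the resources this API group exposes:
+
+```bash
+kubectl api-resources --api-group=spaces.upbound.io
+```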
+
+### Create a control plane
+
+To create a control plane in a Space using `up`, run the following:
+
+```bash
+up ctp create ctp1
+```
+
+You can also declare a new control plane like the example below and apply it to your Spaces cluster:
+
+```yaml
+apiVersion: spaces.upbound.io/v1beta1
+kind: ControlPlane
+metadata:
+  name: ctp1
+  namespace: default
+spec:
+  writeConnectionSecretToRef:
+    name: kubeconfig-ctp1
+    namespace: default
+```
+
+This manifest:
+
+- Creates a new control plane in the space called `ctp1`.
+- Publishes the kubeconfig for connecting to the control plane to a secret in the Spaces cluster, called `kubeconfig-ctp1`.
+
+### Connect to a control plane
+
+To connect to a control plane in a Space using `up`, run the following:
+
+```bash
+up ctp connect new-control-plane
+```
+
+The command changes your kubeconfig's current context to the control plane you specify. If you want to change your kubeconfig back to a previous context, run:
+
+```bash
+up ctp disconnect
+```
+
+If you configured your control plane to publish connection details, you can also access it this way. Once the control plane is ready, use the secret (containing connection details) to connect to the API server of your control plane. For example, for the `ctp1` control plane declared above:
+
+```bash
+kubectl get secret kubeconfig-ctp1 -n default -o jsonpath='{.data.kubeconfig}' | base64 -d > /tmp/ctp1-kubeconfig.yaml
+```
+
+Reference the kubeconfig whenever you want to interact directly with the API server of the control plane (vs the Space's API server):
+
+```bash
+kubectl get providers --kubeconfig=/tmp/ctp1-kubeconfig.yaml
+```
+
+### Configure a control plane
+
+Spaces offers a built-in feature that allows you to connect a control plane to a Git source. This experience is similar to running a control plane in [Upbound's SaaS environment][upbound-s-saas-environment]. Upbound recommends using the built-in Git integration to drive configuration of your control planes in a Space.
+
+Learn more in the [Spaces Git integration][spaces-git-integration] documentation.
+
+### List control planes
+
+To list all control planes in a Space using `up`, run the following:
+
+```bash
+up ctp list
+```
+
+Or you can use Kubernetes-style semantics to list the control planes:
+
+```bash
+kubectl get controlplanes
+```
+
+
+### Delete a control plane
+
+To delete a control plane in a Space using `up`, run the following:
+
+```bash
+up ctp delete ctp1
+```
+
+Or you can use Kubernetes-style semantics to delete the control plane:
+
+```bash
+kubectl delete controlplane ctp1
+```
+
+
+[up-space-init]: /reference/cli-reference
+[quickstart]: /
+[aws]: /spaces/howtos/self-hosted/self-hosted-spaces-deployment
+[azure]: /spaces/howtos/self-hosted/self-hosted-spaces-deployment
+[gcp]: /spaces/howtos/self-hosted/self-hosted-spaces-deployment
+[up-space-upgrade]: /reference/cli-reference
+[spaces-release-notes]: /reference/release-notes/spaces
+[up-space-upgrade-1]: /reference/cli-reference
+[release-notes]: /reference/release-notes/spaces
+[up-space-destroy]: /reference/cli-reference
+[up-cli]: /reference/cli-reference
+[upbound-s-saas-environment]: /spaces/howtos/self-hosted/spaces-management
+[spaces-git-integration]: /spaces/howtos/self-hosted/gitops
diff --git a/spaces_versioned_docs/version-v1.10/howtos/self-hosted/troubleshooting.md b/spaces_versioned_docs/version-v1.10/howtos/self-hosted/troubleshooting.md
new file mode 100644
index 000000000..8d1ca6517
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.10/howtos/self-hosted/troubleshooting.md
@@ -0,0 +1,132 @@
+---
+title: Troubleshooting
+sidebar_position: 100
+description: A guide for troubleshooting an issue that occurs in a Space
+---
+
+Use the guidance below to find solutions for issues you encounter when deploying and using an Upbound Space. These tips supplement the observability metrics discussed in the [Observability][observability] page.
+
+## General tips
+
+Most issues fall into two general categories:
+
+1. issues with the Spaces management plane
+2. issues on a control plane
+
+If your control plane doesn't reach a `Ready` state, it's indicative of the former. If your control plane is in a created and running state, but resources aren't reconciling, it's indicative of the latter.
+
+### Spaces component layout
+
+Run `kubectl get pods -A` against the cluster hosting a Space. You should see a variety of pods across several namespaces.
It should look something like this: + +```bash +NAMESPACE NAME READY STATUS RESTARTS AGE +cert-manager cert-manager-6d6769565c-mc5df 1/1 Running 0 25m +cert-manager cert-manager-cainjector-744bb89575-nw4fg 1/1 Running 0 25m +cert-manager cert-manager-webhook-759d6dcbf7-ps4mq 1/1 Running 0 25m +ingress-nginx ingress-nginx-controller-7f8ccfccc6-6szlp 1/1 Running 0 25m +kube-system coredns-5d78c9869d-4p477 1/1 Running 0 26m +kube-system coredns-5d78c9869d-pdxt6 1/1 Running 0 26m +kube-system etcd-kind-control-plane 1/1 Running 0 26m +kube-system kindnet-8s7pq 1/1 Running 0 26m +kube-system kube-apiserver-kind-control-plane 1/1 Running 0 26m +kube-system kube-controller-manager-kind-control-plane 1/1 Running 0 26m +kube-system kube-proxy-l68r8 1/1 Running 0 26m +kube-system kube-scheduler-kind-control-plane 1/1 Running 0 26m +local-path-storage local-path-provisioner-6bc4bddd6b-qsdjt 1/1 Running 0 26m +mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system coredns-5dc69d6447-f56rh-x-kube-system-x-vcluster 1/1 Running 0 21m +mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system crossplane-6b6d67bc66-6b8nx-x-upbound-system-x-vcluster 1/1 Running 0 20m +mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system crossplane-rbac-manager-78f6fc7cb4-pjkhc-x-upbound-s-12253c3c4e 1/1 Running 0 20m +mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system kube-state-metrics-7f8f4dcc5b-8p8c4 1/1 Running 0 22m +mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system mxp-gateway-68f546b9c8-xnz5j-x-upbound-system-x-vcluster 1/1 Running 0 20m +mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system mxp-ksm-config-54655667bb-hv9br 1/1 Running 0 22m +mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system mxp-readyz-5f7f97d967-b98bw 1/1 Running 0 22m +mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system otlp-collector-56d7d46c8d-g5sh5-x-upbound-system-x-vcluster 1/1 Running 0 20m +mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system vcluster-67c9fb8959-ppb2m 1/1 Running 0 22m +mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system vcluster-api-6bfbccc49d-ffgpj 1/1 Running 0 22m +mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system vcluster-controller-7cc6855656-8c46b 1/1 Running 0 22m +mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system vcluster-etcd-0 1/1 Running 0 22m +mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system vector-754b494b84-wljw4 1/1 Running 0 22m +mxp-system mxp-charts-chartmuseum-7587f77558-8tltb 1/1 Running 0 23m +upbound-system crossplane-b4dc7b4c9-6hjh5 1/1 Running 0 25m +upbound-system crossplane-contrib-provider-helm-ce18dd03e6e4-7945d8985-4gcwr 1/1 Running 0 24m +upbound-system crossplane-contrib-provider-kubernetes-1f1e32c1957d-577756gs2x4 1/1 Running 0 24m +upbound-system crossplane-rbac-manager-d8cb49cbc-gbvvf 1/1 Running 0 25m +upbound-system spaces-controller-6647677cf9-5zl5q 1/1 Running 0 24m +upbound-system spaces-router-bc78c96d7-kzts2 2/2 Running 0 24m +``` + +What you are seeing is: + +- Pods in the `upbound-system` namespace are components required to run the management plane of the Space. This includes the `spaces-controller`, `spaces-router`, and install of UXP. +- Pods in the `mxp-{GUID}-system` namespace are components that collectively power a control plane. Notable call outs include pod names that look like `vcluster-api-{GUID}` and `vcluster-controller-{GUID}`, which are integral components of a control plane. +- Pods in other notable namespaces, including `cert-manager` and `ingress-nginx`, are prerequisite components that support a Space's successful operation. 
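+
+To drill into a single control plane, you can scope the listing to its `mxp-{GUID}-system` namespace, using a GUID from the output above:
+
+```bash
+kubectl get pods -n mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system
+```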
+
+
+### Troubleshooting tips for the Spaces management plane
+
+Start by getting the status of all the pods in a Space:
+
+1. Make sure the current context of your kubeconfig points at the Kubernetes cluster hosting your Space
+2. Get the status of all the pods in the Space:
+```bash
+kubectl get pods -A
+```
+3. Scan the `Status` column to see if any of the pods report a status besides `Running`.
+4. Scan the `Restarts` column to see if any of the pods have restarted.
+5. If you notice a Status other than `Running` or see pods that restarted, you should investigate their events by running
+```bash
+kubectl describe pod <pod-name> -n <namespace>
+```
+
+Next, inspect the status of objects and releases:
+
+1. Make sure the current context of your kubeconfig points at the Kubernetes cluster hosting your Space
+2. Inspect the objects in your Space. If any are unhealthy, describe those objects to get the events:
+```bash
+kubectl get objects
+```
+3. Inspect the releases in your Space. If any are unhealthy, describe those releases to get the events:
+```bash
+kubectl get releases
+```
+
+### Troubleshooting tips for control planes in a Space
+
+General troubleshooting in a control plane starts by fetching the events of the control plane:
+
+1. Make sure the current context of your kubeconfig points at the Kubernetes cluster hosting your Space
+2. Run the following to fetch your control planes.
+```bash
+kubectl get ctp
+```
+3. Describe the control plane by providing its name, found in the preceding instruction.
+```bash
+kubectl describe controlplanes.spaces.upbound.io <control-plane-name>
+```
+
+## Issues
+
+
+### Your control plane is stuck in a 'creating' state
+
+#### Error: unknown field "ports" in io.k8s.api.networking.v1.NetworkPolicySpec
+
+This error is emitted by a Helm release named `control-plane-host-policies` attempting to be installed by the Spaces software. The full error is:
+
+_CannotCreateExternalResource failed to install release: unable to build kubernetes objects from release manifest: error validating "": error validating data: ValidationError(NetworkPolicy.spec): unknown field "ports" in io.k8s.api.networking.v1.NetworkPolicySpec_
+
+This error may be caused by running a Space on an earlier version of Kubernetes than is supported (`v1.26` or later). To resolve this issue, upgrade the host Kubernetes cluster to `v1.26` or later.
+
+### Your Spaces install fails
+
+#### Error: You tried to install a Space on a previous Crossplane installation
+
+If you try to install a Space on an existing cluster that previously had Crossplane or UXP on it, you may encounter errors. Due to how the Spaces installer tests for the presence of UXP, it may detect orphaned CRDs that weren't cleaned up by the previous uninstall of Crossplane. You may need to manually [remove old Crossplane CRDs][remove-old-crossplane-crds] for the installer to properly detect the UXP prerequisite.
+
+
+
+
+[observability]: /spaces/howtos/observability
+[remove-old-crossplane-crds]: https://docs.crossplane.io/latest/guides/uninstall-crossplane/
diff --git a/spaces_versioned_docs/version-v1.10/howtos/self-hosted/use-argo.md b/spaces_versioned_docs/version-v1.10/howtos/self-hosted/use-argo.md
new file mode 100644
index 000000000..d58f7db44
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.10/howtos/self-hosted/use-argo.md
@@ -0,0 +1,228 @@
+---
+title: Use ArgoCD Plugin
+sidebar_position: 15
+description: A guide for integrating Argo with control planes in a Space.
+aliases:
+  - /all-spaces/self-hosted-spaces/use-argo
+  - /deploy/disconnected-spaces/use-argo-flux
+  - /all-spaces/self-hosted-spaces/use-argo-flux
+  - /connect/use-argo
+---
+
+
+:::info API Version Information
+This guide covers the ArgoCD Plugin, a preview feature available in all supported versions (v1.9-v1.14+) for self-hosted deployments.
+
+For details on GitOps patterns and related features across versions, see [Automation and GitOps Overview](/spaces/howtos/automation-and-gitops/overview/).
+:::
+
+:::important
+This feature is in preview and is off by default. To enable it, set `features.alpha.argocdPlugin.enabled=true` when installing Spaces:
+
+```bash
+up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
+  ...
+  --set "features.alpha.argocdPlugin.enabled=true"
+```
+:::
+
+Spaces provides an optional plugin to assist with integrating a control plane in a Space with Argo CD. You must enable the plugin for the entire Space at Spaces install or upgrade time. The plugin's job is to propagate the connection details of each control plane in a Space to Argo CD. By default, Upbound stores these connection details in a Kubernetes secret named after the control plane. To run Argo CD across multiple namespaces, Upbound recommends enabling the `features.alpha.argocdPlugin.useUIDFormatForCTPSecrets` flag to use a UID-based format for secret names to avoid conflicts.
+
+:::tip
+For general guidance on integrating Upbound with GitOps flows, see [GitOps with Control Planes][gitops-with-control-planes].
+:::
+
+## On cluster Argo CD
+
+If you are running Argo CD on the same cluster as the Space, run the following to enable the plugin:
+
+
+
+
+
+```bash {hl_lines="3-5"}
+up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
+  --set "account=${UPBOUND_ACCOUNT}" \
+  --set "features.alpha.argocdPlugin.enabled=true" \
+  --set "features.alpha.argocdPlugin.useUIDFormatForCTPSecrets=true" \
+  --set "features.alpha.argocdPlugin.target.secretNamespace=argocd"
+```
+
+
+
+
+
+```bash {hl_lines="6-8"}
+helm -n upbound-system upgrade --install spaces \
+  oci://xpkg.upbound.io/spaces-artifacts/spaces \
+  --version "${SPACES_VERSION}" \
+  --set "ingress.host=${SPACES_ROUTER_HOST}" \
+  --set "account=${UPBOUND_ACCOUNT}" \
+  --set "features.alpha.argocdPlugin.enabled=true" \
+  --set "features.alpha.argocdPlugin.useUIDFormatForCTPSecrets=true" \
+  --set "features.alpha.argocdPlugin.target.secretNamespace=argocd" \
+  --wait
+```
+
+
+
+
+
+
+The important flags are:
+
+- `features.alpha.argocdPlugin.enabled=true`
+- `features.alpha.argocdPlugin.useUIDFormatForCTPSecrets=true`
+- `features.alpha.argocdPlugin.target.secretNamespace=argocd`
+
+The first flag enables the feature, the second switches secret names to the UID-based format, and the third indicates the namespace on the cluster where you installed Argo CD.
+
+Be sure to [configure Argo][configure-argo] after it's installed.
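+
+Once the plugin is running, you can spot-check that control plane connection details reached Argo CD. Assuming the plugin registers each control plane as an Argo CD cluster secret (which carries the standard cluster-secret label), list them with:
+
+```bash
+kubectl get secrets -n argocd -l argocd.argoproj.io/secret-type=cluster
+```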
+
+## External cluster Argo CD
+
+If you are running Argo CD on an external cluster from where you installed your Space, you need to provide some extra flags:
+
+
+
+
+
+```bash {hl_lines="6-8"}
+up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
+  --set "account=${UPBOUND_ACCOUNT}" \
+  --set "features.alpha.argocdPlugin.enabled=true" \
+  --set "features.alpha.argocdPlugin.useUIDFormatForCTPSecrets=true" \
+  --set "features.alpha.argocdPlugin.target.secretNamespace=argocd" \
+  --set "features.alpha.argocdPlugin.target.externalCluster.enabled=true" \
+  --set "features.alpha.argocdPlugin.target.externalCluster.secret.name=my-argo-cluster" \
+  --set "features.alpha.argocdPlugin.target.externalCluster.secret.key=kubeconfig"
+```
+
+
+
+
+
+```bash {hl_lines="9-11"}
+helm -n upbound-system upgrade --install spaces \
+  oci://xpkg.upbound.io/spaces-artifacts/spaces \
+  --version "${SPACES_VERSION}" \
+  --set "ingress.host=${SPACES_ROUTER_HOST}" \
+  --set "account=${UPBOUND_ACCOUNT}" \
+  --set "features.alpha.argocdPlugin.enabled=true" \
+  --set "features.alpha.argocdPlugin.useUIDFormatForCTPSecrets=true" \
+  --set "features.alpha.argocdPlugin.target.secretNamespace=argocd" \
+  --set "features.alpha.argocdPlugin.target.externalCluster.enabled=true" \
+  --set "features.alpha.argocdPlugin.target.externalCluster.secret.name=my-argo-cluster" \
+  --set "features.alpha.argocdPlugin.target.externalCluster.secret.key=kubeconfig" \
+  --wait
+```
+
+
+
+
+
+The extra flags are:
+
+- `features.alpha.argocdPlugin.target.externalCluster.enabled=true`
+- `features.alpha.argocdPlugin.target.externalCluster.secret.name=my-argo-cluster`
+- `features.alpha.argocdPlugin.target.externalCluster.secret.key=kubeconfig`
+
+These flags tell the plugin (running in Spaces) where your Argo CD instance is. After you've done this at install time, you also need to create a `Secret` on the Spaces cluster. This secret must contain a kubeconfig pointing to your Argo CD instance. The secret needs to be in the same namespace as the `spaces-controller`, which is `upbound-system`.
+
+Once you enable the plugin and configure it, the plugin automatically propagates connection details for your control planes to your Argo CD instance. You can then target the control plane and use Argo to sync Crossplane-related objects to it.
+
+Be sure to [configure Argo][configure-argo-1] after it's installed.
+
+## Configure Argo
+
+Argo's default configuration causes it to try to query for resource kinds that don't exist in control planes. You should configure Argo's [general configmap][general-configmap] to include the resource group/kinds which make sense in the context of control planes. For example, the concept of `nodes` isn't exposed in control planes.
+
+To configure Argo CD, connect to the cluster where you've installed it and edit the configmap:
+
+```bash
+kubectl edit configmap argocd-cm -n argocd
+```
+
+Adjust the resource inclusions and exclusions under the `data` field of the configmap:
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: argocd-cm
+  namespace: argocd
+data:
+  resource.exclusions: |
+    - apiGroups:
+      - "*"
+      kinds:
+      - "*"
+      clusters:
+      - "*"
+  resource.inclusions: |
+    - apiGroups:
+      - "*"
+      kinds:
+      - Provider
+      - Configuration
+      clusters:
+      - "*"
+```
+
+The preceding configuration causes Argo to exclude syncing **all** resource group/kinds, except Crossplane `providers` and `configurations`, for **all** control planes. You're encouraged to adjust the `resource.inclusions` to include the types that make sense for your control plane, such as an `XRD` you've built with Crossplane. You're also encouraged to customize the `clusters` pattern to selectively apply these exclusions/inclusions to control planes (for example, `control-plane-prod-*`).
+
+## Control plane connection secrets
+
+To deploy control planes through Argo CD, you need to configure the `writeConnectionSecretToRef` field in your control plane spec. This field specifies where to store the control plane's `kubeconfig` and makes connection details available to Argo CD.
+
+### Basic configuration
+
+In your control plane manifest, include the `writeConnectionSecretToRef` field:
+
+```yaml
+apiVersion: spaces.upbound.io/v1beta1
+kind: ControlPlane
+metadata:
+  name: my-control-plane
+  namespace: my-control-plane-group
+spec:
+  writeConnectionSecretToRef:
+    name: kubeconfig-my-control-plane
+    namespace: my-control-plane-group
+  # ... other control plane configuration
+```
+
+### Parameters
+
+The `writeConnectionSecretToRef` field requires two parameters:
+
+- `name`: A unique name for the secret containing the kubeconfig (for example, `kubeconfig-my-control-plane`)
+- `namespace`: The Kubernetes namespace where you store the secret, which must match the metadata namespace. The system copies it into the `argocd` namespace when you set the `features.alpha.argocdPlugin.target.secretNamespace=argocd` configuration parameter.
+
+Control plane labels automatically propagate to the connection secret, which allows you to use label selectors in Argo CD for automated discovery and management.
+
+This configuration enables Argo CD to automatically discover and manage resources on your control planes.
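+
+To illustrate how this comes together, here's a hypothetical Argo CD `Application` that syncs a Git repository to a control plane by referencing its registered cluster secret name. The repository URL and names are assumptions for illustration:
+
+```yaml
+apiVersion: argoproj.io/v1alpha1
+kind: Application
+metadata:
+  name: my-control-plane-config
+  namespace: argocd
+spec:
+  project: default
+  source:
+    # Assumption: a Git repo holding Crossplane configuration for the control plane.
+    repoURL: https://github.com/example-org/control-plane-config.git
+    targetRevision: main
+    path: .
+  destination:
+    # Target the control plane by the name of its registered cluster secret.
+    name: my-control-plane
+  syncPolicy:
+    automated:
+      prune: true
+      selfHeal: true
+```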
+
+
+[gitops-with-control-planes]: /spaces/howtos/cloud-spaces/gitops
+[configure-argo]: #configure-argo
+[configure-argo-1]: #configure-argo
+[general-configmap]: https://argo-cd.readthedocs.io/en/stable/operator-manual/argocd-cm-yaml/
diff --git a/spaces_versioned_docs/version-v1.10/howtos/self-hosted/workload-id/_category_.json b/spaces_versioned_docs/version-v1.10/howtos/self-hosted/workload-id/_category_.json
new file mode 100644
index 000000000..c5ecc93f6
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.10/howtos/self-hosted/workload-id/_category_.json
@@ -0,0 +1,11 @@
+{
+  "label": "Workload Identity Configuration",
+  "position": 2,
+  "collapsed": true,
+  "customProps": {
+    "plan": "business"
+  }
+}
diff --git a/spaces_versioned_docs/version-v1.10/howtos/self-hosted/workload-id/backup-restore-config.md b/spaces_versioned_docs/version-v1.10/howtos/self-hosted/workload-id/backup-restore-config.md
new file mode 100644
index 000000000..935ca69ec
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.10/howtos/self-hosted/workload-id/backup-restore-config.md
@@ -0,0 +1,384 @@
+---
+title: Backup and Restore Workload ID
+weight: 1
+description: Configure workload identity for Spaces Backup and Restore
+---
+import GlobalLanguageSelector, { CodeBlock } from '@site/src/components/GlobalLanguageSelector';
+
+
+
+
+
+Workload-identity authentication lets you use access policies to grant temporary
+AWS credentials to your Kubernetes pods through a service account. Assigning IAM roles and service accounts allows the pod to assume the IAM role dynamically and much more securely than with static credentials.
+
+This guide walks you through creating an IAM trust role policy and applying it
+to your EKS cluster to handle backup and restore storage.
+
+
+
+
+
+Workload-identity authentication lets you use access policies to grant your
+self-hosted Space cluster access to your cloud providers. Workload identity
+authentication grants temporary Azure credentials to your Kubernetes pod based on
+a service account. Assigning managed identities and service accounts allows the pod to
+authenticate with Azure resources dynamically and much more securely than with static credentials.
+
+This guide walks you through creating a managed identity and federated credential for your AKS
+cluster to handle backup and restore storage.
+
+
+
+
+
+Workload-identity authentication lets you use access policies to grant your
+self-hosted Space cluster access to your cloud providers. Workload identity
+authentication grants temporary GCP credentials to your Kubernetes pod based on
+a service account. Assigning IAM roles and service accounts allows the pod to
+access cloud resources dynamically and much more securely than with static credentials.
+
+This guide walks you through configuring workload identity for your GKE
+cluster to handle backup and restore storage.
+
+
+
+## Prerequisites
+
+
+To set up a workload identity, you'll need:
+
+
+- A self-hosted Space cluster
+- Administrator access in your cloud provider
+- Helm and `kubectl`
+
+## About the backup and restore component
+
+The `mxp-controller` component handles backup and restore workloads. It needs to
+access your cloud storage to store and retrieve backups. By default, this
+component runs in each control plane's host namespace.
+
+## Configuration
+
+
+
+Upbound supports workload-identity configurations in AWS with IAM Roles for
+Service Accounts and EKS pod identity association.
+
+#### IAM Roles for Service Accounts (IRSA)
+
+With IRSA, you can associate a Kubernetes service account in an EKS cluster with
+an AWS IAM role. Upbound authenticates workloads with that service account as
+the IAM role using temporary credentials instead of static role credentials.
+IRSA relies on the AWS STS `AssumeRoleWithWebIdentity` operation to exchange OIDC ID tokens for
+the IAM role's temporary credentials. IRSA uses the `eks.amazonaws.com/role-arn`
+annotation to link the service account and the IAM role.
+
+First, create an IAM role with appropriate permissions to access your S3 bucket:
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Action": [
+        "s3:GetObject",
+        "s3:PutObject",
+        "s3:ListBucket",
+        "s3:DeleteObject"
+      ],
+      "Resource": [
+        "arn:aws:s3:::${YOUR_BACKUP_BUCKET}",
+        "arn:aws:s3:::${YOUR_BACKUP_BUCKET}/*"
+      ]
+    }
+  ]
+}
+```
+
+Next, ensure your EKS cluster has an OIDC identity provider:
+
+```shell
+eksctl utils associate-iam-oidc-provider --cluster ${YOUR_CLUSTER_NAME} --approve
+```
+
+Configure the IAM role trust policy with the namespace for each
+provisioned control plane.
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Principal": {
+        "Federated": "arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:oidc-provider/${YOUR_OIDC_PROVIDER}"
+      },
+      "Action": "sts:AssumeRoleWithWebIdentity",
+      "Condition": {
+        "StringEquals": {
+          "${YOUR_OIDC_PROVIDER}:aud": "sts.amazonaws.com",
+          "${YOUR_OIDC_PROVIDER}:sub": "system:serviceaccount:${YOUR_NAMESPACE}:mxp-controller"
+        }
+      }
+    }
+  ]
+}
+```
+
+In your control plane, pass the `--set` flag with the Spaces Helm chart
+parameters for the Backup and Restore component:
+
+```shell
+--set controlPlanes.mxpController.serviceAccount.annotations."eks\.amazonaws\.com/role-arn"="${SPACES_BR_IAM_ROLE_ARN}"
+```
+
+This command allows the backup and restore component to authenticate with your
+dedicated IAM role in your EKS cluster environment.
+
+#### EKS pod identities
+
+Upbound also supports EKS Pod Identity configuration. EKS Pod Identities allow
+you to create a pod identity association with your Kubernetes namespace, a
+service account, and an IAM role, which allows the EKS control plane to
+automatically handle the credential exchange.
+
+First, create an IAM role with appropriate permissions to access your S3 bucket:
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Action": [
+        "s3:GetObject",
+        "s3:PutObject",
+        "s3:ListBucket",
+        "s3:DeleteObject"
+      ],
+      "Resource": [
+        "arn:aws:s3:::${YOUR_BACKUP_BUCKET}",
+        "arn:aws:s3:::${YOUR_BACKUP_BUCKET}/*"
+      ]
+    }
+  ]
+}
+```
+
+When you install or upgrade your Space with Helm, add the backup/restore values:
+
+```shell
+helm upgrade spaces spaces-helm-chart \
+  --set "billing.enabled=true" \
+  --set "backup.enabled=true" \
+  --set "backup.storage.provider=aws" \
+  --set "backup.storage.aws.region=${YOUR_AWS_REGION}" \
+  --set "backup.storage.aws.bucket=${YOUR_BACKUP_BUCKET}"
+```
+
+After Upbound provisions your control plane, create a Pod Identity Association
+with the `aws` CLI:
+
+```shell
+aws eks create-pod-identity-association \
+  --cluster-name ${YOUR_CLUSTER_NAME} \
+  --namespace ${YOUR_CONTROL_PLANE_NAMESPACE} \
+  --service-account mxp-controller \
+  --role-arn arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:role/backup-restore-role
+```
+
+
+
+
+Upbound supports workload-identity configurations in Azure with Azure's built-in
+workload identity feature.
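+
+Before changing anything, you can check whether the cluster already has the required features turned on (a sketch; the query paths assume a recent `az` CLI):
+
+```shell
+# Both values should read "true" once the cluster is prepared
+az aks show --resource-group ${YOUR_RESOURCE_GROUP} --name ${YOUR_AKS_CLUSTER_NAME} \
+  --query "{oidcIssuer: oidcIssuerProfile.enabled, workloadIdentity: securityProfile.workloadIdentity.enabled}" -o table
+```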
+
+#### Prepare your cluster
+
+First, enable the OIDC issuer and workload identity in your AKS cluster:
+
+```shell
+az aks update --resource-group ${YOUR_RESOURCE_GROUP} --name ${YOUR_AKS_CLUSTER_NAME} --enable-oidc-issuer --enable-workload-identity
+```
+
+Next, find and store the OIDC issuer URL as an environment variable:
+
+```shell
+export AKS_OIDC_ISSUER="$(az aks show --name ${YOUR_AKS_CLUSTER_NAME} --resource-group ${YOUR_RESOURCE_GROUP} --query "oidcIssuerProfile.issuerUrl" --output tsv)"
+```
+
+#### Create a User-Assigned Managed Identity
+
+Create a new managed identity to associate with the backup and restore component:
+
+```shell
+az identity create --name backup-restore-identity --resource-group ${YOUR_RESOURCE_GROUP} --location ${YOUR_LOCATION}
+```
+
+Retrieve the client ID and store it as an environment variable:
+
+```shell
+export USER_ASSIGNED_CLIENT_ID="$(az identity show --name backup-restore-identity --resource-group ${YOUR_RESOURCE_GROUP} --query clientId -otsv)"
+```
+
+Grant the managed identity you created access to your Azure Storage account:
+
+```shell
+az role assignment create \
+  --role "Storage Blob Data Contributor" \
+  --assignee ${USER_ASSIGNED_CLIENT_ID} \
+  --scope /subscriptions/${YOUR_SUBSCRIPTION_ID}/resourceGroups/${YOUR_RESOURCE_GROUP}/providers/Microsoft.Storage/storageAccounts/${YOUR_STORAGE_ACCOUNT}
+```
+
+#### Apply the managed identity role
+
+In your control plane, pass the `--set` flag with the Spaces Helm chart
+parameters for the backup and restore component:
+
+```shell
+--set controlPlanes.mxpController.serviceAccount.annotations."azure\.workload\.identity/client-id"="${USER_ASSIGNED_CLIENT_ID}"
+--set controlPlanes.mxpController.pod.customLabels."azure\.workload\.identity/use"="true"
+```
+
+#### Create a Federated Identity credential
+
+```shell
+az identity federated-credential create \
+  --name backup-restore-federated-identity \
+  --identity-name backup-restore-identity \
+  --resource-group ${YOUR_RESOURCE_GROUP} \
+  --issuer ${AKS_OIDC_ISSUER} \
+  --subject system:serviceaccount:${YOUR_CONTROL_PLANE_NAMESPACE}:mxp-controller
+```
+
+
+
+
+Upbound supports workload-identity configurations in GCP with IAM principal
+identifiers and service account impersonation.
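+
+You can first check whether the cluster already has a workload pool configured (a quick check, assuming an authenticated `gcloud` CLI); an empty result means Workload Identity Federation isn't enabled yet:
+
+```shell
+# Prints the workload pool, for example: my-project.svc.id.goog
+gcloud container clusters describe ${YOUR_CLUSTER_NAME} --region=${YOUR_REGION} \
+  --format="value(workloadIdentityConfig.workloadPool)"
+```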
+ +#### Prepare your cluster + +First, enable Workload Identity Federation on your GKE cluster: + +```shell +gcloud container clusters update ${YOUR_CLUSTER_NAME} \ + --workload-pool=${YOUR_PROJECT_ID}.svc.id.goog \ + --region=${YOUR_REGION} +``` + +#### Create a Google Service Account + +Create a service account for the backup and restore component: + +```shell +gcloud iam service-accounts create backup-restore-sa \ + --display-name "Backup Restore Service Account" \ + --project ${YOUR_PROJECT_ID} +``` + +Grant the service account access to your Google Cloud Storage bucket: + +```shell +gcloud projects add-iam-policy-binding ${YOUR_PROJECT_ID} \ + --member "serviceAccount:backup-restore-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com" \ + --role "roles/storage.objectAdmin" +``` + +#### Configure Workload Identity + +Create an IAM binding to grant the Kubernetes service account access to the Google service account: + +```shell +gcloud iam service-accounts add-iam-policy-binding \ + backup-restore-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com \ + --role roles/iam.workloadIdentityUser \ + --member "serviceAccount:${YOUR_PROJECT_ID}.svc.id.goog[${YOUR_CONTROL_PLANE_NAMESPACE}/mxp-controller]" +``` + +#### Apply the service account configuration + +In your control plane, pass the `--set` flag with the Spaces Helm chart +parameters for the backup and restore component: + +```shell +--set controlPlanes.mxpController.serviceAccount.annotations."iam\.gke\.io/gcp-service-account"="backup-restore-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com" +``` + + + +## Verify your configuration + +After you apply the configuration use `kubectl` to verify the service account +has the correct annotation: + +```shell +kubectl get serviceaccount mxp-controller -n ${YOUR_CONTROL_PLANE_NAMESPACE} -o yaml +``` + +Verify the `mxp-controller` pod is running: + +```shell +kubectl get pods -n ${YOUR_CONTROL_PLANE_NAMESPACE} | grep mxp-controller +``` + +## Restart workload + +You must manually restart a workload's pod when you add the workload identity annotations to the running pod's service account. + + + +This restart enables the EKS pod identity webhook to inject the necessary +environment for using IRSA. + + + + + +This restart enables the workload identity webhook to inject the necessary +environment for using Azure workload identity. + + + + + +This restart enables the workload identity webhook to inject the necessary +environment for using GCP workload identity. + + + +```shell +kubectl rollout restart deployment mxp-controller -n ${YOUR_CONTROL_PLANE_NAMESPACE} +``` + +## Use cases + + +Configuring backup and restore with workload identity eliminates the need for +static credentials in your cluster and the overhead of credential rotation. +These benefits are helpful in: + +* Disaster recovery scenarios +* Control plane migration +* Compliance requirements +* Rollbacks after unsuccessful upgrades + +## Next steps + +Now that you have a workload identity configured for the backup and restore +component, visit the [Backup Configuration][backup-restore-guide] documentation. 
+
+Other workload identity guides are:
+* [Billing][billing]
+* [Shared Secrets][secrets]
+
+[backup-restore-guide]: /spaces/howtos/backup-and-restore
+[billing]: /spaces/howtos/self-hosted/workload-id/billing-config
+[secrets]: /spaces/howtos/self-hosted/workload-id/eso-config
diff --git a/spaces_versioned_docs/version-v1.10/howtos/self-hosted/workload-id/billing-config.md b/spaces_versioned_docs/version-v1.10/howtos/self-hosted/workload-id/billing-config.md
new file mode 100644
index 000000000..323a6122f
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.10/howtos/self-hosted/workload-id/billing-config.md
@@ -0,0 +1,454 @@
+---
+title: Billing Workload ID
+weight: 1
+description: Configure workload identity for Spaces Billing
+---
+import GlobalLanguageSelector, { CodeBlock } from '@site/src/components/GlobalLanguageSelector';
+
+
+
+
+
+
+Workload-identity authentication lets you use access policies to grant your
+self-hosted Space cluster access to your cloud providers. Workload identity
+authentication grants temporary AWS credentials to your Kubernetes pod based on
+a service account. Assigning IAM roles and service accounts allows the pod to
+assume the IAM role dynamically and much more securely than static credentials.
+
+This guide walks you through creating an IAM trust role policy and applying it to your EKS
+cluster for billing in your Space cluster.
+
+
+
+
+
+Workload-identity authentication lets you use access policies to grant your
+self-hosted Space cluster access to your cloud providers. Workload identity
+authentication grants temporary Azure credentials to your Kubernetes pod based on
+a service account. Assigning managed identities and service accounts allows the pod to
+authenticate with Azure resources dynamically and much more securely than static credentials.
+
+This guide walks you through creating a managed identity and federated credential for your AKS
+cluster for billing in your Space cluster.
+
+
+
+
+
+Workload-identity authentication lets you use access policies to grant your
+self-hosted Space cluster access to your cloud providers. Workload identity
+authentication grants temporary GCP credentials to your Kubernetes pod based on
+a service account. Assigning IAM roles and service accounts allows the pod to
+access cloud resources dynamically and much more securely than static
+credentials.
+
+This guide walks you through configuring workload identity for your GKE
+cluster's billing component.
+
+
+
+## Prerequisites
+
+
+To set up a workload-identity, you'll need:
+
+
+- A self-hosted Space cluster
+- Administrator access in your cloud provider
+- Helm and `kubectl`
+
+## About the billing component
+
+The `vector.dev` component handles billing metrics collection in Spaces. It
+stores account data in your cloud storage. By default, this component runs in
+each control plane's host namespace.
+
+## Configuration
+
+
+
+Upbound supports workload-identity configurations in AWS with IAM Roles for
+Service Accounts and EKS pod identity association.
+
+#### IAM Roles for Service Accounts (IRSA)
+
+With IRSA, you can associate a Kubernetes service account in an EKS cluster with
+an AWS IAM role. Upbound authenticates workloads with that service account as
+the IAM role using temporary credentials instead of static role credentials.
+IRSA relies on the AWS STS `AssumeRoleWithWebIdentity` operation to exchange OIDC ID tokens for
+the IAM role's temporary credentials. IRSA uses the `eks.amazonaws.com/role-arn`
+annotation to link the service account and the IAM role.
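+
+The snippets below reference your AWS account ID and the cluster's OIDC provider. One way to capture both as environment variables (a sketch, assuming a configured `aws` CLI; the variable names are only the placeholders used in this guide):
+
+```shell
+export YOUR_AWS_ACCOUNT_ID="$(aws sts get-caller-identity --query Account --output text)"
+# Strip the https:// scheme to get the provider path IAM expects
+export YOUR_OIDC_PROVIDER="$(aws eks describe-cluster --name ${YOUR_CLUSTER_NAME} \
+  --query "cluster.identity.oidc.issuer" --output text | sed 's|^https://||')"
+```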
+
+**Create an IAM role and trust policy**
+
+First, create an IAM role with appropriate permissions to access your S3 bucket:
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Action": [
+        "s3:GetObject",
+        "s3:PutObject",
+        "s3:ListBucket",
+        "s3:DeleteObject"
+      ],
+      "Resource": [
+        "arn:aws:s3:::${YOUR_BILLING_BUCKET}",
+        "arn:aws:s3:::${YOUR_BILLING_BUCKET}/*"
+      ]
+    }
+  ]
+}
+```
+
+You must configure the IAM role trust policy with the exact match for each
+provisioned control plane. An example of a trust policy for a single control
+plane is below:
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Principal": {
+        "Federated": "arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:oidc-provider/${YOUR_OIDC_PROVIDER}"
+      },
+      "Action": "sts:AssumeRoleWithWebIdentity",
+      "Condition": {
+        "StringEquals": {
+          "${YOUR_OIDC_PROVIDER}:aud": "sts.amazonaws.com",
+          "${YOUR_OIDC_PROVIDER}:sub": "system:serviceaccount:${YOUR_NAMESPACE}:vector"
+        }
+      }
+    }
+  ]
+}
+```
+
+**Configure the EKS OIDC provider**
+
+Next, ensure your EKS cluster has an OIDC identity provider:
+
+```shell
+eksctl utils associate-iam-oidc-provider --cluster ${YOUR_CLUSTER_NAME} --approve
+```
+
+**Apply the IAM role**
+
+In your control plane, pass the `--set` flag with the Spaces Helm chart
+parameters for the Billing component:
+
+```shell
+--set "billing.enabled=true"
+--set "billing.storage.provider=aws"
+--set "billing.storage.aws.region=${YOUR_AWS_REGION}"
+--set "billing.storage.aws.bucket=${YOUR_BILLING_BUCKET}"
+--set "billing.storage.secretRef.name="
+--set controlPlanes.vector.serviceAccount.customAnnotations."eks\.amazonaws\.com/role-arn"="arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:role/${YOUR_BILLING_ROLE_NAME}"
+```
+
+:::important
+You **must** set the `billing.storage.secretRef.name` to an empty string to
+enable workload identity for the billing component.
+:::
+
+#### EKS pod identities
+
+Upbound also supports EKS Pod Identity configuration. EKS Pod Identities allow
+you to create a pod identity association with your Kubernetes namespace, a
+service account, and an IAM role, which allows the EKS control plane to
+automatically handle the credential exchange.
+
+**Create an IAM role**
+
+First, create an IAM role with appropriate permissions to access your S3 bucket:
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Action": [
+        "s3:GetObject",
+        "s3:PutObject",
+        "s3:ListBucket"
+      ],
+      "Resource": [
+        "arn:aws:s3:::${YOUR_BILLING_BUCKET}",
+        "arn:aws:s3:::${YOUR_BILLING_BUCKET}/*"
+      ]
+    }
+  ]
+}
+```
+
+**Configure your Space with Helm**
+
+When you install or upgrade your Space with Helm, add the billing values:
+
+```shell
+helm upgrade spaces spaces-helm-chart \
+  --set "billing.enabled=true" \
+  --set "billing.storage.provider=aws" \
+  --set "billing.storage.aws.region=${YOUR_AWS_REGION}" \
+  --set "billing.storage.aws.bucket=${YOUR_BILLING_BUCKET}" \
+  --set "billing.storage.secretRef.name="
+```
+
+**Create a Pod Identity Association**
+
+After Upbound provisions your control plane, create a Pod Identity Association
+with the `aws` CLI:
+
+```shell
+aws eks create-pod-identity-association \
+  --cluster-name ${YOUR_CLUSTER_NAME} \
+  --namespace ${YOUR_CONTROL_PLANE_NAMESPACE} \
+  --service-account vector \
+  --role-arn arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:role/${YOUR_BILLING_ROLE_NAME}
+```
+
+
+
+
+
+Upbound supports workload-identity configurations in Azure with Azure's built-in
+workload identity feature.
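+
+A later step scopes a role assignment to your subscription. You can capture the subscription ID up front (a small convenience, assuming you're logged in with `az`; the variable name is only a placeholder):
+
+```shell
+export YOUR_SUBSCRIPTION_ID="$(az account show --query id -o tsv)"
+```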
+
+First, enable the OIDC issuer and workload identity in your AKS cluster:
+
+```shell
+az aks update --resource-group ${YOUR_RESOURCE_GROUP} --name ${YOUR_AKS_CLUSTER_NAME} --enable-oidc-issuer --enable-workload-identity
+```
+
+Next, find and store the OIDC issuer URL as an environment variable:
+
+```shell
+export AKS_OIDC_ISSUER="$(az aks show --name ${YOUR_AKS_CLUSTER_NAME} --resource-group ${YOUR_RESOURCE_GROUP} --query "oidcIssuerProfile.issuerUrl" --output tsv)"
+```
+
+Create a new managed identity to associate with the billing component:
+
+```shell
+az identity create --name billing-identity --resource-group ${YOUR_RESOURCE_GROUP} --location ${YOUR_LOCATION}
+```
+
+Retrieve the client ID and store it as an environment variable:
+
+```shell
+export USER_ASSIGNED_CLIENT_ID="$(az identity show --name billing-identity --resource-group ${YOUR_RESOURCE_GROUP} --query clientId -otsv)"
+```
+
+Grant the managed identity you created access to your Azure Storage account:
+
+```shell
+az role assignment create --role "Storage Blob Data Contributor" --assignee $USER_ASSIGNED_CLIENT_ID --scope /subscriptions/${YOUR_SUBSCRIPTION_ID}/resourceGroups/${YOUR_RESOURCE_GROUP}/providers/Microsoft.Storage/storageAccounts/${YOUR_STORAGE_ACCOUNT}
+```
+
+In your control plane, pass the `--set` flag with the Spaces Helm chart
+parameters for the billing component:
+
+```shell
+--set "billing.enabled=true"
+--set "billing.storage.provider=azure"
+--set "billing.storage.azure.storageAccount=${SPACES_BILLING_STORAGE_ACCOUNT}"
+--set "billing.storage.azure.container=${SPACES_BILLING_STORAGE_CONTAINER}"
+--set "billing.storage.secretRef.name="
+--set controlPlanes.vector.serviceAccount.customAnnotations."azure\.workload\.identity/client-id"="${USER_ASSIGNED_CLIENT_ID}"
+--set controlPlanes.vector.pod.customLabels."azure\.workload\.identity/use"="true"
+```
+
+Create a federated credential to establish trust between the managed identity
+and your AKS OIDC provider:
+
+```shell
+az identity federated-credential create \
+  --name billing-federated-identity \
+  --identity-name billing-identity \
+  --resource-group ${YOUR_RESOURCE_GROUP} \
+  --issuer ${AKS_OIDC_ISSUER} \
+  --subject system:serviceaccount:${YOUR_CONTROL_PLANE_NAMESPACE}:vector
+```
+
+
+
+
+
+Upbound supports workload-identity configurations in GCP with IAM principal
+identifiers or service account impersonation.
+
+#### IAM principal identifiers
+
+IAM principal identifiers allow you to grant permissions directly to
+Kubernetes service accounts without additional annotation. Upbound recommends
+this approach for ease-of-use and flexibility.
+
+First, enable Workload Identity Federation on your GKE cluster:
+
+```shell
+gcloud container clusters update ${YOUR_CLUSTER_NAME} \
+  --workload-pool=${YOUR_PROJECT_ID}.svc.id.goog \
+  --region=${YOUR_REGION}
+```
+
+Next, configure your Spaces installation with the Spaces Helm chart parameters:
+
+```shell
+--set "billing.enabled=true"
+--set "billing.storage.provider=gcp"
+--set "billing.storage.gcp.bucket=${YOUR_BILLING_BUCKET}"
+--set "billing.storage.secretRef.name="
+```
+
+:::important
+You **must** set the `billing.storage.secretRef.name` to an empty string to
+enable workload identity for the billing component.
+:::
+
+Grant the necessary permissions to your Kubernetes service account:
+
+```shell
+gcloud projects add-iam-policy-binding ${YOUR_PROJECT_ID} \
+  --member="principalSet://iam.googleapis.com/projects/${YOUR_PROJECT_NUMBER}/locations/global/workloadIdentityPools/${YOUR_PROJECT_ID}.svc.id.goog/attribute.kubernetes_namespace/${YOUR_CONTROL_PLANE_NAMESPACE}/attribute.kubernetes_service_account/vector" \
+  --role="roles/storage.objectAdmin"
+```
+
+Enable uniform bucket-level access on your storage bucket:
+
+```shell
+gcloud storage buckets update gs://${YOUR_BILLING_BUCKET} --uniform-bucket-level-access
+```
+
+#### Service account impersonation
+
+Service account impersonation allows you to link a Kubernetes service account to
+a GCP service account. The Kubernetes service account assumes the permissions of
+the GCP service account you specify.
+
+Enable workload identity federation on your GKE cluster:
+
+```shell
+gcloud container clusters update ${YOUR_CLUSTER_NAME} \
+  --workload-pool=${YOUR_PROJECT_ID}.svc.id.goog \
+  --region=${YOUR_REGION}
+```
+
+Next, create a dedicated service account for your billing operations:
+
+```shell
+gcloud iam service-accounts create billing-sa \
+  --project=${YOUR_PROJECT_ID}
+```
+
+Grant storage permissions to the service account you created:
+
+```shell
+gcloud projects add-iam-policy-binding ${YOUR_PROJECT_ID} \
+  --member="serviceAccount:billing-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com" \
+  --role="roles/storage.objectAdmin"
+```
+
+Link the Kubernetes service account to the GCP service account:
+
+```shell
+gcloud iam service-accounts add-iam-policy-binding \
+  billing-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com \
+  --role="roles/iam.workloadIdentityUser" \
+  --member="serviceAccount:${YOUR_PROJECT_ID}.svc.id.goog[${YOUR_CONTROL_PLANE_NAMESPACE}/vector]"
+```
+
+In your control plane, pass the `--set` flag with the Spaces Helm chart
+parameters for the billing component:
+
+```shell
+--set "billing.enabled=true"
+--set "billing.storage.provider=gcp"
+--set "billing.storage.gcp.bucket=${YOUR_BILLING_BUCKET}"
+--set "billing.storage.secretRef.name="
+--set controlPlanes.vector.serviceAccount.customAnnotations."iam\.gke\.io/gcp-service-account"="billing-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com"
+```
+
+
+
+## Verify your configuration
+
+After you apply the configuration, use `kubectl` to verify the service account
+has the correct annotation:
+
+```shell
+kubectl get serviceaccount vector -n ${YOUR_CONTROL_PLANE_NAMESPACE} -o yaml
+```
+
+Verify the `vector` pod is running:
+
+```shell
+kubectl get pods -n ${YOUR_CONTROL_PLANE_NAMESPACE} | grep vector
+```
+
+## Restart workload
+
+
+
+You must manually restart a workload's pod when you add the
+`eks.amazonaws.com/role-arn` annotation to the running pod's service
+account.
+
+This restart enables the EKS pod identity webhook to inject the necessary
+environment for using IRSA.
+
+
+
+
+
+You must manually restart a workload's pod when you add the workload identity annotations to the running pod's service account.
+
+This restart enables the workload identity webhook to inject the necessary
+environment for using Azure workload identity.
+
+
+
+
+
+GCP workload identity doesn't require pod restarts after configuration changes.
+If you do need to restart the workload, use the `kubectl` command to force the
+component restart:
+
+
+
+```shell
+kubectl rollout restart deployment vector -n ${YOUR_CONTROL_PLANE_NAMESPACE}
+```
+
+
+## Use cases
+
+
+Using workload identity authentication for billing eliminates the need for static
+credentials in your cluster as well as the overhead of credential rotation.
+These benefits are helpful in:
+
+* Resource usage tracking across teams/projects
+* Cost allocation for multi-tenant environments
+* Financial auditing requirements
+* Capacity billing and resource optimization
+* Automated billing workflows
+
+## Next steps
+
+Now that you have workload identity configured for the billing component, visit
+the [Billing guide][billing-guide] for more information.
+
+Other workload identity guides are:
+* [Backup and restore][backuprestore]
+* [Shared Secrets][secrets]
+
+[billing-guide]: /spaces/howtos/self-hosted/billing
+[backuprestore]: /spaces/howtos/self-hosted/workload-id/backup-restore-config
+[secrets]: /spaces/howtos/self-hosted/workload-id/eso-config
diff --git a/spaces_versioned_docs/version-v1.10/howtos/self-hosted/workload-id/eso-config.md b/spaces_versioned_docs/version-v1.10/howtos/self-hosted/workload-id/eso-config.md
new file mode 100644
index 000000000..c1418c171
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.10/howtos/self-hosted/workload-id/eso-config.md
@@ -0,0 +1,503 @@
+---
+title: Shared Secrets Workload ID
+weight: 1
+description: Configure workload identity for Spaces Shared Secrets
+---
+import GlobalLanguageSelector, { CodeBlock } from '@site/src/components/GlobalLanguageSelector';
+
+
+
+
+
+
+Workload-identity authentication lets you use access policies to grant your
+self-hosted Space cluster access to your cloud providers. Workload identity
+authentication grants temporary AWS credentials to your Kubernetes pod based on
+a service account. Assigning IAM roles and service accounts allows the pod to
+assume the IAM role dynamically and much more securely than static credentials.
+
+This guide walks you through creating an IAM trust role policy and applying it to your EKS
+cluster for secret sharing with Kubernetes.
+
+
+
+
+
+Workload-identity authentication lets you use access policies to grant your
+self-hosted Space cluster access to your cloud providers. Workload identity
+authentication grants temporary Azure credentials to your Kubernetes pod based on
+a service account. Assigning managed identities and service accounts allows the pod to
+authenticate with Azure resources dynamically and much more securely than static credentials.
+
+This guide walks you through creating a managed identity and federated credential for your AKS
+cluster for shared secrets in your Space cluster.
+
+
+
+
+
+Workload-identity authentication lets you use access policies to grant your
+self-hosted Space cluster access to your cloud providers. Workload identity
+authentication grants temporary GCP credentials to your Kubernetes pod based on
+a service account. Assigning IAM roles and service accounts allows the pod to
+access cloud resources dynamically and much more securely than static
+credentials.
+
+This guide walks you through configuring workload identity for your GKE
+cluster's Shared Secrets component.
+
+
+
+## Prerequisites
+
+
+To set up a workload-identity, you'll need:
+
+
+- A self-hosted Space cluster
+- Administrator access in your cloud provider
+- Helm and `kubectl`
+
+
+## About the Shared Secrets component
+
+
+
+
+The External Secrets Operator (ESO) runs in each control plane's host namespace as `external-secrets-controller`. It needs to access
+your external secrets management service, such as AWS Secrets Manager.
+
+To configure your shared secrets workflow controller, you must:
+
+* Annotate the Kubernetes service account to associate it with a cloud-side
+  principal (such as an IAM role, service account, or enterprise application). The workload must then
+  use this service account.
+* Label the workload (pod) to allow the injection of a temporary credential set,
+  enabling authentication.
+
+
+
+
+
+The External Secrets Operator (ESO) component runs in each control plane's host
+namespace as `external-secrets-controller`. It synchronizes secrets from
+external APIs into Kubernetes secrets. Shared secrets allow you to manage
+credentials outside your Kubernetes cluster while making them available to your
+application.
+
+
+
+
+
+The External Secrets Operator (ESO) component runs in each control plane's host
+namespace as `external-secrets-controller`. It synchronizes secrets from
+external APIs into Kubernetes secrets. Shared secrets allow you to manage
+credentials outside your Kubernetes cluster while making them available to your
+application.
+
+
+
+## Configuration
+
+
+
+Upbound supports workload-identity configurations in AWS with IAM Roles for
+Service Accounts or EKS pod identity association.
+
+#### IAM Roles for Service Accounts (IRSA)
+
+With IRSA, you can associate a Kubernetes service account in an EKS cluster with
+an AWS IAM role. Upbound authenticates workloads with that service account as
+the IAM role using temporary credentials instead of static role credentials.
+IRSA relies on the AWS STS `AssumeRoleWithWebIdentity` operation to exchange OIDC ID tokens for
+the IAM role's temporary credentials. IRSA uses the `eks.amazonaws.com/role-arn`
+annotation to link the service account and the IAM role.
+
+**Create an IAM role and trust policy**
+
+First, create an IAM role with appropriate permissions to access AWS Secrets Manager:
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Action": [
+        "secretsmanager:GetSecretValue",
+        "secretsmanager:DescribeSecret",
+        "ssm:GetParameter"
+      ],
+      "Resource": [
+        "arn:aws:secretsmanager:${YOUR_REGION}:${YOUR_AWS_ACCOUNT_ID}:secret:${YOUR_SECRET_PREFIX}*",
+        "arn:aws:ssm:${YOUR_REGION}:${YOUR_AWS_ACCOUNT_ID}:parameter/${YOUR_PARAMETER_PREFIX}*"
+      ]
+    }
+  ]
+}
+```
+
+You must configure the IAM role trust policy with the exact match for each
+provisioned control plane.
An example of a trust policy for a single control
+plane is below:
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Principal": {
+        "Federated": "arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:oidc-provider/${YOUR_OIDC_PROVIDER}"
+      },
+      "Action": "sts:AssumeRoleWithWebIdentity",
+      "Condition": {
+        "StringEquals": {
+          "${YOUR_OIDC_PROVIDER}:aud": "sts.amazonaws.com"
+        },
+        "StringLike": {
+          "${YOUR_OIDC_PROVIDER}:sub": "system:serviceaccount:*:external-secrets-controller"
+        }
+      }
+    }
+  ]
+}
+```
+
+**Configure the EKS OIDC provider**
+
+Next, ensure your EKS cluster has an OIDC identity provider:
+
+```shell
+eksctl utils associate-iam-oidc-provider --cluster ${YOUR_CLUSTER_NAME} --approve
+```
+
+**Apply the IAM role**
+
+In your control plane, pass the `--set` flag with the Spaces Helm chart
+parameters for the shared secrets component:
+
+```shell
+--set controlPlanes.sharedSecrets.serviceAccount.customAnnotations."eks\.amazonaws\.com/role-arn"="arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:role/${YOUR_ESO_ROLE_NAME}"
+```
+
+This command allows the shared secrets component to authenticate with your
+dedicated IAM role in your EKS cluster environment.
+
+#### EKS pod identities
+
+Upbound also supports EKS Pod Identity configuration. EKS Pod Identities allow
+you to create a pod identity association with your Kubernetes namespace, a
+service account, and an IAM role, which allows the EKS control plane to
+automatically handle the credential exchange.
+
+**Create an IAM role**
+
+First, create an IAM role with appropriate permissions to access AWS Secrets Manager:
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Action": [
+        "secretsmanager:GetSecretValue",
+        "secretsmanager:DescribeSecret",
+        "ssm:GetParameter"
+      ],
+      "Resource": [
+        "arn:aws:secretsmanager:${YOUR_AWS_REGION}:${YOUR_AWS_ACCOUNT_ID}:secret:${YOUR_SECRET_PREFIX}*",
+        "arn:aws:ssm:${YOUR_AWS_REGION}:${YOUR_AWS_ACCOUNT_ID}:parameter/${YOUR_PARAMETER_PREFIX}*"
+      ]
+    }
+  ]
+}
+```
+
+**Configure your Space with Helm**
+
+When you install or upgrade your Space with Helm, add the shared secrets value:
+
+```shell
+helm upgrade spaces spaces-helm-chart \
+  --set "sharedSecrets.enabled=true"
+```
+
+**Create a Pod Identity Association**
+
+After Upbound provisions your control plane, create a Pod Identity Association
+with the `aws` CLI:
+
+```shell
+aws eks create-pod-identity-association \
+  --cluster-name ${YOUR_CLUSTER_NAME} \
+  --namespace ${YOUR_CONTROL_PLANE_NAMESPACE} \
+  --service-account external-secrets-controller \
+  --role-arn arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:role/${YOUR_ROLE_NAME}
+```
+
+
+
+
+
+Upbound supports workload-identity configurations in Azure with Azure's built-in
+workload identity feature.
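+
+The steps below grant the identity read access to an existing Azure Key Vault. If you need to confirm the vault name first (a sketch, assuming the `az` CLI):
+
+```shell
+# List Key Vault names in the resource group
+az keyvault list --resource-group ${YOUR_RESOURCE_GROUP} --query "[].name" -o tsv
+```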
+
+First, enable the OIDC issuer and workload identity in your AKS cluster:
+
+```shell
+az aks update --resource-group ${YOUR_RESOURCE_GROUP} --name ${YOUR_AKS_CLUSTER_NAME} --enable-oidc-issuer --enable-workload-identity
+```
+
+Next, find and store the OIDC issuer URL as an environment variable:
+
+```shell
+export AKS_OIDC_ISSUER="$(az aks show --name ${YOUR_AKS_CLUSTER_NAME} --resource-group ${YOUR_RESOURCE_GROUP} --query "oidcIssuerProfile.issuerUrl" --output tsv)"
+```
+
+Create a new managed identity to associate with the shared secrets component:
+
+```shell
+az identity create --name secrets-identity --resource-group ${YOUR_RESOURCE_GROUP} --location ${YOUR_LOCATION}
+```
+
+Retrieve the client ID and store it as an environment variable:
+
+```shell
+export USER_ASSIGNED_CLIENT_ID="$(az identity show --name secrets-identity --resource-group ${YOUR_RESOURCE_GROUP} --query clientId -otsv)"
+```
+
+Grant the managed identity you created access to your Azure Key Vault:
+
+```shell
+az keyvault set-policy --name ${YOUR_KEY_VAULT_NAME} \
+  --resource-group ${YOUR_RESOURCE_GROUP} \
+  --object-id $(az identity show --name secrets-identity --resource-group ${YOUR_RESOURCE_GROUP} --query principalId -otsv) \
+  --secret-permissions get list
+```
+
+In your control plane, pass the `--set` flag with the Spaces Helm chart
+parameters for the shared secrets component:
+
+```shell
+--set controlPlanes.sharedSecrets.serviceAccount.customAnnotations."azure\.workload\.identity/client-id"="${USER_ASSIGNED_CLIENT_ID}"
+--set controlPlanes.sharedSecrets.pod.customLabels."azure\.workload\.identity/use"="true"
+```
+
+Next, create a federated credential to establish trust between the managed identity
+and your AKS OIDC provider:
+
+```shell
+az identity federated-credential create \
+  --name secrets-federated-identity \
+  --identity-name secrets-identity \
+  --resource-group ${YOUR_RESOURCE_GROUP} \
+  --issuer ${AKS_OIDC_ISSUER} \
+  --subject system:serviceaccount:${YOUR_CONTROL_PLANE_NAMESPACE}:external-secrets-controller
+```
+
+
+
+
+
+Upbound supports workload-identity configurations in GCP with IAM principal
+identifiers or service account impersonation.
+
+#### IAM principal identifiers
+
+IAM principal identifiers allow you to grant permissions directly to
+Kubernetes service accounts without additional annotation. Upbound recommends
+this approach for ease-of-use and flexibility.
+
+First, enable Workload Identity Federation on your GKE cluster:
+
+```shell
+gcloud container clusters update ${YOUR_CLUSTER_NAME} \
+  --workload-pool=${YOUR_PROJECT_ID}.svc.id.goog \
+  --region=${YOUR_REGION}
+```
+
+Next, grant the necessary permissions to your Kubernetes service account:
+
+```shell
+gcloud projects add-iam-policy-binding ${YOUR_PROJECT_ID} \
+  --member="principalSet://iam.googleapis.com/projects/${YOUR_PROJECT_NUMBER}/locations/global/workloadIdentityPools/${YOUR_PROJECT_ID}.svc.id.goog/attribute.kubernetes_namespace/${YOUR_CONTROL_PLANE_NAMESPACE}/attribute.kubernetes_service_account/external-secrets-controller" \
+  --role="roles/secretmanager.secretAccessor"
+```
+
+#### Service account impersonation
+
+Service account impersonation allows you to link a Kubernetes service account to
+a GCP service account. The Kubernetes service account assumes the permissions of
+the GCP service account you specify.
+
+Enable workload identity federation on your GKE cluster:
+
+```shell
+gcloud container clusters update ${YOUR_CLUSTER_NAME} \
+  --workload-pool=${YOUR_PROJECT_ID}.svc.id.goog \
+  --region=${YOUR_REGION}
+```
+
+Next, create a dedicated service account for your secrets operations:
+
+```shell
+gcloud iam service-accounts create secrets-sa \
+  --project=${YOUR_PROJECT_ID}
+```
+
+Grant secret access permissions to the service account you created:
+
+```shell
+gcloud projects add-iam-policy-binding ${YOUR_PROJECT_ID} \
+  --member="serviceAccount:secrets-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com" \
+  --role="roles/secretmanager.secretAccessor"
+```
+
+Link the Kubernetes service account to the GCP service account:
+
+```shell
+gcloud iam service-accounts add-iam-policy-binding \
+  secrets-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com \
+  --role="roles/iam.workloadIdentityUser" \
+  --member="serviceAccount:${YOUR_PROJECT_ID}.svc.id.goog[${YOUR_CONTROL_PLANE_NAMESPACE}/external-secrets-controller]"
+```
+
+In your control plane, pass the `--set` flag with the Spaces Helm chart
+parameters for the shared secrets component:
+
+```shell
+--set controlPlanes.sharedSecrets.serviceAccount.customAnnotations."iam\.gke\.io/gcp-service-account"="secrets-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com"
+```
+
+
+
+## Verify your configuration
+
+After you apply the configuration, use `kubectl` to verify the service account
+has the correct annotation:
+
+```shell
+kubectl get serviceaccount external-secrets-controller -n ${YOUR_CONTROL_PLANE_NAMESPACE} -o yaml
+```
+
+
+
+Verify the `external-secrets` pod is running correctly:
+
+```shell
+kubectl get pods -n ${YOUR_CONTROL_PLANE_NAMESPACE} | grep external-secrets
+```
+
+
+
+
+
+Verify the External Secrets Operator pod is running correctly:
+
+```shell
+kubectl get pods -n ${YOUR_CONTROL_PLANE_NAMESPACE} | grep external-secrets
+```
+
+
+
+
+
+Verify the `external-secrets` pod is running correctly:
+
+```shell
+kubectl get pods -n ${YOUR_CONTROL_PLANE_NAMESPACE} | grep external-secrets
+```
+
+
+
+## Restart workload
+
+
+
+You must manually restart a workload's pod when you add the
+`eks.amazonaws.com/role-arn` annotation to the running pod's service
+account.
+
+This restart enables the EKS pod identity webhook to inject the necessary
+environment for using IRSA.
+
+
+
+
+
+You must manually restart a workload's pod when you add the workload identity annotations to the running pod's service account.
+
+This restart enables the workload identity webhook to inject the necessary
+environment for using Azure workload identity.
+
+
+
+
+
+GCP workload identity doesn't require pod restarts after configuration changes.
+If you do need to restart the workload, use the `kubectl` command to force the
+component restart:
+
+
+
+```shell
+kubectl rollout restart deployment external-secrets -n ${YOUR_CONTROL_PLANE_NAMESPACE}
+```
+
+## Use cases
+
+
+
+
+Shared secrets with workload identity eliminates the need for static credentials
+in your cluster. These benefits are particularly helpful in:
+
+* Secure application credentials management
+* Database connection string storage
+* API token management
+* Compliance with secret rotation security standards
+* Multi-environment configuration with centralized secret management
+
+
+
+
+
+Using workload identity authentication for shared secrets eliminates the need for static
+credentials in your cluster as well as the overhead of credential rotation.
+These benefits are particularly helpful in:
+
+* Secure application credentials management
+* Database connection string storage
+* API token management
+* Compliance with secret rotation security standards
+
+
+
+
+
+Configuring the external secrets operator with workload identity eliminates the need for
+static credentials in your cluster and the overhead of credential rotation.
+These benefits are particularly helpful in:
+
+* Secure application credentials management
+* Database connection string storage
+* API token management
+* Compliance with secret rotation security standards
+
+
+
+## Next steps
+
+Now that you have workload identity configured for the shared secrets component, visit
+the [Shared Secrets][eso-guide] guide for more information.
+
+Other workload identity guides are:
+* [Backup and restore][backuprestore]
+* [Billing][billing]
+
+[eso-guide]: /spaces/howtos/secrets-management
+[backuprestore]: /spaces/howtos/self-hosted/workload-id/backup-restore-config
+[billing]: /spaces/howtos/self-hosted/workload-id/billing-config
diff --git a/spaces_versioned_docs/version-v1.10/howtos/simulations.md b/spaces_versioned_docs/version-v1.10/howtos/simulations.md
new file mode 100644
index 000000000..26cb0e657
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.10/howtos/simulations.md
@@ -0,0 +1,110 @@
+---
+title: Simulate changes to your Control Plane Projects
+sidebar_position: 100
+description: Use the Up CLI to mock operations before deploying to your environments.
+---
+
+:::info API Version Information
+This guide covers Simulations, available in v1.10+ (GA since v1.13). For version-specific availability and features, see the .
+
+For API specifications on the Simulations CRD, see the [Spaces API Reference](../../../reference/apis/spaces-api/).
+:::
+
+:::important
+The Simulations feature is in private preview. For more information, [reach out to Upbound][reach-out-to-upbound].
+:::
+
+Control plane simulations allow you to preview changes to your resources before
+applying them to your control planes. Like a plan or dry-run operation,
+simulations expose the impact of updates to compositions or claims without
+changing your actual resources.
+
+A control plane simulation creates a temporary copy of your control plane and
+returns a preview of the desired changes. The simulation change plan helps you
+reduce the risk of unexpected behavior based on your changes.
+
+## Simulation benefits
+
+Control planes are dynamic systems that automatically reconcile resources to
+match your desired state. Simulations provide visibility into this
+reconciliation process by showing:
+
+
+* New resources to create
+* Existing resources to change
+* Existing resources to delete
+* How configuration changes propagate through the system
+
+These insights are crucial when planning complex changes or upgrading Crossplane
+packages.
+
+## Requirements
+
+Simulations are available to select customers on Upbound Cloud with Team
+Tier or higher. For more information, [reach out to Upbound][reach-out-to-upbound-1].
+
+## How to simulate your control planes
+
+Before you start a simulation, build your project and use the `up
+project run` command to run your control plane.
+
+Use the `up project simulate` command with your control plane name to start the
+simulation:
+
+```shell
+up project simulate --complete-after=60s --terminate-on-finish
+```
+
+The `complete-after` flag determines how long to run the simulation before it completes and calculates the results.
Depending on the change, a simulation may not complete within your defined interval, leaving unaffected resources as `unchanged`.
+
+The `terminate-on-finish` flag terminates the simulation after the time
+you set, deleting the control plane that ran the simulation.
+
+At the end of your simulation, your CLI returns:
+* A summary of the resources created, modified, or deleted
+* Diffs for each resource affected
+
+## View your simulation in the Upbound Console
+You can also view your simulation results in the Upbound Console:
+
+1. Navigate to your base control plane in the Upbound Console
+2. Select the "Simulations" tab in the menu
+3. Select a simulation object to see a change list of all
+   affected resources.
+
+The Console provides visual indications of changes:
+
+- Created Resources: Marked with green
+- Modified Resources: Marked with yellow
+- Deleted Resources: Marked with red
+- Unchanged Resources: Displayed in gray
+
+![Upbound Console Simulation](/img/simulations.png)
+
+## Considerations
+
+Simulations is a **private preview** feature.
+
+Be aware of the following limitations:
+
+- Simulations can't predict the exact behavior of external systems due to the
+  complexity and non-deterministic reconciliation pattern in Crossplane.
+
+- The only completion criterion for a simulation is time. Your simulation may not
+  receive a conclusive result within that interval. Upbound recommends the
+  default `60s` value.
+
+- Providers don't run in simulations. Simulations can't compose resources that
+  rely on the status of Managed Resources.
+
+
+The Upbound team is working to address these limitations. Your feedback is always appreciated.
+
+## Next steps
+
+For more information, follow the [tutorial][tutorial] on Simulations.
+
+
+[tutorial]: /manuals/cli/howtos/simulations
+[reach-out-to-upbound]: https://www.upbound.io/contact-us
+[reach-out-to-upbound-1]: https://www.upbound.io/contact-us
diff --git a/spaces_versioned_docs/version-v1.10/overview/_category_.json b/spaces_versioned_docs/version-v1.10/overview/_category_.json
new file mode 100644
index 000000000..54bb16430
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.10/overview/_category_.json
@@ -0,0 +1,4 @@
+{
+  "label": "Overview",
+  "position": 0
+}
diff --git a/spaces_versioned_docs/version-v1.10/overview/index.md b/spaces_versioned_docs/version-v1.10/overview/index.md
new file mode 100644
index 000000000..7b79f6e44
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.10/overview/index.md
@@ -0,0 +1,14 @@
+---
+title: Spaces Overview
+sidebar_position: 0
+---
+
+# Upbound Spaces
+
+Welcome to the Upbound Spaces documentation. This section contains comprehensive documentation for Spaces API and Spaces operations across all supported versions (v1.9 through v1.15).
+ +## Get Started + +- **[Concepts](/spaces/concepts/control-planes)** - Core concepts for Spaces +- **[How-To Guides](/spaces/howtos/auto-upgrade)** - Step-by-step guides for operating Spaces +- **[API Reference](/spaces/reference/)** - API specifications and resources diff --git a/spaces_versioned_docs/version-v1.10/reference/_category_.json b/spaces_versioned_docs/version-v1.10/reference/_category_.json new file mode 100644 index 000000000..4a6a139c4 --- /dev/null +++ b/spaces_versioned_docs/version-v1.10/reference/_category_.json @@ -0,0 +1,5 @@ +{ + "label": "Spaces API", + "position": 1, + "collapsed": true +} diff --git a/spaces_versioned_docs/version-v1.10/reference/index.md b/spaces_versioned_docs/version-v1.10/reference/index.md new file mode 100644 index 000000000..5e68b0768 --- /dev/null +++ b/spaces_versioned_docs/version-v1.10/reference/index.md @@ -0,0 +1,72 @@ +--- +title: Spaces API Reference +description: Documentation for the Spaces API resources (v1.15 - Latest) +sidebar_position: 1 +--- +import CrdDocViewer from '@site/src/components/CrdViewer'; + + +This page documents the Custom Resource Definitions (CRDs) for the Spaces API. + + +## Control Planes +### Control Planes + + +## Observability +### Shared Telemetry Configs + + +## `pkg` +### Controller Revisions + + +### Controller Runtime Configs + + +### Controllers + + +### Remote Configuration Revisions + + +### Remote Configurations + + +## Policy +### Shared Upbound Policies + + +## References +### Referenced Objects + + +## Scheduling +### Environments + + +## Secrets +### Shared External Secrets + + +### Shared Secret Stores + + +## Simulations + + +## Spaces Backups +### Backups + + +### Backup Schedules + + +### Shared Backup Configs + + +### Shared Backups + + +### Shared Backup Schedules + diff --git a/spaces_versioned_docs/version-v1.11/concepts/_category_.json b/spaces_versioned_docs/version-v1.11/concepts/_category_.json new file mode 100644 index 000000000..4b8667e29 --- /dev/null +++ b/spaces_versioned_docs/version-v1.11/concepts/_category_.json @@ -0,0 +1,7 @@ +{ + "label": "Concepts", + "position": 2, + "collapsed": true +} + + diff --git a/spaces_versioned_docs/version-v1.11/concepts/control-planes.md b/spaces_versioned_docs/version-v1.11/concepts/control-planes.md new file mode 100644 index 000000000..7066343de --- /dev/null +++ b/spaces_versioned_docs/version-v1.11/concepts/control-planes.md @@ -0,0 +1,227 @@ +--- +title: Control Planes +weight: 1 +description: An overview of control planes in Upbound +--- + + +Control planes in Upbound are fully isolated Crossplane control plane instances that Upbound manages for you. This means: + +- the underlying lifecycle of infrastructure (compute, memory, and storage) required to power your instance. +- scaling of the infrastructure. +- the maintenance of the core Crossplane components that make up a control plane. + +This lets users focus on building their APIs and operating their control planes, while Upbound handles the rest. Each control plane has its own dedicated API server connecting users to their control plane. + +:::info API Version Information +This guide applies to all supported versions (v1.9-v1.14+). + +For detailed information about Control Plane API specifications and CRD fields, see the [Spaces API Reference](../../../reference/apis/spaces-api/). version compatibility details, see the . 
+::: + +## Control plane architecture + +![Managed Control Plane Architecture](/img/mcp.png) + +Along with underlying infrastructure, Upbound manages the Crossplane system components. You don't need to manage the Crossplane API server or core resource controllers because Upbound manages your control plane lifecycle from creation to deletion. + +### Crossplane API + +Each control plane offers a unified endpoint. You interact with your control plane through Kubernetes and Crossplane API calls. Each control plane runs a Kubernetes API server to handle API requests. You can make API calls in the following ways: + +- Direct calls: HTTP/gRPC +- Indirect calls: the up CLI, Kubernetes clients such as kubectl, or the Upbound Console. + +Like in Kubernetes, the API server is the hub for all communication for the control plane. All internal components such as system processes and provider controllers act as clients of the API server. + +Your API requests tell Crossplane your desired state for the resources your control plane manages. Crossplane attempts to constantly maintain that state. Crossplane lets you configure objects in the API either imperatively or declaratively. + +### Crossplane versions and features + +Upbound automatically upgrades Crossplane system components on control planes to new Crossplane versions for updated features and improvements in the open source project. With [automatic upgrades][automatic-upgrades], you choose the cadence that Upbound automatically upgrades the system components in your control plane. You can also choose to manually upgrade your control plane to a different Crossplane version. + +For detailed information on versions and upgrades, refer to the [release notes][release-notes] and the automatic upgrade documentation. If you don't enroll a control plane in a release channel, Upbound doesn't apply automatic upgrades. + +Features considered "alpha" in Crossplane are by default not supported in a control plane unless otherwise specified. + +### Hosting environments + +Every control plane in Upbound belongs to a [control plane group][control-plane-group]. Control plane groups are a logical grouping of one or more control planes with shared objects (such as secrets or backup configuration). Every group resides in a [Space][space] in Upbound, which are hosting environments for control planes. + +Think of a Space as being conceptually the same as an AWS, Azure, or GCP region. Regardless of the Space type you run a control plane in, the core experience is identical. + +## Management + +### Create a control plane + +You can create a new control plane from the Upbound Console, [up CLI][up-cli], or with Kubernetes clients such as `kubectl`. + + + + + +To use the CLI, run the following: + +```shell +up ctp create +``` + +To learn more about control plane-related commands in `up`, go to the [CLI reference][cli-reference] documentation. + + + +You can create and manage control planes declaratively in Upbound. Before you +begin, ensure you're logged into Upbound and set the correct context: + +```bash +up login +# Example: acmeco/upbound-gcp-us-west-1/default +up ctx ${yourOrganization}/${yourSpace}/${yourGroup} +```` + +```yaml +#controlplane-a.yaml +apiVersion: spaces.upbound.io/v1beta1 +kind: ControlPlane +metadata: + name: controlplane-a +spec: + crossplane: + autoUpgrade: + channel: Rapid +``` + +```bash +kubectl apply -f controlplane-a.yaml +``` + + + + + +### Connect directly to your control plane + +Each control plane offers a unified endpoint. 
+ You interact with your control plane through Kubernetes and Crossplane API calls. Each control plane runs a Kubernetes API server to handle API requests.
+
+You can connect to a control plane's API server directly via the up CLI. Use the [`up ctx`][up-ctx] command to set your kubeconfig's current context to a control plane:
+
+```shell
+# Example: acmeco/upbound-gcp-us-west-1/default/ctp1
+up ctx ${yourOrganization}/${yourSpace}/${yourGroup}/${yourControlPlane}
+```
+
+To disconnect from your control plane and revert your kubeconfig's current context to the previous entry, run the following:
+
+```shell
+up ctx ..
+```
+
+You can also generate a `kubeconfig` file for a control plane with [`up ctx -f`][up-ctx-f].
+
+```shell
+up ctx ${yourOrganization}/${yourSpace}/${yourGroup}/${yourControlPlane} -f - > ctp-kubeconfig.yaml
+```
+
+:::tip
+To learn more about how to use `up ctx` to navigate different contexts in Upbound, read the [CLI documentation][cli-documentation].
+:::
+
+## Configuration
+
+When you create a new control plane, Upbound provides you with a fully isolated instance of Crossplane. Configure your control plane by installing packages that extend its capabilities, such as creating and managing the lifecycle of new types of infrastructure resources.
+
+You're encouraged to install any available Crossplane package type (Providers, Configurations, Functions) available in the [Upbound Marketplace][upbound-marketplace] on your control planes.
+
+### Install packages
+
+Below are a couple of ways to install Crossplane packages on your control plane.
+
+
+
+
+
+
+Use the `up` CLI to install Crossplane packages from the [Upbound Marketplace][upbound-marketplace-1] on your control planes. Connect directly to your control plane via `up ctx`. Then, to install a provider:
+
+```shell
+up ctp provider install xpkg.upbound.io/upbound/provider-family-aws
+```
+
+To install a Configuration:
+
+```shell
+up ctp configuration install xpkg.upbound.io/upbound/platform-ref-aws
+```
+
+To install a Function:
+
+```shell
+up ctp function install xpkg.upbound.io/crossplane-contrib/function-kcl
+```
+
+
+You can use kubectl to directly apply any Crossplane manifest. Below is an example for installing a Crossplane provider:
+
+```yaml
+cat <<EOF | kubectl apply -f -
+apiVersion: pkg.crossplane.io/v1
+kind: Provider
+metadata:
+  name: provider-family-aws
+spec:
+  package: xpkg.upbound.io/upbound/provider-family-aws
+EOF
+```
+
+
+
+For production-grade scenarios, it's recommended you configure your control plane declaratively via Git plus a Continuous Delivery (CD) Engine such as Argo. For guidance on this topic, read [GitOps with control planes][gitops-with-control-planes].
+
+
+
+
+
+
+### Configure Crossplane ProviderConfigs
+
+#### ProviderConfigs with OpenID Connect
+
+Use OpenID Connect (`OIDC`) to authenticate to Upbound control planes without credentials. OIDC lets your control plane exchange short-lived tokens directly with your cloud provider. Read how to [connect control planes to external services][connect-control-planes-to-external-services] to learn more.
+
+#### Generic ProviderConfigs
+
+The Upbound Console doesn't allow direct editing of ProviderConfigs that don't support `Upbound` authentication. To edit these ProviderConfigs on your control plane, connect to the control plane directly by following the instructions in the previous section and using `kubectl`.
+
+### Configure secrets
+
+Upbound gives users the ability to configure the synchronization of secrets from external stores into control planes. Configure this capability at the group-level, explained in the [Spaces documentation][spaces-documentation].
+
+### Configure backups
+
+Upbound gives users the ability to configure backup schedules, take impromptu backups, and conduct self-service restore operations. Configure this capability at the group-level, explained in the [Spaces documentation][spaces-documentation-1].
+
+### Configure telemetry
+
+
+Upbound gives users the ability to configure the collection of telemetry (logs, metrics, and traces) in their control planes. Using Upbound's built-in [OTEL][otel] support, you can stream this data out to your preferred observability solution. Configure this capability at the group-level, explained in the [Spaces documentation][spaces-documentation-2].
+
+
+
+[automatic-upgrades]: /spaces/howtos/auto-upgrade
+[release-notes]: https://github.com/upbound/universal-crossplane/releases
+[control-plane-group]: /spaces/concepts/groups
+[space]: /spaces/overview
+[up-cli]: /reference/cli-reference
+[cli-reference]: /reference/cli-reference
+[up-ctx]: /reference/cli-reference
+[up-ctx-f]: /reference/cli-reference
+[cli-documentation]: /manuals/cli/concepts/contexts
+[upbound-marketplace]: https://marketplace.upbound.io
+[upbound-marketplace-1]: https://marketplace.upbound.io
+[gitops-with-control-planes]: /spaces/howtos/cloud-spaces/gitops
+[connect-control-planes-to-external-services]: /manuals/platform/howtos/oidc
+[spaces-documentation]: /spaces/howtos/secrets-management
+[spaces-documentation-1]: /spaces/howtos/backup-and-restore
+[otel]: https://opentelemetry.io
+[spaces-documentation-2]: /spaces/howtos/observability
diff --git a/spaces_versioned_docs/version-v1.11/concepts/deployment-modes.md b/spaces_versioned_docs/version-v1.11/concepts/deployment-modes.md
new file mode 100644
index 000000000..f5e718f88
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.11/concepts/deployment-modes.md
@@ -0,0 +1,53 @@
+---
+title: Deployment Modes
+sidebar_position: 10
+description: An overview of deployment modes for Spaces
+---
+
+Upbound Spaces can be deployed and used in a variety of modes:
+
+- **Cloud Spaces:** Multi-tenant Upbound-hosted, Upbound-managed Space environment. Cloud Spaces provide a typical SaaS experience.
+- **[Dedicated Spaces][dedicated-spaces]:** Single-tenant Upbound-hosted, Upbound-managed Space environment. Dedicated Spaces provide a SaaS experience, with additional isolation guarantees that your workloads run in a fully isolated context.
+- **[Managed Spaces][managed-spaces]:** Single-tenant customer-hosted, Upbound-managed Space environment. Managed Spaces provide a SaaS-like experience, with additional guarantees of all hosting infrastructure being served from your own cloud account.
+- **[Self-Hosted Spaces][self-hosted-spaces]:** Single-tenant customer-hosted, customer-managed Space environment. This is a fully self-hosted, self-managed software experience for using Spaces. Upbound delivers the Spaces software and you run it yourself.
+
+The Upbound platform uses a federated model to connect each Space back to a
+central service called the [Upbound Console][console], which is deployed and
+managed by Upbound.
+
+By default, customers have access to a set of Cloud Spaces.
+
+## Supported clouds
+
+You can host Upbound Spaces on Amazon Web Services (AWS), Microsoft Azure,
+and Google Cloud Platform (GCP). Regardless of the hosting platform, you can use
+Spaces to deploy control planes that manage the lifecycle of your resources.
+
+## Supported regions
+
+This table lists the cloud service provider regions supported by Upbound.
+
+### GCP
+
+| Region | Location |
+| --- | --- |
+| `us-west-1` | Western US (Oregon) |
+| `us-central-1` | Central US (Iowa) |
+| `eu-west-3` | Western Europe (Frankfurt) |
+
+### AWS
+
+| Region | Location |
+| --- | --- |
+| `us-east-1` | Eastern US (Northern Virginia) |
+
+### Azure
+
+| Region | Location |
+| --- | --- |
+| `us-east-1` | Eastern US (Virginia) |
+
+[dedicated-spaces]: /spaces/howtos/cloud-spaces/dedicated-spaces-deployment
+[managed-spaces]: /spaces/howtos/self-hosted/managed-spaces-deployment
+[self-hosted-spaces]: /spaces/howtos/self-hosted/self-hosted-spaces-deployment
+[console]: /manuals/console/upbound-console/
diff --git a/spaces_versioned_docs/version-v1.11/concepts/groups.md b/spaces_versioned_docs/version-v1.11/concepts/groups.md
new file mode 100644
index 000000000..d2ccacdb3
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.11/concepts/groups.md
@@ -0,0 +1,115 @@
+---
+title: Control Plane Groups
+sidebar_position: 2
+description: An introduction to the Control Plane Groups in Upbound
+plan: "enterprise"
+---
+
+In Upbound, Control Plane Groups (or just 'groups') are a logical grouping of one or more control planes with shared resources like [secrets][secrets] or [backups][backups]. It's a mechanism for isolating these groups of resources within a single [Space][space]. All role-based access control in Upbound happens at the control plane group level.
+
+## When to use multiple groups
+
+You should use groups in environments where there's a need to have Crossplane manage infrastructure across multiple cloud accounts or projects. For users who only need to deploy and manage resources in a couple of cloud accounts, you shouldn't need to think about groups at all.
+
+Groups are a way to divide access in Upbound between multiple teams. Think of a group as being analogous to a Kubernetes _namespace_.
+
+## The 'default' group
+
+Every Cloud Space in Upbound has a group named _default_ available.
+
+## Working with groups
+
+### View groups
+
+You can list groups in a Space using:
+
+```shell
+up group list
+```
+
+If you're operating in a single-tenant Space and have access to the underlying cluster, you can list namespaces that have the group label:
+
+```shell
+kubectl get namespaces -l spaces.upbound.io/group=true
+```
+
+### Set the group for a request
+
+Several commands in _up_ have a group context. To set the group for a request, use the `--group` flag:
+
+```shell
+up ctp list --group=team1
+```
+```shell
+up ctp create new-ctp --group=team2
+```
+
+### Set the group preference
+
+The _up_ CLI operates upon a single [Upbound context][upbound-context]. Whatever context gets set is then used as the preference for other commands. An Upbound context can point at a variety of levels:
+
+1. A Space in Upbound
+2. A group within a Space
+3. A control plane within a group
+
+To set the group preference, use `up ctx` to choose a group as your preferred Upbound context.
For example:
+
+```shell
+# This sets the context for the up CLI to the default group in an Upbound-managed Cloud Space (gcp-us-west-1) for an organization called 'acmeco'
+up ctx acmeco/upbound-gcp-us-west-1/default/
+```
+
+### Create a group
+
+To create a group, log in to Upbound and set your context to your desired Space:
+
+```shell
+up login
+up ctx '/'
+# Example: up ctx acmeco/upbound-gcp-us-west-1
+```
+
+Create a group:
+
+```shell
+up group create my-new-group
+```
+
+### Delete a group
+
+To delete a group, log in to Upbound and set your context to your desired Space:
+
+```shell
+up login
+up ctx '/'
+# Example: up ctx acmeco/upbound-gcp-us-west-1
+```
+
+Delete a group:
+
+```shell
+up group delete my-new-group
+```
+
+### Protected groups
+
+Once a control plane gets created in a group, Upbound enforces a protection policy on the group to prevent accidental deletion. To delete a group that has control planes in it, first delete all control planes in the group.
+
+## Groups in the context of single-tenant Spaces
+
+Upbound offers a variety of deployment models to use the product. If you deploy your own single-tenant Upbound Space (whether connected or disconnected), you're self-hosting Upbound software in a Kubernetes cluster. In these environments, a control plane group maps to a corresponding namespace in the cluster which hosts the Space.
+
+Most Kubernetes clusters come with some set of predefined namespaces. Because a group maps to a corresponding Kubernetes namespace, whenever a group gets created, a corresponding Kubernetes namespace must exist. When the Spaces software is newly installed, no groups exist. You _can_ elevate a Kubernetes namespace to become a group in any of the following ways:
+
+1. Creating a group with the same name as a preexisting Kubernetes namespace
+2. Creating a control plane in a preexisting Kubernetes namespace
+3. Labeling a Kubernetes namespace with the label `spaces.upbound.io/group=true`
+
+[secrets]: /spaces/howtos/secrets-management
+[backups]: /spaces/howtos/self-hosted/workload-id/backup-restore-config/
+[space]: /spaces/overview
+[upbound-context]: /manuals/cli/concepts/contexts
diff --git a/spaces_versioned_docs/version-v1.11/howtos/_category_.json b/spaces_versioned_docs/version-v1.11/howtos/_category_.json
new file mode 100644
index 000000000..d3a8547aa
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.11/howtos/_category_.json
@@ -0,0 +1,7 @@
+{
+  "label": "How-tos",
+  "position": 3,
+  "collapsed": true
+}
+
diff --git a/spaces_versioned_docs/version-v1.11/howtos/api-connector.md b/spaces_versioned_docs/version-v1.11/howtos/api-connector.md
new file mode 100644
index 000000000..a14468f52
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.11/howtos/api-connector.md
@@ -0,0 +1,413 @@
+---
+title: API Connector
+weight: 90
+description: Connect Kubernetes clusters to remote Crossplane control planes for resource synchronization
+aliases:
+  - /api-connector
+  - /concepts/api-connector
+---
+:::info API Version Information
+This guide covers API Connector, a preview feature available in all supported versions (v1.9-v1.14+).
+
+For related API specifications and available resources, see the [Spaces API Reference](../../../reference/apis/spaces-api/). For version compatibility details, see the .
+:::
+
+:::warning
+API Connector is currently in **Preview**. The feature is under active
+development and subject to breaking changes. Use for testing and evaluation
+purposes only.
+:::
+
+API Connector enables seamless integration between Kubernetes application
+clusters consuming APIs and remote Crossplane control planes providing and
+reconciling APIs.
+
+You can use the API Connector to decouple where Crossplane is running (for
+example, in an Upbound control plane) from where APIs are consumed (for
+example, in an existing Kubernetes cluster). This gives you flexibility and
+consistency in your control plane operations.
+
+Unlike the [Control Plane Connector](ctp-connector.md), which offers only
+coarse-grained connectivity between app clusters and a control plane, API
+Connector offers fine-grained configuration of which APIs get offered along with
+multi-cluster connectivity.
+
+## Architecture overview
+
+![API Connector Architecture](/img/api-connector.png)
+
+API Connector uses a **provider-consumer** model:
+
+- **Provider control plane**: The Upbound control plane that provides APIs and manages infrastructure.
+- **Consumer cluster**: Any Kubernetes cluster whose users want to use APIs provided by the provider control plane, without having to run Crossplane. API Connector gets installed in the consumer cluster and bidirectionally syncs API objects to the provider.
+
+### Key components
+
+**Custom Resource Definitions (CRDs)**:
+
+- `ClusterConnection`: Establishes a connection from the consumer to the provider cluster. Pulls bindable CRD APIs from the provider into the consumer cluster for use.
+- `ClusterAPIBinding`: Instructs API Connector to sync all API objects cluster-wide with a given API group to a given provider cluster.
+- `APIBinding`: Namespaced version of `ClusterAPIBinding`. Instructs API Connector to sync API objects within a given namespace and with a given API group to a given provider cluster.
+
+## Prerequisites
+
+Before using API Connector, ensure:
+
+1. **Consumer cluster** has network access to the provider control plane
+1. You have a license to use API Connector. If you are unsure, [contact Upbound][contact] or your sales representative.
+
+This guide walks through how to automate connecting your cluster to an Upbound
+control plane. You can also manually configure the API Connector.
+
+## Publishing APIs in the provider cluster
+
+First, log in to your provider control plane and choose which CRD APIs you want
+to make accessible to the consumer cluster. API Connector only syncs
+these "bindable" CRDs.
+
+Use the `up` CLI to log in:
+
+```bash
+up login
+```
+
+Connect to your control plane:
+
+```bash
+up ctx
+```
+
+Check what CRDs are available:
+
+```bash
+kubectl get crds
+```
+
+Label all CRDs you want to publish with the bindable label:
+
+```bash
+kubectl label crd 'connect.upbound.io/bindable'='true' --overwrite
+```
+
+Change context to the provider cluster:
+```bash
+kubectl config set-context
+```
+
+Check what CRDs are available:
+```bash
+kubectl get crds
+```
+
+Label all CRDs you want to publish with the bindable label:
+
+```bash
+kubectl label crd 'connect.upbound.io/bindable'='true' --overwrite
+```
+
+## Installation
+
+The up CLI provides the simplest installation method with automatic
+configuration:
+
+Make sure the current kubeconfig context is set to the **provider control plane**:
+```bash
+up ctx
+
+up controlplane api-connector install --consumer-kubeconfig [OPTIONS]
+```
+
+The command:
+1. Creates a Robot account (named ``) in the Upbound Cloud organization ``,
+1.
Gives the created robot account `admin` permissions to the provider control plane `` +1. Generates a JWT token for the robot account, and stores it in a Kubernetes Secret in the consumer cluster. +1. Installs the API connector Helm chart in the consumer cluster. +1. Creates a `ClusterConnection` object in the consumer cluster, referring to the newly generated Secret, so that API connector can authenticate successfully to the provider control plane. +1. API connector pulls all published CRDs from the previous step into the consumer cluster. + +**Example**: +```bash +up controlplane api-connector install \ + --consumer-kubeconfig ~/.kube/config \ + --consumer-context my-cluster \ + --upbound-token +``` + +This command uses provided token to authenticate with the **Provider control plane** +and create a `ClusterConnection` resource in the **Consumer cluster** to connect to the +**Provider control plane**. + +**Key Options**: +- `--consumer-kubeconfig`: Path to consumer cluster kubeconfig (required) +- `--consumer-context`: Context name for consumer cluster (required) +- `--name`: Custom name for connection resources (optional) +- `--upbound-token`: API token for authentication (optional) +- `--upgrade`: Upgrade existing installation (optional) +- `--version`: Specific version to install (optional) + + + + +For manual installation or custom configurations: + +```bash +helm upgrade --install api-connector oci://xpkg.upbound.io/spaces-artifacts/api-connector \ + --namespace upbound-system \ + --create-namespace \ + --version \ + --set consumerClusterDisplayName= +``` + +### Authentication methods + +API Connector supports two authentication methods: + + + + +For Upbound Spaces integration: + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: spaces-secret + namespace: upbound-system +type: Opaque +stringData: + token: + organization: + spacesBaseURL: + controlPlaneGroupName: + controlPlaneName: +``` + + + +For direct cluster access: + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: provider-kubeconfig + namespace: upbound-system +type: Opaque +data: + kubeconfig: +``` + + + + +### Connection setup + +Create a `ClusterConnection` to establish connectivity: + + + + +```yaml +apiVersion: connect.upbound.io/v1alpha1 +kind: ClusterConnection +metadata: + name: spaces-connection + namespace: upbound-system +spec: + secretRef: + kind: UpboundRobotToken + name: spaces-secret + namespace: upbound-system + crdManagement: + pullBehavior: Pull +``` + + + + +```yaml +apiVersion: connect.upbound.io/v1alpha1 +kind: ClusterConnection +metadata: + name: provider-connection + namespace: upbound-system +spec: + secretRef: + kind: KubeConfig + name: provider-kubeconfig + namespace: upbound-system + crdManagement: + pullBehavior: Pull +``` + + + + + + + +### Configuration + +Bind APIs to make them available in your consumer cluster: + +```yaml +apiVersion: connect.upbound.io/v1alpha1 +kind: ClusterAPIBinding +metadata: + name: +spec: + connectionRef: + kind: ClusterConnection + name: # Or --name value +``` + + + + +The `ClusterAPIBinding` name must match the **Resource.Group** (name of the CustomResourceDefinition) of the CRD you want to bind. 
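+
+For example, binding the `NopResource` API used later in this guide, and assuming its CRD is named `nopresources.nop.example.org` and the connection from the earlier examples is `spaces-connection`, the binding would look like this:
+
+```yaml
+apiVersion: connect.upbound.io/v1alpha1
+kind: ClusterAPIBinding
+metadata:
+  # Must match the CRD name: <plural resource>.<API group>
+  name: nopresources.nop.example.org
+spec:
+  connectionRef:
+    kind: ClusterConnection
+    name: spaces-connection
+```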
+
+## Usage example
+
+After configuration, you can create API objects (in the consumer cluster) that
+are synchronized to the provider cluster:
+
+```yaml
+apiVersion: nop.example.org/v1alpha1
+kind: NopResource
+metadata:
+  name: my-resource
+  namespace: default
+spec:
+  coolField: "Synchronized resource"
+  compositeDeletePolicy: Foreground
+```
+
+Verify the resource status:
+
+```bash
+kubectl get nopresource my-resource -o yaml
+```
+
+When the `APIBound=True` condition is present, it means that the API object has
+been synced to the provider cluster, and is being reconciled there. Whenever the
+API object in the provider cluster gets status updates (for example
+`Ready=True`), that status is synced back to the consumer cluster.
+
+Switch contexts to the provider cluster to see the API object being created:
+
+```bash
+up ctx
+# or kubectl config set-context
+```
+
+```bash
+kubectl get nopresource my-resource -o yaml
+```
+
+Note that in the provider cluster, the API object is labeled with information about
+where the API object originates from, and with `connect.upbound.io/managed=true`.
+
+## Monitoring and troubleshooting
+
+### Check connection status
+
+```bash
+kubectl get clusterconnection
+```
+
+Expected output:
+```
+NAME                STATUS   MESSAGE
+spaces-connection   Ready    Provider controlplane is available
+```
+
+### View available APIs
+
+```bash
+kubectl get clusterconnection spaces-connection -o jsonpath='{.status.offeredAPIs[*].name}'
+```
+
+### Check API binding status
+
+```bash
+kubectl get clusterapibinding
+```
+
+### Debug resource synchronization
+
+```bash
+kubectl describe
+```
+
+## Removal
+
+### Using the up CLI
+
+```bash
+up controlplane api-connector uninstall \
+  --consumer-kubeconfig ~/.kube/config \
+  --all
+```
+
+The `--all` flag removes all resources, including connections and secrets.
+Without the flag, only the runtime resources are removed and connections and
+secrets are kept.
+
+:::note
+Uninstall doesn't remove any API objects in the provider control plane. If you
+want to clean up all API objects there, delete all API objects from the consumer
+cluster before API Connector uninstallation, and wait for the objects to get
+deleted.
+:::
+
+### Using Helm
+
+```bash
+helm uninstall api-connector -n upbound-system
+```
+
+## Limitations
+
+- **Preview feature**: Subject to breaking changes. Not yet production grade.
+- **CRD updates**: CRDs are pulled once but not automatically updated. If multiple Crossplane clusters offer the same CRD API, API changes must be synchronized out of band, for example using a [Crossplane Configuration](https://docs.crossplane.io/latest/packages/).
+- **Network requirements**: Consumer cluster must have direct network access to provider cluster.
+- **Wide permissions needed in consumer cluster**: Because the API Connector doesn't know up front the names of the APIs it needs to reconcile, it currently runs with full "root" privileges in the consumer cluster.
+- **Connector polling**: API Connector checks for drift between the consumer and provider cluster
+  periodically through polling. The poll interval can be changed with the `pollInterval` Helm value.
+
+## Advanced configuration
+
+### Multiple connections
+
+You can connect to multiple provider clusters simultaneously by creating multiple `ClusterConnection` resources with different names and configurations.
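+
+As a sketch, a second connection to another provider control plane reuses the same resource shapes shown earlier; the `staging-connection` and `staging-secret` names here are hypothetical:
+
+```yaml
+apiVersion: connect.upbound.io/v1alpha1
+kind: ClusterConnection
+metadata:
+  name: staging-connection        # hypothetical second connection
+  namespace: upbound-system
+spec:
+  secretRef:
+    kind: UpboundRobotToken
+    name: staging-secret          # credentials for the second provider
+    namespace: upbound-system
+  crdManagement:
+    pullBehavior: Pull
+```
+
+Each `ClusterAPIBinding` or `APIBinding` then selects which provider its API group syncs to through `spec.connectionRef`.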
+
+[contact]: https://www.upbound.io/contact-us
diff --git a/spaces_versioned_docs/version-v1.11/howtos/auto-upgrade.md b/spaces_versioned_docs/version-v1.11/howtos/auto-upgrade.md
new file mode 100644
index 000000000..249056fb4
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.11/howtos/auto-upgrade.md
@@ -0,0 +1,131 @@
+---
+title: Automatically upgrade control planes
+sidebar_position: 50
+description: How to configure automatic upgrades of Crossplane in a control plane
+plan: "standard"
+---
+
+Upbound Spaces can automatically upgrade the version of Upbound Crossplane in managed control plane instances. You can edit the `spec.crossplane.autoUpgrade` field in your `ControlPlane` specification with the available release channels below.
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+). The `spec.crossplane.autoUpgrade` field has been available since v1.9.
+
+For ControlPlane API specifications, see the [Spaces API Reference](../../../reference/apis/spaces-api/). For version compatibility details, see the .
+:::
+
+| Channel | Description | Example |
+| --- | --- | --- |
+| **None** | Disables auto upgrades. | _Uses the version specified in `spec.crossplane.version`._ |
+| **Patch** | Upgrades to the latest supported patch release. | _Control plane version 1.12.2-up.2 auto upgrades to 1.12.3-up.1 upon release._ |
+| **Stable** | Default setting. Upgrades to the latest supported patch release on minor version _N-1_, where N is the latest supported minor version. | _If the latest supported minor version is 1.14, auto upgrades to the latest 1.13 patch, such as 1.13.2-up.3._ |
+| **Rapid** | Upgrades to the latest supported patch release on the latest supported minor version. | _If the latest supported minor version is 1.14, auto upgrades to the latest patch of that minor version, such as 1.14.5-up.1._ |
+
+:::warning
+
+The `Rapid` channel is only recommended for users willing to accept the risk of new features and potentially breaking changes.
+
+:::
+
+## Examples
+
+The specs below are examples of how to edit the `autoUpgrade` channel in your `ControlPlane` specification.
+
+To run a control plane with the `Rapid` auto upgrade channel, your spec should look like this:
+
+```yaml
+apiVersion: spaces.upbound.io/v1beta1
+kind: ControlPlane
+metadata:
+  name: example-ctp
+spec:
+  crossplane:
+    autoUpgrade:
+      channel: Rapid
+  writeConnectionSecretToRef:
+    name: kubeconfig-example-ctp
+```
+
+To run a control plane with a pinned version of Crossplane, specify it in the `version` field:
+
+```yaml
+apiVersion: spaces.upbound.io/v1beta1
+kind: ControlPlane
+metadata:
+  name: example-ctp
+spec:
+  crossplane:
+    version: 1.14.3-up.1
+    autoUpgrade:
+      channel: None
+  writeConnectionSecretToRef:
+    name: kubeconfig-example-ctp
+```
+
+## Supported Crossplane versions
+
+Spaces supports the latest supported minor version and the two [preceding minor versions][preceding-minor-versions]. For example, if the last supported minor version is `1.14`, minor versions `1.13` and `1.12` are also supported. Versions older than the three most recent minor versions aren't supported. Only supported Crossplane versions are valid specifications for new control planes.
+
+Current Crossplane version support by Spaces version:
+
+| Spaces Version | Crossplane Version Min | Crossplane Version Max |
+|:--------------:|:----------------------:|:----------------------:|
+| 1.2 | 1.13 | 1.15 |
+| 1.3 | 1.13 | 1.15 |
+| 1.4 | 1.14 | 1.16 |
+| 1.5 | 1.14 | 1.16 |
+| 1.6 | 1.14 | 1.16 |
+| 1.7 | 1.14 | 1.16 |
+| 1.8 | 1.15 | 1.17 |
+| 1.9 | 1.16 | 1.18 |
+| 1.10 | 1.16 | 1.18 |
+| 1.11 | 1.16 | 1.18 |
+| 1.12 | 1.17 | 1.19 |
+
+Upbound offers extended support for all installed Crossplane versions released within a 12-month window of the last Spaces release. Contact your Upbound sales representative for more information on version support.
+
+:::warning
+
+If the auto upgrade channel is `Stable` or `Rapid`, the Crossplane version always stays within the support window after auto upgrade. If set to `Patch` or `None`, the minor version may be outside the support window. You are responsible for upgrading to a supported version.
+
+:::
+
+To view the support status of a control plane instance, use `kubectl get ctp`.
+
+```bash
+kubectl get ctp
+NAME          CROSSPLANE VERSION   SUPPORTED   READY   MESSAGE   AGE
+example-ctp   1.13.2-up.3          True        True              31m
+```
+
+Unsupported versions return `SUPPORTED: False`.
+
+```bash
+kubectl get ctp
+NAME          CROSSPLANE VERSION   SUPPORTED   READY   MESSAGE   AGE
+example-ctp   1.11.5-up.1          False       True              31m
+```
+
+For more detail, use the `-o yaml` flag.
+
+```bash
+kubectl get controlplanes.spaces.upbound.io example-ctp -o yaml
+status:
+  conditions:
+  ...
+  - lastTransitionTime: "2024-01-23T06:36:10Z"
+    message: Crossplane version 1.11.5-up.1 is outside of the support window.
+      Oldest supported minor version is 1.12.
+    reason: UnsupportedCrossplaneVersion
+    status: "False"
+    type: Supported
+```
+
+[preceding-minor-versions]: /reference/usage/lifecycle/#maintenance-and-updates
diff --git a/spaces_versioned_docs/version-v1.11/howtos/automation-and-gitops/_category_.json b/spaces_versioned_docs/version-v1.11/howtos/automation-and-gitops/_category_.json
new file mode 100644
index 000000000..b65481af6
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.11/howtos/automation-and-gitops/_category_.json
@@ -0,0 +1,8 @@
+{
+  "label": "Automation & GitOps",
+  "position": 11,
+  "collapsed": true,
+  "customProps": {
+    "plan": "business"
+  }
+}
diff --git a/spaces_versioned_docs/version-v1.11/howtos/automation-and-gitops/overview.md b/spaces_versioned_docs/version-v1.11/howtos/automation-and-gitops/overview.md
new file mode 100644
index 000000000..57eeb15fc
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.11/howtos/automation-and-gitops/overview.md
@@ -0,0 +1,138 @@
+---
+title: Automation and GitOps Overview
+sidebar_label: Overview
+sidebar_position: 1
+description: Guide to automating control plane deployments with GitOps and Argo CD
+plan: "business"
+---
+
+Automating control plane deployments with GitOps enables declarative, version-controlled infrastructure management. This section covers integrating GitOps workflows with Upbound control planes using Argo CD and related tools.
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+). GitOps patterns are compatible across all versions of Upbound Spaces.
+
+For details on deployment models and their differences, see the [Deployment Modes](../../concepts/deployment-modes.md) concept guide. For version-specific features, see the .
+:::
+
+## What is GitOps?
+
+GitOps is an approach for managing infrastructure by:
+- **Declaratively describing** desired system state in Git
+- **Using controllers** to continuously reconcile actual state with desired state
+- **Treating Git as the source of truth** for all configuration and deployments
+
+Upbound control planes are fully compatible with GitOps patterns, and we strongly recommend integrating GitOps in the platforms you build on Upbound.
+
+## Key Concepts
+
+### Argo CD
+[Argo CD](https://argo-cd.readthedocs.io/) is a popular Kubernetes-native GitOps controller. It continuously monitors Git repositories and automatically applies changes to your infrastructure when commits are detected.
+
+### Deployment Models
+
+The way you configure GitOps depends on your deployment model:
+
+| Aspect | Cloud Spaces | Self-Hosted Spaces |
+|--------|--------------|-------------------|
+| **Access Method** | Upbound API with tokens | Kubernetes native (secrets/kubeconfig) |
+| **Configuration** | Kubeconfig via `up` CLI | Control plane connection secrets |
+| **Setup Complexity** | More involved (API integration) | Simpler (native Kubernetes) |
+| **Typical Use Case** | Managing Upbound resources | Managing workloads on control planes |
+
+## Getting Started
+
+**Choose your path based on your deployment model:**
+
+### Cloud Spaces
+If you're using Upbound Cloud Spaces (Dedicated or Managed):
+1. Start with [GitOps with Upbound Control Planes](../cloud-spaces/gitops-on-upbound.md)
+2. Learn how to integrate Argo CD with Cloud Spaces
+3. Manage both control plane infrastructure and Upbound resources declaratively
+
+### Self-Hosted Spaces
+If you're running self-hosted Spaces:
+1. Start with [GitOps with ArgoCD in Self-Hosted Spaces](../self-hosted/gitops-with-argocd.md)
+2. Learn how to configure control plane connection secrets
+3. Manage workloads deployed to your control planes
+
+## Common Workflows
+
+### Workflow 1: Managing Control Planes with GitOps
+Create and manage control planes themselves declaratively using provider-kubernetes:
+
+```yaml
+apiVersion: kubernetes.crossplane.io/v1alpha2
+kind: Object
+metadata:
+  name: my-controlplane
+spec:
+  forProvider:
+    manifest:
+      apiVersion: spaces.upbound.io/v1beta1
+      kind: ControlPlane
+      # ... control plane configuration
+```
+
+### Workflow 2: Managing Workloads on Control Planes
+Deploy applications and resources to control planes using standard Kubernetes GitOps patterns:
+
+```yaml
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: my-app
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: my-app
+  namespace: my-app
+# ... deployment configuration
+```
+
+### Workflow 3: Managing Upbound Resources
+Use provider-upbound to manage Upbound IAM and repository resources:
+
+- Teams
+- Robots and their team memberships
+- Repositories and permissions
+
+## Advanced Topics
+
+### Argo CD Plugin for Upbound
+Learn more in the [ArgoCD Plugin guide](../self-hosted/use-argo.md) for enhanced integration with self-hosted Spaces.
+
+### Declarative Control Plane Creation
+See [Declaratively create control planes](../self-hosted/declarative-ctps.md) for advanced automation patterns.
+
+### Consuming Control Plane APIs
+Understand how to [consume control plane APIs in your app cluster](../mcp-connector-guide.md) with Argo CD.
+
+## Prerequisites
+
+Before implementing GitOps with control planes, ensure you have:
+
+**For Cloud Spaces:**
+- Access to Upbound Cloud Spaces
+- `up` CLI installed and configured
+- API token with appropriate permissions
+- Argo CD or similar GitOps controller running
+- Familiarity with Kubernetes RBAC
+
+**For Self-Hosted Spaces:**
+- Self-hosted Spaces deployed and running
+- Argo CD deployed in your infrastructure
+- Kubectl access to the cluster hosting Spaces
+- Understanding of control plane architecture
+
+## Next Steps
+
+1. **Choose your deployment model** above
+2. **Review the relevant getting started guide**
3. **Set up your GitOps controller** (Argo CD)
+4. **Deploy your first automated control plane**
+5. **Explore advanced topics** as needed
+
+:::tip
+Start with simple deployments to test your GitOps workflow before moving to production. Use [simulations](../simulations.md) to preview changes before applying them.
+:::
diff --git a/spaces_versioned_docs/version-v1.11/howtos/backup-and-restore.md b/spaces_versioned_docs/version-v1.11/howtos/backup-and-restore.md
new file mode 100644
index 000000000..3b8d026cb
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.11/howtos/backup-and-restore.md
@@ -0,0 +1,530 @@
+---
+title: Backup and restore
+sidebar_position: 13
+description: Configure and manage backups in your Upbound Space.
+plan: "enterprise"
+---
+
+Upbound's _Shared Backups_ is a built-in backup and restore feature. Shared Backups lets you configure automatic schedules for taking snapshots of your control planes. You can restore data from these backups by creating new control planes. This guide explains how to use Shared Backups for disaster recovery or upgrade scenarios.
+
+:::info API Version Information & Available Versions
+This guide applies to **all supported versions** (v1.9-v1.15+).
+
+**Select your API version**:
+- [Spaces API v1.15 (Latest)](../../../next/reference/apis/spaces-api/)
+- [Spaces API v1.14](../../../1.14/reference/apis/spaces-api/)
+- [Spaces API v1.13](../../../1.13/reference/apis/spaces-api/)
+- [Spaces API v1.12](../../../1.12/reference/apis/spaces-api/)
+- [Spaces API v1.11](../../../1.11/reference/apis/spaces-api/)
+- [Spaces API v1.10](../../../1.10/reference/apis/spaces-api/)
+- [Spaces API v1.9](../../../1.9/reference/apis/spaces-api/)
+
+For the version support policy, see . For version compatibility details, see the .
+:::
+
+## Benefits
+
+The Shared Backups feature provides the following benefits:
+
+* Automatic backups for control planes without any operational overhead
+* Backup schedules for multiple control planes in a group
+* Shared Backups are available across all hosting environments of Upbound (Disconnected, Connected, or Cloud Spaces)
+
+## Configure a Shared Backup Config
+
+[SharedBackupConfig][sharedbackupconfig] is a [group-scoped][group-scoped] resource. You should create it in a group containing one or more control planes. This resource configures the storage details and provider. Whenever a backup executes (either by schedule or manually initiated), it references a SharedBackupConfig to tell it where to store the snapshot.
+
+### Backup config provider
+
+The `spec.objectStorage.provider` and `spec.objectStorage.config` fields configure:
+
+* The object storage provider
+* The path to the provider
+* The credentials needed to communicate with the provider
+
+You can only set one provider. Upbound currently supports AWS, Azure, and GCP as providers.
+
+`spec.objectStorage.config` is a freeform map of configuration options for the object storage provider. See [Thanos object storage][thanos-object-storage] for more information on the formats for each supported cloud provider. `spec.objectStorage.bucket` and `spec.objectStorage.provider` override the required values in the config.
+
+#### AWS as a storage provider
+
+:::important
+For Cloud Spaces, static credentials are currently the only supported auth method.
+:::
+
+This example demonstrates how to use AWS as a storage provider for your backups:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackupConfig
+metadata:
+  name: default
+  namespace: default
+spec:
+  objectStorage:
+    provider: AWS
+    bucket: spaces-backup-bucket
+    config:
+      endpoint: s3.eu-west-2.amazonaws.com
+      region: eu-west-2
+    credentials:
+      source: Secret
+      secretRef:
+        name: bucket-creds
+        key: creds
+```
+
+This example assumes you've already created an S3 bucket called "spaces-backup-bucket" in the AWS `eu-west-2` region. The account credentials to access the bucket should exist in a secret in the same namespace as the Shared Backup Config.
+
+#### Azure as a storage provider
+
+:::important
+For Cloud Spaces, static credentials are currently the only supported auth method.
+:::
+
+This example demonstrates how to use Azure as a storage provider for your backups:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackupConfig
+metadata:
+  name: default
+  namespace: default
+spec:
+  objectStorage:
+    provider: Azure
+    bucket: upbound-backups
+    config:
+      storage_account: upbackupstore
+      container: upbound-backups
+      endpoint: blob.core.windows.net
+    credentials:
+      source: Secret
+      secretRef:
+        name: bucket-creds
+        key: creds
+```
+
+This example assumes you've already created an Azure storage account called `upbackupstore` and blob container `upbound-backups`. The storage account key to access the blob should exist in a secret in the same namespace as the Shared Backup Config.
+
+#### GCP as a storage provider
+
+:::important
+For Cloud Spaces, static credentials are currently the only supported auth method.
+:::
+
+This example demonstrates how to use Google Cloud Storage as a storage provider for your backups:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackupConfig
+metadata:
+  name: default
+  namespace: default
+spec:
+  objectStorage:
+    provider: GCP
+    bucket: spaces-backup-bucket
+    credentials:
+      source: Secret
+      secretRef:
+        name: bucket-creds
+        key: creds
+```
+
+This example assumes you've already created a Cloud Storage bucket called "spaces-backup-bucket" and a service account with access to this bucket. The key file should exist in a secret in the same namespace as the Shared Backup Config.
+
+## Configure a Shared Backup Schedule
+
+[SharedBackupSchedule][sharedbackupschedule] is a [group-scoped][group-scoped-1] resource. You should create it in a group containing one or more control planes. This resource defines a backup schedule for control planes within its corresponding group.
+
+Below is an example of a Shared Backup Schedule that takes backups every day of all control planes having `environment: production` labels:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackupSchedule
+metadata:
+  name: daily-schedule
+  namespace: default
+spec:
+  schedule: "@daily"
+  configRef:
+    kind: SharedBackupConfig
+    name: default
+  controlPlaneSelector:
+    labelSelectors:
+      - matchLabels:
+          environment: production
+```
+
+### Define a schedule
+
+The `spec.schedule` field is a [Cron-formatted][cron-formatted] string. Some common examples are below:
+
+| Entry | Description |
+| ----------------- | ------------------------------------------------------------------------------------------------- |
+| `@hourly` | Run once an hour. |
+| `@daily` | Run once a day. |
+| `@weekly` | Run once a week. |
+| `0 0/4 * * *` | Run every 4 hours. |
+| `0/15 * * * 1-5` | Run every 15 minutes, Monday through Friday. |
+| `@every 1h30m10s` | Run every 1 hour, 30 minutes, and 10 seconds. Hour is the largest measurement of time for @every. |
+
+### Exclude resources from the backup
+
+The `spec.excludedResources` field is an array of resource names to exclude from each backup.
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackupSchedule
+metadata:
+  name: daily-schedule
+spec:
+  excludedResources:
+    - "xclusters.aws.platformref.upbound.io"
+    - "xdatabase.aws.platformref.upbound.io"
+    - "xrolepolicyattachment.iam.aws.crossplane.io"
+```
+
+:::warning
+You must specify resource names in lowercase "resource.group" format (for example, `xclusters.aws.platformref.upbound.io`). Using only the resource kind (for example, `XCluster`) isn't supported.
+:::
+
+### Suspend a schedule
+
+Use the `spec.suspend` field to suspend the schedule. It creates no new backups, but allows running backups to complete.
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackupSchedule
+metadata:
+  name: daily-schedule
+spec:
+  suspend: true
+```
+
+### Set the time to live
+
+Set the `spec.ttl` field to define the time to live for the backup. After this time, the backup is eligible for garbage collection. If this field isn't set, the backup isn't garbage collected. The time to live is a duration, for example, `168h` for 7 days.
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackupSchedule
+metadata:
+  name: daily-schedule
+spec:
+  ttl: 168h # Backup is garbage collected after 7 days
+```
+:::tip
+By default, this setting doesn't delete uploaded files. Review the next section to define
+the deletion policy.
+:::
+
+### Define the deletion policy
+
+Set the `spec.deletionPolicy` to define backup deletion actions, including the
+deletion of the backup file from the bucket. The Deletion Policy value defaults
+to `Orphan`. Set it to `Delete` to remove uploaded files in the bucket. For more
+information on the backup and restore process, review the [Spaces API
+documentation][spaces-api-documentation].
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackupSchedule
+metadata:
+  name: daily-schedule
+spec:
+  ttl: 168h # Backup is garbage collected after 7 days
+  deletionPolicy: Delete # Defaults to Orphan
+```
+
+### Garbage collect backups when the schedule gets deleted
+
+Set `spec.useOwnerReferencesInBackup` to `true` to garbage collect associated backups when the shared schedule gets deleted, as in the sketch below.
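+
+A minimal sketch combining this field with the earlier schedule examples:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackupSchedule
+metadata:
+  name: daily-schedule
+spec:
+  schedule: "@daily"
+  # Backups created by this schedule are garbage collected with it
+  useOwnerReferencesInBackup: true
+```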
+
+### Control plane selection
+
+To configure which control planes in a group you want to create a backup schedule for, use the `spec.controlPlaneSelector` field. You can either use `labelSelectors` or the `names` of a control plane directly. A control plane matches if any of the label selectors match.
+
+This example matches all control planes in the group that have `environment: production` as a label:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackupSchedule
+metadata:
+  name: my-backup-schedule
+spec:
+  controlPlaneSelector:
+    labelSelectors:
+      - matchLabels:
+          environment: production
+```
+
+You can use the more complex `matchExpressions` to match labels based on an expression. This example matches control planes that have label `environment: production` or `environment: staging`:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackupSchedule
+metadata:
+  name: my-backup-schedule
+spec:
+  controlPlaneSelector:
+    labelSelectors:
+      - matchExpressions:
+          - { key: environment, operator: In, values: [production,staging] }
+```
+
+You can also specify the names of control planes directly:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackupSchedule
+metadata:
+  name: my-backup-schedule
+spec:
+  controlPlaneSelector:
+    names:
+      - controlplane-dev
+      - controlplane-staging
+      - controlplane-prod
+```
+
+## Configure a Shared Backup
+
+[SharedBackup][sharedbackup] is a [group-scoped][group-scoped-2] resource. You should create it in a group containing one or more control planes. This resource causes a backup to occur for control planes within its corresponding group.
+
+Below is an example of a Shared Backup that takes a backup of all control planes having `environment: production` labels:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackup
+metadata:
+  name: my-backup
+  namespace: default
+spec:
+  configRef:
+    kind: SharedBackupConfig
+    name: default
+  controlPlaneSelector:
+    labelSelectors:
+      - matchLabels:
+          environment: production
+```
+
+### Exclude resources from the backup
+
+The `spec.excludedResources` field is an array of resource names to exclude from each backup.
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackup
+metadata:
+  name: my-backup
+spec:
+  excludedResources:
+    - "xclusters.aws.platformref.upbound.io"
+    - "xdatabase.aws.platformref.upbound.io"
+    - "xrolepolicyattachment.iam.aws.crossplane.io"
+```
+
+:::warning
+You must specify resource names in lowercase "resource.group" format (for example, `xclusters.aws.platformref.upbound.io`). Using only the resource kind (for example, `XCluster`) isn't supported.
+:::
+
+### Set the time to live
+
+Set the `spec.ttl` field to define the time to live for the backup. After this time, the backup is eligible for garbage collection. If this field isn't set, the backup isn't garbage collected. The time to live is a duration, for example, `168h` for 7 days.
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackup
+metadata:
+  name: my-backup
+spec:
+  ttl: 168h # Backup is garbage collected after 7 days
+```
+
+### Garbage collect backups on Shared Backup deletion
+
+Set `spec.useOwnerReferencesInBackup` to `true` to garbage collect associated backups when the shared backup gets deleted, as in the sketch below.
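+
+A minimal sketch combining this field with the earlier Shared Backup examples:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackup
+metadata:
+  name: my-backup
+spec:
+  # Backups are garbage collected when this SharedBackup gets deleted
+  useOwnerReferencesInBackup: true
+```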
+
+### Control plane selection
+
+To configure which control planes in a group you want to create a backup for, use the `spec.controlPlaneSelector` field. You can either use `labelSelectors` or the `names` of a control plane directly. A control plane matches if any of the label selectors match.
+
+This example matches all control planes in the group that have `environment: production` as a label:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackup
+metadata:
+  name: my-backup
+spec:
+  controlPlaneSelector:
+    labelSelectors:
+      - matchLabels:
+          environment: production
+```
+
+You can use the more complex `matchExpressions` to match labels based on an expression. This example matches control planes that have label `environment: production` or `environment: staging`:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackup
+metadata:
+  name: my-backup
+spec:
+  controlPlaneSelector:
+    labelSelectors:
+      - matchExpressions:
+          - { key: environment, operator: In, values: [production,staging] }
+```
+
+You can also specify the names of control planes directly:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackup
+metadata:
+  name: my-backup
+spec:
+  controlPlaneSelector:
+    names:
+      - controlplane-dev
+      - controlplane-staging
+      - controlplane-prod
+```
+
+## Create a manual backup
+
+[Backup][backup] is a [group-scoped][group-scoped-3] resource that causes a single backup to occur for a control plane in its corresponding group.
+
+Below is an example of a manual Backup of a control plane:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: Backup
+metadata:
+  name: my-backup
+  namespace: default
+spec:
+  configRef:
+    kind: SharedBackupConfig
+    name: default
+  controlPlane: my-awesome-ctp
+  deletionPolicy: Delete
+```
+
+The backup specification `DeletionPolicy` defines backup deletion actions,
+including the deletion of the backup file from the bucket. The `Deletion Policy`
+value defaults to `Orphan`. Set it to `Delete` to remove uploaded files
+in the bucket.
+For more information on the backup and restore process, review the [Spaces API documentation][spaces-api-documentation-1].
+
+### Choose a control plane to backup
+
+The `spec.controlPlane` field defines which control plane to execute a backup against.
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: Backup
+metadata:
+  name: my-backup
+  namespace: default
+spec:
+  controlPlane: my-awesome-ctp
+```
+
+If the control plane doesn't exist, the backup fails after multiple retry attempts.
+
+### Exclude resources from the backup
+
+The `spec.excludedResources` field is an array of resource names to exclude from the manual backup.
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: Backup
+metadata:
+  name: my-backup
+spec:
+  excludedResources:
+    - "xclusters.aws.platformref.upbound.io"
+    - "xdatabase.aws.platformref.upbound.io"
+    - "xrolepolicyattachment.iam.aws.crossplane.io"
+```
+
+:::warning
+You must specify resource names in lowercase "resource.group" format (for example, `xclusters.aws.platformref.upbound.io`). Using only the resource kind (for example, `XCluster`) isn't supported.
+:::
+
+### Set the time to live
+
+Set the `spec.ttl` field to define the time to live for the backup. After this time, the backup is eligible for garbage collection. If this field isn't set, the backup isn't garbage collected. The time to live is a duration, for example, `168h` for 7 days.
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: Backup
+metadata:
+  name: my-backup
+spec:
+  ttl: 168h # Backup is garbage collected after 7 days
+```
+
+## Restore a control plane from a backup
+
+You can restore a control plane's state from a backup. Below is an example of creating a new control plane from a previous backup called `restore-me`:
+
+```yaml
+apiVersion: spaces.upbound.io/v1beta1
+kind: ControlPlane
+metadata:
+  name: my-awesome-restored-ctp
+  namespace: default
+spec:
+  restore:
+    source:
+      kind: Backup
+      name: restore-me
+```
+
+[group-scoped]: /spaces/concepts/groups
+[group-scoped-1]: /spaces/concepts/groups
+[group-scoped-2]: /spaces/concepts/groups
+[group-scoped-3]: /spaces/concepts/groups
+[sharedbackupconfig]: /reference/apis/spaces-api/latest
+[thanos-object-storage]: https://thanos.io/tip/thanos/storage.md/
+[sharedbackupschedule]: /reference/apis/spaces-api/latest
+[cron-formatted]: https://en.wikipedia.org/wiki/Cron
+[spaces-api-documentation]: /reference/apis/spaces-api/v1_9
+[sharedbackup]: /reference/apis/spaces-api/latest
+[backup]: /reference/apis/spaces-api/latest
+[spaces-api-documentation-1]: /reference/apis/spaces-api/v1_9
+
diff --git a/spaces_versioned_docs/version-v1.11/howtos/cloud-spaces/_category_.json b/spaces_versioned_docs/version-v1.11/howtos/cloud-spaces/_category_.json
new file mode 100644
index 000000000..1e1869a38
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.11/howtos/cloud-spaces/_category_.json
@@ -0,0 +1,10 @@
+{
+  "label": "Cloud Spaces",
+  "position": 1,
+  "collapsed": true,
+  "customProps": {
+    "plan": "standard"
+  }
+}
+
diff --git a/spaces_versioned_docs/version-v1.11/howtos/cloud-spaces/dedicated-spaces-deployment.md b/spaces_versioned_docs/version-v1.11/howtos/cloud-spaces/dedicated-spaces-deployment.md
new file mode 100644
index 000000000..ebad9493e
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.11/howtos/cloud-spaces/dedicated-spaces-deployment.md
@@ -0,0 +1,33 @@
+---
+title: Dedicated Spaces
+sidebar_position: 4
+description: A guide to Upbound Dedicated Spaces
+plan: business
+---
+
+## Benefits
+
+Dedicated Spaces offer the following benefits:
+
+- **Single-tenancy.** A control plane space where Upbound guarantees you're the only tenant operating in the environment.
+- **Connectivity to your private network.** Establish secure network connections between your Dedicated Cloud Space running in Upbound and your own resources behind your private network.
+- **Reduced Overhead.** Offload day-to-day operational burdens to Upbound while focusing on your job of building your platform.
+
+## Architecture
+
+A Dedicated Space is a deployment of the Upbound Spaces software inside an
+Upbound-controlled cloud account and network. The control planes you run in a
+Dedicated Space are isolated to your organization alone.
+
+The diagram below illustrates the high-level architecture of Upbound Dedicated Spaces:
+
+![Upbound Managed Spaces arch](/img/managed-arch-gcp.png)
+
+## How to get access to Dedicated Spaces
+
+If you have an interest in Upbound Dedicated Spaces, contact
+[Upbound][contact-us]. We can chat more about your
+requirements and see if Dedicated Spaces are a good fit for you.
+ +[contact-us]: https://www.upbound.io/contact-us +[managed-space]: /spaces/howtos/self-hosted/managed-spaces-deployment diff --git a/spaces_versioned_docs/version-v1.11/howtos/cloud-spaces/gitops-on-upbound.md b/spaces_versioned_docs/version-v1.11/howtos/cloud-spaces/gitops-on-upbound.md new file mode 100644 index 000000000..fa59a8dce --- /dev/null +++ b/spaces_versioned_docs/version-v1.11/howtos/cloud-spaces/gitops-on-upbound.md @@ -0,0 +1,318 @@ +--- +title: GitOps with Upbound Control Planes +sidebar_position: 80 +description: An introduction to doing GitOps with control planes on Upbound Cloud Spaces +tier: "business" +--- + +:::info Deployment Model +This guide applies to **Upbound Cloud Spaces** (Dedicated and Managed Spaces). For self-hosted Spaces deployments, see [GitOps with ArgoCD in Self-Hosted Spaces](/spaces/howtos/self-hosted/gitops-with-argocd/). +::: + +GitOps is an approach for managing a system by declaratively describing desired resources' configurations in Git and using controllers to realize the desired state. Upbound's control planes are compatible with this pattern and it's strongly recommended you integrate GitOps in the platforms you build on Upbound. + + +## Integrate with Argo CD + + +[Argo CD][argo-cd] is a project in the Kubernetes ecosystem commonly used for GitOps. You can use it in tandem with Upbound control planes to achieve GitOps flows. The sections below explain how to integrate these tools with Upbound. + +### Generate a kubeconfig for your control plane + +Use the up CLI to [generate a kubeconfig][generate-a-kubeconfig] for your control plane. + +```bash +up ctx /// -f - > context.yaml +``` + +### Create an API token + + +You need a personal access token (PAT). You create PATs on a per-user basis in the Upbound Console. Go to [My Account - API tokens][my-account-api-tokens] and select Create New Token. Give the token a name and save the secret value to somewhere safe. + + +### Add the up CLI init container to Argo + +Create a new file called `up-plugin-values.yaml` and paste the following YAML: + +```yaml +controller: + volumes: + - name: up-plugin + emptyDir: {} + - name: up-home + emptyDir: {} + + volumeMounts: + - name: up-plugin + mountPath: /usr/local/bin/up + subPath: up + - name: up-home + mountPath: /home/argocd/.up + + initContainers: + - name: up-plugin + image: xpkg.upbound.io/upbound/up-cli:v0.39.0 + command: ["cp"] + args: + - /usr/local/bin/up + - /plugin/up + volumeMounts: + - name: up-plugin + mountPath: /plugin + +server: + volumes: + - name: up-plugin + emptyDir: {} + - name: up-home + emptyDir: {} + + volumeMounts: + - name: up-plugin + mountPath: /usr/local/bin/up + subPath: up + - name: up-home + mountPath: /home/argocd/.up + + initContainers: + - name: up-plugin + image: xpkg.upbound.io/upbound/up-cli:v0.39.0 + command: ["cp"] + args: + - /usr/local/bin/up + - /plugin/up + volumeMounts: + - name: up-plugin + mountPath: /plugin +``` + +### Install or upgrade Argo using the values file + +Install or upgrade Argo via Helm, including the values from the `up-plugin-values.yaml` file: + +```bash +helm upgrade --install -n argocd -f up-plugin-values.yaml --reuse-values argocd argo/argo-cd +``` + + +### Configure Argo CD + + +To configure Argo CD for Annotation resource tracking, edit the Argo CD ConfigMap in the Argo CD namespace. +Add `application.resourceTrackingMethod: annotation` to the data section as below. +This configuration turns off Argo CD auto pruning, preventing the deletion of Crossplane resources. 
+
+Next, configure the [auto respect RBAC for the Argo CD controller][auto-respect-rbac-for-the-argo-cd-controller].
+By default, Argo CD attempts to discover some Kubernetes resource types that don't exist in a control plane.
+You must configure Argo CD to respect the cluster's RBAC rules so that Argo CD can sync.
+Add `resource.respectRBAC: normal` to the data section as below.
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: argocd-cm
+data:
+  ...
+  application.resourceTrackingMethod: annotation
+  resource.respectRBAC: normal
+```
+
+:::tip
+The `resource.respectRBAC` configuration above tells Argo to respect RBAC for _all_ cluster contexts. If you're using an Argo CD instance to manage more than only control planes, you should consider changing the `clusters` string match for the configuration to apply only to control planes. For example, if every control plane context name followed the convention of being named `controlplane-`, you could set the string match to be `controlplane-*`
+:::
+
+### Create a cluster context definition
+
+Replace the variables and run the following script to configure a new Argo cluster context definition.
+
+To configure Argo for a control plane in a Connected Space, replace `stringData.server` with the ingress URL of the control plane. This URL is what `up ctx` outputs.
+
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  name: my-control-plane
+  namespace: argocd
+  labels:
+    argocd.argoproj.io/secret-type: cluster
+type: Opaque
+stringData:
+  name: my-control-plane-context
+  server: https://.spaces.upbound.io/apis/spaces.upbound.io/v1beta1/namespaces//controlplanes//k8s
+  config: |
+    {
+      "execProviderConfig": {
+        "apiVersion": "client.authentication.k8s.io/v1",
+        "command": "up",
+        "args": [ "org", "token" ],
+        "env": {
+          "ORGANIZATION": "",
+          "UP_TOKEN": ""
+        }
+      },
+      "tlsClientConfig": {
+        "insecure": false,
+        "caData": ""
+      }
+    }
+```
+
+## GitOps for Upbound resources
+
+Like any other cloud service, you can drive the lifecycle of Upbound Cloud resources with Crossplane. This lets you establish GitOps flows to declaratively create and manage:
+
+- [control plane groups][control-plane-groups]
+- [control planes][control-planes]
+- [Upbound IAM resources][upbound-iam-resources]
+
+Use a control plane installed with [provider-upbound][provider-upbound] and [provider-kubernetes][provider-kubernetes] to achieve this.
+
+### Provider-upbound
+
+[Provider-upbound][provider-upbound-2] is a Crossplane provider built by Upbound to interact with Upbound resources. Use _provider-upbound_ to declaratively create and manage the lifecycle of IAM resources and repositories:
+
+- [Robots][robots] and their membership to teams
+- [Teams][teams]
+- [Repositories][repositories] and [permissions][permissions] on those repositories.
+
+:::tip
+This provider defines managed resources for control planes, their auth, and permissions. These resources are only applicable to customers who run in Upbound's **Legacy Spaces** control plane hosting environments. Customers should use provider-kubernetes, explained below, to manage the lifecycle of control planes with Crossplane.
+:::
+
+### Provider-kubernetes
+
+[Provider-kubernetes][provider-kubernetes-3] is a Crossplane provider that defines an [Object][object] resource. Use _Objects_ as general-purpose resources to wrap _any_ Kubernetes resource for Crossplane to manage.
+
+Upbound [Space APIs][space-apis] are Kube-like APIs and have implemented support for most Kubernetes-style API concepts.
You can use kubectl or any other Kubernetes-compatible tooling to interact with the API. This means you can use _provider-kubernetes_ to drive interactions with Space APIs.
+
+:::warning
+When interacting with a Cloud Space's API, the Kubernetes [watch][watch] feature **isn't implemented.** Argo CD requires _watch_ support to function as expected, meaning you can't point Argo directly at a Cloud Space until it's implemented.
+:::
+
+Use _provider-kubernetes_ to declaratively drive interactions with all [Space APIs][space-apis-1]. Wrap the desired API resource in an _Object_. See the example below for a control plane:
+
+```yaml
+apiVersion: kubernetes.crossplane.io/v1alpha2
+kind: Object
+metadata:
+  name: my-controlplane
+spec:
+  forProvider:
+    manifest:
+      apiVersion: spaces.upbound.io/v1beta1
+      kind: ControlPlane
+      metadata:
+        name: my-controlplane
+        namespace: default
+      spec:
+        crossplane:
+          autoUpgrade:
+            channel: Rapid
+```
+
+[Control plane groups][control-plane-groups-2] are a special case because they technically map to an underlying Kubernetes namespace. You should create a `kind: Namespace` with the `spaces.upbound.io/group` label to create a control plane group in a Space. See the example below:
+
+```yaml
+apiVersion: kubernetes.crossplane.io/v1alpha2
+kind: Object
+metadata:
+  name: group1
+spec:
+  forProvider:
+    manifest:
+      apiVersion: v1
+      kind: Namespace
+      metadata:
+        name: group1
+        labels:
+          spaces.upbound.io/group: "true"
+      spec: {}
+```
+
+### Configure auth for provider-kubernetes
+
+Like any other Crossplane provider, _provider-kubernetes_ requires a valid [ProviderConfig][providerconfig] to authenticate with Upbound before interacting with its APIs. Follow the steps below to configure auth for a ProviderConfig on a control plane that you want to use to interact with Upbound resources.
+
+1. Define an environment variable for the name of your Upbound org account. Use `up org list` to retrieve this value.
+```shell
+export UPBOUND_ACCOUNT=""
+```
+
+2. Create a [personal access token][personal-access-token] and store it as an environment variable.
+```shell
+export UPBOUND_TOKEN=""
+```
+
+3. Log in to Upbound.
+```shell
+up login
+```
+
+4. Create a kubeconfig for the desired Cloud Space instance you want to interact with.
+```shell
+export CONTROLPLANE_CONFIG=/tmp/controlplane-kubeconfig
+KUBECONFIG=$CONTROLPLANE_CONFIG up ctx $UPBOUND_ACCOUNT/upbound-gcp-us-west-1 # Replace this path with whichever Cloud Space you want to communicate with.
+```
+
+5. On the control plane you want to use to interact with Upbound resources, create a secret containing the credentials:
+```shell
+kubectl -n crossplane-system create secret generic cluster-config --from-file=kubeconfig=$CONTROLPLANE_CONFIG
+kubectl -n crossplane-system create secret generic upbound-credentials --from-literal=token=$UPBOUND_TOKEN
+```
+
+6. Create a ProviderConfig that references the credentials created in the prior step. Create this resource in your control plane:
+```yaml
+apiVersion: kubernetes.crossplane.io/v1alpha1
+kind: ProviderConfig
+metadata:
+  name: default
+spec:
+  credentials:
+    source: Secret
+    secretRef:
+      namespace: crossplane-system
+      name: cluster-config
+      key: kubeconfig
+  identity:
+    type: UpboundTokens
+    source: Secret
+    secretRef:
+      name: upbound-credentials
+      namespace: crossplane-system
+      key: token
+```
+
+You can now create _Objects_ in the control plane which wrap Space APIs.
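+
+To sanity-check the setup, confirm the ProviderConfig exists and watch the wrapped resources reconcile; the resource names below follow the examples above:
+
+```shell
+# Confirm the ProviderConfig is present on the control plane
+kubectl get providerconfigs.kubernetes.crossplane.io default
+
+# Watch wrapped Space API resources reconcile
+kubectl get objects.kubernetes.crossplane.io -w
+```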
+
+[generate-a-kubeconfig]: /manuals/cli/concepts/contexts
+[control-plane-groups]: /spaces/concepts/groups
+[control-planes]: /spaces/concepts/control-planes
+[upbound-iam-resources]: /manuals/platform/concepts/identity-management
+[space-apis]: /reference/apis/spaces-api/v1_9
+[space-apis-1]: /reference/apis/spaces-api/v1_9
+[control-plane-groups-2]: /spaces/concepts/groups
+
+
+[argo-cd]: https://argo-cd.readthedocs.io/en/stable/
+[my-account-api-tokens]: https://accounts.upbound.io/settings/tokens
+[auto-respect-rbac-for-the-argo-cd-controller]: https://argo-cd.readthedocs.io/en/stable/operator-manual/declarative-setup/#auto-respect-rbac-for-controller
+[spec-writeconnectionsecrettoref]: /reference/apis/spaces-api/latest
+[auto-respect-rbac-for-the-argo-cd-controller-1]: https://argo-cd.readthedocs.io/en/stable/operator-manual/declarative-setup/#auto-respect-rbac-for-controller
+[provider-upbound]: https://marketplace.upbound.io/providers/upbound/provider-upbound
+[provider-kubernetes]: https://marketplace.upbound.io/providers/upbound/provider-kubernetes
+[provider-upbound-2]: https://marketplace.upbound.io/providers/upbound/provider-upbound
+[robots]: https://marketplace.upbound.io/providers/upbound/provider-upbound/v0.8.0/resources/iam.upbound.io/Robot/v1alpha1
+[teams]: https://marketplace.upbound.io/providers/upbound/provider-upbound/v0.8.0/resources/iam.upbound.io/Team/v1alpha1
+[repositories]: https://marketplace.upbound.io/providers/upbound/provider-upbound/v0.8.0/resources/repository.upbound.io/Repository/v1alpha1
+[permissions]: https://marketplace.upbound.io/providers/upbound/provider-upbound/v0.8.0/resources/repository.upbound.io/Permission/v1alpha1
+[provider-kubernetes-3]: https://marketplace.upbound.io/providers/upbound/provider-kubernetes
+[object]: https://marketplace.upbound.io/providers/upbound/provider-kubernetes/v0.17.0/resources/kubernetes.crossplane.io/Object/v1alpha2
+[watch]: https://kubernetes.io/docs/reference/using-api/api-concepts/#watch-bookmarks
+[providerconfig]: https://marketplace.upbound.io/providers/upbound/provider-kubernetes/v0.17.0/resources/kubernetes.crossplane.io/ProviderConfig/v1alpha1
+[personal-access-token]: https://accounts.upbound.io/settings/tokens
diff --git a/spaces_versioned_docs/version-v1.11/howtos/control-plane-topologies.md b/spaces_versioned_docs/version-v1.11/howtos/control-plane-topologies.md
new file mode 100644
index 000000000..9020e5a41
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.11/howtos/control-plane-topologies.md
@@ -0,0 +1,566 @@
+---
+title: Control Plane Topologies
+sidebar_position: 15
+description: Configure scheduling of composites to remote control planes
+---
+
+:::info API Version Information
+This guide is for the Control Plane Topology feature, which is in **private preview**. For interested customers with access to this feature, it applies to v1.12+.
+
+For related API specifications, see the [Spaces API Reference](../../../reference/apis/spaces-api/).
+:::
+
+:::important
+This feature is in private preview for select customers in Upbound Spaces. If you're interested in this deployment mode, please [contact us](https://www.upbound.io/support/contact).
+:::
+
+Upbound's _Control Plane Topology_ feature lets you build and deploy a platform
+of multiple control planes. These control planes work together for a unified platform
+experience.
+
+
+With the _Topology_ feature, you can install resource APIs that are
+reconciled by other control planes and configure the routing that occurs between
+control planes. You can also build compositions that reference other resources
+running on your control plane or elsewhere in Upbound.
+
+This guide explains how to use Control Plane Topology APIs to install and
+configure remote APIs, and how to build compositions that reference other resources.
+
+## Benefits
+
+The Control Plane Topology feature provides the following benefits:
+
+* Decouple your platform architecture into independent offerings to improve your platform's software development lifecycle.
+* Install composite APIs from Configurations as CRDs which are fulfilled and reconciled by other control planes.
+* Route APIs to other control planes by configuring an _Environment_ resource, which defines a set of routable dimensions.
+
+## How it works
+
+
+Imagine the scenario where you want to let a user reference a subnet when creating a database instance. To your control plane, the `kind: database` and `kind: subnet` are independent resources. To you as the composition author, these resources have an important relationship. It may be that:
+
+- you don't want your user to ever be able to create a database without specifying a subnet.
+- you want to let them create a subnet when they create the database, if it doesn't exist.
+- you want to allow them to reuse a subnet that got created elsewhere or gets shared by another user.
+
+In each of these scenarios, you must resort to writing complex composition logic
+to handle each case. The problem is compounded when the resource exists in a
+context separate from the current control plane's context. Imagine a scenario
+where one control plane manages Database resources and a second control plane
+manages networking resources. With the _Topology_ feature, you can offload these
+concerns to Upbound machinery.
+
+
+![Control Plane Topology feature arch](/img/topology-arch.png)
+
+## Prerequisites
+
+Enable the Control Plane Topology feature in the Space you plan to run your control plane in:
+
+- Cloud Spaces: Not available yet
+- Connected Spaces: Space administrator must enable this feature
+- Disconnected Spaces: Space administrator must enable this feature
+
+
+
+## Compose resources with _ReferencedObjects_
+
+
+
+_ReferencedObject_ is a resource type available in an Upbound control plane that lets you reference other Kubernetes resources in Upbound.
+
+:::tip
+This feature is useful for composing resources that exist in a
+remote context, like another control plane. You can also use
+_ReferencedObjects_ to resolve references to any other Kubernetes object
+in the current control plane context. This could be a secret, another Crossplane
+resource, or more.
+:::
+
+### Declare the resource reference in your XRD
+
+To compose a _ReferencedObject_, you should start by adding a resource reference
+in your Composite Resource Definition (XRD). The convention for the resource
+reference follows the shape shown below:
+
+```yaml
+Ref:
+  type: object
+  properties:
+    apiVersion:
+      type: string
+      default: ""
+      enum: [ "" ]
+    kind:
+      type: string
+      default: ""
+      enum: [ "" ]
+    grants:
+      type: array
+      default: [ "Observe" ]
+      items:
+        type: string
+        enum: [ "Observe", "Create", "Update", "Delete", "*" ]
+    name:
+      type: string
+    namespace:
+      type: string
+  required:
+    - name
+```
+
+The `Ref` should be the kind of resource you want to reference.
The `apiVersion` and `kind` should be the associated API version and kind of the resource you want to reference.
+
+The `name` and `namespace` strings are inputs that let your users specify the resource instance.
+
+#### Grants
+
+The `grants` field is a special array that lets you give users the power to influence the behavior of the referenced resource. You can configure which of the available grants you let your user select and which one it defaults to. Similar in behavior to [Crossplane management policies][crossplane-management-policies], each grant value does the following:
+
+- **Observe:** The composite may observe the state of the referenced resource.
+- **Create:** The composite may create the referenced resource if it doesn't exist.
+- **Update:** The composite may update the referenced resource.
+- **Delete:** The composite may delete the referenced resource.
+- **\*:** The composite has full control over the referenced resource.
+
+Here are some examples that show how it looks in practice:
+
+
+ +Show example for defining the reference to another composite resource + +```yaml +apiVersion: apiextensions.crossplane.io/v1 +kind: CompositeResourceDefinition +metadata: + name: xsqlinstances.database.platform.upbound.io +spec: + type: object + properties: + parameters: + type: object + properties: + networkRef: + type: object + properties: + apiVersion: + type: string + default: "networking.platform.upbound.io" + enum: [ "networking.platform.upbound.io" ] + grants: + type: array + default: [ "Observe" ] + items: + type: string + enum: [ "Observe" ] + kind: + type: string + default: "Network" + enum: [ "Network" ] + name: + type: string + namespace: + type: string + required: + - name +``` + +
+ + +
+Show example for defining the reference to a secret +```yaml +apiVersion: apiextensions.crossplane.io/v1 +kind: CompositeResourceDefinition +metadata: + name: xsqlinstances.database.platform.upbound.io +spec: + type: object + properties: + parameters: + type: object + properties: + secretRef: + type: object + properties: + apiVersion: + type: string + default: "v1" + enum: [ "v1" ] + grants: + type: array + default: [ "Observe" ] + items: + type: string + enum: [ "Observe", "Create", "Update", "Delete", "*" ] + kind: + type: string + default: "Secret" + enum: [ "Secret" ] + name: + type: string + namespace: + type: string + required: + - name +``` +
+ +### Manually add the jsonPath + +:::important +This step is a known limitation of the preview. We're working on tooling that +removes the need for authors to do this step. +::: + +During the preview timeframe of this feature, you must add an annotation by hand +to the XRD. In your XRD's `metadata.annotations`, set the +`references.upbound.io/schema` annotation. It should be a JSON string in the +following format: + +```json +{ + "apiVersion": "references.upbound.io/v1alpha1", + "kind": "ReferenceSchema", + "references": [ + { + "jsonPath": ".spec.parameters.secretRef", + "kinds": [ + { + "apiVersion": "v1", + "kind": "Secret" + } + ] + } + ] +} +``` + +Flatten this JSON into a string and set the annotation on your XRD. View the +example below for an illustration: + +
+Show example setting the references.upbound.io/schema annotation +```yaml +apiVersion: apiextensions.crossplane.io/v1 +kind: CompositeResourceDefinition +metadata: + name: xthings.networking.acme.com + annotations: + references.upbound.io/schema: '{"apiVersion":"references.upbound.io/v1alpha1","kind":"ReferenceSchema","references":[{"jsonPath":".spec.secretRef","kinds":[{"apiVersion":"v1","kind":"Secret"}]},{"jsonPath":".spec.configMapRef","kinds":[{"apiVersion":"v1","kind":"ConfigMap"}]}]}' +``` +
+ +
+
+Show example for setting multiple references in the references.upbound.io/schema annotation
+```yaml
+apiVersion: apiextensions.crossplane.io/v1
+kind: CompositeResourceDefinition
+metadata:
+  name: xthings.networking.acme.com
+  annotations:
+    references.upbound.io/schema: '{"apiVersion":"references.upbound.io/v1alpha1","kind":"ReferenceSchema","references":[{"jsonPath":".spec.parameters.secretRef","kinds":[{"apiVersion":"v1","kind":"Secret"}]},{"jsonPath":".spec.parameters.configMapRef","kinds":[{"apiVersion":"v1","kind":"ConfigMap"}]}]}'
+```
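+
+You can also flatten the JSON from the terminal with `jq`. A small sketch, assuming the schema is saved in a hypothetical `reference-schema.json` file:
+
+```bash
+# Compact the ReferenceSchema JSON into the single-line string the annotation expects
+jq -c . reference-schema.json
+```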
+ + +You can use a VSCode extension like [vscode-pretty-json][vscode-pretty-json] to make this task easier. + + +### Compose a _ReferencedObject_ + +To pair with the resource reference declared in your XRD, you must compose the referenced resource. Use the _ReferencedObject_ resource type to bring the resource into your composition. _ReferencedObject_ has the following schema: + +```yaml +apiVersion: references.upbound.io/v1alpha1 +kind: ReferencedObject +spec: + managementPolicies: + - Observe + deletionPolicy: Orphan + composite: + apiVersion: + kind: + name: + jsonPath: .spec.parameters.secretRef +``` + +The `spec.composite.apiVersion` and `spec.composite.kind` should match the API version and kind of the `compositeTypeRef` declared in your composition. The `spec.composite.name` should be the name of the composite resource instance. + +The `spec.composite.jsonPath` should be the path to the root of the resource ref you declared in your XRD. + +
+Show example for composing a resource reference to a secret + +```yaml +apiVersion: apiextensions.crossplane.io/v1 +kind: Composition +metadata: + name: demo-composition +spec: + compositeTypeRef: + apiVersion: networking.acme.com/v1alpha1 + kind: XThing + mode: Pipeline + pipeline: + - step: patch-and-transform + functionRef: + name: crossplane-contrib-function-patch-and-transform + input: + apiVersion: pt.fn.crossplane.io/v1beta1 + kind: Resources + resources: + - name: secret-ref-object + base: + apiVersion: references.upbound.io/v1alpha1 + kind: ReferencedObject + spec: + managementPolicies: + - Observe + deletionPolicy: Orphan + composite: + apiVersion: networking.acme.com/v1alpha1 + kind: XThing + name: TO_BE_PATCHED + jsonPath: .spec.parameters.secretRef + patches: + - type: FromCompositeFieldPath + fromFieldPath: metadata.name + toFieldPath: spec.composite.name +``` +
+
+By declaring a resource reference in your XRD, Upbound handles resolution of the desired resource.
+
+## Deploy APIs
+
+To configure routing resource requests between control planes, you need to deploy APIs in at least two control planes.
+
+### Deploy into a service-level control plane
+
+Package the APIs you build into a Configuration package and deploy it on a
+control plane in an Upbound Space. In Upbound, it's common to refer to the
+control plane where the Configuration package is deployed as a **service-level
+control plane**. This control plane runs the controllers that process the API
+requests and provision underlying resources. In a later section, you learn how
+you can use _Topology_ features to [configure routing][configure-routing].
+
+### Deploy as Remote APIs on a platform control plane
+
+You should use the same package source as deployed in the **service-level
+control planes**, but this time deploy the Configuration in a separate control
+plane as a _RemoteConfiguration_. The _RemoteConfiguration_ installs Kubernetes
+CustomResourceDefinitions for the APIs defined in the Configuration package, but
+no controllers get deployed.
+
+### Install a _RemoteConfiguration_
+
+_RemoteConfiguration_ is a resource type available in Upbound managed control
+planes that acts like a special kind of Crossplane [Configuration][configuration]
+package. Unlike standard Crossplane Configurations, which install XRDs,
+compositions, and functions into a desired control plane, _RemoteConfigurations_
+install only the CRDs for claimable composite resource types.
+
+#### Install directly
+
+Install a _RemoteConfiguration_ by defining the following and applying it to
+your control plane:
+
+```yaml
+apiVersion: pkg.upbound.io/v1alpha1
+kind: RemoteConfiguration
+metadata:
+  name: 
+spec:
+  package: 
+```
+
+#### Declare as a project dependency
+
+You can declare _RemoteConfigurations_ as dependencies in your control plane's
+[project file][project-file]. Use the up CLI to add the dependency, providing
+the `--remote` flag:
+
+```bash
+up dep add --remote
+```
+
+This command adds a declaration in the `spec.apiDependencies` stanza of your
+project's `upbound.yaml` as demonstrated below:
+
+```yaml
+apiVersion: meta.dev.upbound.io/v1alpha1
+kind: Project
+metadata:
+  name: service-controlplane
+spec:
+  apiDependencies:
+    - configuration: xpkg.upbound.io/upbound/remote-configuration
+      version: '>=v0.0.0'
+  dependsOn:
+    - provider: xpkg.upbound.io/upbound/provider-kubernetes
+      version: '>=v0.0.0'
+```
+
+Like a Configuration, a _RemoteConfigurationRevision_ gets created when the
+package gets installed on a control plane. Unlike Configurations, XRDs and
+compositions **don't** get installed by a _RemoteConfiguration_. Only the CRDs
+for claimable composite types get installed and Crossplane thereafter manages
+their lifecycle. You can tell when a CRD gets installed by a
+_RemoteConfiguration_ because it has the `internal.scheduling.upbound.io/remote:
+true` label:
+
+```yaml
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  name: things.networking.acme.com
+  labels:
+    internal.scheduling.upbound.io/remote: "true"
+```
+
+## Use an _Environment_ to route resources
+
+_Environment_ is a resource type available in Upbound control planes that works
+in tandem with resources installed by _RemoteConfigurations_. _Environment_ is a
+namespace-scoped resource that lets you configure how to route remote resources
+to other control planes by a set of user-defined dimensions.
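+
+Before configuring routing, you can confirm which APIs a _RemoteConfiguration_ installed by filtering CRDs on the label shown above; a quick check with standard kubectl:
+
+```bash
+# List only the CRDs that a RemoteConfiguration installed
+kubectl get crds -l internal.scheduling.upbound.io/remote=true
+```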
+
+### Define a routing dimension
+
+To establish a routing dimension between two control planes, you must do two
+things:
+
+1. Label the service control plane with the name and value of a dimension.
+2. Configure an environment on another control plane with a dimension matching the field and value of the service control plane.
+
+The example below demonstrates the creation of a service control plane with a
+`region` dimension:
+
+```yaml
+apiVersion: spaces.upbound.io/v1beta1
+kind: ControlPlane
+metadata:
+  labels:
+    dimension.scheduling.upbound.io/region: "us-east-1"
+  name: prod-1
+  namespace: default
+spec:
+```
+
+Upbound's Spaces controller keeps an inventory of all declared dimensions and
+listens for control planes to route to them.
+
+### Create an _Environment_
+
+Next, create an _Environment_ on a separate control plane, referencing the
+dimension from before. The example below demonstrates routing all remote
+resource requests in the `default` namespace of the control plane based on a
+single `region` dimension:
+
+```yaml
+apiVersion: scheduling.upbound.io/v1alpha1
+kind: Environment
+metadata:
+  name: default
+  namespace: default
+spec:
+  dimensions:
+    region: us-east-1
+```
+
+You can specify as many dimensions as you want. The example below demonstrates
+multiple dimensions:
+
+```yaml
+apiVersion: scheduling.upbound.io/v1alpha1
+kind: Environment
+metadata:
+  name: default
+  namespace: default
+spec:
+  dimensions:
+    region: us-east-1
+    env: prod
+    offering: databases
+```
+
+For the routing controller to select a given service control plane, _all_
+dimensions must match.
+
+You can specify dimension overrides on a per-resource group basis. This lets you
+configure default routing rules for a given _Environment_ and override routing
+on a per-offering basis.
+
+```yaml
+apiVersion: scheduling.upbound.io/v1alpha1
+kind: Environment
+metadata:
+  name: default
+  namespace: default
+spec:
+  dimensions:
+    region: us-east-1
+  resourceGroups:
+    - name: database.platform.upbound.io # database
+      dimensions:
+        region: "us-east-1"
+        env: "prod"
+        offering: "databases"
+    - name: networking.platform.upbound.io # networks
+      dimensions:
+        region: "us-east-1"
+        env: "prod"
+        offering: "networks"
+```
+
+### Confirm the configured route
+
+After you create an _Environment_ on a control plane, the routes selected get
+reported in the _Environment's_ `.status.resourceGroups`. This is illustrated
+below:
+
+```yaml
+apiVersion: scheduling.upbound.io/v1alpha1
+kind: Environment
+metadata:
+  name: default
+...
+status:
+  resourceGroups:
+    - name: database.platform.upbound.io # database
+      proposed:
+        controlPlane: ctp-1
+        group: default
+        space: upbound-gcp-us-central1
+        dimensions:
+          region: "us-east-1"
+          env: "prod"
+          offering: "databases"
+```
+
+If you don't see a response in the `.status.resourceGroups`, this indicates a
+match wasn't found or an error occurred establishing routing.
+
+:::tip
+There's no limit to the number of control planes you can route to. You can also
+stack routing and form your own topology of control planes, with multiple layers
+of routing.
+:::
+
+### Limitations
+
+
+Routing from one control plane to another is currently scoped to control planes
+that exist in a single Space. You can't route resource requests to control
+planes that exist on a cross-Space boundary.
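+
+As you iterate, you can also read the routing decision back from the CLI. A quick check, assuming the `default` _Environment_ from the examples above:
+
+```bash
+# Print the Environment, including the proposed routes under .status.resourceGroups
+kubectl get environments.scheduling.upbound.io default -n default -o yaml
+```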
+
+
+[project-file]: /manuals/cli/howtos/project
+[contact-us]: https://www.upbound.io/usage/support/contact
+[crossplane-management-policies]: https://docs.crossplane.io/latest/managed-resources/managed-resources/#managementpolicies
+[vscode-pretty-json]: https://marketplace.visualstudio.com/items?itemName=chrismeyers.vscode-pretty-json
+[configure-routing]: #use-an-environment-to-route-resources
+[configuration]: https://docs.crossplane.io/latest/packages/providers
diff --git a/spaces_versioned_docs/version-v1.11/howtos/ctp-connector.md b/spaces_versioned_docs/version-v1.11/howtos/ctp-connector.md
new file mode 100644
index 000000000..b2cc48c49
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.11/howtos/ctp-connector.md
@@ -0,0 +1,508 @@
+---
+title: Control Plane Connector
+weight: 80
+description: A guide for how to connect a Kubernetes app cluster to a control plane in Upbound using the Control Plane connector feature
+plan: "standard"
+---
+
+
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+). Control Plane Connector functionality is consistent across versions.
+
+For related API specifications and resources, see the [Spaces API Reference](../../../reference/apis/spaces-api/).
+:::
+
+Control Plane Connector connects arbitrary Kubernetes application clusters outside of
+Upbound Spaces to your control planes running in Upbound Spaces.
+This lets you interact with your control plane's API from the app cluster. The claim APIs and the namespaced XR APIs
+you define via CompositeResourceDefinitions (XRDs) in the control plane are available in
+your app cluster alongside Kubernetes workload APIs like Pod. Control Plane Connector
+enables the same experience as a locally installed Crossplane.
+
+![control plane connector operations flow](/img/ConnectorFlow.png)
+
+### Control Plane Connector operations
+
+Control Plane Connector leverages the [Kubernetes API AggregationLayer][kubernetes-api-aggregationlayer]
+to create an extension API server and serve the claim APIs and the namespaced XR APIs in the control plane. It
+discovers the claim APIs and the namespaced XR APIs available in the control plane and registers corresponding
+APIService resources on the app cluster. Those APIService resources refer to the
+extension API server of Control Plane Connector.
+
+The claim APIs and the namespaced XR APIs are available in your Kubernetes cluster, just like all native
+Kubernetes APIs.
+
+The Control Plane Connector processes every request targeting the claim APIs and the namespaced XR APIs and makes the
+relevant requests to the connected control plane.
+
+Only the connected control plane stores and processes all claims and namespaced XRs created in the app
+cluster, eliminating any storage use at the application cluster. The control plane
+connector provisions a target namespace at the control plane for the app cluster and stores
+all claims and namespaced XRs in this target namespace.
+
+For managing the claims and namespaced XRs, the Control Plane Connector creates a unique identifier for a
+resource by combining input parameters from claims, including:
+- `metadata.name`
+- `metadata.namespace`
+- `your cluster name`
+
+
+It employs SHA-256 hashing to generate a hash value and then extracts the first
+16 characters of that hash. This ensures the resulting identifier remains within
+the 64-character limit in Kubernetes.
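+
+You can reproduce this derivation with standard shell tooling. A small sketch, assuming the `-x-` separator format shown in the example that follows:
+
+```bash
+# Derive the 16-character suffix the connector appends to claim names
+CLAIM_NAME=my-bucket
+CLAIM_NAMESPACE=test
+CLUSTER_ID=00000000-0000-0000-0000-000000000000
+echo -n "${CLAIM_NAME}-x-${CLAIM_NAMESPACE}-x-${CLUSTER_ID}" | sha256sum | cut -c1-16
+```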
+
+
+For instance, if a claim named `my-bucket` exists in the test namespace in
+`cluster-dev`, the system calculates the SHA-256 hash from
+`my-bucket-x-test-x-00000000-0000-0000-0000-000000000000` and takes the first 16
+characters. The control plane side then names the claim `claim-c603e518969b413e`.
+
+For namespaced XRs, the process is similar, only the prefix is different.
+The name becomes `nxr-c603e518969b413e`.
+
+
+### Installation
+
+
+
+
+
+Log in with the up CLI:
+
+```bash
+up login
+```
+
+Connect your app cluster to a namespace in an Upbound control plane with `up controlplane connector install `. This command creates a user token and installs the Control Plane Connector to your cluster. It's recommended you create a values file called `connector-values.yaml` and provide the values below. Select the tab according to which environment your control plane is running in.
+
+
+
+
+
+
+```yaml
+upbound:
+  # This is your org account in Upbound e.g. the name displayed after executing `up org list`
+  account: 
+  # This is a personal access token generated in the Upbound Console
+  token: 
+
+spaces:
+  # Use this host if your control plane runs in Upbound's GCP Cloud Space; otherwise use upbound-aws-us-east-1.spaces.upbound.io
+  host: "upbound-gcp-us-west-1.spaces.upbound.io"
+  insecureSkipTLSVerify: true
+  controlPlane:
+    # The name of the control plane you want the Connector to attach to
+    name: 
+    # The control plane group the control plane resides in
+    group: 
+    # The namespace within the control plane to sync claims from the app cluster to. NOTE: This must be created before you install the connector.
+    claimNamespace: 
+```
+
+
+
+
+
+1. Create a [kubeconfig][kubeconfig] for the control plane. Update your Upbound context to the path for your desired control plane.
+```bash
+up login
+up ctx /upbound-gcp-us-central-1/default/your-control-plane
+up ctx . -f - > context.yaml
+```
+
+2. Write it to a secret in the cluster where you plan to
+install the Control Plane Connector to.
+```bash
+kubectl create secret generic my-controlplane-kubeconfig --from-file=context.yaml
+```
+
+3. Reference this secret in the
+`spaces.controlPlane.kubeconfigSecret` field below.
+
+```yaml
+spaces:
+  controlPlane:
+    # The namespace within the control plane to sync claims from the app cluster to. NOTE: This must be created before you install the connector.
+    claimNamespace: 
+    kubeconfigSecret:
+      name: my-controlplane-kubeconfig
+      key: kubeconfig
+```
+
+
+
+
+
+
+Provide the values file above when you run the CLI command:
+
+
+```bash {copy-lines="3"}
+up controlplane connector install my-control-plane my-app-ns-1 --file=connector-values.yaml
+```
+
+The Claim APIs and the namespaced XR APIs from your control plane are now visible in the cluster.
+You can verify this with `kubectl api-resources`.
+
+```bash
+kubectl api-resources
+```
+
+### Uninstall
+
+Disconnect an app cluster that you previously installed the Control Plane Connector on by
+running the following:
+
+```bash
+up ctp connector uninstall 
+```
+
+This command uninstalls the helm chart for the Control Plane Connector from an app
+cluster. It moves any claims in the app cluster into the control plane
+at the specified namespace.
+
+:::tip
+Make sure your kubeconfig's current context is pointed at the app cluster where
+you want to uninstall Control Plane Connector from.
+:::
+
+
+
+
+It's recommended you create a values file called `connector-values.yaml` and
+provide the values below.
Select the tab according to which environment your
control plane is running in.
+
+
+
+
+
+
+```yaml
+upbound:
+  # This is your org account in Upbound e.g. the name displayed after executing `up org list`
+  account: 
+  # This is a personal access token generated in the Upbound Console
+  token: 
+
+spaces:
+  # Upbound GCP US-West-1 upbound-gcp-us-west-1.spaces.upbound.io
+  # Upbound AWS US-East-1 upbound-aws-us-east-1.spaces.upbound.io
+  # Upbound GCP US-Central-1 upbound-gcp-us-central-1.spaces.upbound.io
+  host: ""
+  insecureSkipTLSVerify: true
+  controlPlane:
+    # The name of the control plane you want the Connector to attach to
+    name: 
+    # The control plane group the control plane resides in
+    group: 
+    # The namespace within the control plane to sync claims from the app cluster to.
+    # NOTE: This must be created before you install the connector.
+    claimNamespace: 
+```
+
+
+
+
+Create a [kubeconfig][kubeconfig-1] for the
+control plane. Write it to a secret in the cluster where you plan to
+install the Control Plane Connector to. Reference this secret in the
+`spaces.controlPlane.kubeconfigSecret` field below.
+
+```yaml
+spaces:
+  controlPlane:
+    # The namespace within the control plane to sync claims from the app cluster to. NOTE: This must be created before you install the connector.
+    claimNamespace: 
+    kubeconfigSecret:
+      name: my-controlplane-kubeconfig
+      key: kubeconfig
+```
+
+
+
+
+
+
+Provide the values file above when you `helm install` the Control Plane Connector:
+
+
+```bash
+helm install --wait mcp-connector oci://xpkg.upbound.io/spaces-artifacts/mcp-connector -n kube-system -f connector-values.yaml
+```
+:::tip
+Create an API token from the Upbound user account settings page in the console by following [these instructions][these-instructions].
+:::
+
+### Uninstall
+
+You can uninstall Control Plane Connector with Helm by running the following:
+
+```bash
+helm uninstall mcp-connector
+```
+
+
+
+
+
+### Example usage
+
+This example creates a control plane using [Configuration
+EKS][configuration-eks]. `KubernetesCluster` is
+available as a claim API in your control plane. The following is [an
+example][an-example]
+object you can create in your control plane.
+
+```yaml
+apiVersion: k8s.starter.org/v1alpha1
+kind: KubernetesCluster
+metadata:
+  name: my-cluster
+  namespace: default
+spec:
+  id: my-cluster
+  parameters:
+    nodes:
+      count: 3
+      size: small
+    services:
+      operators:
+        prometheus:
+          version: "34.5.1"
+  writeConnectionSecretToRef:
+    name: my-cluster-kubeconfig
+```
+
+After connecting your Kubernetes app cluster to the control plane, you
+can create the `KubernetesCluster` object in your app cluster. Although your
+local cluster has an Object, the actual resources are in your managed control
+plane inside Upbound.
+
+```bash {copy-lines="3"}
+# Applying the claim YAML above.
+# kubectl is set up to talk with your Kubernetes cluster.
+kubectl apply -f claim.yaml
+
+
+kubectl get claim -A
+NAME         SYNCED   READY   CONNECTION-SECRET       AGE
+my-cluster   True     True    my-cluster-kubeconfig   2m
+```
+
+Once Kubernetes creates the object, view the Console to see your object.
+
+![Claim by connector in console](/img/ClaimInConsole.png)
+
+You can interact with the object through your cluster just as if it
+lived in your cluster.
+
+### Migration to control planes
+
+This guide details the migration of a Crossplane installation to Upbound-managed
+control planes using the Control Plane Connector to manage claims on an application
+cluster.
+
+![migration flow application cluster to control plane](/img/ConnectorMigration.png)
+
+#### Export all resources
+
+Before proceeding, ensure that you have set the correct kubecontext for your application
+cluster.
+
+```bash
+up controlplane migration export --pause-before-export --output=my-export.tar.gz --yes
+```
+
+This command performs the following:
+- Pauses all claim, composite, and managed resources before export.
+- Scans the control plane for resource types.
+- Exports Crossplane and native resources.
+- Archives the exported state into `my-export.tar.gz`.
+
+Example output:
+```bash
+Exporting control plane state...
+ ✓ Pausing all claim resources before export... 1 resources paused! ⏸️
+ ✓ Pausing all composite resources before export... 7 resources paused! ⏸️
+ ✓ Pausing all managed resources before export... 34 resources paused! ⏸️
+ ✓ Scanning control plane for types to export... 231 types found! 👀
+ ✓ Exporting 231 Crossplane resources...125 resources exported! 📤
+ ✓ Exporting 3 native resources...19 resources exported! 📤
+ ✓ Archiving exported state... archived to "my-export.tar.gz"! 📦
+
+Successfully exported control plane state!
+```
+
+#### Import all resources
+
+Restore the exported resources into a target control plane, which serves as the
+destination for the Control Plane Connector.
+
+
+Log into Upbound and select the correct context:
+
+```bash
+up login
+up ctx 
+up ctp create ctp-a
+```
+
+Output:
+```bash
+ctp-a created
+```
+
+Verify that the core Crossplane version on the application cluster matches the
+version on the new managed control plane.
+
+Use the following command to import the resources:
+```bash
+up controlplane migration import -i my-export.tar.gz \
+  --unpause-after-import \
+  --mcp-connector-cluster-id=my-appcluster \
+  --mcp-connector-claim-namespace=my-appcluster
+```
+
+This command:
+- Restores base resources
+- Waits for XRDs and packages to establish
+- Imports Claims and XR resources
+- Finalizes the import and resumes managed resources
+
+Note that `--mcp-connector-cluster-id` needs to be unique per application cluster,
+and `--mcp-connector-claim-namespace` is the namespace the system creates during
+the import.
+
+Example output:
+```bash
+Importing control plane state...
+ ✓ Reading state from the archive... Done! 👀
+ ✓ Importing base resources... 56 resources imported!📥
+ ✓ Waiting for XRDs... Established! ⏳
+ ✓ Waiting for Packages... Installed and Healthy! ⏳
+ ✓ Importing remaining resources... 88 resources imported! 📥
+ ✓ Finalizing import... Done! 🎉
+ ✓ Unpausing managed resources ... Done! ▶️
+
+Successfully imported control plane state!
+```
+
+#### Verify imported claims
+
+
+The Control Plane Connector renames all claims and adds additional labels to them.
+
+```bash
+kubectl get claim -A
+```
+
+Example output:
+```bash
+NAMESPACE       NAME                                                         SYNCED   READY   CONNECTION-SECRET             AGE
+my-appcluster   cluster.aws.platformref.upbound.io/claim-e708ff592b974f51   True     True    platform-ref-aws-kubeconfig   3m17s
+```
+
+Inspect the labels:
+```bash
+kubectl get -n my-appcluster cluster.aws.platformref.upbound.io/claim-e708ff592b974f51 -o yaml | yq .metadata.labels
+```
+
+Example output:
+```yaml
+mcp-connector.upbound.io/app-cluster: my-appcluster
+mcp-connector.upbound.io/app-namespace: default
+mcp-connector.upbound.io/app-resource-name: example
+```
+
+#### Cleanup the app cluster
+
+Remove all Crossplane-related resources from the application cluster, including:
+
+- Managed Resources
+- Claims
+- Compositions
+- XRDs
+- Packages (Functions, Configurations, Providers)
+- Crossplane and all associated CRDs
+
+
+#### Install Control Plane Connector
+
+
+Follow the preceding installation guide and configure the `connector-values.yaml`:
+
+```yaml
+# NOTE: clusterID needs to match --mcp-connector-cluster-id used in the import on the managed control plane
+clusterID: my-appcluster
+upbound:
+  account: 
+  token: 
+
+spaces:
+  host: ""
+  insecureSkipTLSVerify: true
+  controlPlane:
+    name: 
+    group: 
+    # NOTE: This is the --mcp-connector-claim-namespace used during the import to the control plane
+    claimNamespace: 
+```
+Once the Control Plane Connector installs, verify that resources exist in the application
+cluster:
+
+```bash
+kubectl api-resources | grep platform
+```
+
+Example output:
+```bash
+awslbcontrollers   aws.platform.upbound.io/v1alpha1       true   AWSLBController
+podidentities      aws.platform.upbound.io/v1alpha1       true   PodIdentity
+sqlinstances       aws.platform.upbound.io/v1alpha1       true   SQLInstance
+clusters           aws.platformref.upbound.io/v1alpha1    true   Cluster
+osss               observe.platform.upbound.io/v1alpha1   true   Oss
+apps               platform.upbound.io/v1alpha1           true   App
+```
+
+The claims from the control plane now appear in the application cluster:
+
+```bash
+kubectl get claim -A
+```
+
+Example output:
+```bash
+NAMESPACE   NAME                                         SYNCED   READY   CONNECTION-SECRET             AGE
+default     cluster.aws.platformref.upbound.io/example   True     True    platform-ref-aws-kubeconfig   127m
+```
+
+With this guide, you migrated your Crossplane installation to
+Upbound control planes. This ensures seamless integration with your
+application cluster using the Control Plane Connector.
+
+### Connect multiple app clusters to a control plane
+
+Claims are stored in a unique namespace in the Upbound control plane.
+Each connected cluster gets its own control plane namespace.
+
+![Multi-cluster architecture with control plane connector](/img/ConnectorMulticlusterArch.png)
+
+There's no limit on the number of clusters connected to a single control plane.
+Control plane operators can see all their infrastructure in a central control
+plane.
+
+Without using control planes and Control Plane Connector, users have to install
+Crossplane and providers for each cluster. Each cluster requires configuration for
+providers with necessary credentials. With a single control plane to which multiple
+clusters connect through Upbound tokens, you don't need to give out any cloud
+credentials to the clusters.
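+
+On the control plane side, each connected app cluster's claims live in that cluster's dedicated namespace. A quick way to see this, run from the control plane's context rather than an app cluster:
+
+```bash
+# Claims appear grouped by each app cluster's namespace
+kubectl get claim -A
+```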
+
+
+[kubeconfig]: /manuals/cli/howtos/context-config/#generate-a-kubeconfig-for-a-control-plane-in-a-group
+[kubeconfig-1]:/spaces/concepts/control-planes/#connect-directly-to-your-control-plane
+[these-instructions]:/manuals/console/#create-a-personal-access-token
+[kubernetes-api-aggregationlayer]: https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation/
+[configuration-eks]: https://github.com/upbound/configuration-eks
+[an-example]: https://github.com/upbound/configuration-eks/blob/9f86b6d/.up/examples/cluster.yaml
diff --git a/spaces_versioned_docs/version-v1.11/howtos/debugging-a-ctp.md b/spaces_versioned_docs/version-v1.11/howtos/debugging-a-ctp.md
new file mode 100644
index 000000000..521271e40
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.11/howtos/debugging-a-ctp.md
@@ -0,0 +1,128 @@
+---
+title: Debugging issues on a control plane
+sidebar_position: 70
+description: A guide for how to debug resources on a control plane running in Upbound.
+---
+
+This guide provides troubleshooting guidance for how to identify and fix issues on a control plane.
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+). Debugging methods are consistent across versions.
+
+For related control plane specifications, see the [Spaces API Reference](../../../reference/apis/spaces-api/).
+:::
+
+## Start from Upbound Console
+
+
+The Upbound [Console][console] has a built-in control plane explorer experience
+that surfaces status and events for the resources on your control plane. The
+explorer is claim-based. Resources in this view exist only if they exist in the
+reference chain originating from a claim. This view is a helpful starting point
+if you are attempting to debug an issue originating from a claim.
+
+:::tip
+If you directly create Crossplane Managed Resources (`MR`s) or Composite
+Resources (`XR`s), they won't render in the explorer.
+:::
+
+### Example
+
+The example below uses the control plane explorer view to inspect why a claim for an EKS Cluster isn't healthy.
+
+#### Check the health status of claims
+
+From the API type card, two claims branch from it: one shows a healthy green icon, while the other shows an unhealthy red icon.
+
+![Use control plane explorer view to see status of claims](/img/debug-overview.png)
+
+Select `More details` on the unhealthy claim card and Upbound shows details for the claim.
+
+![Use control plane explorer view to see details of claims](/img/debug-claim-more-details.png)
+
+Looking at the three events for this claim:
+
+- **ConfigureCompositeResource**: this event indicates Upbound created the claimed Composite Resource (`XR`).
+
+- **BindCompositeResource**: this indicates the Composite Resource (`XR`) that's being "claimed" isn't ready yet. A claim doesn't show `HEALTHY` until the XR it references is ready.
+
+- **ConfigureCompositeResource**: the error saying, `cannot apply composite resource...the object has been modified; please apply your changes to the latest version and try again` is a generic event from Crossplane resources. It's safe to ignore this error.
+
+Next, look at the `status` field of the rendered YAML for the resource.
+
+![Use control plane explorer view to see status details of claims](/img/debug-claim-status.png)
+
+The status reports a similar message to the event stream: this claim is waiting for a Composite Resource to be ready. Based on this, investigate the Composite Resource referenced by this claim next.
+
+#### Check the health status of the Composite Resource
+
+
+The control plane explorer only shows the claim cards by default. Selecting the claim card renders the rest of the Crossplane resource tree associated with the selected claim.
+
+
+The previous claim expands into this screenshot:
+
+![Use control plane explorer view to expand tree of claim](/img/debug-claim-expansion.png)
+
+This renders the XR referenced by the claim (along with all its references). You can see the XR is showing the same unhealthy status icon in its card. Notice the XR itself has two nested XRs. One of the nested XRs shows a healthy green icon on its card, while the other shows an unhealthy red icon. Like the claim, a Composite Resource doesn't show healthy until all referenced resources also show healthy.
+
+#### Inspecting Managed Resources
+
+Selecting `More details` on one of the unhealthy Managed Resources shows the following:
+
+![Use control plane explorer view to view events for an MR](/img/debug-mr-event.png)
+
+This event reveals it's unhealthy because it's waiting on a reference to another Managed Resource. Searching the rendered YAML of the MR for this resource shows the following:
+
+![Use control plane explorer view to view status for an MR](/img/debug-mr-status.png)
+
+The rendered YAML shows this MR is referencing a sibling MR that shares the same controller. The same parent XR created both of these managed resources. Inspect the sibling MR to see what its status is.
+
+![Use control plane explorer view to view status for a sibling MR](/img/debug-mr-dependency-status.png)
+
+The sibling MR event stream shows the Provider processed the resource create request. Ignore the `CannotInitializeManagedResource` event. EKS clusters can take 15 minutes or more to provision in AWS. In this case, everything is fine: all the resources are still provisioning. Waiting longer and then looking at the control plane explorer again shows all resources are healthy. For reference, below is an example status field for a resource that's healthy and provisioned.
+
+```yaml
+...
+status:
+  atProvider:
+    id: team-b-app-cluster-bhwfb-hwtgs-20230403135452772300000008
+  conditions:
+    - lastTransitionTime: '2023-04-03T13:56:35Z'
+      reason: Available
+      status: 'True'
+      type: Ready
+    - lastTransitionTime: '2023-04-03T13:54:02Z'
+      reason: ReconcileSuccess
+      status: 'True'
+      type: Synced
+    - lastTransitionTime: '2023-04-03T13:54:53Z'
+      reason: Success
+      status: 'True'
+      type: LastAsyncOperation
+    - lastTransitionTime: '2023-04-03T13:54:53Z'
+      reason: Finished
+      status: 'True'
+      type: AsyncOperation
+```
+
+### Control plane explorer limitations
+
+The control plane explorer view is currently designed around claims. The control plane explorer doesn't inspect other Crossplane resources. To inspect other Crossplane resources, use the `up` CLI.
+
+Some examples of Crossplane resources that require the `up` CLI:
+
+- Managed Resources that aren't associated with a claim
+- Composite Resources that aren't associated with a claim
+- The status of _deleting_ resources
+- ProviderConfigs
+- Provider events
+
+## Use direct CLI access
+
+If your preference is to use a terminal instead of a GUI, Upbound supports direct access to the API server of the control plane. Use [`up ctx`][up-ctx] to connect directly to your control plane.
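+
+Once connected, you can pull the same health signals the explorer shows. A minimal sketch using Crossplane's standard resource categories; replace the angle-bracket placeholders with your own names:
+
+```bash
+# Point kubectl at the control plane
+up ctx <org>/<space>/<group>/<control-plane>
+
+# List claims and managed resources with their SYNCED/READY columns
+kubectl get claim -A
+kubectl get managed
+
+# Show events for a specific claim that isn't ready
+kubectl describe claim <claim-name> -n <namespace>
+```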
+
+
+[console]: /manuals/console/upbound-console
+[up-ctx]: /reference/cli-reference
diff --git a/spaces_versioned_docs/version-v1.11/howtos/managed-service.md b/spaces_versioned_docs/version-v1.11/howtos/managed-service.md
new file mode 100644
index 000000000..40b983a76
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.11/howtos/managed-service.md
@@ -0,0 +1,23 @@
+---
+title: Managed Upbound control planes
+description: "Learn about the managed service capabilities of a Space"
+sidebar_position: 10
+---
+
+Control planes in Upbound are fully isolated [Upbound Crossplane][uxp] instances
+that Upbound manages for you. This means Upbound handles:
+
+- the lifecycle of the underlying infrastructure (compute, memory, and storage) required to power your instance.
+- the scaling of that infrastructure.
+- the maintenance of the core Upbound Crossplane components that make up a control plane.
+
+This lets users focus on building their APIs and operating their control planes,
+while Upbound handles the rest. Each control plane has its own dedicated API
+server connecting users to their control plane.
+
+## Learn about Upbound control planes
+
+Read the [concept][ctp-concept] documentation to learn about Upbound control planes.
+
+[uxp]: /manuals/uxp/overview
+[ctp-concept]: /spaces/concepts/control-planes
\ No newline at end of file
diff --git a/spaces_versioned_docs/version-v1.11/howtos/mcp-connector-guide.md b/spaces_versioned_docs/version-v1.11/howtos/mcp-connector-guide.md
new file mode 100644
index 000000000..8a3866d07
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.11/howtos/mcp-connector-guide.md
@@ -0,0 +1,169 @@
+---
+title: Consume control plane APIs in an app cluster with control plane connector
+sidebar_position: 99
+description: A tutorial to configure a Kubernetes app cluster to consume control plane
+  APIs with the control plane connector
+---
+
+In this tutorial, you learn how to configure a Kubernetes app cluster to communicate with a control plane in an Upbound self-hosted Space.
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Control plane connector functionality is consistent across versions.
+
+For related API specifications and resources, see the [Spaces API Reference](../../../../../reference/apis/spaces-api/).
+:::
+
+The [control plane connector][control-plane-connector] bridges your Kubernetes application clusters, running outside of Upbound, to your control planes running in Upbound. This allows you to interact with your control plane's API right from the app cluster. The claim APIs you define via `CompositeResourceDefinitions` are available alongside Kubernetes workload APIs like `Pod`. In effect, control plane connector provides the same experience as a locally installed Crossplane.
+
+## Prerequisites
+
+To complete this tutorial, you need the following:
+
+- Have already deployed an Upbound Space.
+- Have already deployed a Kubernetes cluster (referred to as `app cluster`).
+
+## Create a control plane
+
+Create a new control plane in your self-hosted Space. Run the following command in a terminal:
+
+```bash
+up ctp create my-control-plane
+```
+
+Once the control plane is ready, connect to it.
+
+```bash
+up ctp connect my-control-plane
+```
+
+For convenience, install an Upbound [platform reference Configuration][platform-reference-configuration] from the marketplace. For production scenarios, replace this with your own Crossplane Configurations or compositions.
+
+```bash
+up ctp configuration install xpkg.upbound.io/upbound/platform-ref-aws:v1.4.0
+```
+
+## Fetch the control plane's connection details
+
+Run the following command in a terminal:
+
+```shell
+kubectl get secret kubeconfig-my-control-plane -n default -o jsonpath='{.data.kubeconfig}' | base64 -d > kubeconfig-my-control-plane.yaml
+```
+
+This command saves the kubeconfig for the control plane to a file in your working directory.
+
+## Install control plane connector in your app cluster
+
+Switch contexts to your Kubernetes app cluster. To install the control plane connector in your app cluster, you must first provide a secret containing your control plane's kubeconfig at install-time. Run the following command in a terminal:
+
+:::important
+Make sure the following commands are executed against your **app cluster**, not your control plane.
+:::
+
+```bash
+kubectl create secret generic kubeconfig-my-control-plane -n kube-system --from-file=kubeconfig=./kubeconfig-my-control-plane.yaml
+```
+
+Set the environment variable below to configure which namespace _in your control plane_ you wish to sync the app cluster's claims to.
+
+```shell
+export CONNECTOR_CTP_NAMESPACE=app-cluster-1
+```
+
+Install the Control Plane Connector in the app cluster and point it to your control plane.
+
+```bash
+up ctp connector install my-control-plane $CONNECTOR_CTP_NAMESPACE --control-plane-secret=kubeconfig-my-control-plane
+```
+
+## Inspect your app cluster
+
+After you install Control Plane Connector in the app cluster, you can now see APIs which live on the control plane. You can confirm this is the case by running the following command on your app cluster:
+
+```bash {copy-lines="1"}
+kubectl api-resources | grep upbound
+
+# The output should look like this:
+sqlinstances   aws.platform.upbound.io/v1alpha1       true   SQLInstance
+clusters       aws.platformref.upbound.io/v1alpha1    true   Cluster
+osss           observe.platform.upbound.io/v1alpha1   true   Oss
+apps           platform.upbound.io/v1alpha1           true   App
+```
+
+## Claim a database instance on your app cluster
+
+Create a database claim against the `SQLInstance` API and observe resources get created by your control plane.
+
+1. Use the [CLI command][cli-command] to export the state of your existing Crossplane control plane:
+
+   ```bash
+   up controlplane migration export --output 
+   ```
+
+   The command exports your existing Crossplane control plane configuration/state into an archive file.
+
+:::note
+By default, the export command doesn't make any changes to your existing Crossplane control plane state, leaving it intact. Use the `--pause-before-export` flag to pause the reconciliation on managed resources before exporting the archive file.
+
+This safety mechanism ensures the control plane you migrate state to doesn't assume ownership of resources before you're ready.
+:::
+
+2. Use the control plane [create command][create-command] to create a managed
+control plane in Upbound:
+
+   ```bash
+   up controlplane create my-controlplane
+   ```
+
+3. Use [`up ctx`][up-ctx] to connect to the control plane created in the previous step:
+
+   ```bash
+   up ctx "///my-controlplane"
+   ```
+
+   The command configures your local `kubeconfig` to connect to the control plane.
+
+4. Run the following command to import the archive file into the control plane:
+
+   ```bash
+   up controlplane migration import --input 
+   ```
+
+:::note
+By default, the import command leaves the control plane in an inactive state by pausing the reconciliation on managed
+resources. This pause gives you an opportunity to review the imported configuration/state before activating the control plane.
+Use the `--unpause-after-import` flag to change the default behavior and activate the control plane immediately after
+importing the archive file.
+:::
+
+
+
+5. Review and validate the imported configuration/state. When you are ready, activate your managed
+   control plane by running the following command:
+
+   ```bash
+   kubectl annotate managed --all crossplane.io/paused-
+   ```
+
+   At this point, you can delete the source Crossplane control plane.
+
+## CLI options
+
+### Filtering
+
+The migration tool captures the state of a control plane. The only filtering
+supported is Kubernetes namespace and Kubernetes resource type filtering.
+
+You can exclude namespaces using the `--exclude-namespaces` CLI option. This can prevent the CLI from including unwanted resources in the export.
+
+```bash
+--exclude-namespaces=kube-system,kube-public,kube-node-lease,local-path-storage,...
+
+# A list of specific namespaces to exclude from the export. Defaults to 'kube-system', 'kube-public', 'kube-node-lease', and 'local-path-storage'.
+```
+
+You can exclude Kubernetes resource types by using the `--exclude-resources` CLI option:
+
+```bash
+--exclude-resources=EXCLUDE-RESOURCES,...
+
+# A list of resource types to exclude from the export in "resource.group" format. No resources are excluded by default.
+```
+
+For example, here's how to exclude the CRDs installed by Crossplane functions (since they're not needed):
+
+```bash
+up controlplane migration export \
+  --exclude-resources=gotemplates.gotemplating.fn.crossplane.io,kclinputs.template.fn.crossplane.io
+```
+
+:::warning
+You must specify resource names in lowercase "resource.group" format (for example, `gotemplates.gotemplating.fn.crossplane.io`). Using only the resource kind (for example, `GoTemplate`) isn't supported.
+:::
+
+
+:::tip Function Input CRDs
+
+Exclude function input CRDs (`inputs.template.fn.crossplane.io`, `resources.pt.fn.crossplane.io`, `gotemplates.gotemplating.fn.crossplane.io`, `kclinputs.template.fn.crossplane.io`) from migration exports. Upbound automatically recreates these resources during import. Function input CRDs typically have owner references to function packages and may have restricted RBAC access. Upbound installs these CRDs during the import when function packages are restored.
+
+:::
+
+
+After export, users can also edit the archive file to only include necessary resources.
+
+### Export non-Crossplane resources
+
+Use the `--include-extra-resources=` CLI option to select other CRD types to include in the export.
+
+### Set the kubecontext
+
+Currently `--context` isn't supported in the migration CLI. You should be able to use the `--kubeconfig` CLI option to use a file that's set to the correct context. For example:
+
+```bash
+up controlplane migration export --kubeconfig 
+```
+
+Use this in tandem with `up ctx` to export a control plane's kubeconfig:
+
+```bash
+up ctx --kubeconfig ~/.kube/config
+
+# To list the current context
+up ctx . --kubeconfig ~/.kube/config
+```
+
+## Export archive
+
+The migration CLI exports an archive upon successful completion. Below is an example export of a control plane that excludes several CRD types and skips the confirmation prompt. A file gets written to the working directory, unless you select another output file:
+ +View the example export + +```bash +$ up controlplane migration export --exclude-resources=gotemplates.gotemplating.fn.crossplane.io,kclinputs.template.fn.crossplane.io --yes +Exporting control plane state... +✓ Scanning control plane for types to export... 121 types found! 👀 +✓ Exporting 121 Crossplane resources...60 resources exported! 📤 +✓ Exporting 3 native resources...8 resources exported! 📤 +✓ Archiving exported state... archived to "xp-state.tar.gz"! 📦 +``` + +
+
+
+When an export occurs, a file named `xp-state.tar.gz` by default gets created in the working directory. You can unzip the file; the contents of the export are all text YAML files.
+
+- Each CRD (for example `vpcs.ec2.aws.upbound.io`) gets its own directory
+which contains:
+  - A `metadata.yaml` file that contains Kubernetes Object Metadata
+  - A list of Kubernetes Categories the resource belongs to
+- A `cluster` directory that contains YAML manifests for all resources provisioned
+using the CRD.
+
+Sample contents for a Cluster with a single `XNetwork` Composite from
+[configuration-aws-network][configuration-aws-network] are shown below:
+ +View the example cluster content + +```bash +├── compositionrevisions.apiextensions.crossplane.io +│ ├── cluster +│ │ ├── kcl.xnetworks.aws.platform.upbound.io-4ca6a8a.yaml +│ │ └── xnetworks.aws.platform.upbound.io-9859a34.yaml +│ └── metadata.yaml +├── configurations.pkg.crossplane.io +│ ├── cluster +│ │ └── configuration-aws-network.yaml +│ └── metadata.yaml +├── deploymentruntimeconfigs.pkg.crossplane.io +│ ├── cluster +│ │ └── default.yaml +│ └── metadata.yaml +├── export.yaml +├── functions.pkg.crossplane.io +│ ├── cluster +│ │ ├── crossplane-contrib-function-auto-ready.yaml +│ │ ├── crossplane-contrib-function-go-templating.yaml +│ │ └── crossplane-contrib-function-kcl.yaml +│ └── metadata.yaml +├── internetgateways.ec2.aws.upbound.io +│ ├── cluster +│ │ └── borrelli-backup-test-xgl4q.yaml +│ └── metadata.yaml +├── mainroutetableassociations.ec2.aws.upbound.io +│ ├── cluster +│ │ └── borrelli-backup-test-t2qh7.yaml +│ └── metadata.yaml +├── namespaces +│ └── cluster +│ ├── crossplane-system.yaml +│ ├── default.yaml +│ └── upbound-system.yaml +├── providerconfigs.aws.upbound.io +│ ├── cluster +│ │ └── default.yaml +│ └── metadata.yaml +├── providerconfigusages.aws.upbound.io +│ ├── cluster +│ │ ├── 0a2a3ec6-ef13-45f9-9cf0-63af7f4a6b6b.yaml +...redacted +│ │ └── f7092b0f-3a78-4bfe-82c8-57e5085a9b11.yaml +│ └── metadata.yaml +├── providers.pkg.crossplane.io +│ ├── cluster +│ │ ├── upbound-provider-aws-ec2.yaml +│ │ └── upbound-provider-family-aws.yaml +│ └── metadata.yaml +├── routes.ec2.aws.upbound.io +│ ├── cluster +│ │ └── borrelli-backup-test-dt9cj.yaml +│ └── metadata.yaml +├── routetableassociations.ec2.aws.upbound.io +│ ├── cluster +│ │ ├── borrelli-backup-test-mr2sd.yaml +│ │ ├── borrelli-backup-test-ngq5h.yaml +│ │ ├── borrelli-backup-test-nrkgg.yaml +│ │ └── borrelli-backup-test-wq752.yaml +│ └── metadata.yaml +├── routetables.ec2.aws.upbound.io +│ ├── cluster +│ │ └── borrelli-backup-test-dv4mb.yaml +│ └── metadata.yaml +├── secrets +│ └── namespaces +│ ├── crossplane-system +│ │ ├── cert-token-signing-gateway-pub.yaml +│ │ ├── mxp-hostcluster-certs.yaml +│ │ ├── package-pull-secret.yaml +│ │ └── xgql-tls.yaml +│ └── upbound-system +│ └── aws-creds.yaml +├── securitygrouprules.ec2.aws.upbound.io +│ ├── cluster +│ │ ├── borrelli-backup-test-472f4.yaml +│ │ └── borrelli-backup-test-qftmw.yaml +│ └── metadata.yaml +├── securitygroups.ec2.aws.upbound.io +│ ├── cluster +│ │ └── borrelli-backup-test-w5jch.yaml +│ └── metadata.yaml +├── storeconfigs.secrets.crossplane.io +│ ├── cluster +│ │ └── default.yaml +│ └── metadata.yaml +├── subnets.ec2.aws.upbound.io +│ ├── cluster +│ │ ├── borrelli-backup-test-8btj6.yaml +│ │ ├── borrelli-backup-test-gbmrm.yaml +│ │ ├── borrelli-backup-test-m7kh7.yaml +│ │ └── borrelli-backup-test-nttt5.yaml +│ └── metadata.yaml +├── vpcs.ec2.aws.upbound.io +│ ├── cluster +│ │ └── borrelli-backup-test-7hwgh.yaml +│ └── metadata.yaml +└── xnetworks.aws.platform.upbound.io +├── cluster +│ └── borrelli-backup-test.yaml +└── metadata.yaml +43 directories, 87 files +``` + +
+
+
+The `export.yaml` file contains metadata about the export, including the export configuration, Crossplane installation details, and statistics on what's included in the export bundle.
+
+<details>
+ +View the export + +```yaml +version: v1alpha1 +exportedAt: 2025-01-06T17:39:53.173222Z +options: + excludedNamespaces: + - kube-system + - kube-public + - kube-node-lease + - local-path-storage + includedResources: + - namespaces + - configmaps + - secrets + excludedResources: + - gotemplates.gotemplating.fn.crossplane.io + - kclinputs.template.fn.crossplane.io +crossplane: + distribution: universal-crossplane + namespace: crossplane-system + version: 1.17.3-up.1 + featureFlags: + - --enable-provider-identity + - --enable-environment-configs + - --enable-composition-functions + - --enable-usages +stats: + total: 68 + nativeResources: + configmaps: 0 + namespaces: 3 + secrets: 5 + customResources: + amicopies.ec2.aws.upbound.io: 0 + amilaunchpermissions.ec2.aws.upbound.io: 0 + amis.ec2.aws.upbound.io: 0 + availabilityzonegroups.ec2.aws.upbound.io: 0 + capacityreservations.ec2.aws.upbound.io: 0 + carriergateways.ec2.aws.upbound.io: 0 + compositeresourcedefinitions.apiextensions.crossplane.io: 0 + compositionrevisions.apiextensions.crossplane.io: 2 + compositions.apiextensions.crossplane.io: 0 + configurationrevisions.pkg.crossplane.io: 0 + configurations.pkg.crossplane.io: 1 +...redacted +``` + +
+
+### Skipped resources
+
+In addition to the resources excluded via CLI options, the following resources aren't
+included in the backup:
+
+- The `kube-root-ca.crt` ConfigMap, since this is cluster-specific
+- Resources directly managed via Helm (resources applied by ArgoCD's Helm
+implementation, which templates Helm resources and then applies them, are included in the backup). The migration creates the exclusion list by looking for:
+  - Any resource with the label `"app.kubernetes.io/managed-by" == "Helm"`
+  - Kubernetes Secrets with the label prefix `helm.sh/release`, for example `helm.sh/release.v1`
+- Resources installed via a Crossplane package. These have an `ownerReference` with
+a prefix `pkg.crossplane.io`. The expectation is that during import, the Crossplane Package Manager bears responsibility for installing these resources.
+- Crossplane Locks: Any `Lock.pkg.crossplane.io` resource isn't included in the
+export.
+
+## Restore
+
+The following is an example of a successful import run. At the end of the import, all Managed Resources are in a paused state.
+
+<details>
+ +View the migration import + +```bash +$ up controlplane migration import +Importing control plane state... +✓ Reading state from the archive... Done! 👀 +✓ Importing base resources... 18 resources imported! 📥 +✓ Waiting for XRDs... Established! ⏳ +✓ Waiting for Packages... Installed and Healthy! ⏳ +✓ Importing remaining resources... 50 resources imported! 📥 +✓ Finalizing import... Done! 🎉 +``` + +
+
+Your scenario may involve migrating resources which already exist through other automation on the platform. When executing an import in these circumstances, the importer applies the new manifests to the cluster. If the resource already exists, the restore overwrites its fields with the values in the backup.
+
+The importer restores all resources in the export archive. Managed Resources get imported with the `crossplane.io/paused: "true"` annotation set. Use the `--unpause-after-import` CLI argument to automatically un-pause resources that got
+paused during import, or remove the annotation manually.
+
+### Restore order
+
+The importer restores based on Kubernetes types. The restore order doesn't account for parent/child relationships.
+
+Because Crossplane Composites create new Managed Resources if not present on the cluster, all
+Claims, Composites, and Managed Resources get imported in a paused state. You can un-pause them after the restore completes.
+
+The first step of import is installing Base Resources into the cluster. These resources (such as
+packages and XRDs) must be ready before proceeding with the import.
+Base Resources are:
+
+- Kubernetes Resources
+  - ConfigMaps
+  - Namespaces
+  - Secrets
+- Crossplane Resources
+  - ControllerConfigs: `controllerconfigs.pkg.crossplane.io`
+  - DeploymentRuntimeConfigs: `deploymentruntimeconfigs.pkg.crossplane.io`
+  - StoreConfigs: `storeconfigs.secrets.crossplane.io`
+- Crossplane Packages
+  - Providers: `providers.pkg.crossplane.io`
+  - Functions: `functions.pkg.crossplane.io`
+  - Configurations: `configurations.pkg.crossplane.io`
+
+Restore waits for the base resources to be `Ready` before moving on to the next step. Next, restore walks through the archive and restores all the manifests present.
+
+During import, the `crossplane.io/paused` annotation gets added to Managed Resources, Claims,
+and Composites.
+
+To manually un-pause managed resources after an import, remove the annotation by running:
+
+```bash
+kubectl annotate managed --all crossplane.io/paused-
+```
+
+You can also run import again with the `--unpause-after-import` flag to remove the annotations.
+
+```bash
+up controlplane migration import --unpause-after-import
+```
+
+### Restoring resource status
+
+The importer applies the status of all resources during import. The importer determines whether the CRD version defines a status field based on the stored CRD version.
+
+
+[cli-command]: /reference/cli-reference
+[up-cli]: /reference/cli-reference
+[up-cli-1]: /manuals/cli/overview
+[create-command]: /reference/cli-reference
+[up-ctx]: /reference/cli-reference
+[configuration-aws-network]: https://marketplace.upbound.io/configurations/upbound/configuration-aws-network
diff --git a/spaces_versioned_docs/version-v1.11/howtos/observability.md b/spaces_versioned_docs/version-v1.11/howtos/observability.md
new file mode 100644
index 000000000..8fc5c3278
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.11/howtos/observability.md
@@ -0,0 +1,395 @@
+---
+title: Observability
+sidebar_position: 50
+description: A guide for how to use the integrated observability pipeline feature
+  in a Space.
+plan: "enterprise"
+---
+
+
+
+This guide explains how to configure observability in Upbound Spaces. Upbound
+provides integrated observability features built on
+[OpenTelemetry][opentelemetry] to collect, process, and export logs, metrics,
+and traces.
+
+Upbound Spaces offers two levels of observability:
+
+1.
**Space-level observability** - Observes the cluster infrastructure where Spaces software is installed (Self-Hosted only)
+2. **Control plane observability** - Observes workloads running within individual control planes
+
+
+
+
+
+:::info API Version Information & Version Selector
+This guide applies to all supported versions (v1.9-v1.15+). Observability features evolved:
+
+- **v1.9-v1.12**: SharedTelemetryConfig available (alpha until v1.11)
+- **v1.11+**: Observability promoted to stable with logs export support
+- **v1.14+**: Both space-level and control-plane observability GA
+
+**View API Reference for Your Version**:
+| Version | Status | Link |
+|---------|--------|------|
+| v1.15 | Latest | [Spaces API v1.15](../../../next/reference/apis/spaces-api/) |
+| v1.14 | Supported | [Spaces API v1.14](../../../1.14/reference/apis/spaces-api/) |
+| v1.13 | Supported | [Spaces API v1.13](../../../1.13/reference/apis/spaces-api/) |
+| v1.12 | Supported | [Spaces API v1.12](../../../1.12/reference/apis/spaces-api/) |
+| v1.11 | Supported | [Spaces API v1.11](../../../1.11/reference/apis/spaces-api/) |
+| v1.10 | Limited | [Spaces API v1.10](../../../1.10/reference/apis/spaces-api/) |
+| v1.9 | EOL | [Spaces API v1.9](../../../1.9/reference/apis/spaces-api/) |
+
+For version support policy and feature availability, see and .
+:::
+
+:::important
+**Space-level observability** (available since v1.6.0, GA in v1.14.0):
+- Disabled by default
+- Requires manual enablement and configuration
+- Self-Hosted Spaces only
+
+**Control plane observability** (available since v1.13.0, GA in v1.14.0):
+- Enabled by default
+- No additional configuration required
+:::
+
+
+
+
+## Prerequisites
+
+
+**Control plane observability** is enabled by default. No additional setup is
+required.
+
+
+
+### Self-hosted Spaces
+
+1. **Enable the observability feature** when installing Spaces:
+   ```bash
+   up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
+     ...
+     --set "observability.enabled=true"
+   ```
+
+Set `features.alpha.observability.enabled=true` instead if using a Spaces version
+before `v1.14.0`.
+
+2. **Install OpenTelemetry Operator** (required for Space-level observability):
+   ```bash
+   kubectl apply -f https://github.com/open-telemetry/opentelemetry-operator/releases/download/v0.116.0/opentelemetry-operator.yaml
+   ```
+
+   :::important
+   If running Spaces `v1.11` or later, use OpenTelemetry Operator `v0.110.0` or later due to breaking changes.
+   :::
+
+
+## Space-level observability
+
+Space-level observability is only available for self-hosted Spaces and allows
+administrators to observe the cluster infrastructure.
+
+### Configuration
+
+Configure Space-level observability using the `spacesCollector` value in your
+Spaces Helm chart:
+
+```yaml
+observability:
+  spacesCollector:
+    config:
+      exporters:
+        otlphttp:
+          endpoint: ""
+          headers:
+            api-key: YOUR_API_KEY
+      exportPipeline:
+        logs:
+          - otlphttp
+        metrics:
+          - otlphttp
+```
+
+This configuration exports metrics and logs from:
+
+- Crossplane installation
+- Spaces infrastructure (controller, API, router, etc.)
+
+### Router metrics
+
+The Spaces router uses Envoy as a reverse proxy and automatically exposes
+metrics when you enable Space-level observability.
These metrics provide
+visibility into:
+
+- Traffic routing to control planes and services
+- Request status codes, timeouts, and retries
+- Circuit breaker state preventing cascading failures
+- Client connection patterns and request volume
+- Request latency (P50, P95, P99)
+
+For more information about available metrics, example queries, and how to enable
+this feature, see the [Space-level observability guide][space-level-o11y].
+
+## Control plane observability
+
+Control plane observability collects telemetry data from workloads running
+within individual control planes using `SharedTelemetryConfig` resources.
+
+The pipeline deploys [OpenTelemetry Collectors][opentelemetry-collectors] per
+control plane, defined by a `SharedTelemetryConfig` at the group level.
+Collectors pass data to external observability backends.
+
+:::important
+From Spaces `v1.13` and beyond, telemetry only includes user-facing control
+plane workloads (Crossplane, providers, functions).
+
+Self-hosted users can include system workloads (`api-server`, `etcd`) by setting
+`observability.collectors.includeSystemTelemetry=true` in Helm.
+:::
+
+:::important
+Spaces validates `SharedTelemetryConfig` resources before applying them by
+sending telemetry to configured exporters. For self-hosted Spaces, ensure that
+`spaces-controller` can reach the exporter endpoints.
+:::
+
+### `SharedTelemetryConfig`
+
+`SharedTelemetryConfig` is a group-scoped custom resource that defines telemetry
+configuration for control planes.
+
+#### New Relic example
+
+```yaml
+apiVersion: observability.spaces.upbound.io/v1alpha1
+kind: SharedTelemetryConfig
+metadata:
+  name: newrelic
+  namespace: default
+spec:
+  controlPlaneSelector:
+    labelSelectors:
+      - matchLabels:
+          org: foo
+  exporters:
+    otlphttp:
+      endpoint: https://otlp.nr-data.net
+      headers:
+        api-key: YOUR_API_KEY
+  exportPipeline:
+    metrics: [otlphttp]
+    traces: [otlphttp]
+    logs: [otlphttp]
+```
+
+#### Datadog example
+
+```yaml
+apiVersion: observability.spaces.upbound.io/v1alpha1
+kind: SharedTelemetryConfig
+metadata:
+  name: datadog
+  namespace: default
+spec:
+  controlPlaneSelector:
+    labelSelectors:
+      - matchLabels:
+          org: foo
+  exporters:
+    datadog:
+      api:
+        site: ${DATADOG_SITE}
+        key: ${DATADOG_API_KEY}
+  exportPipeline:
+    metrics: [datadog]
+    traces: [datadog]
+    logs: [datadog]
+```
+
+### Control plane selection
+
+Use `spec.controlPlaneSelector` to specify which control planes should use the
+telemetry configuration.
+
+#### Label-based selection
+
+```yaml
+spec:
+  controlPlaneSelector:
+    labelSelectors:
+      - matchLabels:
+          environment: production
+```
+
+#### Expression-based selection
+
+```yaml
+spec:
+  controlPlaneSelector:
+    labelSelectors:
+      - matchExpressions:
+          - { key: environment, operator: In, values: [production,staging] }
+```
+
+#### Name-based selection
+
+```yaml
+spec:
+  controlPlaneSelector:
+    names:
+      - controlplane-dev
+      - controlplane-staging
+      - controlplane-prod
+```
+
+### Manage sensitive data
+
+:::important
+Available from Spaces `v1.10`
+:::
+
+Store sensitive data in Kubernetes secrets and reference them in your
+`SharedTelemetryConfig`:
+
+1. **Create the secret:**
+   ```bash
+   kubectl create secret generic sensitive -n <namespace> \
+     --from-literal=apiKey='YOUR_API_KEY'
+   ```
+
+2.
**Reference in SharedTelemetryConfig:**
+   ```yaml
+   apiVersion: observability.spaces.upbound.io/v1alpha1
+   kind: SharedTelemetryConfig
+   metadata:
+     name: newrelic
+   spec:
+     configPatchSecretRefs:
+       - name: sensitive
+         key: apiKey
+         path: exporters.otlphttp.headers.api-key
+     controlPlaneSelector:
+       labelSelectors:
+         - matchLabels:
+             org: foo
+     exporters:
+       otlphttp:
+         endpoint: https://otlp.nr-data.net
+         headers:
+           api-key: dummy # Replaced by secret value
+     exportPipeline:
+       metrics: [otlphttp]
+       traces: [otlphttp]
+       logs: [otlphttp]
+   ```
+
+### Telemetry processing
+
+:::important
+Available from Spaces `v1.11`
+:::
+
+Configure processing pipelines to transform telemetry data using the [transform
+processor][transform-processor].
+
+#### Add labels to metrics
+
+```yaml
+spec:
+  processors:
+    transform:
+      error_mode: ignore
+      metric_statements:
+        - context: datapoint
+          statements:
+            - set(attributes["newLabel"], "someLabel")
+  processorPipeline:
+    metrics: [transform]
+```
+
+#### Remove labels
+
+From metrics:
+```yaml
+processors:
+  transform:
+    metric_statements:
+      - context: datapoint
+        statements:
+          - delete_key(attributes, "kubernetes_namespace")
+```
+
+From logs:
+```yaml
+processors:
+  transform:
+    log_statements:
+      - context: log
+        statements:
+          - delete_key(attributes, "log.file.name")
+```
+
+#### Modify log messages
+
+```yaml
+processors:
+  transform:
+    log_statements:
+      - context: log
+        statements:
+          - set(attributes["original"], body)
+          - set(body, Concat(["log message:", body], " "))
+```
+
+### Monitor status
+
+Check the status of your `SharedTelemetryConfig`:
+
+```bash
+kubectl get stc
+NAME      SELECTED   FAILED   PROVISIONED   AGE
+datadog   1          0        1             63s
+```
+
+- `SELECTED`: Number of control planes selected
+- `FAILED`: Number of control planes that failed provisioning
+- `PROVISIONED`: Number of successfully running collectors
+
+For detailed status information:
+
+```bash
+kubectl describe stc
+```
+
+## Supported exporters
+
+Both Space-level and control plane observability support:
+
+- `datadog` - Datadog integration
+- `otlphttp` - General-purpose exporter (used by New Relic, among others)
+- `debug` - Troubleshooting output
+
+## Considerations
+
+- **Control plane conflicts**: Each control plane can only use one `SharedTelemetryConfig`. Multiple configs selecting the same control plane conflict.
+- **Custom collector image**: Both Space-level and control plane observability use the same custom OpenTelemetry Collector image with supported exporters.
+- **Resource scope**: `SharedTelemetryConfig` resources are group-scoped, allowing different telemetry configurations per group.
+
+For more advanced configuration options, review the [Helm chart
+reference][helm-chart-reference] and [OpenTelemetry Transformation Language
+documentation][opentelemetry-transformation-language].
+
+
+[opentelemetry]: https://opentelemetry.io/
+[opentelemetry-collectors]: https://opentelemetry.io/docs/collector/
+[opentelemetry-collector-configuration]: https://opentelemetry.io/docs/collector/configuration/#exporters
+[opentelemetry-operator]: https://opentelemetry.io/docs/kubernetes/operator/
+[transform-processor]: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/transformprocessor/README.md
+[opentelemetry-transformation-language]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl
+[space-level-o11y]: /spaces/howtos/self-hosted/space-observability
+[helm-chart-reference]: /reference/helm-reference
+[opentelemetry-transformation-language-functions]: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/ottl/ottlfuncs/README.md
+[opentelemetry-transformation-language-contexts]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl/contexts
+[guide-on-ottl]: https://betterstack.com/community/guides/observability/ottl/#a-brief-overview-of-the-ottl-grammar
diff --git a/spaces_versioned_docs/version-v1.11/howtos/query-api.md b/spaces_versioned_docs/version-v1.11/howtos/query-api.md
new file mode 100644
index 000000000..78163de2f
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.11/howtos/query-api.md
@@ -0,0 +1,320 @@
+---
+title: Query API
+sidebar_position: 40
+description: Use the `up` CLI to query objects and resources
+---
+
+
+
+Upbound's Query API allows users to inspect objects and resources within their control planes. The read-only `up alpha query` and `up alpha get` CLI commands let you gather information about your control planes quickly and efficiently. These commands follow the [`kubectl` conventions][kubectl-conventions] for filtering, sorting, and retrieving information from your Space.
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+). Query API is available in Cloud Spaces since v1.6 and Self-Hosted since v1.8.
+
+For detailed deployment procedures, see [Deploy Query API infrastructure - Self-Hosted](./self-hosted/query-api.md). For version compatibility details, see the .
+:::
+
+
+
+## Using the Query API
+
+
+The Query API allows you to retrieve control plane information faster than traditional `kubectl` commands. This feature lets you debug your Crossplane resources with the CLI or within the Upbound Console's enhanced management views.
+
+### Query within a single control plane
+
+Use the `up alpha get` command to retrieve information about objects within the current control plane context. This command uses the **Query** endpoint and targets the current control plane.
+
+To switch between control plane groups, use the [`up ctx`][up-ctx] command and change to your desired context with an interactive prompt, or specify your control plane path:
+
+```shell
+up ctx <organization>/<space>/<group>/<control-plane>
+```
+
+You can query within a single control plane with the [`up alpha get` command][up-alpha-get-command] to return more information about a given object within the current kubeconfig context.
+
+The `up alpha get` command can query resource types and aliases to return objects in your control plane.
+
+```shell
+up alpha get managed
+NAME                             READY   SYNCED   AGE
+custom-account1-5bv5j-sa         True    True     15m
+custom-cluster1-bq6dk-net        True    True     15m
+custom-account1-5bv5j-subnet     True    True     15m
+custom-cluster1-bq6dk-nodepool   True    True     15m
+custom-cluster1-bq6dk-cluster    True    True     15m
+custom-account1-5bv5j-net        True    True     15m
+custom-cluster1-bq6dk-subnet     True    True     15m
+custom-cluster1-bq6dk-sa         True    True     15m
+```
+
+The [`-A` flag][a-flag] queries for objects across all namespaces.
+
+```shell
+up alpha get configmaps -A
+NAMESPACE           NAME                                                   AGE
+crossplane-system   uxp-versions-config                                    18m
+crossplane-system   universal-crossplane-config                            18m
+crossplane-system   kube-root-ca.crt                                       18m
+upbound-system      kube-root-ca.crt                                       18m
+kube-system         kube-root-ca.crt                                       18m
+kube-system         coredns                                                18m
+default             kube-root-ca.crt                                       18m
+kube-node-lease     kube-root-ca.crt                                       18m
+kube-public         kube-root-ca.crt                                       18m
+kube-system         kube-apiserver-legacy-service-account-token-tracking   18m
+kube-system         extension-apiserver-authentication                     18m
+```
+
+To query for [multiple resource types][multiple-resource-types], you can add the name or alias for each resource as a comma-separated string.
+
+```shell
+up alpha get providers,providerrevisions
+
+NAME                                                                               HEALTHY   REVISION   IMAGE                                                     STATE    DEP-FOUND   DEP-INSTALLED   AGE
+providerrevision.pkg.crossplane.io/crossplane-contrib-provider-nop-ecc25c121431   True      1          xpkg.upbound.io/crossplane-contrib/provider-nop:v0.2.1   Active                                18m
+NAME                                                          INSTALLED   HEALTHY   PACKAGE                                                   AGE
+provider.pkg.crossplane.io/crossplane-contrib-provider-nop   True        True      xpkg.upbound.io/crossplane-contrib/provider-nop:v0.2.1   18m
+```
+
+### Query multiple control planes
+
+The [`up alpha query` command][up-alpha-query-command] returns a list of objects of any kind within all the control planes in your Space. This command uses either the **SpaceQuery** or **GroupQuery** endpoint depending on your query scope. The `-A` flag switches the query context from the group level to the entire Space.
+
+The `up alpha query` command accepts resources and aliases to return objects across your group or Space.
+
+```shell
+up alpha query crossplane
+
+NAME                                                                                          ESTABLISHED   OFFERED   AGE
+compositeresourcedefinition.apiextensions.crossplane.io/xnetworks.platform.acme.co           True          True      20m
+compositeresourcedefinition.apiextensions.crossplane.io/xaccountscaffolds.platform.acme.co   True          True      20m
+
+
+NAME                                                                          XR-KIND            XR-APIVERSION               AGE
+composition.apiextensions.crossplane.io/xaccountscaffolds.platform.acme.co   XAccountScaffold   platform.acme.co/v1alpha1   20m
+composition.apiextensions.crossplane.io/xnetworks.platform.acme.co           XNetwork           platform.acme.co/v1alpha1   20m
+
+
+NAME                                                                                          REVISION   XR-KIND            XR-APIVERSION               AGE
+compositionrevision.apiextensions.crossplane.io/xaccountscaffolds.platform.acme.co-5ae9da5   1          XAccountScaffold   platform.acme.co/v1alpha1   20m
+compositionrevision.apiextensions.crossplane.io/xnetworks.platform.acme.co-414ce80           1          XNetwork           platform.acme.co/v1alpha1   20m
+
+NAME                                                         READY   SYNCED   AGE
+nopresource.nop.crossplane.io/custom-cluster1-bq6dk-subnet   True    True     19m
+nopresource.nop.crossplane.io/custom-account1-5bv5j-net      True    True     19m
+
+## Output truncated...
+
+```
+
+
+The [`--sort-by` flag][sort-by-flag] lets you sort the returned information. You can construct the sort order as a JSONPath expression string or integer.
+
+```shell
+up alpha query crossplane -A --sort-by="{.metadata.name}"
+
+CONTROLPLANE   NAME                                                AGE
+default/test   deploymentruntimeconfig.pkg.crossplane.io/default   10m
+
+CONTROLPLANE   NAME                                        AGE   TYPE         DEFAULT-SCOPE
+default/test   storeconfig.secrets.crossplane.io/default   10m   Kubernetes   crossplane-system
+```
+
+To query for multiple resource types, you can add the name or alias for each resource as a comma-separated string.
+
+```shell
+up alpha query namespaces,configmaps -A
+
+CONTROLPLANE   NAME                          AGE
+default/test   namespace/upbound-system      15m
+default/test   namespace/crossplane-system   15m
+default/test   namespace/kube-system         16m
+default/test   namespace/default             16m
+
+CONTROLPLANE   NAMESPACE           NAME                                    AGE
+default/test   crossplane-system   configmap/uxp-versions-config           15m
+default/test   crossplane-system   configmap/universal-crossplane-config   15m
+default/test   crossplane-system   configmap/kube-root-ca.crt              15m
+default/test   upbound-system      configmap/kube-root-ca.crt              15m
+default/test   kube-system         configmap/coredns                       16m
+default/test   default             configmap/kube-root-ca.crt              16m
+
+## Output truncated...
+
+```
+
+The Query API also allows you to return resource types with specific [label columns][label-columns].
+
+```shell
+up alpha query composite -A --label-columns=crossplane.io/claim-namespace
+
+CONTROLPLANE          NAME                                         SYNCED   READY   COMPOSITION                     AGE   CLAIM-NAMESPACE
+query-api-test/test   xeks.argo.discover.upbound.io/test-k7xbk     False            xeks.argo.discover.upbound.io   51d   default
+
+CONTROLPLANE   NAME   EXTERNALDNS   SYNCED   READY   COMPOSITION   AGE   CLAIM-NAMESPACE
+spaces-clusters/controlplane-query-api-test-spaces-playground   xexternaldns.externaldns.platform.upbound.io/spaces-cluster-0-xd8v2-lhnl7   6.34.2   True   True   xexternaldns.externaldns.platform.upbound.io   19d   default
+default/query-api-test   xexternaldns.externaldns.platform.upbound.io/space-awg-kine-f7dxq-nkk2q   6.34.2   True   True   xexternaldns.externaldns.platform.upbound.io   55d   default
+
+## Output truncated...
+
+```
+
+### Query API request format
+
+The CLI can also return a representation of your query request with the [`--debug` flag][debug-flag]. This flag returns the API request spec for your query.
+
+```shell
+up alpha query composite -A -d
+
+apiVersion: query.spaces.upbound.io/v1alpha1
+kind: SpaceQuery
+metadata:
+  creationTimestamp: null
+spec:
+  cursor: true
+  filter:
+    categories:
+      - composite
+    controlPlane: {}
+  limit: 500
+  objects:
+    controlPlane: true
+    table: {}
+  page: {}
+```
+
+For more complex queries, you can interact with the Query API like a Kubernetes-style API by creating a query and applying it with `kubectl`.
+
+The example below is a query for `claim` resources in every control plane from oldest to newest and returns specific information about those claims.
+
+
+```yaml
+apiVersion: query.spaces.upbound.io/v1alpha1
+kind: SpaceQuery
+spec:
+  filter:
+    categories:
+      - claim
+  order:
+    - creationTimestamp: Asc
+  cursor: true
+  count: true
+  objects:
+    id: true
+    controlPlane: true
+    object:
+      kind: true
+      apiVersion: true
+      metadata:
+        name: true
+        uid: true
+      spec:
+        containers:
+          image: true
+```
+
+
+The Query API is served by the Spaces API endpoint. You can use `up ctx` to
+switch the kubectl context to the Spaces API ingress. After that, you can use
+`kubectl create` and receive the `response` for your query parameters.
+
+```shell
+kubectl create -f spaces-query.yaml -o yaml
+```
+
+Your `response` should look similar to this example:
+
+```yaml {copy-lines="none"}
+apiVersion: query.spaces.upbound.io/v1alpha1
+kind: SpaceQuery
+metadata:
+  creationTimestamp: "2024-08-08T14:41:46Z"
+  name: default
+response:
+  count: 3
+  cursor:
+    next: ""
+    page: 0
+    pageSize: 100
+    position: 0
+  objects:
+    - controlPlane:
+        name: query-api-test
+        namespace: default
+      id: default/query-api-test/823b2781-7e70-4d91-a6f0-ee8f455d67dc
+      object:
+        apiVersion: spaces.platform.upbound.io/v1alpha1
+        kind: Space
+        metadata:
+          name: space-awg-kine
+          resourceVersion: "803868"
+          uid: 823b2781-7e70-4d91-a6f0-ee8f455d67dc
+        spec: {}
+    - controlPlane:
+        name: test-1
+        namespace: test
+      id: test/test-1/08a573dd-851a-42cc-a600-b6f6ed37ee8d
+      object:
+        apiVersion: argo.discover.upbound.io/v1alpha1
+        kind: EKS
+        metadata:
+          name: test-1
+          resourceVersion: "4270320"
+          uid: 08a573dd-851a-42cc-a600-b6f6ed37ee8d
+        spec: {}
+    - controlPlane:
+        name: controlplane-query-api-test-spaces-playground
+        namespace: spaces-clusters
+      id: spaces-clusters/controlplane-query-api-test-spaces-playground/b5a6770f-1f85-4d09-8990-997c84bd4159
+      object:
+        apiVersion: spaces.platform.upbound.io/v1alpha1
+        kind: Space
+        metadata:
+          name: spaces-cluster-0
+          resourceVersion: "1408337"
+          uid: b5a6770f-1f85-4d09-8990-997c84bd4159
+        spec: {}
+```
+
+
+## Query API Explorer
+
+
+
+import CrdDocViewer from '@site/src/components/CrdViewer';
+
+### Query
+
+The Query resource allows you to query objects in a single control plane.
+
+
+
+### GroupQuery
+
+The GroupQuery resource allows you to query objects across a group of control planes.
+
+
+
+### SpaceQuery
+
+The SpaceQuery resource allows you to query objects across all control planes in a space.
+
+
+
+
+
+
+[documentation]: /spaces/howtos/self-hosted/query-api
+[up-ctx]: /reference/cli-reference
+[up-alpha-get-command]: /reference/cli-reference
+[a-flag]: /reference/cli-reference
+[multiple-resource-types]: /reference/cli-reference
+[up-alpha-query-command]: /reference/cli-reference
+[sort-by-flag]: /reference/cli-reference
+[label-columns]: /reference/cli-reference
+[debug-flag]: /reference/cli-reference
+[kubectl-conventions]: https://kubernetes.io/docs/reference/kubectl/generated/kubectl_get/
diff --git a/spaces_versioned_docs/version-v1.11/howtos/secrets-management.md b/spaces_versioned_docs/version-v1.11/howtos/secrets-management.md
new file mode 100644
index 000000000..88e730ae5
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.11/howtos/secrets-management.md
@@ -0,0 +1,719 @@
+---
+title: Secrets Management
+sidebar_position: 20
+description: A guide for how to configure synchronizing external secrets into control
+  planes in a Space.
+---
+
+Upbound's _Shared Secrets_ is a built-in secrets management feature that
+provides an integrated way to manage secrets across your platform. It allows you
+to store sensitive data like passwords and certificates for your managed control
+planes as secrets in an external secret store.
+
+This feature is a wrapper around the [External Secrets Operator (ESO)][external-secrets-operator-eso] that pulls secrets from external vaults and distributes them across your platform.
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+). The SharedSecretStore and SharedExternalSecret CRDs have been stable since v1.9.
+
+For API specifications and configuration details, see the [Spaces API Reference](../../../reference/apis/spaces-api/).
For version compatibility details, see the .
+:::
+
+## Benefits
+
+The Shared Secrets feature allows you to:
+
+* Access secrets from a variety of external secret stores without operational overhead
+* Configure synchronization for multiple control planes in a group
+* Store and manage all your secrets centrally
+* Use Shared Secrets across all Upbound environments (Cloud and Disconnected Spaces)
+* Synchronize secrets across groups of control planes while maintaining clear security boundaries
+* Manage secrets at scale programmatically while ensuring proper isolation and access control
+
+## Understanding the architecture
+
+The Shared Secrets feature uses a hierarchical approach to centrally manage
+secrets and effectively control their distribution.
+
+![Shared Secrets workflow diagram](/img/shared-secrets-workflow.png)
+
+1. The flow begins at the group level, where you define your secret sources and distribution rules
+2. These rules automatically create corresponding resources in your control planes
+3. In each control plane, specific namespaces receive the secrets
+4. Changes at the group level automatically propagate through this chain
+
+## Component configuration
+
+Upbound Shared Secrets consists of two components:
+
+1. **SharedSecretStore**: Defines connections to external secret providers
+2. **SharedExternalSecret**: Specifies which secrets to synchronize and where
+
+
+### Connect to an external vault
+
+
+The `SharedSecretStore` component is the connection point to your external
+secret vaults. It provisions ClusterSecretStore resources into control planes
+within the group.
+
+
+#### AWS Secrets Manager
+
+
+
+In this example, you'll create a `SharedSecretStore` to connect to AWS
+Secrets Manager in `us-west-2`. You'll then grant access to all control planes labeled with
+`environment: production`, and make these secrets available in the `default` and
+`crossplane-system` namespaces.
+
+
+You can configure access to AWS Secrets Manager using static credentials or
+workload identity.
+
+:::important
+While the underlying ESO API supports more auth methods, static credentials are currently the only supported auth method in Cloud Spaces.
+:::
+
+##### Static credentials
+
+1. Use the AWS CLI to create access credentials.
+
+
+2. Save your access credentials in a file:
+```ini
+# Create a text file with AWS credentials
+cat > aws-credentials.txt << EOF
+[default]
+aws_access_key_id = <your-access-key-id>
+aws_secret_access_key = <your-secret-access-key>
+EOF
+```
+
+3. Next, store the access credentials in a secret in the namespace you want to have access to the `SharedSecretStore`.
+```shell
+kubectl create secret \
+  generic aws-credentials \
+  -n default \
+  --from-file=creds=./aws-credentials.txt
+```
+
+4. Create a `SharedSecretStore` custom resource file called `secretstore.yaml`.
+   Paste the following configuration:
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: aws-secrets
+spec:
+  # Define which control planes should receive this configuration
+  controlPlaneSelector:
+    labelSelectors:
+      - matchLabels:
+          environment: production
+
+  # Define which namespaces within those control planes can access secrets
+  namespaceSelector:
+    names:
+      - default
+      - crossplane-system
+
+  # Configure the connection to AWS Secrets Manager
+  provider:
+    aws:
+      service: SecretsManager
+      region: us-west-2
+      auth:
+        secretRef:
+          accessKeyIDSecretRef:
+            name: aws-credentials
+            key: access-key-id
+          secretAccessKeySecretRef:
+            name: aws-credentials
+            key: secret-access-key
+```
+
+
+
+##### Workload Identity with IRSA
+
+
+
+You can also use AWS IAM Roles for Service Accounts (IRSA) depending on your
+organization's needs:
+
+1. Ensure you have deployed the Spaces software into an IRSA-enabled EKS cluster.
+2. Follow the AWS instructions to create an IAM OIDC provider with your EKS OIDC
+   provider URL.
+3. Determine the Spaces-generated `controlPlaneID` of your control plane:
+```shell
+kubectl get controlplane <control-plane-name> -o jsonpath='{.status.controlPlaneID}'
+```
+
+4. Create an IAM trust policy in your AWS account to match the control plane.
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Principal": {
+        "Federated": "arn:aws:iam::<account-id>:oidc-provider/<oidc-provider>"
+      },
+      "Action": "sts:AssumeRoleWithWebIdentity",
+      "Condition": {
+        "StringEquals": {
+          "<oidc-provider>:aud": "sts.amazonaws.com",
+          "<oidc-provider>:sub": [
+            "system:serviceaccount:mxp-<controlPlaneID>-system:external-secrets-controller"]
+        }
+      }
+    }
+  ]
+}
+```
+
+5. Update your Spaces deployment to annotate the SharedSecrets service account
+   with the role ARN.
+```shell
+up space upgrade ... \
+  --set controlPlanes.sharedSecrets.serviceAccount.customAnnotations."eks\.amazonaws\.com/role-arn"="<role-arn>"
+```
+
+6. Create a SharedSecretStore and reference the SharedSecrets service account:
+```yaml {copy-lines="all"}
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: aws-sm
+  namespace: default
+spec:
+  provider:
+    aws:
+      service: SecretsManager
+      region: <region>
+      auth:
+        jwt:
+          serviceAccountRef:
+            name: external-secrets-controller
+  controlPlaneSelector:
+    names:
+      - <control-plane-name>
+  namespaceSelector:
+    names:
+      - default
+```
+
+When you create a `SharedSecretStore`, the underlying mechanism:
+
+1. Applies at the group level
+2. Determines which control planes should receive this configuration using the `controlPlaneSelector`
+3. Automatically creates a ClusterSecretStore inside each identified control plane
+4. Maintains a connection in each control plane with the ClusterSecretStore
+   credentials and configuration from the parent SharedSecretStore
+
+Upbound automatically generates a ClusterSecretStore in each matching control
+plane when you create a SharedSecretStore.
+
+```yaml {copy-lines="none"}
+# Automatically created in each matching control plane
+apiVersion: external-secrets.io/v1beta1
+kind: ClusterSecretStore
+metadata:
+  name: aws-secrets  # Name matches the parent SharedSecretStore
+spec:
+  provider:
+    upboundspaces:
+      storeRef:
+        name: aws-secrets
+```
+
+When you create the SharedSecretStore, the controller replaces the provider with
+a special provider called `upboundspaces`. This provider references the
+SharedSecretStore object in the Spaces API. This avoids copying the actual cloud
+credentials from Spaces to each control plane.
+
+This workflow allows you to configure the store connection only once at the
+group level and automatically propagate it to each control plane. Individual control
+planes can use the store without exposure to the group-level configuration, and
+updates to the parent propagate to all child ClusterSecretStores.
+
+
+#### Azure Key Vault
+
+
+:::important
+While the underlying ESO API supports more auth methods, static credentials are currently the only supported auth method in Cloud Spaces.
+:::
+
+##### Static credentials
+
+1. Use the Azure CLI to create a service principal and authentication file.
+2. Save the service principal credentials in a file such as `azure-credentials.json`:
+```json
+{
+  "appId": "myAppId",
+  "displayName": "myServicePrincipalName",
+  "password": "myServicePrincipalPassword",
+  "tenant": "myTenantId"
+}
+```
+
+3. Store the credentials as a Kubernetes secret:
+```shell
+kubectl create secret \
+  generic azure-secret-sp \
+  -n default \
+  --from-file=creds=./azure-credentials.json
+```
+
+4. Create a SharedSecretStore referencing these credentials:
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: azure-kv
+spec:
+  provider:
+    azurekv:
+      tenantId: "<tenant-id>"
+      vaultUrl: "<vault-url>"
+      authSecretRef:
+        clientId:
+          name: azure-secret-sp
+          key: ClientID
+        clientSecret:
+          name: azure-secret-sp
+          key: ClientSecret
+  controlPlaneSelector:
+    names:
+      - <control-plane-name>
+  namespaceSelector:
+    names:
+      - default
+```
+
+##### Workload Identity
+
+
+You can also use Entra Workload Identity Federation to access Azure Key Vault
+without needing to manage secrets.
+
+To use Entra Workload ID with AKS:
+
+
+1. Deploy the Spaces software into a [workload identity-enabled AKS cluster][workload-identity-enabled-aks-cluster].
+2. Retrieve the OIDC issuer URL of the AKS cluster:
+```ini
+az aks show --name "<aks-cluster-name>" \
+  --resource-group "<resource-group>" \
+  --query "oidcIssuerProfile.issuerUrl" \
+  --output tsv
+```
+
+3. Use the Azure CLI to create a managed identity:
+```ini
+az identity create \
+  --name "<managed-identity-name>" \
+  --resource-group "<resource-group>" \
+  --location "<location>" \
+  --subscription "<subscription-id>"
+```
+
+4. Look up the managed identity's client ID:
+```ini
+az identity show \
+  --resource-group "<resource-group>" \
+  --name "<managed-identity-name>" \
+  --query 'clientId' \
+  --output tsv
+```
+
+5. Update your Spaces deployment to annotate the SharedSecrets service account with the associated Entra application client ID from the previous step:
+```ini
+up space upgrade ... \
+  --set controlPlanes.sharedSecrets.serviceAccount.customAnnotations."azure\.workload\.identity/client-id"="<client-id>" \
+  --set-string controlPlanes.sharedSecrets.pod.customLabels."azure\.workload\.identity/use"="true"
+```
+
+6. Determine the Spaces-generated `controlPlaneID` of your control plane. When you deploy a `kind: controlplane` in a Space, the Spaces software deploys a set of pods in a new namespace following the format `mxp-<controlPlaneID>-system`.
+```ini
+kubectl get controlplane <control-plane-name> -o jsonpath='{.status.controlPlaneID}'
+```
+
+7. Create a federated identity credential.
+```ini
+FEDERATED_IDENTITY_CREDENTIAL_NAME=<federated-credential-name>
+USER_ASSIGNED_IDENTITY_NAME=<managed-identity-name>
+RESOURCE_GROUP=<resource-group>
+AKS_OIDC_ISSUER=<oidc-issuer-url>
+CONTROLPLANE_ID=<controlPlaneID>
+az identity federated-credential create --name ${FEDERATED_IDENTITY_CREDENTIAL_NAME} --identity-name "${USER_ASSIGNED_IDENTITY_NAME}" --resource-group "${RESOURCE_GROUP}" --issuer "${AKS_OIDC_ISSUER}" --subject system:serviceaccount:"mxp-${CONTROLPLANE_ID}-system:external-secrets-controller" --audience api://AzureADTokenExchange
+```
+
+8. Assign the `Key Vault Secrets User` role to the user-assigned managed identity that you created earlier.
This step gives the managed identity permission to read secrets from the key vault:
+```ini
+az role assignment create \
+  --assignee-object-id "${IDENTITY_PRINCIPAL_ID}" \
+  --role "Key Vault Secrets User" \
+  --scope "${KEYVAULT_RESOURCE_ID}" \
+  --assignee-principal-type ServicePrincipal
+```
+
+:::important
+You must manually restart a workload's pod when you add the annotation to the running pod's service account. The Entra workload identity mutating admission webhook requires a restart to inject the necessary environment.
+:::
+
+9. Create a `SharedSecretStore`. Replace `vaultUrl` with the URL of your Azure Key Vault instance. Replace `identityId` with the client ID of the managed identity created earlier:
+```yaml {copy-lines="all"}
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: azure-kv
+spec:
+  provider:
+    azurekv:
+      authType: WorkloadIdentity
+      vaultUrl: "<vault-url>"
+  controlPlaneSelector:
+    names:
+      - <control-plane-name>
+  namespaceSelector:
+    names:
+      - default
+```
+
+
+
+
+#### Google Cloud Secret Manager
+
+
+
+You can configure access to Google Cloud Secret Manager using static credentials or workload identity. Below are instructions for configuring either. See the [ESO provider API][eso-provider-api] for more information.
+
+:::important
+While the underlying ESO API supports more auth methods, static credentials are currently the only supported auth method in Cloud Spaces.
+:::
+
+##### Static credentials
+
+1. Use the [GCP CLI][gcp-cli] to create access credentials.
+2. Save the output in a file called `gcp-credentials.json`.
+3. Store the access credentials in a secret in the same namespace as the `SharedSecretStore`.
+   ```shell {label="kube-create-secret",copy-lines="all"}
+   kubectl create secret \
+     generic gcpsm-secret \
+     -n default \
+     --from-file=creds=./gcp-credentials.json
+   ```
+
+4. Create a `SharedSecretStore`, referencing the secret created earlier. Replace `projectID` with your GCP Project ID:
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: gcp-sm
+spec:
+  provider:
+    gcpsm:
+      auth:
+        secretRef:
+          secretAccessKeySecretRef:
+            name: gcpsm-secret
+            key: creds
+      projectID: <project-ID>
+  controlPlaneSelector:
+    names:
+      - <control-plane-name>
+  namespaceSelector:
+    names:
+      - default
+```
+
+:::tip
+The example above maps a Shared Secret Store into a single namespace of a single control plane. Read [control plane selection][control-plane-selection] and [namespace selection][namespace-selection] to learn how to map into one or more namespaces of one or more control planes.
+:::
+
+
+##### Workload identity with Service Accounts to IAM Roles
+
+
+To configure, grant the `roles/iam.workloadIdentityUser` role to the Kubernetes
+service account in the control plane namespace to impersonate the IAM service
+account.
+
+1. Ensure you've deployed Spaces on a [Workload Identity Federation-enabled][workload-identity-federation-enabled] GKE cluster.
+2. Determine the Spaces-generated `controlPlaneID` of your control plane. When you deploy a `kind: controlplane` in a Space, the Spaces software deploys a set of pods in a new namespace following the format `mxp-<controlPlaneID>-system`.
+```ini
+kubectl get controlplane <control-plane-name> -o jsonpath='{.status.controlPlaneID}'
+```
+
+3. Create a GCP IAM service account with the [GCP CLI][gcp-cli-1]:
+```ini
+gcloud iam service-accounts create <service-account-name> \
+  --project=<project-id>
+```
+
+4.
Grant the IAM service account the role to access GCP Secret Manager:
+```ini
+SA_NAME=<service-account-name>
+IAM_SA_PROJECT_ID=<project-id>
+gcloud projects add-iam-policy-binding "${IAM_SA_PROJECT_ID}" \
+  --member "serviceAccount:${SA_NAME}@${IAM_SA_PROJECT_ID}.iam.gserviceaccount.com" \
+  --role roles/secretmanager.secretAccessor
+```
+
+5. When you enable the Shared Secrets feature, a service account gets created in each control plane for the External Secrets Operator. Apply a [GCP IAM policy binding][gcp-iam-policy-binding] to associate this service account with the desired GCP IAM role.
+```ini
+PROJECT_ID=<project-id>
+PROJECT_NUMBER=<project-number>
+CONTROLPLANE_ID=<controlPlaneID>
+gcloud projects add-iam-policy-binding projects/${PROJECT_ID} \
+  --role "roles/iam.workloadIdentityUser" \
+  --member=principal://iam.googleapis.com/projects/${PROJECT_NUMBER}/locations/global/workloadIdentityPools/${PROJECT_ID}.svc.id.goog/subject/ns/mxp-${CONTROLPLANE_ID}-system/sa/external-secrets-controller
+```
+
+6. Update your Spaces deployment to annotate the SharedSecrets service account with the GCP IAM service account's identifier:
+```ini
+up space upgrade ... \
+  --set controlPlanes.sharedSecrets.serviceAccount.customAnnotations."iam\.gke\.io/gcp-service-account"="<iam-service-account-email>"
+```
+
+7. Create a `SharedSecretStore`. Replace `projectID` with your GCP Project ID:
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: gcp-sm
+spec:
+  provider:
+    gcpsm:
+      projectID: <project-ID>
+  controlPlaneSelector:
+    names:
+      - <control-plane-name>
+  namespaceSelector:
+    names:
+      - default
+```
+
+:::tip
+The example above maps a Shared Secret Store into a single namespace of a single control plane. Read [control plane selection][control-plane-selection-1] and [namespace selection][namespace-selection-2] to learn how to map into one or more namespaces of one or more control planes.
+:::
+
+### Manage your secret distribution
+
+After you create your SharedSecretStore, you can define which secrets to
+distribute using SharedExternalSecret:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedExternalSecret
+metadata:
+  name: database-credentials
+  namespace: default
+spec:
+  # Select the same control planes as your SharedSecretStore
+  controlPlaneSelector:
+    labelSelectors:
+      - matchLabels:
+          environment: production
+
+  externalSecretSpec:
+    refreshInterval: 1h
+    secretStoreRef:
+      name: aws-secrets  # References the SharedSecretStore name
+      kind: ClusterSecretStore
+    target:
+      name: db-credentials
+    data:
+      - secretKey: username
+        remoteRef:
+          key: prod/database/credentials
+          property: username
+      - secretKey: password
+        remoteRef:
+          key: prod/database/credentials
+          property: password
+```
+
+This configuration:
+
+* Pulls database credentials from your external secret provider
+* Creates secrets in all production control planes
+* Refreshes the secrets every hour
+* Creates a secret called `db-credentials` in each control plane
+
+When you create a SharedExternalSecret at the group level, Upbound's system
+creates a template for the corresponding ClusterExternalSecrets in each selected
+control plane.
+
+The example below simulates the ClusterExternalSecret that Upbound creates:
+
+```yaml
+# Inside each matching control plane:
+apiVersion: external-secrets.io/v1beta1
+kind: ClusterExternalSecret
+metadata:
+  name: database-credentials
+spec:
+  refreshInterval: 1h
+  secretStoreRef:
+    name: aws-secrets
+    kind: ClusterSecretStore
+  data:
+    - secretKey: username
+      remoteRef:
+        key: prod/database/credentials
+        property: username
+```
+
+The hierarchy in this configuration is:
+
+1.
SharedExternalSecret (group level) defines what secrets to distribute
+2. ClusterExternalSecret (control plane level) manages the distribution within
+   each control plane
+3. Kubernetes Secrets (namespace level) are created in specified namespaces
+
+
+#### Control plane selection
+
+To configure which control planes in a group you want to project a SecretStore into, use the `spec.controlPlaneSelector` field. You can either use `labelSelectors` or the `names` of a control plane directly. A control plane matches if any of the label selectors match.
+
+This example matches all control planes in the group that have `environment: production` as a label:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: my-secret-store
+spec:
+  controlPlaneSelector:
+    labelSelectors:
+      - matchLabels:
+          environment: production
+```
+
+You can use the more complex `matchExpressions` to match labels based on an expression. This example matches control planes that have label `environment: production` or `environment: staging`:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: my-secret-store
+spec:
+  controlPlaneSelector:
+    labelSelectors:
+      - matchExpressions:
+          - { key: environment, operator: In, values: [production,staging] }
+```
+
+You can also specify the names of control planes directly:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: my-secret-store
+spec:
+  controlPlaneSelector:
+    names:
+      - controlplane-dev
+      - controlplane-staging
+      - controlplane-prod
+```
+
+
+#### Namespace selection
+
+To configure which namespaces **within each matched control plane** to project the secret store into, use the `spec.namespaceSelector` field. The projected secret store only appears in the namespaces matching the provided selector. You can either use `labelSelectors` or the `names` of namespaces directly. A namespace matches if any of the label selectors match.
+
+**For all control planes matched by** `spec.controlPlaneSelector`, this example matches all namespaces in each selected control plane that have `team: team1` as a label:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: my-secret-store
+spec:
+  namespaceSelector:
+    labelSelectors:
+      - matchLabels:
+          team: team1
+```
+
+You can use the more complex `matchExpressions` to match labels based on an expression. This example matches namespaces that have label `team: team1` or `team: team2`:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: my-secret-store
+spec:
+  namespaceSelector:
+    labelSelectors:
+      - matchExpressions:
+          - { key: team, operator: In, values: [team1,team2] }
+```
+
+You can also specify the names of namespaces directly:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: my-secret-store
+spec:
+  namespaceSelector:
+    names:
+      - team1-namespace
+      - team2-namespace
+```
+
+## Configure secrets directly in a control plane
+
+
+The sections above explain how to use group-scoped resources to project secrets into multiple control planes. You can also use ESO API types directly in a control plane as you would in standalone Crossplane or Kubernetes.
+
+
+See the [ESO documentation][eso-documentation] for a full guide on using the API types.
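+
+As a minimal sketch of direct usage, a plain ESO `ExternalSecret` applied inside
+one control plane can reference the ClusterSecretStore that a group-level
+`SharedSecretStore` projected into it (the store name `aws-secrets` and the
+remote key reuse values from the examples above; adjust for your environment):
+
+```yaml
+apiVersion: external-secrets.io/v1beta1
+kind: ExternalSecret
+metadata:
+  name: db-credentials
+  namespace: default
+spec:
+  refreshInterval: 1h
+  secretStoreRef:
+    name: aws-secrets       # ClusterSecretStore projected by the SharedSecretStore
+    kind: ClusterSecretStore
+  target:
+    name: db-credentials    # Kubernetes Secret created in this namespace
+  data:
+    - secretKey: password
+      remoteRef:
+        key: prod/database/credentials
+        property: password
+```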
+
+## Best practices
+
+When you configure secrets management in your Upbound environment, keep the
+following best practices in mind:
+
+**Use consistent labeling schemes** across your control planes for predictable
+and manageable secret distribution.
+
+**Organize your secrets** in your external provider using a hierarchical
+structure that mirrors your control plane organization.
+
+**Set appropriate refresh intervals** based on your security requirements and the
+nature of the secrets.
+
+**Use namespace selection sparingly** to limit secret distribution to only the
+namespaces that need them.
+
+**Use separate tokens for each environment.** Keep them in distinct
+SharedSecretStores. Users could bypass SharedExternalSecret selectors by
+creating ClusterExternalSecrets directly in control planes. This grants access to all
+secrets available to that token.
+
+**Document your secret management architecture**, including which control planes
+should receive which secrets.
+
+[control-plane-selection]: #control-plane-selection
+[namespace-selection]: #namespace-selection
+[control-plane-selection-1]: #control-plane-selection
+[namespace-selection-2]: #namespace-selection
+
+[external-secrets-operator-eso]: https://external-secrets.io
+[workload-identity-enabled-aks-cluster]: https://learn.microsoft.com/en-us/azure/aks/workload-identity-deploy-cluster
+[eso-provider-api]: https://external-secrets.io/latest/provider/google-secrets-manager/
+[gcp-cli]: https://cloud.google.com/iam/docs/creating-managing-service-account-keys
+[workload-identity-federation-enabled]: https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity#enable_on_clusters_and_node_pools
+[gcp-cli-1]: https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity#kubernetes-sa-to-iam
+[gcp-iam-policy-binding]: https://cloud.google.com/sdk/gcloud/reference/iam/service-accounts/add-iam-policy-binding
+[eso-documentation]: https://external-secrets.io/latest/introduction/getting-started/
diff --git a/spaces_versioned_docs/version-v1.11/howtos/self-hosted/_category_.json b/spaces_versioned_docs/version-v1.11/howtos/self-hosted/_category_.json
new file mode 100644
index 000000000..5bf23bb0a
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.11/howtos/self-hosted/_category_.json
@@ -0,0 +1,11 @@
+{
+  "label": "Self-Hosted Spaces",
+  "position": 2,
+  "collapsed": true,
+  "customProps": {
+    "plan": "business"
+  }
+
+}
+
+
diff --git a/spaces_versioned_docs/version-v1.11/howtos/self-hosted/administer-features.md b/spaces_versioned_docs/version-v1.11/howtos/self-hosted/administer-features.md
new file mode 100644
index 000000000..ce878014e
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.11/howtos/self-hosted/administer-features.md
@@ -0,0 +1,121 @@
+---
+title: Administer features
+sidebar_position: 12
+description: Enable and disable features in Spaces
+---
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Feature flags and their availability may vary by version.
+
+For detailed feature availability across versions, see the .
+:::
+
+This guide shows how to enable or disable features in your self-hosted Space.
+
+## Shared secrets
+
+**Status:** Preview
+
+This feature is enabled by default in Cloud Spaces.
+
+To enable this feature in a self-hosted Space, set
+`features.alpha.sharedSecrets.enabled=true` when installing the Space:
+
+```bash
+up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
+  ...
+  --set "features.alpha.sharedSecrets.enabled=true" \
+```
+
+
+## Observability
+
+**Status:** GA
+**Available from:** Spaces v1.13+
+
+This feature is enabled by default in Cloud Spaces.
+
+
+
+To enable this feature in a self-hosted Space, set
+`observability.enabled=true` (`features.alpha.observability.enabled=true` before `v1.14.0`) when installing the Space:
+
+```bash
+up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
+  ...
+  --set "observability.enabled=true" \
+```
+
+The observability feature collects telemetry data from user-facing control
+plane workloads like:
+
+* Crossplane
+* Providers
+* Functions
+
+Self-hosted Spaces users can add control plane system workloads such as the
+`api-server` and `etcd` by setting the
+`observability.collectors.includeSystemTelemetry` Helm flag to true.
+
+### Sensitive data
+
+To avoid exposing sensitive data in the `SharedTelemetryConfig` resource, use
+Kubernetes secrets to store the sensitive data and reference the secret in the
+`SharedTelemetryConfig` resource.
+
+Create the secret in the same namespace/group as the `SharedTelemetryConfig`
+resource. The example below uses `kubectl create secret` to create a new secret:
+
+```bash
+kubectl create secret generic sensitive -n <namespace> \
+  --from-literal=apiKey='YOUR_API_KEY'
+```
+
+Next, reference the secret in the `SharedTelemetryConfig` resource:
+
+```yaml
+apiVersion: observability.spaces.upbound.io/v1alpha1
+kind: SharedTelemetryConfig
+metadata:
+  name: newrelic
+spec:
+  configPatchSecretRefs:
+    - name: sensitive
+      key: apiKey
+      path: exporters.otlphttp.headers.api-key
+  controlPlaneSelector:
+    labelSelectors:
+      - matchLabels:
+          org: foo
+  exporters:
+    otlphttp:
+      endpoint: https://otlp.nr-data.net
+      headers:
+        api-key: dummy # This value is replaced by the secret value, can be omitted
+  exportPipeline:
+    metrics: [otlphttp]
+    traces: [otlphttp]
+    logs: [otlphttp]
+```
+
+The `configPatchSecretRefs` field in the `spec` specifies the secret `name`,
+`key`, and `path` values to inject the secret value in the
+`SharedTelemetryConfig` resource.
+
+## Shared backups
+
+As of Spaces `v1.12.0`, this feature is enabled by default.
+
+To disable it in a self-hosted Space, pass `features.alpha.sharedBackup.enabled=false` as a Helm chart value:
+`--set "features.alpha.sharedBackup.enabled=false"`
+
+## Query API
+
+**Status:** Preview
+
+The Query API is available in the Cloud Space offering and enabled by default.
+
+Query API is required for self-hosted deployments with connected Spaces. See the
+related [documentation][documentation]
+to enable this feature.
+
+[documentation]: /spaces/howtos/query-api/
diff --git a/spaces_versioned_docs/version-v1.11/howtos/self-hosted/attach-detach.md b/spaces_versioned_docs/version-v1.11/howtos/self-hosted/attach-detach.md
new file mode 100644
index 000000000..1465921cf
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.11/howtos/self-hosted/attach-detach.md
@@ -0,0 +1,198 @@
+---
+title: Connect or disconnect a Space
+sidebar_position: 12
+description: Enable and connect self-hosted Spaces to the Upbound console
+---
+:::info API Version Information
+This guide applies to **Spaces v1.8.0 and later** (Self-Hosted only). Connection to the Upbound console requires the Query API and RBAC to be enabled.
+
+For version-specific features and requirements, see the . For Query API setup details, see [Deploy Query API infrastructure](./query-api.md).
+:::
+
+:::important
+This feature is in preview.
Starting in Spaces `v1.8.0`, you must
+deploy and [enable the Query API][enable-the-query-api] and [enable Upbound
+RBAC][enable-upbound-rbac] to connect a Space to Upbound.
+:::
+
+[Upbound][upbound] allows you to connect self-hosted Spaces and enables a streamlined operations and debugging experience in your Console.
+
+## Usage
+
+### Connect
+
+Before you begin, make sure you have:
+
+- An existing Upbound [organization][organization] in Upbound SaaS.
+- The `up` CLI installed and logged into your organization
+- `kubectl` installed with the kubecontext of your self-hosted Space cluster.
+- A `token.json` license, provided by your Upbound account representative.
+- The [Query API][query-api] enabled in the self-hosted Space.
+
+Create a new `UPBOUND_SPACE_NAME`. If you don't create a name, `up` automatically generates one for you:
+
+```ini
+export UPBOUND_SPACE_NAME=your-self-hosted-space
+```
+
+#### With up CLI
+
+:::tip
+The command tries to connect the Space to the org account context pointed at by your `up` CLI profile. Make sure you've logged into Upbound SaaS with `up login -a <organization>` before trying to connect the Space.
+:::
+
+Connect the Space to the Console:
+
+```bash
+up space connect "${UPBOUND_SPACE_NAME}"
+```
+
+This command installs a Connect agent, creates a service account, and configures permissions in your Upbound cloud organization in the `upbound-system` namespace of your Space.
+
+#### With Helm
+
+Export your Upbound org account name to an environment variable called `UPBOUND_ORG_NAME`. You can see this value by running `up org list` after logging on to Upbound.
+
+```ini
+export UPBOUND_ORG_NAME=your-org-name
+```
+
+Create a new robot token and export it to an environment variable called `UPBOUND_TOKEN`:
+
+```bash
+up robot create "${UPBOUND_SPACE_NAME}" --description="Robot used for authenticating Space '${UPBOUND_SPACE_NAME}' with Upbound Connect"
+export UPBOUND_TOKEN=$(up robot token create "$UPBOUND_SPACE_NAME" "$UPBOUND_SPACE_NAME" --file - | jq -r '.token')
+```
+
+:::note
+Follow the [`jq` installation guide][jq-install] if your machine doesn't include
+it by default.
+:::
+
+Create a secret containing the robot token:
+
+```bash
+kubectl create secret -n upbound-system generic connect-token --from-literal=token=${UPBOUND_TOKEN}
+```
+
+Specify your username and password for the helm OCI registry:
+
+```bash
+jq -r .token $SPACES_TOKEN_PATH | helm registry login xpkg.upbound.io -u $(jq -r .accessId $SPACES_TOKEN_PATH) --password-stdin
+```
+
+In the same cluster where you installed the Spaces software, install the Upbound Connect agent with your token secret.
+
+```bash
+helm -n upbound-system upgrade --install agent \
+  oci://xpkg.upbound.io/spaces-artifacts/agent \
+  --version "0.0.0-441.g68777b9" \
+  --set "image.repository=xpkg.upbound.io/spaces-artifacts/agent" \
+  --set "registration.image.repository=xpkg.upbound.io/spaces-artifacts/register-init" \
+  --set "imagePullSecrets[0].name=upbound-pull-secret" \
+  --set "registration.enabled=true" \
+  --set "space=${UPBOUND_SPACE_NAME}" \
+  --set "organization=${UPBOUND_ORG_NAME}" \
+  --set "tokenSecret=connect-token" \
+  --wait
+```
+
+
+#### View your Space in the Console
+
+
+Go to the [Upbound Console][upbound-console], log in, and choose the newly connected Space from the Space selector dropdown.
+
+![A screenshot of the Upbound Console space selector dropdown](/img/attached-space.png)
+
+:::note
+You can only connect a self-hosted Space to a single organization at a time.
+:::
+
+### Disconnect
+
+#### With up CLI
+
+To disconnect a self-hosted Space, including one that's already been deleted, run the following command:
+
+```bash
+up space disconnect "${UPBOUND_SPACE_NAME}"
+```
+
+If the Space still exists, this command uninstalls the Connect agent and deletes the associated service account and permissions.
+
+#### With Helm
+
+To disconnect a self-hosted Space, including one that's already been deleted, run the following command:
+
+```bash
+helm delete -n upbound-system agent
+```
+
+Clean up the robot token you created for this self-hosted Space:
+
+```bash
+up robot delete "${UPBOUND_SPACE_NAME}" --force
+```
+
+## Security model
+
+### Architecture
+
+![An architectural diagram of a self-hosted Space attached to Upbound](/img/console-attach-architecture.jpg)
+
+:::note
+This diagram illustrates a self-hosted Space running in AWS connected to the global Upbound Console. The same model applies to a Space running in AKS, GKE, or other Kubernetes environments.
+:::
+
+### Data path
+
+Upbound uses a Pub/Sub model over TLS to communicate between Upbound's global
+console and your self-hosted Space. A self-hosted Space establishes a secure
+connection with `connect.upbound.io`, `api.upbound.io`, and `auth.upbound.io` and subscribes to an
+endpoint.
+
+:::important
+Add `connect.upbound.io`, `api.upbound.io`, and `auth.upbound.io` to your organization's list of
+allowed endpoints.
+:::
+
+The Upbound Console communicates with the Space through that endpoint. The data flow
+is:
+
+1. Users sign in to the Upbound Console, which redirects them to authenticate with the organization's configured Identity Provider via SSO.
+2. Once authenticated, actions in the Console, like listing control planes or specific resource types from a control plane, post as messages to the Upbound Connect service.
+3. A user's self-hosted Space polls the Upbound Connect service periodically for new messages, verifies the authenticity of each message, and fulfills the contained request.
+4. A user's self-hosted Space returns the results of the request to the Upbound Connect service, and the Console renders the results in the user's browser session.
+
+**Upbound never stores data originating from a self-hosted Space.** The data is transient and only exposed in the user's browser session. The Console needs this data to render your resources and control planes in the UI.
+
+### Data transmitted
+
+Users interact with the Upbound Console to generate request queries to the Upbound Connect service while exploring, managing, or debugging a self-hosted Space. These requests send data back to the user's browser session in the Console, including:
+
+* Metadata for the Space
+* Metadata for control planes in the Space
+* Configuration manifests for various resource types within your Space: Crossplane managed resources, composite resources, composite resource claims, Upbound shared secrets, Upbound shared backups, Crossplane providers, ProviderConfigs, Configurations, and Crossplane composition functions.
+
+:::important
+This data only concerns resource configuration. The data _inside_ the managed
+resources in your Space isn't visible at any point.
+:::
+
+**Upbound can't see your data.** Upbound doesn't have access to session-based data rendered for your users in the Upbound Console. Upbound has no information about your self-hosted Space, other than that you've connected a self-hosted Space.
+
+### Threat vectors
+
+Only users with editor or administrative permissions can make changes through the Console, like creating or deleting control planes or groups.
+
+
+[enable-the-query-api]: /spaces/howtos/self-hosted/query-api
+[enable-upbound-rbac]: /manuals/platform/concepts/authorization/upbound-rbac
+[upbound]: /manuals/console/upbound-console
+[organization]: /manuals/platform/concepts/identity-management/organizations
+[query-api]: /spaces/howtos/self-hosted/query-api
+[jq-install]: https://jqlang.org/download/
+
+[upbound-console]: https://console.upbound.io
diff --git a/spaces_versioned_docs/version-v1.11/howtos/self-hosted/billing.md b/spaces_versioned_docs/version-v1.11/howtos/self-hosted/billing.md
new file mode 100644
index 000000000..145ff9f03
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.11/howtos/self-hosted/billing.md
@@ -0,0 +1,307 @@
+---
+title: Self-Hosted Space Billing
+sidebar_position: 50
+description: A guide for how billing works in an Upbound Space
+---
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Billing models may evolve between versions.
+
+For version-specific features and capacity-based licensing details, see [Capacity Licensing](/spaces/howtos/self-hosted/capacity-licensing).
+:::
+
+Spaces are a self-hosting feature of Upbound's [flagship product][flagship-product] for platform teams to deploy control planes in their self-managed environments. You can install Spaces into any Kubernetes cluster in your own cloud account, on-premises data center, or on the edge. The pricing is usage-based and requires an Upbound account and subscription. The billing unit is a `Loop`.
+
+
+:::info
+This guide describes the traditional usage-based billing model using object storage. For disconnected or air-gapped environments, consider [Capacity Licensing](/spaces/howtos/self-hosted/capacity-licensing), which provides a simpler fixed-capacity model with local usage tracking.
+:::
+
+## Billing details
+
+Spaces **aren't connected** to Upbound's global service. To enable proper billing, the Spaces software ships a controller whose responsibility is to collect billing data from your Spaces deployment. The collection and storage of your billing data happens entirely locally within your environment; no data is automatically emitted back to Upbound's global service. This data gets written to object storage of your choice. AWS, Azure, and GCP are currently supported. The Spaces software exports billing usage data every ~15 seconds.
+
+Spaces customers must periodically provide the billing data to Upbound. Contact your Upbound sales representative to learn more.
+
+
+
+## AWS S3
+
+
+
+Configure billing to write to an S3 bucket by providing the following values at install-time. Create an S3 bucket if you don't already have one.
+
+### IAM policy
+
+You must create an IAM policy and attach it to the IAM user (for static credentials) or IAM role (for assumed roles).
+
+The policy example below enables the necessary S3 permissions:
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Sid": "EnableS3Permissions",
+      "Effect": "Allow",
+      "Action": [
+        "s3:PutObject",
+        "s3:GetObject",
+        "s3:ListBucket",
+        "s3:DeleteObject"
+      ],
+      "Resource": [
+        "arn:aws:s3:::your-bucket-name/*",
+        "arn:aws:s3:::your-bucket-name"
+      ]
+    },
+    {
+      "Sid": "ListBuckets",
+      "Effect": "Allow",
+      "Action": "s3:ListAllMyBuckets",
+      "Resource": "*"
+    }
+  ]
+}
+```
+
+### Authentication with static credentials
+
+In your Spaces install cluster, create a secret in the `upbound-system`
+namespace. This secret must contain keys `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`.
+
+```bash
+kubectl create secret generic billing-credentials -n upbound-system \
+  --from-literal=AWS_ACCESS_KEY_ID=<access-key-id> \
+  --from-literal=AWS_SECRET_ACCESS_KEY=<secret-access-key>
+```
+
+Install the Space software, providing the billing details along with the other required values.
+
+
+
+
+
+```bash {hl_lines="2-6"}
+helm -n upbound-system upgrade --install spaces ... \
+  --set "billing.enabled=true" \
+  --set "billing.storage.provider=aws" \
+  --set "billing.storage.aws.region=<region>" \
+  --set "billing.storage.aws.bucket=<bucket-name>" \
+  --set "billing.storage.secretRef.name=billing-credentials"
+  ...
+```
+
+
+
+```bash {hl_lines="2-6"}
+up space init ... \
+  --set "billing.enabled=true" \
+  --set "billing.storage.provider=aws" \
+  --set "billing.storage.aws.region=<region>" \
+  --set "billing.storage.aws.bucket=<bucket-name>" \
+  --set "billing.storage.secretRef.name=billing-credentials"
+  ...
+```
+
+
+
+
+
+### Authentication with an IAM role
+
+
+To use short-lived credentials with an assumed IAM role, create an IAM role with
+an established trust relationship to the `vector` service account in all
+`mxp-*-system` namespaces.
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Principal": {
+        "Federated": "arn:aws:iam::12345678912:oidc-provider/oidc.eks.eu-west-2.amazonaws.com/id/YOUROIDCPROVIDERID"
+      },
+      "Action": "sts:AssumeRoleWithWebIdentity",
+      "Condition": {
+        "StringLike": {
+          "oidc.eks.eu-west-2.amazonaws.com/id/YOUROIDCPROVIDERID:sub": "system:serviceaccount:mxp-*-system:vector"
+        }
+      }
+    }
+  ]
+}
+```
+
+For more information about workload identities, review the [Workload-identity
+Configuration documentation][workload-identity-configuration-documentation].
+
+
+
+
+
+```bash {hl_lines="2-7"}
+helm -n upbound-system upgrade --install spaces ... \
+  --set "billing.enabled=true" \
+  --set "billing.storage.provider=aws" \
+  --set "billing.storage.aws.region=<region>" \
+  --set "billing.storage.aws.bucket=<bucket-name>" \
+  --set "billing.storage.secretRef.name=" \
+  --set "controlPlanes.vector.serviceAccount.customAnnotations[eks.amazonaws.com/role-arn]=<role-arn>"
+  ...
+```
+
+
+
+```bash {hl_lines="2-7"}
+up space init ... \
+  --set "billing.enabled=true" \
+  --set "billing.storage.provider=aws" \
+  --set "billing.storage.aws.region=<region>" \
+  --set "billing.storage.aws.bucket=<bucket-name>" \
+  --set "billing.storage.secretRef.name=" \
+  --set "controlPlanes.vector.serviceAccount.customAnnotations[eks.amazonaws.com/role-arn]=<role-arn>"
+  ...
+```
+
+
+
+
+
+*Note*: You must set `billing.storage.secretRef.name` to an empty string when using an assumed role.
+
+
+## Azure blob storage
+
+Configure billing to write to a blob in Azure by providing the following values at install-time. Create a storage account and container if you don't already have one.
+
+Then, on the cluster where you installed the Spaces software, create a secret in `upbound-system`.
This secret must contain keys `AZURE_TENANT_ID`, `AZURE_CLIENT_ID`, and `AZURE_CLIENT_SECRET`. Make sure to replace the values with details generated from your Azure account.
+
+```bash
+kubectl create secret generic billing-credentials -n upbound-system \
+  --from-literal=AZURE_TENANT_ID=<tenant-id> \
+  --from-literal=AZURE_CLIENT_ID=<client-id> \
+  --from-literal=AZURE_CLIENT_SECRET=<client-secret>
+```
+
+Install the Space software, providing the billing details along with the other required values.
+
+
+
+
+
+```bash {hl_lines="2-6"}
+helm -n upbound-system upgrade --install spaces ... \
+  --set "billing.enabled=true" \
+  --set "billing.storage.provider=azure" \
+  --set "billing.storage.azure.storageAccount=<storage-account>" \
+  --set "billing.storage.azure.container=<container>" \
+  --set "billing.storage.secretRef.name=billing-credentials"
+  ...
+```
+
+
+
+```bash {hl_lines="2-6"}
+up space init ... \
+  --set "billing.enabled=true" \
+  --set "billing.storage.provider=azure" \
+  --set "billing.storage.azure.storageAccount=<storage-account>" \
+  --set "billing.storage.azure.container=<container>" \
+  --set "billing.storage.secretRef.name=billing-credentials"
+  ...
+```
+
+
+
+
+
+## GCP Cloud Storage Buckets
+
+
+Configure billing to write to a Cloud Storage bucket in GCP by providing the following values at install-time. Create a bucket if you don't already have one.
+
+Then, on the cluster where you installed the Spaces software, create a secret in `upbound-system`. This secret must contain the key `google_application_credentials`. Make sure to replace the value with a GCP service account key JSON generated from your GCP account.
+
+```bash
+kubectl create secret generic billing-credentials -n upbound-system \
+  --from-literal=google_application_credentials=<service-account-key-json>
+```
+
+Install the Space software, providing the billing details along with the other required values.
+
+
+
+
+
+```bash {hl_lines="2-5"}
+helm -n upbound-system upgrade --install spaces ... \
+  --set "billing.enabled=true" \
+  --set "billing.storage.provider=gcp" \
+  --set "billing.storage.gcp.bucket=<bucket-name>" \
+  --set "billing.storage.secretRef.name=billing-credentials"
+  ...
+```
+
+
+
+```bash {hl_lines="2-5"}
+up space init ... \
+  --set "billing.enabled=true" \
+  --set "billing.storage.provider=gcp" \
+  --set "billing.storage.gcp.bucket=<bucket-name>" \
+  --set "billing.storage.secretRef.name=billing-credentials"
+  ...
+```
+
+
+
+
+## Export billing data to send to Upbound
+
+To prepare the billing data to send to Upbound, do the following:
+
+Ensure the current context of your kubeconfig points at the Spaces cluster. Then, run the [export][export] command.
+
+
+:::important
+The credentials your CLI uses must have read access to the bucket to run this command.
+:::
+
+
+The example below exports billing data stored in AWS:
+
+```bash
+up space billing export --provider=aws \
+  --bucket=spaces-billing-bucket \
+  --account=your-upbound-org \
+  --billing-month=2024-07 \
+  --force-incomplete
+```
+
+The command creates a billing report that's zipped up in your current working directory. Send the output to your Upbound sales representative.
+
+
+You can find full instructions and command options in the up [CLI reference][cli-reference] docs.
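+You can optionally confirm that the Space has been writing usage data before
+you export it. A quick check with the AWS CLI, assuming the
+`spaces-billing-bucket` bucket name from the example above:
+
+```bash
+# List the most recently written billing objects in the bucket
+aws s3 ls s3://spaces-billing-bucket/ --recursive | tail -n 5
+```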
+ + +[export]: /reference/cli-reference +[cli-reference]: /reference/cli-reference +[flagship-product]: https://www.upbound.io/platform +[workload-identity-configuration-documentation]: https://docs.upbound.io/operate/accounts/authentication/oidc-configuration diff --git a/spaces_versioned_docs/version-v1.11/howtos/self-hosted/capacity-licensing.md b/spaces_versioned_docs/version-v1.11/howtos/self-hosted/capacity-licensing.md new file mode 100644 index 000000000..a1dc6c101 --- /dev/null +++ b/spaces_versioned_docs/version-v1.11/howtos/self-hosted/capacity-licensing.md @@ -0,0 +1,591 @@ +--- +title: Capacity Licensing +sidebar_position: 60 +description: A guide for capacity-based licensing in self-hosted Spaces +plan: "enterprise" +--- + + + + + +This guide explains how to configure and monitor capacity-based licensing in +self-hosted Upbound Spaces. Capacity licensing provides a simplified billing +model for disconnected or air-gapped environments where automated usage +reporting isn't possible. + +:::info +Spaces `v1.15` and later support Capacity Licensing as an +alternative to the traditional usage-based billing model described in the +[Self-Hosted Space Billing][space-billing] guide. +::: + +## Overview + +Capacity licensing allows organizations to purchase a fixed capacity of +resources upfront. The Spaces software tracks usage locally and provides +visibility into consumption against your purchased capacity, all without +requiring external connectivity to Upbound's services. + +### Key concepts + +- **Resource Hours**: The primary billing unit representing all resources + managed by Crossplane over time. This includes managed resources, + composites (XRs), claims (XRCs), and all composed resources - essentially + everything Crossplane manages. The system aggregates resource counts over each + hour using trapezoidal integration to accurately account for changes in + resource count throughout the hour. +- **Operations**: The number of Operations invoked by Crossplane. +- **License Capacity**: The total amount of resource hours and operations included in your license. +- **Usage Tracking**: Continuous monitoring of consumption with real-time utilization percentages. + +### How it works + +1. Upbound provides you with a license file containing your purchased capacity +2. You configure a `SpaceLicense` in your Spaces cluster +3. The metering system automatically: + - Collects measurements from all control planes every minute + - Aggregates usage data into hourly intervals + - Stores usage data in a local PostgreSQL database + - Updates the `SpaceLicense` status with current consumption + +## Prerequisites + +### PostgreSQL database + +Capacity licensing requires a PostgreSQL database to store usage measurements. You can use: + +- An existing PostgreSQL instance +- A managed PostgreSQL service (AWS RDS, Azure Database, Google Cloud SQL) +- A PostgreSQL instance deployed in your cluster + +The database must be: + +- Accessible from the Spaces cluster +- Configured with a dedicated database and credentials + +#### Example: Deploy PostgreSQL with CloudNativePG + +If you don't have an existing PostgreSQL instance, you can deploy one in your +cluster using [CloudNativePG] (CNPG). CNPG is a Kubernetes operator that +manages PostgreSQL clusters. + +1. Install the CloudNativePG operator: + +```bash +kubectl apply -f https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/release-1.24/releases/cnpg-1.24.1.yaml +``` + +2. 
Create a PostgreSQL cluster for metering: + +```yaml +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: metering-postgres + namespace: upbound-system +spec: + instances: 1 + imageName: ghcr.io/cloudnative-pg/postgresql:16 + bootstrap: + initdb: + database: metering + owner: metering + postInitApplicationSQL: + - ALTER ROLE "metering" CREATEROLE; + storage: + size: 5Gi + # Optional: Configure resources for production use + # resources: + # requests: + # memory: "512Mi" + # cpu: "500m" + # limits: + # memory: "1Gi" + # cpu: "1000m" +--- +apiVersion: v1 +kind: Secret +metadata: + name: metering-postgres-app + namespace: upbound-system + labels: + cnpg.io/reload: "true" +stringData: + username: metering + password: "your-secure-password-here" +type: kubernetes.io/basic-auth +``` + +```bash +kubectl apply -f metering-postgres.yaml +``` + +3. Wait for the cluster to be ready: + +```bash +kubectl wait --for=condition=ready cluster/metering-postgres -n upbound-system --timeout=5m +``` + +4. You can access the PostgreSQL cluster at `metering-postgres-rw.upbound-system.svc.cluster.local:5432`. + +:::tip +For production deployments, consider: +- Increasing `instances` to 3 for high availability +- Configuring [backups] to object storage +- Setting appropriate resource requests and limits +- Using a dedicated storage class with good I/O performance +::: + +### License file + +Contact your Upbound sales representative to obtain a license file for your organization. The license file contains: +- Your unique license ID +- Purchased capacity (resource hours and operations) +- License validity period +- Any usage restrictions (such as cluster UUID pinning) + +## Configuration + +### Step 1: Create database credentials secret + +Create a Kubernetes secret containing your PostgreSQL password using the pgpass format: + +```bash +# Create a pgpass file with format: hostname:port:database:username:password +# Note: The database name and username must be 'metering' +# For CNPG clusters, use the read-write service endpoint: -rw..svc.cluster.local +echo "metering-postgres-rw.upbound-system.svc.cluster.local:5432:metering:metering:your-secure-password-here" > pgpass + +# Create the secret +kubectl create secret generic metering-postgres-credentials \ + -n upbound-system \ + --from-file=pgpass=pgpass + +# Clean up the pgpass file +rm pgpass +``` + +The secret must contain a single key: +- **`pgpass`**: PostgreSQL password file in the format `hostname:port:metering:metering:password` + +:::note +The database name and username are fixed as `metering`. Ensure your PostgreSQL instance has a database named `metering` with a user `metering` that has appropriate permissions. + +If you deployed PostgreSQL using CNPG as shown in the example above, the password should match what you set in the `metering-postgres-app` secret. +::: + +:::tip +For production environments, consider using external secret management solutions: +- [External Secrets Operator][eso] +- Cloud-specific secret managers (AWS Secrets Manager, Azure Key Vault, GCP Secret Manager) +::: + +### Step 2: Enable metering in Spaces + +Enable the metering feature when installing or upgrading Spaces: + + + + + +```bash {hl_lines="2-7"} +helm -n upbound-system upgrade --install spaces ... 
\ + --set "metering.enabled=true" \ + --set "metering.storage.postgres.connection.url=metering-postgres-rw.upbound-system.svc.cluster.local:5432" \ + --set "metering.storage.postgres.connection.credentials.secret.name=metering-postgres-credentials" \ + --set "metering.interval=1m" \ + --set "metering.workerCount=10" \ + --set "metering.aggregationInterval=1h" \ + --set "metering.measurementRetentionDays=30" + ... +``` + + + + + +```bash {hl_lines="2-7"} +up space init ... \ + --set "metering.enabled=true" \ + --set "metering.storage.postgres.connection.url=metering-postgres-rw.upbound-system.svc.cluster.local:5432" \ + --set "metering.storage.postgres.connection.credentials.secret.name=metering-postgres-credentials" \ + --set "metering.interval=1m" \ + --set "metering.workerCount=10" \ + --set "metering.aggregationInterval=1h" \ + --set "metering.measurementRetentionDays=30" + ... +``` + + + + + +#### Configuration options + +| Option | Default | Description | +|--------|---------|-------------| +| `metering.enabled` | `false` | Enable the metering feature | +| `metering.storage.postgres.connection.url` | - | PostgreSQL host and port (format: `host:port`, required) | +| `metering.storage.postgres.connection.credentials.secret.name` | - | Name of the secret containing PostgreSQL credentials (required) | +| `metering.storage.postgres.connection.sslmode` | `require` | SSL mode for PostgreSQL connection (`disable`, `allow`, `prefer`, `require`, `verify-ca`, `verify-full`) | +| `metering.storage.postgres.connection.ca.name` | - | Name of the secret containing CA certificate for TLS connections (optional) | +| `metering.interval` | `1m` | How often to collect measurements from control planes | +| `metering.workerCount` | `10` | Number of parallel workers for measurement collection | +| `metering.aggregationInterval` | `1h` | How often to aggregate measurements into hourly usage data | +| `metering.measurementRetentionDays` | `30` | Days to retain raw measurements (0 = indefinite) | + + +#### Database sizing and retention + +The metering system uses two PostgreSQL tables to track usage: + +**Raw measurements table** (`measurements`): +- Stores point-in-time snapshots collected every measurement interval (default: 1 minute) +- One row per control plane per interval +- Affected by the `measurementRetentionDays` setting +- Used for detailed auditing and troubleshooting + +**Aggregated usage table** (`hourly_usage`): +- Stores hourly aggregated resource hours and operations per license +- One row per hour per license +- Never deleted (required for accurate license tracking) +- Grows much slower than raw measurements + +##### Storage sizing guidelines + +Estimate your PostgreSQL storage needs based on these factors: + + +| Deployment Size | Control Planes | Measurement Interval | Retention Days | Raw Measurements | Indexes & Overhead | Total Storage | +|----------------|----------------|---------------------|----------------|------------------|-------------------|---------------| +| Small | 10 | 1m | 30 | ~85 MB | ~40 MB | **~125 MB** | +| Medium | 50 | 1m | 30 | ~430 MB | ~215 MB | **~645 MB** | +| Large | 200 | 1m | 30 | ~1.7 GB | ~850 MB | **~2.5 GB** | +| Large (90-day retention) | 200 | 1m | 90 | ~5.2 GB | ~2.6 GB | **~7.8 GB** | + +The aggregated hourly usage table adds minimal overhead (~50 KB per year per license). 
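+To see how much disk the metering tables currently use, you can query
+PostgreSQL directly. A minimal sketch, assuming the CNPG cluster deployed
+earlier in this guide (adjust the pod name and namespace for your own
+database):
+
+```bash
+# Report the on-disk size of the raw and aggregated metering tables
+kubectl exec -it metering-postgres-1 -n upbound-system -- \
+  psql -U metering -d metering -c \
+  "SELECT relname AS table_name,
+          pg_size_pretty(pg_total_relation_size(relid)) AS total_size
+   FROM pg_catalog.pg_statio_user_tables
+   WHERE relname IN ('measurements', 'hourly_usage');"
+```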
+ +**Formula for custom calculations**: +``` +Daily measurements per control plane = (24 * 60) / interval_minutes +Total rows = control_planes × daily_measurements × retention_days +Storage (MB) ≈ (total_rows × 200 bytes) / 1,048,576 × 1.5 (with indexes) +``` + +##### Retention behavior + +The `measurementRetentionDays` setting controls retention of raw measurement data: + +- **Default: 30 days** - Balances audit capabilities with storage efficiency +- **Set to 0**: Disables cleanup, retains all raw measurements indefinitely +- **Cleanup runs**: Every aggregation interval (default: hourly) +- **What's kept forever**: Aggregated hourly usage data (needed for license tracking) +- **What's cleaned up**: Raw point-in-time measurements older than retention period + +**Recommendations**: +- **30 days**: For most troubleshooting and short-term auditing +- **60 to 90 days**: For environments requiring extended audit trails +- **Unlimited (0)**: Only for environments with ample storage or specific compliance requirements + +:::note +Increasing retention period linearly increases storage requirements for raw measurements. The aggregated hourly data is always retained regardless of this setting. +::: + +### Step 3: Apply your license + +Use the `up` CLI to apply your license file: + +```bash +up space license apply /path/to/license.json +``` + +This command automatically: +- Creates a secret containing your license file in the `upbound-system` namespace +- Creates the `SpaceLicense` resource configured to use that secret + +:::tip +You can specify a different namespace for the license secret using the `--namespace` flag: +```bash +up space license apply /path/to/license.json --namespace my-namespace +``` +::: + +
+Alternative: Manual kubectl approach + +If you prefer not to use the `up` CLI, you can manually create the resources: + +1. Create the license secret: + +```bash +kubectl create secret generic space-license \ + -n upbound-system \ + --from-file=license.json=/path/to/license.json +``` + +2. Create the SpaceLicense resource: + +```yaml +apiVersion: admin.spaces.upbound.io/v1alpha1 +kind: SpaceLicense +metadata: + name: space +spec: + secretRef: + name: space-license + namespace: upbound-system + key: license.json +``` + +```bash +kubectl apply -f spacelicense.yaml +``` + +:::important +You **must** name the `SpaceLicense` resource `space`. This resource is a singleton and only one can exist in the cluster. +::: + +
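+After applying the license, you can verify that the secret exists and that the
+license reports as valid. A quick check, assuming the default `upbound-system`
+namespace and the `LicenseValid` condition shown later in this guide:
+
+```bash
+# Confirm the license secret was created
+kubectl get secret space-license -n upbound-system
+
+# Wait for the SpaceLicense to report a valid license
+kubectl wait --for=condition=LicenseValid spacelicense/space --timeout=60s
+```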
+ +## Monitoring usage + +### Check license status + +Use the `up` CLI to view your license details and current usage: + +```bash +up space license show +``` + +Example output: + +``` +Spaces License Status: Valid (License is valid) + +Created: 2024-01-01T00:00:00Z +Expires: 2025-01-01T00:00:00Z + +Plan: enterprise + +Resource Hour Limit: 1000000 +Operation Limit: 500000 + +Enabled Features: +- spaces +- query-api +- backup-restore +``` + +The output shows: +- License validity status and any validation messages +- Creation and expiration dates +- Your commercial plan tier +- Capacity limits for resource hours and operations +- Enabled features in your license +- Any restrictions (such as cluster UUID pinning) + +
+Alternative: View detailed status with kubectl + +For detailed information including usage statistics, use kubectl: + +```bash +kubectl get spacelicense space -o yaml +``` + +Example output showing usage data: + +```yaml +apiVersion: admin.spaces.upbound.io/v1alpha1 +kind: SpaceLicense +metadata: + name: space +spec: + secretRef: + name: space-license + namespace: upbound-system +status: + conditions: + - type: LicenseValid + status: "True" + reason: Valid + message: "License is valid" + id: "lic_abc123xyz" + plan: "enterprise" + capacity: + resourceHours: 1000000 + operations: 500000 + usage: + resourceHours: 245680 + operations: 12543 + resourceHoursUtilization: "24.57%" + operationsUtilization: "2.51%" + firstMeasurement: "2024-01-15T10:00:00Z" + lastMeasurement: "2024-02-10T14:30:00Z" + createdAt: "2024-01-01T00:00:00Z" + expiresAt: "2025-01-01T00:00:00Z" + enabledFeatures: + - "spaces" + - "query-api" + - "backup-restore" +``` + +
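+For scripting or alerting, you can extract individual usage fields from the
+status with `kubectl` JSONPath. For example, to read the utilization
+percentages shown in the status above:
+
+```bash
+kubectl get spacelicense space \
+  -o jsonpath='{.status.usage.resourceHoursUtilization}{"\n"}{.status.usage.operationsUtilization}{"\n"}'
+```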
+
+### Understanding the status fields
+
+| Field | Description |
+|-------|-------------|
+| `status.id` | Unique license identifier |
+| `status.plan` | Your commercial plan (community, standard, enterprise) |
+| `status.capacity` | Total capacity included in your license |
+| `status.usage.resourceHours` | Total resource hours consumed |
+| `status.usage.operations` | Total operations performed |
+| `status.usage.resourceHoursUtilization` | Percentage of resource hours capacity used |
+| `status.usage.operationsUtilization` | Percentage of operations capacity used |
+| `status.usage.firstMeasurement` | When usage tracking began |
+| `status.usage.lastMeasurement` | Most recent usage update |
+| `status.expiresAt` | License expiration date |
+
+### Monitor with kubectl
+
+Watch your license utilization in real time:
+
+```bash
+kubectl get spacelicense space -w
+```
+
+Short output format:
+
+```
+NAME    PLAN         VALID   REASON   AGE
+space   enterprise   True    Valid    45d
+```
+
+## Managing licenses
+
+### Updating your license
+
+To update your license with a new license file (for example, when renewing or upgrading capacity), apply the new license:
+
+```bash
+up space license apply /path/to/new-license.json
+```
+
+This command replaces the existing license secret and updates the SpaceLicense resource.
+
+### Removing a license
+
+To remove a license:
+
+```bash
+up space license remove
+```
+
+This command:
+- Prompts for confirmation before proceeding
+- Removes the license secret
+
+To skip the confirmation prompt, use the `--force` flag:
+
+```bash
+up space license remove --force
+```
+
+## Troubleshooting
+
+### License not updating
+
+If the license status doesn't update with usage data:
+
+1. **Check metering controller logs**:
+
+   ```bash
+   kubectl logs -n upbound-system deployment/spaces-controller -c metering
+   ```
+
+2. **Check if the system captures your measurements**:
+
+   ```bash
+   # Connect to PostgreSQL and query the measurements table
+   kubectl exec -it <postgres-pod> -- psql -U metering -d metering \
+     -c "SELECT COUNT(*) FROM measurements WHERE timestamp > NOW() - INTERVAL '1 hour';"
+   ```
+
+### High utilization warnings
+
+If you're approaching your capacity limits:
+
+1. **Review resource usage** by control plane to identify high consumers
+2. **Contact your Upbound sales representative** to discuss capacity expansion
+3. **Optimize managed resources** by cleaning up unused resources
+
+### License validation failures
+
+If your license shows as invalid:
+
+1. **Check expiration date**: `kubectl get spacelicense space -o jsonpath='{.status.expiresAt}'`
+2. **Verify license file integrity**: Ensure the secret contains valid JSON
+3. **Check for cluster UUID restrictions**: Upbound pins some licenses to
+   specific clusters
+4. **Review controller logs** for detailed error messages
+
+## Differences from traditional billing
+
+### Capacity licensing
+
+- ✅ Works in disconnected environments
+- ✅ Provides real-time usage visibility
+- ✅ No manual data export required
+- ✅ Fixed capacity model
+- ❗ Requires a PostgreSQL database
+
+### Traditional billing (object storage)
+
+
+- ✅ Pay-as-you-go model
+- ✅ Works with S3/Azure Blob/GCS
+- ❌ Requires periodic manual export
+- ❌ Delayed visibility into usage
+- ❌ Requires cloud storage access
+
+## Best practices
+
+### Database management
+
+1. **Regular backups**: Back up your metering database regularly to preserve usage history
+2. **Monitor database size**: Set appropriate retention periods to manage storage growth
+3. **Use managed databases**: Consider managed PostgreSQL services for production
+4. **Connection pooling**: Use connection pooling for better performance at scale
+
+### License management
+
+1. **Monitor utilization**: Set up alerts before reaching 80% capacity
+2. **Plan renewals early**: Start renewal discussions 60 days before expiration
+3. **Track grace periods**: Note the `gracePeriodEndsAt` date for planning
+4. **Secure license files**: Treat license files as sensitive credentials
+
+### Operational monitoring
+
+1. **Set up dashboards**: Create Grafana dashboards for usage trends
+2. **Enable alerting**: Configure alerts for high utilization and expiration
+3. **Regular audits**: Periodically review usage patterns across control planes
+4. **Capacity planning**: Use historical data to predict future capacity needs
+
+## Next steps
+
+- Learn about [Observability] to monitor your Spaces deployment
+- Explore [Backup and Restore][backup-restore] to protect your control plane data
+- Review [Self-Hosted Space Billing][space-billing] for the traditional billing model
+- Contact [Upbound Sales][sales] to discuss capacity licensing options
+
+
+[space-billing]: /spaces/howtos/self-hosted/billing
+[CloudNativePG]: https://cloudnative-pg.io/
+[backups]: https://cloudnative-pg.io/documentation/current/backup_recovery/
+[backup-restore]: /spaces/howtos/backup-and-restore
+[sales]: https://www.upbound.io/contact
+[eso]: https://external-secrets.io/
+[Observability]: /spaces/howtos/observability
+
+
diff --git a/spaces_versioned_docs/version-v1.11/howtos/self-hosted/certs.md b/spaces_versioned_docs/version-v1.11/howtos/self-hosted/certs.md
new file mode 100644
index 000000000..e517c250e
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.11/howtos/self-hosted/certs.md
@@ -0,0 +1,274 @@
+---
+title: Istio Ingress Gateway With Custom Certificates
+sidebar_position: 20
+description: Install self-hosted Spaces with an Istio ingress gateway in a kind cluster
+---
+
+:::important
+Prerequisites
+
+- Spaces Token available in a file
+- `docker login xpkg.upbound.io -u <access-id> -p <token>`
+- [`istioctl`][istioctl] installation
+- `jq` installation
+:::
+
+This document describes the installation of a self-hosted Space on an example `kind`
+cluster along with an Istio ingress gateway and certificates. The service mesh and certificate
+installation is transferable to self-hosted Spaces in arbitrary clouds.
+
+## Create a kind cluster
+
+```shell
+# A standard kind config that labels the node for ingress and exposes
+# ports 80/443 on localhost; adjust to your needs.
+cat <<EOF | kind create cluster --config=-
+kind: Cluster
+apiVersion: kind.x-k8s.io/v1alpha4
+nodes:
+- role: control-plane
+  kubeadmConfigPatches:
+  - |
+    kind: InitConfiguration
+    nodeRegistration:
+      kubeletExtraArgs:
+        node-labels: "ingress-ready=true"
+  extraPortMappings:
+  - containerPort: 80
+    hostPort: 80
+    protocol: TCP
+  - containerPort: 443
+    hostPort: 443
+    protocol: TCP
+EOF
+```
+
+## Install Istio
+
+
+
+:::important
+This is an example and not recommended for use in production.
+:::
+
+
+1. Create the `istio-values.yaml` file
+
+```shell
+cat > istio-values.yaml << 'EOF'
+apiVersion: install.istio.io/v1alpha1
+kind: IstioOperator
+spec:
+  hub: gcr.io/istio-release
+  components:
+    ingressGateways:
+    - enabled: true
+      name: istio-ingressgateway
+      k8s:
+        nodeSelector:
+          ingress-ready: "true"
+        overlays:
+        - apiVersion: apps/v1
+          kind: Deployment
+          name: istio-ingressgateway
+          patches:
+          - path: spec.template.spec.containers.[name:istio-proxy].ports
+            value:
+            - containerPort: 8080
+              hostPort: 80
+            - containerPort: 8443
+              hostPort: 443
+EOF
+```
+
+2. Install Istio via `istioctl`
+
+```shell
+istioctl install -f istio-values.yaml
+```
+
+## Create a self-signed Certificate via cert-manager
+
+:::important
+This Certificate manifest creates a self-signed certificate for a proof of concept
+environment and isn't recommended for production use cases.
+:::
+
+1. Create the `upbound-system` namespace
+
+```shell
+kubectl create namespace upbound-system
+```
+
+2. Create a self-signed certificate
+
+```shell
+# Example self-signed issuer and certificate; adjust the names and dnsNames
+# to match your ingress hostname.
+cat <<EOF | kubectl apply -f -
+apiVersion: cert-manager.io/v1
+kind: Issuer
+metadata:
+  name: selfsigned-issuer
+  namespace: upbound-system
+spec:
+  selfSigned: {}
+---
+apiVersion: cert-manager.io/v1
+kind: Certificate
+metadata:
+  name: example-tls
+  namespace: upbound-system
+spec:
+  secretName: example-tls-secret
+  issuerRef:
+    name: selfsigned-issuer
+    kind: Issuer
+  commonName: proxy.upbound-127.0.0.1.nip.io
+  dnsNames:
+    - proxy.upbound-127.0.0.1.nip.io
+EOF
+```
+
+## Create an Istio Gateway and VirtualService
+
+
+
+
+Configure an Istio Gateway and VirtualService to use TLS passthrough.
+
+
+```shell
+# Example passthrough routing; the spaces-router service name and port
+# may differ depending on your Spaces version.
+cat <<EOF | kubectl apply -f -
+apiVersion: networking.istio.io/v1beta1
+kind: Gateway
+metadata:
+  name: spaces-gateway
+  namespace: istio-system
+spec:
+  selector:
+    istio: ingressgateway
+  servers:
+  - port:
+      number: 443
+      name: tls
+      protocol: TLS
+    tls:
+      mode: PASSTHROUGH
+    hosts:
+    - "proxy.upbound-127.0.0.1.nip.io"
+---
+apiVersion: networking.istio.io/v1beta1
+kind: VirtualService
+metadata:
+  name: spaces-router
+  namespace: istio-system
+spec:
+  hosts:
+  - "proxy.upbound-127.0.0.1.nip.io"
+  gateways:
+  - spaces-gateway
+  tls:
+  - match:
+    - port: 443
+      sniHosts:
+      - "proxy.upbound-127.0.0.1.nip.io"
+    route:
+    - destination:
+        host: spaces-router.upbound-system.svc.cluster.local
+        port:
+          number: 8443
+EOF
+```
+
+## Install Upbound Spaces
+
+1. Create the `spaces-values.yaml` file
+
+```shell
+cat > spaces-values.yaml << 'EOF'
+# Configure spaces-router to use the TLS secret created by cert-manager.
+externalTLS:
+  tlsSecret:
+    name: example-tls-secret
+  caBundleSecret:
+    name: example-tls-secret
+    key: ca.crt
+ingress:
+  provision: false
+  # Allow Istio Ingress Gateway to communicate to the spaces-router
+  namespaceLabels:
+    kubernetes.io/metadata.name: istio-system
+  podLabels:
+    app: istio-ingressgateway
+    istio: ingressgateway
+EOF
+```
+
+2. Set the required environment variables
+
+```shell
+# Update these according to your account/token file
+export SPACES_TOKEN_PATH=<path-to-token-file>
+export UPBOUND_ACCOUNT=<your-upbound-account>
+# Replace SPACES_ROUTER_HOST with your Spaces ingress hostname
+export SPACES_ROUTER_HOST="proxy.upbound-127.0.0.1.nip.io"
+export SPACES_VERSION="1.14.1"
+```
+
+3. Create an image pull secret for Spaces
+
+```shell
+kubectl -n upbound-system create secret docker-registry upbound-pull-secret \
+  --docker-server=https://xpkg.upbound.io \
+  --docker-username="$(jq -r .accessId $SPACES_TOKEN_PATH)" \
+  --docker-password="$(jq -r .token $SPACES_TOKEN_PATH)"
+```
+
+4. Install the Spaces helm chart
+
+```shell
+# Login to xpkg.upbound.io
+jq -r .token $SPACES_TOKEN_PATH | helm registry login xpkg.upbound.io -u $(jq -r .accessId $SPACES_TOKEN_PATH) --password-stdin
+
+# Install spaces helm chart
+helm -n upbound-system upgrade --install spaces \
+  oci://xpkg.upbound.io/spaces-artifacts/spaces \
+  --version "${SPACES_VERSION}" \
+  --set "ingress.host=${SPACES_ROUTER_HOST}" \
+  --set "account=${UPBOUND_ACCOUNT}" \
+  --set "authentication.hubIdentities=true" \
+  --set "authorization.hubRBAC=true" \
+  --wait -f spaces-values.yaml
+```
+
+## Validate the installation
+
+If the `up` CLI can interact with your self-hosted Space, the certificate
+installation works.
+
+- `up ctx .`
+
+You can also issue control plane creation, list, and deletion commands.
+
+- `up ctp create cert-test`
+- `up ctp list`
+- `up ctx disconnected/kind-kind/default/cert-test && kubectl get namespace`
+- `up ctp delete cert-test`
+
+:::note
+If `up` can't connect to your control plane, follow [this guide to create a new profile][up-profile].
+:::
+
+## Troubleshooting
+
+Examine your certificate with `openssl`:
+
+```shell
+openssl s_client -connect proxy.upbound-127.0.0.1.nip.io:443 -showcerts
+```
+
+[istioctl]: https://istio.io/latest/docs/ops/diagnostic-tools/istioctl/
+[up-profile]: /manuals/cli/howtos/profile-config/
diff --git a/spaces_versioned_docs/version-v1.11/howtos/self-hosted/configure-ha.md b/spaces_versioned_docs/version-v1.11/howtos/self-hosted/configure-ha.md
new file mode 100644
index 000000000..ddf36c55e
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.11/howtos/self-hosted/configure-ha.md
@@ -0,0 +1,450 @@
+---
+title: Production Scaling and High Availability
+description: Configure your Self-Hosted Space for production
+sidebar_position: 5
+---
+
+
+
+This guide explains how to configure an existing Upbound Space deployment for
+production operation at scale.
+
+Use this guide when you're ready to deploy production scaling, high availability,
+and monitoring in your Space.
:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments.
+
+For API specifications on ControlPlane resources and configurations, see the [Spaces API Reference](../../../../../reference/apis/spaces-api/).
+:::
+
+## Prerequisites
+
+Before you begin scaling your Spaces deployment, make sure you have:
+
+
+* A working Space deployment
+* Cluster administrator access
+* An understanding of load patterns and growth in your organization
+* Familiarity with node affinity, tainting, and Horizontal Pod Autoscaling
+  (HPA)
+
+
+## Production scaling strategy
+
+
+In this guide, you will:
+
+
+
+* Create dedicated node pools for different component types
+* Configure high availability to ensure there are no single points of failure
+* Set up dynamic scaling for variable workloads
+* Optimize your storage and component operations
+* Monitor your deployment health and performance
+
+## Spaces architecture
+
+The basic Spaces workflow follows the pattern below:
+
+
+![Spaces workflow][spaces-workflow]
+
+## Node architecture
+
+You can mitigate resource contention and improve reliability by separating system
+components into dedicated node pools.
+
+### `etcd` dedicated nodes
+
+`etcd` performance directly impacts your entire Space, so isolate it for
+consistent performance.
+
+1. Create a dedicated `etcd` node pool
+
+   **Requirements:**
+   - **Minimum**: 3 nodes for HA
+   - **Instance type**: General purpose with high network throughput/low latency
+   - **Storage**: High performance storage (`etcd` is I/O sensitive)
+
+2. Taint `etcd` nodes to reserve them
+
+   ```bash
+   kubectl taint nodes <node-name> target=etcd:NoSchedule
+   ```
+
+3. Configure `etcd` storage
+
+   `etcd` is sensitive to storage I/O performance. Review the [`etcd` scaling
+   documentation][scaling]
+   for specific storage guidance.
+
+### API server dedicated nodes
+
+API servers handle all control plane requests and should run on dedicated
+infrastructure.
+
+1. Create dedicated API server nodes
+
+   **Requirements:**
+   - **Minimum**: 2 nodes for HA
+   - **Instance type**: Compute-optimized, memory-optimized, or general-purpose
+   - **Scaling**: Scale vertically based on API server load patterns
+
+2. Taint API server nodes
+
+   ```bash
+   kubectl taint nodes <node-name> target=apiserver:NoSchedule
+   ```
+
+### Configure cluster autoscaling
+
+Enable cluster autoscaling for all node pools.
+
+For AWS EKS clusters, Upbound recommends using [`Karpenter`][karpenter] for
+improved bin-packing and instance type selection.
+
+For GCP GKE clusters, follow the [GKE autoscaling][gke-autoscaling] guide.
+
+For Azure AKS clusters, follow the [AKS autoscaling][aks-autoscaling] guide.
+
+
+## Configure high availability
+
+Ensure control plane components can survive node and zone failures.
+
+### Enable high availability mode
+
+1. Configure control planes for high availability
+
+   ```yaml
+   controlPlanes:
+     ha:
+       enabled: true
+   ```
+
+   This configures control plane pods to run with multiple replicas and
+   associated pod disruption budgets.
+
+### Configure component distribution
+
+1. 
Set up API server pod distribution + + ```yaml + controlPlanes: + vcluster: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: target + operator: In + values: + - apiserver + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: app + operator: In + values: + - vcluster + topologyKey: "kubernetes.io/hostname" + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - vcluster + topologyKey: topology.kubernetes.io/zone + weight: 100 + ``` + +2. Configure `etcd` pod distribution + + ```yaml + controlPlanes: + etcd: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: target + operator: In + values: + - etcd + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: app + operator: In + values: + - vcluster-etcd + topologyKey: "kubernetes.io/hostname" + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - vcluster-etcd + topologyKey: topology.kubernetes.io/zone + weight: 100 + ``` + +### Configure tolerations + +Allow control plane pods to schedule on the tainted dedicated nodes (available +in Spaces v1.14+). + +1. Add tolerations for `etcd` pods + + ```yaml + controlPlanes: + etcd: + tolerations: + - key: "target" + operator: "Equal" + value: "etcd" + effect: "NoSchedule" + ``` + +2. Add tolerations for API server pods + + ```yaml + controlPlanes: + vcluster: + tolerations: + - key: "target" + operator: "Equal" + value: "apiserver" + effect: "NoSchedule" + ``` + + +## Configure autoscaling for Spaces components + + +Set up the Spaces system components to handle variable load automatically. + +### Scale API and `apollo` services + +1. Configure minimum replicas for availability + + ```yaml + api: + replicaCount: 2 + + features: + alpha: + apollo: + enabled: true + replicaCount: 2 + ``` + + Both services support horizontal and vertical scaling based on load patterns. + +### Configure router autoscaling + +The `spaces-router` is the entry point for all traffic and needs intelligent +scaling. + + +1. Enable Horizontal Pod Autoscaler + + ```yaml + router: + hpa: + enabled: true + minReplicas: 2 + maxReplicas: 8 + targetCPUUtilizationPercentage: 80 + targetMemoryUtilizationPercentage: 80 + ``` + +2. Monitor scaling factors + + **Router scaling behavior:** + - **Vertical scaling**: Scales based on number of control planes + - **Horizontal scaling**: Scales based on request volume + - **Resource monitoring**: Monitor CPU and memory usage + + + +### Configure controller scaling + +The `spaces-controller` manages Space-level resources and requires vertical +scaling. + +1. Configure adequate resources with headroom + + ```yaml + controller: + resources: + requests: + cpu: "500m" + memory: "1Gi" + limits: + cpu: "2000m" + memory: "4Gi" + ``` + + **Important**: The controller can spike when reconciling large numbers of + control planes, so provide adequate headroom for resource spikes. + +## Set up production storage + + +### Configure Query API database + + +1. 
Use a managed PostgreSQL database
+
+   **Recommended services:**
+   - [AWS RDS][rds]
+   - [Google Cloud SQL][gke-sql]
+   - [Azure Database for PostgreSQL][aks-sql]
+
+   **Requirements:**
+   - Minimum 400 IOPS performance
+
+
+## Monitoring
+
+Monitor key metrics to ensure healthy scaling and identify issues quickly.
+
+### Control plane health
+
+Track these `spaces-controller` metrics:
+
+1. **Total control planes**
+
+   ```
+   spaces_control_plane_exists
+   ```
+
+   Tracks the total number of control planes in the system.
+
+2. **Degraded control planes**
+
+   ```
+   spaces_control_plane_degraded
+   ```
+
+   Tracks control planes that don't report a `Synced`, `Ready`, and
+   `Healthy` state.
+
+3. **Stuck control planes**
+
+   ```
+   spaces_control_plane_stuck
+   ```
+
+   Tracks control planes stuck in a provisioning state.
+
+4. **Deletion issues**
+
+   ```
+   spaces_control_plane_deletion_stuck
+   ```
+
+   Tracks control planes stuck during deletion.
+
+### Alerting
+
+Configure alerts for critical scaling and health metrics:
+
+- **High error rates**: Alert when 4xx/5xx response rates exceed thresholds
+- **Control plane health**: Alert when degraded or stuck control planes exceed acceptable counts
+
+## Architecture overview
+
+**Spaces System Components:**
+
+- **`spaces-router`**: Entry point for all endpoints, dynamically builds routes to control plane API servers
+- **`spaces-controller`**: Reconciles Space-level resources, serves webhooks, works with `mxp-controller` for provisioning
+- **`spaces-api`**: API for managing groups, control planes, shared secrets, and telemetry objects (accessed only through `spaces-router`)
+- **`spaces-apollo`**: Hosts the Query API, connects to a PostgreSQL database populated by `apollo-syncer` pods
+
+
+**Control Plane Components (per control plane):**
+
+- **`mxp-controller`**: Handles provisioning tasks, serves webhooks, installs UXP and `XGQL`
+- **`XGQL`**: GraphQL API powering console views
+- **`kube-state-metrics`**: Collects usage metrics for billing (updated by `mxp-controller` when CRDs change)
+- **`vector`**: Works with `kube-state-metrics` to send usage data to external storage for billing
+- **`apollo syncer`**: Syncs `etcd` data into PostgreSQL for the Query API
+
+
+### `up ctx` workflow
+
+![up ctx workflow diagram][up-ctx-workflow]
+
+### Access a control plane API server via kubectl
+
+![kubectl workflow diagram][kubectl]
+
+### Query API/Apollo
+
+![query API workflow diagram][query-api]
+
+## See also
+
+* [Upbound Spaces deployment requirements][deployment]
+* [Upbound `etcd` scaling resources][scaling]
+
+[up-ctx-workflow]: /img/up-ctx-workflow.png
+[kubectl]: /img/kubectl-workflow.png
+[query-api]: /img/query-api-workflow.png
+[spaces-workflow]: /img/up-basic-flow.png
+[rds]: https://aws.amazon.com/rds/postgresql/
+[gke-sql]: https://cloud.google.com/kubernetes-engine/docs/tutorials/stateful-workloads/postgresql
+[aks-sql]: https://learn.microsoft.com/en-us/azure/aks/deploy-postgresql-ha?tabs=azuredisk
+[deployment]: https://docs.upbound.io/spaces/howtos/self-hosted/deployment-reqs/
+[karpenter]: https://docs.aws.amazon.com/eks/latest/best-practices/karpenter.html
+[gke-autoscaling]: https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-autoscaler
+[aks-autoscaling]: https://learn.microsoft.com/en-us/azure/aks/cluster-autoscaler-overview
+[scaling]: https://docs.upbound.io/deploy/self-hosted-spaces/scaling-resources#scaling-etcd-storage
diff --git a/spaces_versioned_docs/version-v1.11/howtos/self-hosted/controllers.md b/spaces_versioned_docs/version-v1.11/howtos/self-hosted/controllers.md
new file mode 100644
index 000000000..692740638
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.11/howtos/self-hosted/controllers.md
@@ -0,0 +1,389 @@
+---
+title: Controllers
+weight: 250
+description: A guide to wrapping and deploying an Upbound controller into control planes on Upbound.
+---
+
+:::important
+This feature is in private preview for select customers in Upbound Spaces. If you're interested in this feature, please [contact us](https://www.upbound.io/contact-us).
+:::
+
+Upbound's _Controllers_ feature lets you build and deploy control plane software from the Kubernetes ecosystem. With the _Controllers_ feature, you're not limited to managing only resource types defined by Crossplane. Now you can create resources from _CustomResourceDefinitions_ defined by other Kubernetes ecosystem tooling.
+
+This guide explains how to bundle and deploy control plane software from the Kubernetes ecosystem on a control plane in Upbound.
+
+## Benefits
+
+The Controllers feature provides the following benefits:
+
+* Deploy control plane software from the Kubernetes ecosystem.
+* Use your control plane's package manager to handle the lifecycle of the control plane software and define dependencies between packages.
+* Build powerful compositions that combine both Crossplane and Kubernetes _CustomResources_.
+
+## How it works
+
+A _Controller_ is a package type that bundles control plane software from the Kubernetes ecosystem. Examples of such software include:
+
+- Kubernetes policy engines
+- CI/CD tooling
+- Your own private custom controllers defined by your organization
+
+You build a _Controller_ package by wrapping a Helm chart along with its requisite _CustomResourceDefinitions_. Your _Controller_ package gets pushed to an OCI registry, and from there you can apply it to a control plane like you would any other Crossplane package. Your control plane's package manager is responsible for managing the lifecycle of the software once applied.
+
+## Prerequisites
+
+Enable the Controllers feature in the Space you plan to run your control plane in:
+
+- Cloud Spaces: Not available yet
+- Connected Spaces: Space administrator must enable this feature
+- Disconnected Spaces: Space administrator must enable this feature
+
+Packaging a _Controller_ requires [up CLI][cli] `v0.39.0` or later.
+
+
+
+## Build a _Controller_ package
+
+
+
+_Controllers_ are a package type that get administered by your control plane's package manager.
+
+### Prepare the package
+
+To define a _Controller_, you need a Helm chart. This guide assumes the control plane software you want to build into a _Controller_ already has a Helm chart available.
+
+Start by making a working directory to assemble the necessary parts:
+
+```ini
+mkdir controller-package
+cd controller-package
+```
+
+Inside the working directory, pull the Helm chart:
+
+```shell
+export CHART_REPOSITORY=<chart repository URL>
+export CHART_NAME=<chart name>
+export CHART_VERSION=<chart version>
+
+helm pull $CHART_NAME --repo $CHART_REPOSITORY --version $CHART_VERSION
+```
+
+Be sure to update the Helm chart repository, name, and version with your own.
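+As a concrete illustration, the values below pull the Argo CD chart that the
+`controller-argocd` example later in this guide packages (the chart version is
+only an example):
+
+```shell
+export CHART_REPOSITORY=https://argoproj.github.io/argo-helm
+export CHART_NAME=argo-cd
+export CHART_VERSION=7.8.8
+```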
+
+Move the Helm chart into its own folder:
+
+```ini
+mkdir helm
+mv $CHART_NAME-$CHART_VERSION.tgz helm/chart.tgz
+```
+
+Unpack the CRDs from the Helm chart into their own directory:
+
+```shell
+export RELEASE_NAME=<helm release name>
+export RELEASE_NAMESPACE=<helm release namespace>
+
+mkdir crds
+helm template $RELEASE_NAME helm/chart.tgz -n $RELEASE_NAMESPACE --include-crds | \
+  yq e 'select(.kind == "CustomResourceDefinition")' - | \
+  yq -s '("crds/" + .metadata.name + ".yaml")' -
+```
+
+Be sure to update the Helm release name and namespace with your own.
+
+:::info
+The instructions above assume your CRDs get deployed as part of your Helm chart. If they're deployed another way, you need to manually copy your CRDs instead.
+:::
+
+Create a `crossplane.yaml` with your controller metadata:
+
+```yaml
+cat <<EOF > crossplane.yaml
+apiVersion: meta.pkg.upbound.io/v1alpha1
+kind: Controller
+metadata:
+  annotations:
+    friendly-name.meta.crossplane.io: Controller <name>
+    meta.crossplane.io/description: |
+      A brief description of what the controller does.
+    meta.crossplane.io/license: Apache-2.0
+    meta.crossplane.io/maintainer: <maintainer>
+    meta.crossplane.io/readme: |
+      An explanation of your controller.
+    meta.crossplane.io/source: <source repository URL>
+  name: <controller name>
+spec:
+  packagingType: Helm
+  helm:
+    releaseName: <helm release name>
+    releaseNamespace: <helm release namespace>
+    # Value overrides for the helm release can be provided below.
+    # values:
+    #   foo: bar
+EOF
+```
+
+Your controller's file structure should look like this:
+
+```ini
+.
+├── crds
+│   ├── your-crd.yaml
+│   ├── second-crd.yaml
+│   └── another-crd.yaml
+├── crossplane.yaml
+└── helm
+    └── chart.tgz
+```
+
+### Package and push the _Controller_
+
+At the root of your controller's working directory, build the contents into an xpkg:
+
+```ini
+up xpkg build
+```
+
+This causes an xpkg to get saved to your current directory with a name like `controller-f7091386b4c0.xpkg`.
+
+Push the package to your desired OCI registry:
+
+```shell
+export UPBOUND_ACCOUNT=<your Upbound account>
+export CONTROLLER_NAME=<controller name>
+export CONTROLLER_VERSION=<controller version>
+export XPKG_FILENAME=<xpkg filename>
+
+up xpkg push xpkg.upbound.io/$UPBOUND_ACCOUNT/$CONTROLLER_NAME:$CONTROLLER_VERSION -f $XPKG_FILENAME
+```
+
+
+
+## Deploy a _Controller_ package
+
+
+
+:::important
+_Controllers_ are only installable on control planes running Crossplane `v1.19.0` or later.
+:::
+
+Set your kubecontext to the desired control plane in Upbound. Change the package path to the OCI registry you pushed it to. Then, deploy the _Controller_ directly by applying it to the control plane like any other Crossplane package, referencing `xpkg.upbound.io/$UPBOUND_ACCOUNT/$CONTROLLER_NAME:$CONTROLLER_VERSION`:
+
+```shell
+export CONTROLLER_NAME=<controller name>
+export CONTROLLER_VERSION=<controller version>
+```
+
+## Example: controller-argocd
+
+The following example applies the packaging workflow above to Argo CD. Create a `crossplane.yaml` with the controller metadata:
+
+```yaml
+cat <<EOF > crossplane.yaml
+apiVersion: meta.pkg.upbound.io/v1alpha1
+kind: Controller
+metadata:
+  annotations:
+    friendly-name.meta.crossplane.io: Controller ArgoCD
+    meta.crossplane.io/description: |
+      The ArgoCD Controller enables continuous delivery and declarative configuration
+      management for Kubernetes applications using GitOps principles.
+    meta.crossplane.io/license: Apache-2.0
+    meta.crossplane.io/maintainer: Upbound Maintainers
+    meta.crossplane.io/readme: |
+      ArgoCD is a declarative GitOps continuous delivery tool for Kubernetes that
+      follows the GitOps methodology to manage infrastructure and application
+      configurations.
+    meta.crossplane.io/source: https://github.com/argoproj/argo-cd
+  name: argocd
+spec:
+  packagingType: Helm
+  helm:
+    releaseName: argo-cd
+    releaseNamespace: argo-system
+    # values:
+    #   foo: bar
+EOF
+```
+
+Your controller's file structure should look like this:
+
+```ini
+.
+├── crds
+│   ├── applications.argoproj.io.yaml
+│   ├── applicationsets.argoproj.io.yaml
+│   └── appprojects.argoproj.io.yaml
+├── crossplane.yaml
+└── helm
+    └── chart.tgz
+```
+
+### Package and push controller-argocd
+
+At the root of your controller's working directory, build the contents into an xpkg:
+
+```ini
+up xpkg build
+```
+
+This causes an xpkg to get saved to your current directory with a name like `argocd-f7091386b4c0.xpkg`.
+
+Push the package to your desired OCI registry:
+
+```shell
+export UPBOUND_ACCOUNT=<your Upbound account>
+export CONTROLLER_NAME=controller-argocd
+export CONTROLLER_VERSION=v7.8.8
+export XPKG_FILENAME=<xpkg filename>
+
+up xpkg push --create xpkg.upbound.io/$UPBOUND_ACCOUNT/$CONTROLLER_NAME:$CONTROLLER_VERSION -f $XPKG_FILENAME
+```
+
+### Deploy controller-argocd to a control plane
+
+Set your kubecontext to the desired control plane in Upbound. Change the package path to the OCI registry you pushed it to. Then, deploy the _Controller_ directly, referencing the package you pushed: `xpkg.upbound.io/$UPBOUND_ACCOUNT/controller-argocd:v7.8.8`.
+
+## Frequently asked questions
+
+Can I package any software or are there any prerequisites to be a Controller?
+
+We define a *Controller* as software that has at least one Custom Resource Definition (CRD) and a Kubernetes controller for that CRD. This is the minimum requirement to be a *Controller*. We have some checks to enforce this at packaging time.
+ +
+How can I package my software as a Controller? + +Currently, we support Helm charts as the underlying package format for *Controllers*. As long as you have a Helm chart, you can package it as a *Controller*. + +If you don't have a Helm chart, you can't deploy the software. We only support Helm charts as the underlying package format for *Controllers*. We may extend this to support other packaging formats like Kustomize in the future. + +
+ +
+Can I package Crossplane XRDs/Compositions as a Helm chart to deploy as a Controller?
+
+This is not recommended. For packaging Crossplane XRDs and Compositions, we recommend using the `Configuration` package format. A Helm chart containing only Crossplane XRDs/Compositions doesn't qualify as a *Controller*.
+ +
+How can I override the Helm values when deploying a Controller? + +Overriding the Helm values is possible at two levels: +- During packaging time, in the package manifest file. +- At runtime, using a `ControllerRuntimeConfig` resource (similar to Crossplane `DeploymentRuntimeConfig`). + +
+ +
+<details>
+<summary>How can I configure the Helm release name and namespace for the controller?</summary>
+
+Right now, it's not possible to configure this at runtime. The package author configures the release name and namespace during packaging, so they're hardcoded inside the package. Unlike a regular application deployed by a Helm chart, a *Controller* can only be deployed once in a given control plane, so relying on predefined release names and namespaces should be acceptable. We may consider exposing these in `ControllerRuntimeConfig` later, but we'd like to keep it opinionated unless there are strong reasons to do otherwise.
+
+</details>
+ +
+<details>
+<summary>Can I deploy more than one instance of a Controller package?</summary>
+
+No, this isn't possible. A *Controller* package introduces CRDs, which are cluster-scoped objects. Just as you can't deploy more than one instance of the same Crossplane Provider package today, you can't deploy more than one instance of a *Controller*.
+
+</details>
+ +
+<details>
+<summary>Do I need a specific Crossplane version to run Controllers?</summary>
+
+Yes, you need Crossplane v1.19.0 or later to use *Controllers*. This is because of changes in the Crossplane codebase to support third-party package formats in dependencies.
+
+Spaces `v1.12.0` supports Crossplane `v1.19` in the *Rapid* release channel.
+
+</details>
+ +
+<details>
+<summary>Can I deploy Controllers outside of an Upbound control plane? With UXP?</summary>
+
+No, *Controllers* are a proprietary package format and are only available for control planes running in Spaces hosting environments in Upbound.
+
+</details>
+
+
+[cli]: /manuals/uxp/overview
+
diff --git a/spaces_versioned_docs/version-v1.11/howtos/self-hosted/ctp-audit-logs.md b/spaces_versioned_docs/version-v1.11/howtos/self-hosted/ctp-audit-logs.md
new file mode 100644
index 000000000..52f52c776
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.11/howtos/self-hosted/ctp-audit-logs.md
@@ -0,0 +1,549 @@
+---
+title: Control plane audit logging
+---
+
+This guide explains how to enable and configure audit logging for control planes
+in Self-Hosted Upbound Spaces.
+
+Starting in Spaces `v1.14.0`, each control plane contains an API server that
+supports audit log collection. You can use audit logging to track creation,
+updates, and deletions of Crossplane resources. Control plane audit logging
+builds on the observability features: a `SharedTelemetryConfig` collects audit
+logs and sends them to an OpenTelemetry (`OTEL`) collector.
+
+:::info API Version Information
+This guide applies to **Spaces v1.14.0 and later** (Self-Hosted only). Audit logging is not available in earlier versions.
+
+For API specifications on observability resources, see the [Spaces API Reference - v1.14](../../../../../1.14/reference/apis/spaces-api/).
+:::
+
+## Prerequisites
+
+Before you begin, make sure you have:
+
+* Spaces `v1.14.0` or greater
+* Admin access to your Spaces host cluster
+* `kubectl` configured to access the host cluster
+* `helm` installed
+* `yq` installed
+* `up` CLI installed and logged in to your organization
+
+## Enable observability
+
+
+Observability graduated to General Availability in `v1.14.0` but is disabled by
+default.
+
+
+
+
+
+### Before `v1.14`
+
+To enable the GA Observability feature, upgrade your Spaces installation to `v1.14.0`
+or later and update your installation setting to the new flag:
+
+```diff
+helm upgrade spaces upbound/spaces -n upbound-system \
+- --set "features.alpha.observability.enabled=true"
++ --set "observability.enabled=true"
+```
+
+
+
+### After `v1.14`
+
+To enable the GA Observability feature for `v1.14.0` and later, pass the feature
+flag:
+
+```sh
+helm upgrade spaces upbound/spaces -n upbound-system \
+  --set "observability.enabled=true"
+```
+
+
+
+
+To confirm Observability is enabled, run the `helm get values` command:
+
+
+```shell
+helm get values --namespace upbound-system spaces | yq .observability
+```
+
+Your output should return:
+
+```shell-noCopy
+enabled: true
+```
+
+## Install an observability backend
+
+:::note
+If you already have an observability backend in your environment, skip to the
+next section.
+:::
+
+
+For this guide, you'll use Grafana's `docker-otel-lgtm` bundle to validate audit log
+generation. For production environments, configure a dedicated observability
+backend like Datadog, Splunk, or an enterprise-grade Grafana stack.
+
+
+
+First, make sure your `kubectl` context points to your Spaces host cluster:
+
+```shell
+kubectl config current-context
+```
+
+The output should return your cluster name.
+
+Next, install `docker-otel-lgtm` as a deployment using port-forwarding to
+connect to Grafana.
Create a manifest file and paste the
+following configuration:
+
+```yaml title="otel-lgtm.yaml"
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: observability
+---
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    app: otel-lgtm
+  name: otel-lgtm
+  namespace: observability
+spec:
+  ports:
+  - name: grpc
+    port: 4317
+    protocol: TCP
+    targetPort: 4317
+  - name: http
+    port: 4318
+    protocol: TCP
+    targetPort: 4318
+  - name: grafana
+    port: 3000
+    protocol: TCP
+    targetPort: 3000
+  selector:
+    app: otel-lgtm
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: otel-lgtm
+  labels:
+    app: otel-lgtm
+  namespace: observability
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: otel-lgtm
+  template:
+    metadata:
+      labels:
+        app: otel-lgtm
+    spec:
+      containers:
+      - name: otel-lgtm
+        image: grafana/otel-lgtm
+        ports:
+        - containerPort: 4317
+        - containerPort: 4318
+        - containerPort: 3000
+```
+
+Next, apply the manifest:
+
+```shell
+kubectl apply --filename otel-lgtm.yaml
+```
+
+Your output should return the resources:
+
+```shell-noCopy
+namespace/observability created
+service/otel-lgtm created
+deployment.apps/otel-lgtm created
+```
+
+To verify your resources deployed, use `kubectl get` to display resources with
+an `ACTIVE` or `READY` status.
+
+Next, forward the Grafana port:
+
+```shell
+kubectl port-forward svc/otel-lgtm --namespace observability 3000:3000
+```
+
+Now you can access the Grafana UI at http://localhost:3000.
+
+
+## Create an audit-enabled control plane
+
+To enable audit logging for a control plane, you need to label it so the
+`SharedTelemetryConfig` can identify it and apply audit settings. This section
+creates a new control plane with the `audit-enabled: "true"` label. The
+`SharedTelemetryConfig` (created in the next section) finds control planes with
+this label and enables audit logging on them.
+
+Create a new manifest file and paste the configuration below:
+
+```yaml title="ctp-audit.yaml" +apiVersion: v1 +kind: Namespace +metadata: + name: audit-test +--- +apiVersion: spaces.upbound.io/v1beta1 +kind: ControlPlane +metadata: + labels: + audit-enabled: "true" + name: ctp1 + namespace: audit-test +spec: + writeConnectionSecretToRef: + name: kubeconfig-ctp1 + namespace: audit-test +``` +
+
+The `metadata.labels` section contains the `audit-enabled` setting.
+
+Apply the manifest:
+
+```shell
+kubectl apply --filename ctp-audit.yaml
+```
+
+Confirm your control plane reaches the `READY` status:
+
+```shell
+kubectl get --filename ctp-audit.yaml
+```
+
+## Create a `SharedTelemetryConfig`
+
+The `SharedTelemetryConfig` applies to control planes in a namespace, enabling
+audit logging and routing logs to your `OTEL` endpoint.
+
+Create a `SharedTelemetryConfig` manifest file and paste the configuration
+below:
+
+```yaml title="sharedtelemetryconfig.yaml" +apiVersion: observability.spaces.upbound.io/v1alpha1 +kind: SharedTelemetryConfig +metadata: + name: apiserver-audit + namespace: audit-test +spec: + apiServer: + audit: + enabled: true + exporters: + otlphttp: + endpoint: http://otel-lgtm.observability:4318 + exportPipeline: + logs: [otlphttp] + controlPlaneSelector: + labelSelectors: + - matchLabels: + audit-enabled: "true" +``` +
+
+This configuration:
+
+* Sets `apiServer.audit.enabled` to `true`
+* Configures the `otlphttp` exporter to point to the `docker-otel-lgtm` service
+* Uses `controlPlaneSelector` to match any control plane in the namespace with the `audit-enabled` label set to `true`
+
+:::note
+You can configure the `SharedTelemetryConfig` to select control planes in
+several ways. For more information on control plane selection, see the [control
+plane selection][ctp-selection] documentation.
+:::
+
+Apply the `SharedTelemetryConfig`:
+
+```shell
+kubectl apply --filename sharedtelemetryconfig.yaml
+```
+
+Confirm the configuration selected the control plane:
+
+```shell
+kubectl get --filename sharedtelemetryconfig.yaml
+```
+
+The output should return `SELECTED` as `1` and `VALIDATED` as `TRUE`.
+
+For more detailed status information, use `kubectl get`:
+
+```shell
+kubectl get --filename sharedtelemetryconfig.yaml --output yaml | yq .status
+```
+
+## Generate and monitor audit events
+
+You enabled telemetry on your new control plane and can now generate events to
+test the audit logging. This guide uses the `nop-provider` to simulate resource
+operations.
+
+Switch your `up` context to the new control plane:
+
+```shell
+up ctx <organization>/<space>/<group>/<control-plane>
+```
+
+Create a new Provider manifest:
+
+```yaml title="provider-nop.yaml"
+apiVersion: pkg.crossplane.io/v1
+kind: Provider
+metadata:
+  name: crossplane-contrib-provider-nop
+spec:
+  package: xpkg.upbound.io/crossplane-contrib/provider-nop:v0.4.0
+```
+
+Apply the provider manifest:
+
+```shell
+kubectl apply --filename provider-nop.yaml
+```
+
+Verify the provider installed and reports a `HEALTHY` status of `TRUE`.
+
+Apply an example resource to kick off event generation:
+
+
+```shell
+kubectl apply --filename https://raw.githubusercontent.com/crossplane-contrib/provider-nop/refs/heads/main/examples/nopresource.yaml
+```
+
+In your Grafana dashboard, navigate to **Drilldown** > **Logs** under the
+Grafana menu.
+
+
+Filter for `controlplane-audit` log messages.
+
+Create a query to find `create` events on `nopresources` by filtering:
+
+* The `verb` field for `create` events
+* The `objectRef_resource` field to match `nopresources`
+
+Review the audit log results. The log stream displays:
+
+* The client applying the create operation
+* The resource kind
+* Client details
+* The response code
+
+Expand the example below for an audit log entry:
+
+<details>
+<summary>Audit log entry</summary>
+
+```json
+{
+  "level": "Metadata",
+  "auditID": "51bbe609-14ad-4874-be78-1289c10d506a",
+  "stage": "ResponseComplete",
+  "requestURI": "/apis/nop.crossplane.io/v1alpha1/nopresources?fieldManager=kubectl-client-side-apply&fieldValidation=Strict",
+  "verb": "create",
+  "user": {
+    "username": "kubernetes-admin",
+    "groups": ["system:masters", "system:authenticated"]
+  },
+  "impersonatedUser": {
+    "username": "upbound:spaces:host:masterclient",
+    "groups": [
+      "system:authenticated",
+      "upbound:controlplane:admin",
+      "upbound:spaces:host:system:masters"
+    ]
+  },
+  "sourceIPs": ["10.244.0.135", "127.0.0.1"],
+  "userAgent": "kubectl/v1.32.2 (darwin/arm64) kubernetes/67a30c0",
+  "objectRef": {
+    "resource": "nopresources",
+    "name": "example",
+    "apiGroup": "nop.crossplane.io",
+    "apiVersion": "v1alpha1"
+  },
+  "responseStatus": { "metadata": {}, "code": 201 },
+  "requestReceivedTimestamp": "2025-09-19T23:03:24.540067Z",
+  "stageTimestamp": "2025-09-19T23:03:24.557583Z",
+  "annotations": {
+    "authorization.k8s.io/decision": "allow",
+    "authorization.k8s.io/reason": "RBAC: allowed by ClusterRoleBinding \"controlplane-admin\" of ClusterRole \"controlplane-admin\" to Group \"upbound:controlplane:admin\""
+  }
+}
+```
+
+</details>
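+
+If you prefer querying Loki directly instead of the Drilldown UI, a minimal LogQL sketch follows. It assumes the `docker-otel-lgtm` bundle exposes the collector's log stream under a `service_name` label of `controlplane-audit` and that the JSON fields flatten as shown above (adjust the label and field names to match what your Grafana instance actually shows):
+
+```logql
+{service_name="controlplane-audit"} | json | verb="create" | objectRef_resource="nopresources"
+```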
+ +## Customize the audit policy + +Spaces `v1.14.0` includes a default audit policy. You can customize this policy +by creating a configuration file and passing the values to +`observability.collectors.apiServer.auditPolicy` in the helm values file. + +An example custom audit policy: + +```yaml +observability: + controlPlanes: + apiServer: + auditPolicy: | + apiVersion: audit.k8s.io/v1 + kind: Policy + rules: + # ============================================================================ + # RULE 1: Exclude health check and version endpoints + # ============================================================================ + - level: None + nonResourceURLs: + - '/healthz*' + - '/readyz*' + - /version + # ============================================================================ + # RULE 2: ConfigMaps - Write operations only + # ============================================================================ + - level: Metadata + resources: + - group: "" + resources: + - configmaps + verbs: + - create + - update + - patch + - delete + omitStages: + - RequestReceived + - ResponseStarted + # ============================================================================ + # RULE 3: Secrets - ALL operations + # ============================================================================ + - level: Metadata + resources: + - group: "" + resources: + - secrets + verbs: + - get + - list + - watch + - create + - update + - patch + - delete + omitStages: + - RequestReceived + - ResponseStarted + # ============================================================================ + # RULE 4: Global exclusion of read-only operations + # ============================================================================ + - level: None + verbs: + - get + - list + - watch + # ========================================================================== + # RULE 5: Exclude standard Kubernetes resources from write operation logging + # ========================================================================== + - level: None + resources: + - group: "" + - group: "apps" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "storage.k8s.io" + - group: "batch" + - group: "autoscaling" + - group: "metrics.k8s.io" + - group: "node.k8s.io" + - group: "scheduling.k8s.io" + - group: "coordination.k8s.io" + - group: "discovery.k8s.io" + - group: "events.k8s.io" + - group: "flowcontrol.apiserver.k8s.io" + - group: "internal.apiserver.k8s.io" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "admissionregistration.k8s.io" + verbs: + - create + - update + - patch + - delete + # ============================================================================ + # RULE 6: Catch-all for ALL custom resources and any missed resources + # ============================================================================ + - level: Metadata + verbs: + - create + - update + - patch + - delete + omitStages: + - RequestReceived + - ResponseStarted + # ============================================================================ + # RULE 7: Final catch-all - exclude everything else + # ============================================================================ + - level: None + omitStages: + - RequestReceived + - ResponseStarted +``` +You can apply this policy during Spaces installation or upgrade using the helm values file. + +Audit policies use rules evaluated in order from top to bottom where the first +matching rule applies. 
Control plane audit policies follow Kubernetes conventions and use the
+following logging levels:
+
+* **None** - Don't log events matching this rule
+* **Metadata** - Log request metadata (user, timestamp, resource, verb) but not request or response bodies
+* **Request** - Log metadata and request body but not response body
+* **RequestResponse** - Log metadata, request body, and response body
+
+For more information, review the Kubernetes [Auditing] documentation.
+
+## Disable audit logging
+
+You can disable audit logging on a control plane by removing it from the
+`SharedTelemetryConfig` selector or by deleting the `SharedTelemetryConfig`.
+
+### Disable for specific control planes
+
+Remove the `audit-enabled` label from control planes that should stop sending audit logs:
+
+```bash
+kubectl label controlplane <controlplane-name> --namespace <namespace> audit-enabled-
+```
+
+The `SharedTelemetryConfig` no longer selects this control plane, and audit log collection stops.
+
+### Disable for all control planes
+
+Delete the `SharedTelemetryConfig` to stop audit logging for all control planes it manages:
+
+```bash
+kubectl delete sharedtelemetryconfig <name> --namespace <namespace>
+```
+
+[ctp-selection]: /spaces/howtos/observability/#control-plane-selection
+[Auditing]: https://kubernetes.io/docs/tasks/debug/debug-cluster/audit/
diff --git a/spaces_versioned_docs/version-v1.11/howtos/self-hosted/declarative-ctps.md b/spaces_versioned_docs/version-v1.11/howtos/self-hosted/declarative-ctps.md
new file mode 100644
index 000000000..2c3e5331b
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.11/howtos/self-hosted/declarative-ctps.md
@@ -0,0 +1,110 @@
+---
+title: Declaratively create control planes
+sidebar_position: 99
+description: A tutorial to configure a Space with Argo to declaratively create and
+  manage control planes
+---
+
+In this tutorial, you learn how to configure [Argo CD][argo-cd] to communicate with a self-hosted Space. This flow allows you to declaratively create and manage control planes from Git. Argo CD is a continuous delivery tool for Kubernetes that you can use to drive GitOps flows for your control plane infrastructure.
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments.
+
+For API specifications on ControlPlane resources and their declarative creation, see the [Spaces API Reference](../../../../../reference/apis/spaces-api/).
+:::
+
+## Prerequisites
+
+To complete this tutorial, you need the following:
+
+- Have already deployed an Upbound Space.
+- Have already deployed an instance of Argo CD on a Kubernetes cluster.
+
+## Connect your Space to Argo CD
+
+Fetch the kubeconfig for the Space cluster, the Kubernetes cluster where you installed the Upbound Spaces software. You must add the Space cluster as a context to Argo.
+
+```shell
+export SPACES_CLUSTER_SERVER="https://url"
+export SPACES_CLUSTER_NAME="cluster"
+```
+
+Switch contexts to the Kubernetes cluster where you've installed Argo. Create a secret on the Argo cluster whose data contains the connection details of the Space cluster.
+
+:::important
+Make sure the following commands are executed against your **Argo** cluster, not your Space cluster.
+:::
+
+Run the following command in a terminal:
+
+```yaml
+cat <
+
+When you install a Crossplane provider on a control plane, memory gets consumed
+according to the number of custom resources it defines.
Upbound [Official Provider families][official-provider-families] give platform
+teams finer-grained control to install providers for only the resources they
+need, reducing the bloat of unused custom resources. Still, you
+must factor provider memory usage into your calculations to ensure you've
+rightsized the memory available in your Spaces cluster.
+
+
+:::important
+Be careful not to conflate `managed resource` with `custom resource definition`.
+The former is an "instance" of an external resource in Crossplane, while the
+latter defines the API schema of that resource.
+:::
+
+It's estimated that each custom resource definition consumes ~3 MB of memory.
+The calculation is:
+
+```bash
+number_of_managed_resources_defined_in_provider x 3 MB = memory_required
+```
+
+For example, if you plan to use [provider-aws-ec2][provider-aws-ec2], [provider-aws-s3][provider-aws-s3], and [provider-aws-iam][provider-aws-iam], the resulting calculation is:
+
+```bash
+provider-aws-ec2: 98 x 3 MB = 294 MB
+provider-aws-s3:  23 x 3 MB = 69 MB
+provider-aws-iam: 22 x 3 MB = 66 MB
+---
+total memory: 429 MB
+```
+
+In this scenario, you should budget ~430 MB of memory for provider usage on this control plane.
+
+:::tip
+Do this calculation for each provider you plan to install on your control plane.
+Then do this calculation for each control plane you plan to run in your Space.
+:::
+
+
+#### Total memory usage
+
+Add the memory usage from the previous sections. Given the preceding examples,
+they result in a recommendation to budget ~1 GB memory for each control plane
+you plan to run in the Space.
+
+:::important
+
+The 1 GB recommendation is an example.
+You should input your own provider requirements to arrive at a final number for
+your own deployment.
+
+:::
+
+### CPU considerations
+
+#### Managed resource CPU usage
+
+The number of managed resources under management by a control plane is the largest contributing factor for CPU usage in a Space. CPU usage scales linearly with the number of managed resources your control plane manages. In Upbound's testing, CPU usage requirements _do_ vary from provider to provider. Using the Upbound Official Provider families as a baseline:
+
+
+| Provider | MR create operation (CPU core seconds) | MR update or reconciliation operation (CPU core seconds) |
+| ---- | ---- | ---- |
+| provider-family-aws | 10 | 2 to 3 |
+| provider-family-gcp | 7 | 1.5 |
+| provider-family-azure | 7 to 10 | 1.5 to 3 |
+
+
+When resources are in a non-ready state, Crossplane providers reconcile often (as fast as every 15 seconds). Once a resource reaches `READY`, each Crossplane provider defaults to a 10 minute poll interval. Given this, a 16-core machine has `16x10x60 = 9600` CPU core seconds available per 10-minute window. Interpreting this table:
+
+- A single control plane that needs to create 100 AWS MRs concurrently would consume 1000 CPU core seconds, or about 1.5 cores.
+- A single control plane that continuously reconciles 100 AWS MRs once they've reached a `READY` state would consume 300 CPU core seconds, or a little under half a core.
+
+Since `provider-family-aws` has the highest recorded numbers for CPU time required, you can use that as an upper limit in your calculations.
+
+Using these calculations and extrapolating values, given a 16 core machine, it's recommended you don't exceed a single control plane managing 1000 MRs. Suppose you plan to run 10 control planes, each managing 1000 MRs; the sketch below shows the arithmetic.
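+
+As a back-of-the-envelope sketch, assuming the `provider-family-aws` create-time figure of 10 CPU core seconds per MR and the default 10 minute (600 second) poll window, the numbers land near the 160-core figure used below:
+
+```bash
+10 control planes x 1000 MRs x 10 CPU core seconds = 100000 CPU core seconds
+100000 CPU core seconds / 600 seconds = ~167 cores of sustained capacity
+```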
You want to make sure your node pool has capacity for 160 cores. If you are using a machine type that has 16 cores per machine, that means a node pool of size 10. If you are using a machine type that has 32 cores per machine, that means a node pool of size 5.
+
+#### Cloud API latency
+
+Oftentimes, you use Crossplane providers to talk to external cloud APIs. Those external cloud APIs often have global API rate limits (examples: [Azure limits][azure-limits], [AWS EC2 limits][aws-ec2-limits]).
+
+For Crossplane providers built on [Upjet][upjet] (such as Upbound Official Provider families), these providers use Terraform under the covers. They expose some knobs (such as `--max-reconcile-rate`) you can use to tweak reconciliation rates.
+
+### Resource buffers
+
+The guidance in the preceding sections explains how to calculate CPU and memory usage requirements for:
+
+- a set of control planes in a Space
+- tuned to the number of providers you plan to use
+- according to the number of managed resource instances you plan to have managed by your control planes
+
+Upbound recommends budgeting an extra 20% on top of your resource capacity calculations. The numbers in the preceding sections are based on average measurements and don't account for peaks or surges, so this buffer absorbs them.
+
+## Deploying more than one Space
+
+You are welcome to deploy more than one Space. You just need to make sure you have a 1:1 mapping of Spaces to Kubernetes clusters. Spaces are by their nature constrained to a single Kubernetes cluster, and clusters are regional entities. If you want to offer control planes in multiple cloud environments, or in multiple public clouds entirely, those are justifications for deploying more than one Space.
+
+## Cert-manager
+
+A Spaces deployment uses the [Certificate Custom Resource] from cert-manager to
+provision certificates within the Space. This establishes a clean API boundary
+between what your platform may need and the certificate requirements of a
+Space.
+
+
+In the event you would like more control over the issuing Certificate Authority
+for your deployment or the deployment of cert-manager itself, this guide is for
+you.
+
+
+### Deploying
+
+An Upbound Space deployment doesn't have any special requirements for the
+cert-manager deployment itself. The only expectation is that cert-manager and
+the corresponding Custom Resources exist in the cluster.
+
+You should be free to install cert-manager in the cluster in any way that makes
+sense for your organization. You can find some [installation ideas] in the
+cert-manager docs.
+
+### Issuers
+
+A default Upbound Space install includes a [ClusterIssuer]. This `ClusterIssuer`
+is a `selfSigned` issuer that other certificates are minted from. You have a
+couple of options available to you for changing the default deployment of the
+Issuer:
+1. Changing the issuer name.
+2. Providing your own ClusterIssuer.
+
+
+#### Changing the issuer name
+
+The `ClusterIssuer` name is controlled by the `certificates.space.clusterIssuer`
+Helm property. You can adjust this during installation by providing the
+following parameter (assuming your new name is 'SpaceClusterIssuer'):
+```shell
+--set "certificates.space.clusterIssuer=SpaceClusterIssuer"
+```
+
+
+
+#### Providing your own ClusterIssuer
+
+To provide your own `ClusterIssuer`, you need to first set up your own
+`ClusterIssuer` in the cluster, for example:
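+
+A minimal sketch of a CA-based `ClusterIssuer`, assuming you've already stored your CA certificate and key in a Secret named `my-ca-key-pair` in cert-manager's cluster resource namespace (the names here are placeholders):
+
+```yaml
+apiVersion: cert-manager.io/v1
+kind: ClusterIssuer
+metadata:
+  name: my-cluster-issuer
+spec:
+  ca:
+    # Secret containing tls.crt and tls.key for your CA.
+    secretName: my-ca-key-pair
+```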
The cert-manager docs have a variety of options
+for providing your own. See the [Issuer Configuration] docs for more details.
+
+Once you have your own `ClusterIssuer` set up in the cluster, you need to turn
+off the deployment of the `ClusterIssuer` included in the Spaces deployment.
+To do that, provide the following parameter during installation:
+```shell
+--set "certificates.provision=false"
+```
+
+###### Considerations
+
+If your `ClusterIssuer` has a name that's different from the default name that
+the Spaces installation expects ('spaces-selfsigned'), you need to also specify
+your `ClusterIssuer` name during install using:
+```shell
+--set "certificates.space.clusterIssuer=<your-clusterissuer-name>"
+```
+
+## Ingress
+
+To route requests from an external client (kubectl, ArgoCD, and so on) to a
+control plane, a Spaces deployment includes a default [Ingress] manifest. To
+ease getting-started scenarios, the current `Ingress` includes
+configurations (properties and annotations) that assume you installed the
+commonly used [ingress-nginx ingress controller] in the cluster. This section
+walks you through using a different `Ingress`, if that's something your
+organization needs.
+
+### Default manifest
+
+Below is an example of the `Ingress` manifest included in a Spaces install:
+
+```yaml
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: mxe-router-ingress
+  namespace: upbound-system
+  annotations:
+    nginx.ingress.kubernetes.io/use-regex: "true"
+    nginx.ingress.kubernetes.io/ssl-redirect: "false"
+    nginx.ingress.kubernetes.io/proxy-send-timeout: "3600"
+    nginx.ingress.kubernetes.io/proxy-read-timeout: "3600"
+    nginx.ingress.kubernetes.io/proxy-request-buffering: "off"
+    nginx.ingress.kubernetes.io/proxy-body-size: "0"
+    nginx.ingress.kubernetes.io/proxy-http-version: "1.1"
+    nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
+    nginx.ingress.kubernetes.io/proxy-ssl-verify: "on"
+    nginx.ingress.kubernetes.io/proxy-ssl-secret: "upbound-system/mxp-hostcluster-certs"
+    nginx.ingress.kubernetes.io/proxy-ssl-name: spaces-router
+    nginx.ingress.kubernetes.io/configuration-snippet: |
+      more_set_headers "X-Request-Id: $req_id";
+      more_set_headers "Request-Id: $req_id";
+      more_set_headers "Audit-Id: $req_id";
+spec:
+  ingressClassName: nginx
+  tls:
+  - hosts:
+    - {{ .Values.ingress.host }}
+    secretName: mxe-router-tls
+  rules:
+  - host: {{ .Values.ingress.host }}
+    http:
+      paths:
+      - path: "/v1/controlPlanes"
+        pathType: Prefix
+        backend:
+          service:
+            name: spaces-router
+            port:
+              name: http
+```
+
+The notable pieces are:
+
+1. Namespace
+
+This property represents the namespace that the spaces-router is deployed to.
+In most cases this is `upbound-system`.
+
+2. proxy-ssl-* annotations
+
+The spaces-router pod terminates TLS using certificates located in the
+mxp-hostcluster-certs `Secret` located in the `upbound-system` `Namespace`.
+
+3. proxy-* annotations
+
+Requests coming into the ingress-controller can be variable depending on what
+the client is requesting. For example, `kubectl get crds` has different
+requirements for the connection compared to a watch, such as
+`kubectl get pods -w`. The ingress-controller is configured to
+account for either scenario.
+
+
+4. configuration-snippets
+
+These commands add headers to the incoming requests that help with telemetry
+and diagnosing problems within the system.
+
+5. 
Rules
+
+Requests coming into the control planes use a `/v1/controlPlanes` prefix and
+need to be routed to the spaces-router.
+
+
+### Using a different ingress manifest
+
+Operators can choose to use an `Ingress` manifest and ingress controller that
+makes the most sense for their organization. To turn off deploying the default
+`Ingress` manifest, provide the following parameter during installation:
+```shell
+--set "ingress.provision=false"
+```
+
+#### Considerations
+
+
+
+
+
+Operators need to take into account the following considerations when
+disabling the default `Ingress` deployment.
+
+1. Ensure the custom `Ingress` manifest is placed in the same namespace as the
+`spaces-router` pod.
+2. Ensure that the ingress is configured to use the `spaces-router` as a secure
+backend and that the secret used is the mxp-hostcluster-certs secret.
+3. Ensure that the ingress is configured to handle long-lived connections.
+4. Ensure that the routing rule sends requests prefixed with
+`/v1/controlPlanes` to the `spaces-router` using the `http` port.
+
+
+
+
+
+
+[cert-manager]: https://cert-manager.io/
+[Certificate Custom Resource]: https://cert-manager.io/docs/usage/certificate/
+[ClusterIssuer]: https://cert-manager.io/docs/concepts/issuer/
+[ingress-nginx ingress controller]: https://kubernetes.github.io/ingress-nginx/deploy/
+[installation ideas]: https://cert-manager.io/docs/installation/
+[Ingress]: https://kubernetes.io/docs/concepts/services-networking/ingress/
+[Issuer Configuration]: https://cert-manager.io/docs/configuration/
+[official-provider-families]: /manuals/packages/providers/provider-families
+[aws-eks]: https://aws.amazon.com/eks/
+[google-cloud-gke]: https://cloud.google.com/kubernetes-engine
+[microsoft-aks]: https://azure.microsoft.com/en-us/products/kubernetes-service
+[upbound-account]: https://www.upbound.io/register/?utm_source=docs&utm_medium=cta&utm_campaign=docs_spaces
+[provider-aws-ec2]: https://marketplace.upbound.io/providers/upbound/provider-aws-ec2
+[provider-aws-s3]: https://marketplace.upbound.io/providers/upbound/provider-aws-s3
+[provider-aws-iam]: https://marketplace.upbound.io/providers/upbound/provider-aws-iam
+[azure-limits]: https://learn.microsoft.com/en-us/azure/azure-resource-manager/management/request-limits-and-throttling
+[aws-ec2-limits]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/throttling.html#throttling-limits-rate-based
+[upjet]: https://github.com/upbound/upjet
diff --git a/spaces_versioned_docs/version-v1.11/howtos/self-hosted/dr.md b/spaces_versioned_docs/version-v1.11/howtos/self-hosted/dr.md
new file mode 100644
index 000000000..67ecbfecf
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.11/howtos/self-hosted/dr.md
@@ -0,0 +1,412 @@
+---
+title: Disaster Recovery
+sidebar_position: 13
+description: Configure Space-wide backups for disaster recovery.
+---
+
+:::info API Version Information
+This guide applies to **Spaces v1.9.0 and later** (Self-Hosted only). Space Backups is enabled by default starting in v1.14.0.
+
+- **v1.9.0-v1.13.x**: Alpha feature (requires manual enablement)
+- **v1.14.0+**: GA (enabled by default)
+
+For control-plane backups, see [Backup and Restore](../backup-and-restore.md).
+:::
+
+:::important
+For Connected and Disconnected Spaces, this feature requires Spaces `v1.9.0` and, starting with `v1.14.0`, Spaces enables it by default.
+
+To enable it on versions earlier than `v1.14.0`, set `features.alpha.spaceBackup.enabled=true` when you install Spaces.
+
+```bash
+up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
+  ...
+  --set "features.alpha.spaceBackup.enabled=true"
+```
+:::
+
+Upbound's _Space Backups_ is a built-in Space-wide backup and restore feature. This guide explains how to configure Space Backups and how to restore from one of them in case of disaster recovery.
+
+This feature is meant for Space administrators. Group or Control Plane users can leverage [Shared Backups][shared-backups] to back up and restore their ControlPlanes.
+
+## Benefits
+The Space Backups feature provides the following benefits:
+
+* Automatic backups for all resources in a Space and all resources in control planes, without any operational overhead.
+* Backup schedules.
+* Selectors to specify resources to back up.
+
+## Prerequisites
+
+Enable the Space Backups feature in the Space:
+
+- Cloud Spaces: Not accessible to users.
+- Connected Spaces: Space administrator must enable this feature.
+- Disconnected Spaces: Space administrator must enable this feature.
+
+## Configure a Space Backup Config
+
+[SpaceBackupConfig][spacebackupconfig] is a cluster-scoped resource. This resource configures the storage details and provider. Whenever a backup executes (either by schedule or manually initiated), it references a SpaceBackupConfig to tell it where to store the snapshot.
+
+
+### Backup config provider
+
+
+The `spec.objectStorage.provider` and `spec.objectStorage.config` fields configure:
+
+* The object storage provider
+* The path to the provider
+* The credentials needed to communicate with the provider
+
+You can only set one provider. Upbound currently supports AWS, Azure, and GCP as providers.
+
+
+`spec.objectStorage.config` is a freeform map of configuration options for the object storage provider. See [Thanos object storage][thanos-object-storage] for more information on the formats for each supported cloud provider. `spec.objectStorage.bucket` and `spec.objectStorage.provider` override the required values in the config.
+
+
+#### AWS as a storage provider
+
+This example demonstrates how to use AWS as a storage provider for your backups:
+
+```yaml
+apiVersion: admin.spaces.upbound.io/v1alpha1
+kind: SpaceBackupConfig
+metadata:
+  name: default
+spec:
+  objectStorage:
+    provider: AWS
+    bucket: spaces-backup-bucket
+    config:
+      endpoint: s3.eu-west-2.amazonaws.com
+      region: eu-west-2
+    credentials:
+      source: Secret
+      secretRef:
+        name: bucket-creds
+        namespace: upbound-system
+        key: creds
+```
+
+This example assumes you've already created an S3 bucket called
+`spaces-backup-bucket` in the `eu-west-2` AWS region. To access the bucket,
+define the account credentials as a Secret in the specified Namespace
+(`upbound-system` in this example).
+
+#### Azure as a storage provider
+
+This example demonstrates how to use Azure as a storage provider for your backups:
+
+```yaml
+apiVersion: admin.spaces.upbound.io/v1alpha1
+kind: SpaceBackupConfig
+metadata:
+  name: default
+spec:
+  objectStorage:
+    provider: Azure
+    bucket: upbound-backups
+    config:
+      storage_account: upbackupstore
+      container: upbound-backups
+      endpoint: blob.core.windows.net
+    credentials:
+      source: Secret
+      secretRef:
+        name: bucket-creds
+        namespace: upbound-system
+        key: creds
+```
+
+
+This example assumes you've already created an Azure storage account called
+`upbackupstore` and a blob container `upbound-backups`.
To access the container,
+define the account credentials as a Secret in the specified Namespace
+(`upbound-system` in this example).
+
+
+#### GCP as a storage provider
+
+This example demonstrates how to use Google Cloud Storage as a storage provider for your backups:
+
+```yaml
+apiVersion: admin.spaces.upbound.io/v1alpha1
+kind: SpaceBackupConfig
+metadata:
+  name: default
+spec:
+  objectStorage:
+    provider: GCP
+    bucket: spaces-backup-bucket
+    credentials:
+      source: Secret
+      secretRef:
+        name: bucket-creds
+        namespace: upbound-system
+        key: creds
+```
+
+
+This example assumes you've already created a Cloud Storage bucket called
+`spaces-backup-bucket` and a service account with access to this bucket. Define the key file as a Secret in the specified Namespace
+(`upbound-system` in this example).
+
+
+## Configure a Space Backup Schedule
+
+
+[SpaceBackupSchedule][spacebackupschedule] is a cluster-scoped resource. This resource defines a backup schedule for the whole Space.
+
+Below is an example of a Space Backup Schedule running every day. It backs up all groups having `environment: production` labels and all control planes in those groups having `backup: please` labels.
+
+```yaml
+apiVersion: admin.spaces.upbound.io/v1alpha1
+kind: SpaceBackupSchedule
+metadata:
+  name: daily-schedule
+spec:
+  schedule: "@daily"
+  configRef:
+    kind: SpaceBackupConfig
+    name: default
+  match:
+    groups:
+      labelSelectors:
+      - matchLabels:
+          environment: production
+    controlPlanes:
+      labelSelectors:
+      - matchLabels:
+          backup: please
+```
+
+### Define a schedule
+
+The `spec.schedule` field is a [Cron-formatted][cron-formatted] string. Some common examples are below:
+
+| Entry | Description |
+| ----------------- | ------------------------------------------------------------------------------------------------- |
+| `@hourly` | Run once an hour. |
+| `@daily` | Run once a day. |
+| `@weekly` | Run once a week. |
+| `0 0/4 * * *` | Run every 4 hours. |
+| `0/15 * * * 1-5` | Run every 15 minutes on Monday through Friday. |
+| `@every 1h30m10s` | Run every 1 hour, 30 minutes, and 10 seconds. Hour is the largest measurement of time for @every. |
+
+### Suspend a schedule
+
+Use the `spec.suspend` field to suspend the schedule. A suspended schedule creates no new backups, but allows running backups to complete.
+
+```yaml
+apiVersion: admin.spaces.upbound.io/v1alpha1
+kind: SpaceBackupSchedule
+metadata:
+  name: daily-schedule
+spec:
+  suspend: true
+...
+```
+
+### Garbage collect backups when the schedule gets deleted
+
+Set `spec.useOwnerReferencesInBackup` to `true` to garbage collect the associated `SpaceBackup` objects when the `SpaceBackupSchedule` gets deleted.
+
+### Set the time to live
+
+Set the `spec.ttl` field to define the time to live for the backup. After this time, the backup is eligible for garbage collection. If this field isn't set, the backup isn't garbage collected.
+
+The time to live is a duration, for example, `168h` for 7 days.
+
+```yaml
+apiVersion: admin.spaces.upbound.io/v1alpha1
+kind: SpaceBackupSchedule
+metadata:
+  name: daily-schedule
+spec:
+  ttl: 168h # Backup is garbage collected after 7 days
+...
+```
+
+## Selecting Space resources to back up
+
+By default, a SpaceBackup selects all groups and, for each of them, all control planes, secrets, and any other group-scoped resources.
+
+By setting `spec.match`, you can include only specific groups, control planes, secrets, or other Space resources in the backup.
+
+By setting `spec.exclude`, you can filter out some matched Space API resources from the backup.
+
+### Including Space resources in a backup
+
+Different fields are available to include resources based on labels or names:
+- `spec.match.groups` to include only some groups in the backup.
+- `spec.match.controlPlanes` to include only some control planes in the backup.
+- `spec.match.secrets` to include only some secrets in the backup.
+- `spec.match.extras` to include only some extra resources in the backup.
+
+```yaml
+apiVersion: admin.spaces.upbound.io/v1alpha1
+kind: SpaceBackup
+metadata:
+  name: my-backup
+spec:
+  configRef:
+    kind: SpaceBackupConfig
+    name: default
+  match:
+    groups:
+      labelSelectors:
+      - matchLabels:
+          environment: production
+    controlPlanes:
+      labelSelectors:
+      - matchLabels:
+          backup: please
+    secrets:
+      names:
+      - my-secret
+    extras:
+    - apiGroup: "spaces.upbound.io"
+      kind: "SharedBackupConfig"
+      names:
+      - my-shared-backup
+```
+
+### Excluding Space resources from the backup
+
+Use the `spec.exclude` field to exclude matched Space API resources from the backup.
+
+Different fields are available to exclude resources based on labels or names:
+- `spec.exclude.groups` to exclude some groups from the backup.
+- `spec.exclude.controlPlanes` to exclude some control planes from the backup.
+- `spec.exclude.secrets` to exclude some secrets from the backup.
+- `spec.exclude.extras` to exclude some extra resources from the backup.
+
+```yaml
+apiVersion: admin.spaces.upbound.io/v1alpha1
+kind: SpaceBackup
+metadata:
+  name: my-backup
+spec:
+  ttl: 168h # Backup is garbage collected after 7 days
+  configRef:
+    kind: SpaceBackupConfig
+    name: default
+  match:
+    groups:
+      labelSelectors:
+      - matchLabels:
+          environment: production
+  exclude:
+    groups:
+      names:
+      - not-this-one-please
+```
+
+### Exclude resources in control planes' backups
+
+By default, a backup includes all resources in a selected control plane.
+
+Use the `spec.controlPlaneBackups.excludedResources` field to exclude resources from control planes' backups.
+
+```yaml
+apiVersion: admin.spaces.upbound.io/v1alpha1
+kind: SpaceBackup
+metadata:
+  name: my-backup
+spec:
+  ttl: 168h # Backup is garbage collected after 7 days
+  configRef:
+    kind: SpaceBackupConfig
+    name: default
+  controlPlaneBackups:
+    excludedResources:
+    - secrets
+    - buckets.s3.aws.upbound.io
+```
+
+## Create a manual backup
+
+[SpaceBackup][spacebackup] is a cluster-scoped resource that causes a single backup to occur for the whole Space.
+
+Below is an example of a manual SpaceBackup:
+
+```yaml
+apiVersion: admin.spaces.upbound.io/v1alpha1
+kind: SpaceBackup
+metadata:
+  name: my-backup
+spec:
+  configRef:
+    kind: SpaceBackupConfig
+    name: default
+  deletionPolicy: Delete
+```
+
+
+The backup specification's `deletionPolicy` defines backup deletion actions,
+including the deletion of the backup file from the bucket. The `deletionPolicy`
+value defaults to `Orphan`. Set it to `Delete` to remove uploaded files
+in the bucket.
+For more information on the backup and restore process, review the [Spaces API documentation][spaces-api-documentation].
+
+### Set the time to live
+
+Set the `spec.ttl` field to define the time to live for the backup. After this time, the backup is eligible for garbage collection. If this field isn't set, the backup isn't garbage collected. The time to live is a duration, for example, `168h` for 7 days.
+
+```yaml
+apiVersion: admin.spaces.upbound.io/v1alpha1
+kind: SpaceBackup
+metadata:
+  name: my-backup
+spec:
+  ttl: 168h # Backup is garbage collected after 7 days
+...
+```
+
+## Restore from a Space backup
+
+Space Backup and Restore focuses only on disaster recovery. The restore procedure assumes a new Space installation with no existing resources. The restore procedure is idempotent, so you can run it multiple times without any side effects in case of failures.
+
+To restore a Space from an existing Space Backup, follow these steps:
+
+1. Install Spaces from scratch as needed.
+2. Create a `SpaceBackupConfig` as needed to access the SpaceBackup from the object storage, for example named `my-backup-config`.
+3. Select the backup you want to restore from, for example `my-backup`.
+4. Run the following command to restore the Space:
+
+```shell
+export SPACE_BACKUP_CONFIG=my-backup-config
+export SPACE_BACKUP=my-backup
+kubectl exec -ti -n upbound-system deployments/spaces-controller -c spaces -- hyperspace restore $SPACE_BACKUP $SPACE_BACKUP_CONFIG
+```
+
+### Restore specific control planes
+
+:::important
+This feature is available from Spaces v1.11.
+:::
+
+Instead of restoring the whole Space, you can choose to restore specific control planes
+from a backup using the `--controlplanes` flag. You can also use
+the `--skip-space-restore` flag to skip restoring Space objects.
+This allows Spaces admins to restore individual control planes without
+needing to restore the entire Space.
+
+```shell
+export SPACE_BACKUP_CONFIG=my-backup-config
+export SPACE_BACKUP=my-backup
+kubectl exec -ti -n upbound-system deployments/spaces-controller -c spaces \
+  -- hyperspace restore $SPACE_BACKUP $SPACE_BACKUP_CONFIG --controlplanes default/ctp1,default/ctp2 --skip-space-restore
+```
+
+
+[shared-backups]: /spaces/howtos/self-hosted/workload-id/backup-restore-config/
+[spacebackupconfig]: /reference/apis/spaces-api/v1_9
+[thanos-object-storage]: https://thanos.io/tip/thanos/storage.md/
+[spacebackupschedule]: /reference/apis/spaces-api/v1_9
+[cron-formatted]: https://en.wikipedia.org/wiki/Cron
+[spacebackup]: /reference/apis/spaces-api/v1_9
+[spaces-api-documentation]: /reference/apis/spaces-api/v1_9
+
diff --git a/spaces_versioned_docs/version-v1.11/howtos/self-hosted/gitops-with-argocd.md b/spaces_versioned_docs/version-v1.11/howtos/self-hosted/gitops-with-argocd.md
new file mode 100644
index 000000000..004247a10
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.11/howtos/self-hosted/gitops-with-argocd.md
@@ -0,0 +1,142 @@
+---
+title: GitOps with ArgoCD in Self-Hosted Spaces
+sidebar_position: 80
+description: Set up GitOps workflows with Argo CD in self-hosted Spaces
+plan: "business"
+---
+
+:::info Deployment Model
+This guide applies to **self-hosted Spaces** deployments. For Upbound Cloud Spaces, see [GitOps with Upbound Control Planes](/spaces/howtos/cloud-spaces/gitops-on-upbound/).
+:::
+
+GitOps is an approach for managing a system by declaratively describing desired resources' configurations in Git and using controllers to realize the desired state. Upbound's control planes are compatible with this pattern, and it's strongly recommended you integrate GitOps in the platforms you build on Upbound.
+
+
+## Integrate with Argo CD
+
+
+[Argo CD][argo-cd] is a project in the Kubernetes ecosystem commonly used for
+GitOps. You can use it in tandem with Upbound control planes to achieve GitOps
+flows. The sections below explain how to integrate these tools with Upbound.
+
+### Configure connection secrets for control planes
+
+You can configure control planes to write their connection details to a secret.
+Do this by setting the
+[`spec.writeConnectionSecretToRef`][spec-writeconnectionsecrettoref] field in a
+control plane manifest. For example:
+
+```yaml
+apiVersion: spaces.upbound.io/v1beta1
+kind: ControlPlane
+metadata:
+  name: ctp1
+  namespace: default
+spec:
+  writeConnectionSecretToRef:
+    name: kubeconfig-ctp1
+    namespace: default
+```
+
+
+### Configure Argo CD
+
+
+To configure Argo CD for annotation-based resource tracking, edit the Argo CD
+ConfigMap in the Argo CD namespace. Add `application.resourceTrackingMethod:
+annotation` to the data section as below.
+
+Next, configure the [auto respect RBAC for the Argo CD
+controller][auto-respect-rbac-for-the-argo-cd-controller-1]. By default, Argo CD
+attempts to discover some Kubernetes resource types that don't exist in a
+control plane. You must configure Argo CD to respect the cluster's RBAC rules so
+that Argo CD can sync. Add `resource.respectRBAC: normal` to the data section as
+below.
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: argocd-cm
+data:
+  ...
+  application.resourceTrackingMethod: annotation
+  resource.respectRBAC: normal
+```
+
+:::tip
+The `resource.respectRBAC` configuration above tells Argo to respect RBAC for
+_all_ cluster contexts. If you're using an Argo CD instance to manage more than
+only control planes, you should consider changing the `clusters` string match
+for the configuration to apply only to control planes. For example, if every
+control plane context name followed the convention of being named
+`controlplane-<name>`, you could set the string match to be `controlplane-*`.
+:::
+
+
+### Create a cluster context definition
+
+
+Once the control plane is ready, extract the following values from the secret
+containing the kubeconfig:
+
+```bash
+kubeconfig_content=$(kubectl get secrets kubeconfig-ctp1 -n default -o jsonpath='{.data.kubeconfig}' | base64 -d)
+server=$(echo "$kubeconfig_content" | grep 'server:' | awk '{print $2}')
+bearer_token=$(echo "$kubeconfig_content" | grep 'token:' | awk '{print $2}')
+ca_data=$(echo "$kubeconfig_content" | grep 'certificate-authority-data:' | awk '{print $2}')
+```
+
+Generate a new secret in the cluster where you installed Argo, using the prior
+values extracted:
+
+```yaml
+cat <
With Managed Spaces, you +can use the same API, CLI, and Console that Upbound offers, with the benefit of +running entirely in a cloud account that you own and Upbound manages for you. + +The following guide walks you through setting up a Managed Space in your GCP +organization. If you have any questions while working through this guide, +contact your Upbound Account Representative for help. + + + + +## Managed Space on your cloud architecture + + + +A Managed Space is a deployment of the Upbound Spaces software inside an +Upbound-controlled sub-account in your AWS cloud environment. The Spaces +software runs in this sub-account, orchestrated by Kubernetes. Backups and +billing data get stored inside bucket or blob storage in the same sub-account. +The control planes deployed and controlled by the Spaces software runs on the +Kubernetes cluster which gets deployed into the sub-account. + +The diagram below illustrates the high-level architecture of Upbound Managed Spaces: + +![Upbound Managed Spaces arch](/img/managed-arch-aws.png) + +The Spaces software gets deployed on an EKS Cluster in the region of your +choice. This EKS cluster is where your control planes are ultimately run. +Upbound also deploys buckets, 1 for the collection of the billing data and 1 for +control plane backups. + +Upbound doesn't have access to other sub-accounts nor your organization-level +settings in your cloud environment. Outside of your cloud organization, Upbound +runs the Upbound Console, which includes the Upbound API and web application, +including the dashboard you see at `console.upbound.io`. By default, all +connections are encrypted, but public. Optionally, you also have the option to +use private network connectivity through [AWS PrivateLink][aws-privatelink]. + + + + + + +A Managed Space is a deployment of the Upbound Spaces software inside an +Upbound-controlled project in your GCP cloud environment. The Spaces software +runs in this project, orchestrated by Kubernetes. Backups and billing data get +stored inside bucket or blob storage in the same project. The control planes +deployed and controlled by the Spaces software runs on the Kubernetes cluster +which gets deployed into the project. + +The diagram below illustrates the high-level architecture of Upbound Managed Spaces: + +![Upbound Managed Spaces arch](/img/managed-arch-gcp.png) + +The Spaces software gets deployed on a GKE Cluster in the region of your choice. +This GKE cluster is where your control planes are ultimately run. Upbound also +deploys cloud buckets, 1 for the collection of the billing data and 1 for +control plane backups. + +Upbound doesn't have access to other projects nor your organization-level +settings in your cloud environment. Outside of your cloud organization, Upbound +runs the Upbound Console, which includes the Upbound API and web application, +including the dashboard you see at `console.upbound.io`. By default, all +connections are encrypted, but public. Optionally, you also have the option to +use private network connectivity through [GCP Private Service +Connect][gcp-private-service-connect]. + + + +## Prerequisites + +- An organization created on Upbound + + + +- You should have a preexisting AWS organization to complete this guide. +- You must create a new AWS sub-account. Read the [AWS documentation][aws-documentation] to learn how to create a new sub-account in an existing organization on AWS. 
+ +After the sub-account information gets provided to Upbound, **don't change it +any further.** Any changes made to the sub-account or the resources created by +Upbound for the purposes of the Managed Space deployments voids the SLA you have +with Upbound. If you want to make configuration changes, contact your Upbound +Solutions Architect. + + + + + +- You should have a preexisting GCP organization with an active Cloud Billing account to complete this guide. +- You must create a new GCP project. Read the [GCP documentation][gcp-documentation] to learn how to create a new project in an existing organization on GCP. + +After the project information gets provided to Upbound, **don't change it any +further.** Any changes made to the project or the resources created by Upbound +for the purposes of the Managed Space deployments voids the SLA you have with +Upbound. If you want to make configuration changes, contact your Upbound +Solutions Architect. + + + + + +## Set up cross-account management + +Upbound supports using AWS Key Management Service with cross-account IAM +permissions. This enables the isolation of keys so the infrastructure operated +by Upbound has limited access to symmetric keys. + +In the KMS key's account, apply the baseline key policy: + +```json +{ + "Sid": "Allow Upbound to use this key", + "Effect": "Allow", + "Principal": { + "AWS": ["[Managed Space sub-account ID]"] + }, + "Action": ["kms:Encrypt", "kms:Decrypt", "kms:ReEncrypt*", "kms:GenerateDataKey*", "kms:DescribeKey"], + "Resource": "*" +} +``` + +You need another key policy to let the sub-account create persistent resources +with the KMS key: + +```json +{ + "Sid": "Allow attachment of persistent resources for an Upbound Managed Space", + "Effect": "Allow", + "Principal": { + "AWS": "[Managed Space sub-account ID]" + }, + "Action": ["kms:CreateGrant", "kms:ListGrants", "kms:RevokeGrant"], + "Resource": "*", + "Condition": { + "Bool": { + "kms:GrantIsForAWSResource": "true" + } + } +} +``` + +### Configure PrivateLink + +By default, all connections to the Upbound Console are encrypted, but public. +AWS PrivateLink is a feature that allows VPC peering whereby your traffic +doesn't traverse the public internet. To have this configured, contact your +Upbound Account Representative. + + + + + +## Enable APIs + +Enable the following APIs in the new project: + +- Kubernetes Engine API +- Cloud Resource Manager API +- Compute Engine API +- Cloud DNS API + +:::tip +Read how to enable APIs in a GCP project [here][here]. +::: + +## Create a service account + +Create a service account in the new project. Name the service account, +upbound-sa. Give the service account the following roles: + +- Compute Admin +- Project IAM Admin +- Service Account Admin +- DNS Administrator +- Editor + +Select the service account you just created. Select keys. Add a new key and +select JSON. The key gets downloaded to your machine. Save this for later. + +## Create a DNS Zone + +Create a DNS Zone, set the **Zone type** to `Public`. + +### Configure Private Service Connect + +By default, all connections to the Upbound Console are encrypted, but public. +GCP Private Service Connect is a feature that allows VPC peering whereby your +traffic doesn't traverse the public internet. To have this configured, contact +your Upbound Account Representative. + + + +## Provide information to Upbound + +Once these policies get attached to the key, tell your Upbound Account +Representative, providing them the following: + + + +- the full ARN of the KMS key. 
+- The name of the organization that you created in Upbound. Use the up CLI command, `up org list`, to see this information.
+- Confirmation of which region in AWS you want the deployment to target.
+
+- The service account JSON key.
+- The NS records associated with the DNS name created in the last step.
+- The name of the organization that you created in Upbound. Use the up CLI command, `up org list`, to see this information.
+- Confirmation of which region in GCP you want the deployment to target.
+
+Once Upbound has this information, the request gets processed within one business day.
+
+## Use your Managed Space
+
+Once the Managed Space gets deployed, you can see it in the Space selector when browsing your environment on [`console.upbound.io`][console-upbound-io].
+
+[contact]: https://www.upbound.io/contact-us
+[aws-privatelink]: #configure-privatelink
+[aws-documentation]: https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_create.html#orgs_manage_accounts_create-new
+[gcp-private-service-connect]: #configure-private-service-connect
+[gcp-documentation]: https://cloud.google.com/resource-manager/docs/creating-managing-organization
+[here]: https://cloud.google.com/apis/docs/getting-started#enabling_apis
+[console-upbound-io]: https://console.upbound.io/
diff --git a/spaces_versioned_docs/version-v1.11/howtos/self-hosted/oidc-configuration.md b/spaces_versioned_docs/version-v1.11/howtos/self-hosted/oidc-configuration.md
new file mode 100644
index 000000000..cbef4dc42
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.11/howtos/self-hosted/oidc-configuration.md
@@ -0,0 +1,289 @@
+---
+title: Configure OIDC
+sidebar_position: 20
+description: Configure OIDC in your Space
+---
+:::important
+This guide is only applicable for administrators who've deployed self-hosted Spaces. For general RBAC in Upbound, read [Upbound RBAC][upbound-rbac].
+:::
+
+Upbound uses the Kubernetes [Structured Authentication Configuration][structured-auth-config]
+to validate OIDC tokens sent to the API. Upbound stores this configuration as a
+`ConfigMap` and passes it to the Upbound router component during installation
+with Helm.
+
+This guide walks you through how to create and apply an authentication
+configuration to authenticate Upbound with an external identity provider. Each
+section focuses on a specific part of the configuration file.
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments.
+
+For details on authentication and access control across versions, see the
+documentation for your Spaces version. For related platform authentication
+features, see the [Platform manual](../../../../platform/).
+:::
+
+## Creating the `AuthenticationConfiguration` file
+
+First, create a file called `config.yaml` with an `AuthenticationConfiguration`
+kind. The `AuthenticationConfiguration` is the initial authentication structure
+necessary for Upbound to communicate with your chosen identity provider.
+
+```yaml
+apiVersion: apiserver.config.k8s.io/v1beta1
+kind: AuthenticationConfiguration
+jwt:
+- issuer:
+    url: oidc-issuer-url
+    audiences:
+    - oidc-client-id
+  claimMappings: # optional
+    username:
+      claim: oidc-username-claim
+      prefix: oidc-username-prefix
+    groups:
+      claim: oidc-groups-claim
+      prefix: oidc-groups-prefix
+```
+
+For detailed configuration options, including the CEL-based token validation,
+review the feature [documentation][structured-auth-config].
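+
+As a sketch of the CEL-based token validation that format supports, you can
+attach `claimValidationRules` to an issuer. The rule below is an illustrative
+example that rejects tokens whose total lifetime exceeds 24 hours:
+
+```yaml
+apiVersion: apiserver.config.k8s.io/v1beta1
+kind: AuthenticationConfiguration
+jwt:
+- issuer:
+    url: https://example.com
+    audiences:
+    - client-id-a
+  # Reject tokens issued with a lifetime longer than 24 hours.
+  claimValidationRules:
+  - expression: 'claims.exp - claims.nbf <= 86400'
+    message: 'total token lifetime must not exceed 24 hours'
+```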
+The `AuthenticationConfiguration` allows you to configure multiple JWT
+authenticators as separate issuers.
+
+### Configure an issuer
+
+The `jwt` array requires an `issuer` specification and typically contains:
+
+- A `username` claim mapping
+- A `groups` claim mapping
+
+Optionally, the configuration may also include:
+
+- A set of claim validation rules
+- A set of user validation rules
+
+The `issuer` URL must be unique across all configured authenticators.
+
+```yaml
+issuer:
+  url: https://example.com
+  discoveryUrl: https://discovery.example.com/.well-known/openid-configuration
+  certificateAuthority: |-
+    <PEM-encoded CA certificate>
+  audiences:
+  - client-id-a
+  - client-id-b
+  audienceMatchPolicy: MatchAny
+```
+
+By default, the authenticator assumes the OIDC Discovery URL is
+`{issuer.url}/.well-known/openid-configuration`. Most identity providers follow
+this structure, and you can omit the `discoveryUrl` field. To use a separate
+discovery service, specify the full path to the discovery endpoint in this
+field.
+
+If the CA for the issuer isn't public, provide the PEM-encoded CA for the Discovery URL.
+
+At least one of the `audiences` entries must match the `aud` claim in the JWT.
+For OIDC tokens, this is the Client ID of the application attempting to access
+the Upbound API. Setting multiple values allows the same configuration to apply
+to multiple client applications, for example the `kubectl` CLI and an internal
+developer portal.
+
+If you specify multiple `audiences`, `audienceMatchPolicy` must equal `MatchAny`.
+
+### Configure `claimMappings`
+
+#### Username claim mapping
+
+By default, the authenticator uses the `sub` claim as the user name. To override this, either:
+
+- specify *both* `claim` and `prefix`. `prefix` may be explicitly set to the empty string.
+
+or
+
+- specify a CEL `expression` to calculate the user name.
+
+```yaml
+claimMappings:
+  username:
+    claim: "sub"
+    prefix: "keycloak"
+    # Or, instead of claim and prefix:
+    expression: 'claims.username + ":external-user"'
+```
+
+#### Groups claim mapping
+
+By default, this configuration doesn't map groups, unless you either:
+
+- specify both `claim` and `prefix`. `prefix` may be explicitly set to the empty string.
+
+or
+
+- specify a CEL `expression` that returns a string or list of strings.
+
+```yaml
+claimMappings:
+  groups:
+    claim: "groups"
+    prefix: ""
+    # Or, instead of claim and prefix:
+    expression: 'claims.roles.split(",")'
+```
+
+### Validation rules
+
+Validation rules are outside the scope of this document. Review the
+[documentation][structured-auth-config] for more information. Examples include
+using CEL expressions to validate authentication, such as:
+
+- Validating that a token claim has a specific value
+- Validating that a token has a limited lifetime
+- Ensuring usernames and groups don't contain reserved prefixes
+
+## Required claims
+
+To interact with Space and ControlPlane APIs, users must have the `upbound.io/aud` claim set to one of the following:
+
+| Upbound.io Audience | Notes |
+| --------------------------------------------------------- | ------------------------------------------------------------ |
+| `[]` | No access to Space-level or ControlPlane APIs |
+| `['upbound:spaces:api']` | This identity is only for Space-level APIs |
+| `['upbound:spaces:controlplanes']` | This identity is only for ControlPlane APIs |
+| `['upbound:spaces:api', 'upbound:spaces:controlplanes']` | This identity is for both Space-level and ControlPlane APIs |
+
+You can set this claim in two ways:
+
+- In the identity provider, mapped into the ID token.
+- Injected in the authenticator with the `jwt.claimMappings.extra` array.
+
+For example:
+
+```yaml
+apiVersion: apiserver.config.k8s.io/v1beta1
+kind: AuthenticationConfiguration
+jwt:
+- issuer:
+    url: https://keycloak:8443/realms/master
+    certificateAuthority: |-
+      <PEM-encoded CA certificate>
+    audiences:
+    - master-realm
+    audienceMatchPolicy: MatchAny
+  claimMappings:
+    username:
+      claim: "preferred_username"
+      prefix: "keycloak:"
+    groups:
+      claim: "groups"
+      prefix: ""
+    extra:
+    - key: 'upbound.io/aud'
+      valueExpression: "['upbound:spaces:controlplanes', 'upbound:spaces:api']"
+```
+
+## Install the `AuthenticationConfiguration`
+
+Once you create an `AuthenticationConfiguration` file, specify this file as a
+`ConfigMap` in the host cluster for the Upbound Space.
+
+```sh
+kubectl create configmap <configmap-name> -n upbound-system --from-file=config.yaml=./path/to/config.yaml
+```
+
+To enable OIDC authentication and disable Upbound IAM when installing the Space,
+reference the configuration and pass an empty value to the Upbound IAM issuer
+parameter:
+
+```sh
+up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
+  ... \
+  --set "authentication.structuredConfig=<configmap-name>" \
+  --set "router.controlPlane.extraArgs[0]=--upbound-iam-issuer-url="
+```
+
+## Configure RBAC
+
+In this scenario, the external identity provider handles authentication, but
+permissions for Spaces and ControlPlane APIs use standard RBAC objects.
+
+### Spaces APIs
+
+The Spaces APIs include:
+
+```yaml
+- apiGroups:
+  - spaces.upbound.io
+  resources:
+  - controlplanes
+  - sharedexternalsecrets
+  - sharedsecretstores
+  - backups
+  - backupschedules
+  - sharedbackups
+  - sharedbackupconfigs
+  - sharedbackupschedules
+- apiGroups:
+  - observability.spaces.upbound.io
+  resources:
+  - sharedtelemetryconfigs
+```
+
+### ControlPlane APIs
+
+Crossplane specifies three [roles][crossplane-managed-clusterroles] for a
+ControlPlane: admin, editor, and viewer. These map to the verbs `admin`, `edit`,
+and `view` on the `controlplanes/k8s` resource in the `spaces.upbound.io` API
+group.
+
+### Control access
+
+The `groups` claim in the `AuthenticationConfiguration` allows you to control
+resource access when you create a `ClusterRoleBinding`. A `ClusterRole` defines
+the role parameters, and a `ClusterRoleBinding` grants that role to a set of
+subjects, such as a group.
+The example below allows `admin` permissions for all ControlPlanes to members of
+the `ctp-admins` group:
+
+```yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: allow-ctp-admin
+rules:
+- apiGroups:
+  - spaces.upbound.io
+  resources:
+  - controlplanes/k8s
+  verbs:
+  - admin
+```
+
+The following `ClusterRoleBinding` grants the role to the `ctp-admins` group:
+
+```yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: allow-ctp-admin
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: allow-ctp-admin
+subjects:
+- apiGroup: rbac.authorization.k8s.io
+  kind: Group
+  name: ctp-admins
+```
+
+[structured-auth-config]: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#using-authentication-configuration
+[crossplane-managed-clusterroles]: https://github.com/crossplane/crossplane/blob/master/design/design-doc-rbac-manager.md#managed-rbac-clusterroles
+[upbound-rbac]: /manuals/platform/concepts/authorization/upbound-rbac
diff --git a/spaces_versioned_docs/version-v1.11/howtos/self-hosted/proxies-config.md b/spaces_versioned_docs/version-v1.11/howtos/self-hosted/proxies-config.md
new file mode 100644
index 000000000..3802e4cb0
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.11/howtos/self-hosted/proxies-config.md
@@ -0,0 +1,31 @@
+---
+title: Proxied configuration
+sidebar_position: 20
+description: Configure Upbound within a proxied environment
+---
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments in proxied environments. Proxy configuration procedures are consistent across versions.
+
+For version-specific deployment considerations, see the documentation for your Spaces version.
+:::
+
+When you install Upbound with Helm in a proxied environment, update the
+specified registry values to point at your internal registry.
+
+```bash
+helm -n upbound-system upgrade --install spaces \
+  oci://xpkg.upbound.io/spaces-artifacts/spaces \
+  --version "${SPACES_VERSION}" \
+  --set "ingress.host=${SPACES_ROUTER_HOST}" \
+  --set "account=${UPBOUND_ACCOUNT}" \
+  --set "authentication.hubIdentities=true" \
+  --set "authorization.hubRBAC=true" \
+  --set "registry=registry.company.corp/spaces" \
+  --set "controlPlanes.uxp.registryOverride=registry.company.corp/xpkg.upbound.io" \
+  --set "controlPlanes.uxp.repository=registry.company.corp/spaces" \
+  --wait
+```
diff --git a/spaces_versioned_docs/version-v1.11/howtos/self-hosted/query-api.md b/spaces_versioned_docs/version-v1.11/howtos/self-hosted/query-api.md
new file mode 100644
index 000000000..c112e9001
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.11/howtos/self-hosted/query-api.md
@@ -0,0 +1,396 @@
+---
+title: Deploy Query API infrastructure
+weight: 130
+description: Query API
+aliases:
+  - /all-spaces/self-hosted-spaces/query-api
+  - /self-hosted-spaces/query-api
+  - all-spaces/self-hosted-spaces/query-api
+---
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+). Query API has evolved across versions:
+
+- **Cloud Spaces**: Available since v1.6 (enabled by default)
+- **Self-Hosted**: Available since v1.8 (requires manual enablement)
+
+For details on Query API availability across versions, see the documentation for your Spaces version.
+:::
+
+:::important
+
+This feature is in preview. The Query API is available in the Cloud Space offering in `v1.6` and enabled by default.
+
+In self-hosted Spaces, the Query API is required to connect a Space since `v1.8.0` and is off by default; see below to enable it.
+
+:::
+
+Upbound's Query API allows users to inspect objects and resources within their control planes. The read-only `up alpha query` and `up alpha get` CLI commands allow you to gather information on your control planes quickly and efficiently. These commands follow the [`kubectl` conventions][kubectl-conventions] for filtering, sorting, and retrieving information from your Space.
+
+The Query API requires a PostgreSQL database to store the data. You can use the default PostgreSQL instance provided by Upbound or bring your own PostgreSQL instance.
+
+## Managed setup
+
+:::tip
+If you don't have specific requirements for your setup, Upbound recommends following this approach.
+:::
+
+To enable this feature, set `features.alpha.apollo.enabled=true` and `apollo.apollo.storage.postgres.create=true` when installing Spaces.
+
+However, you need to install CloudNativePG (`CNPG`) to provide the PostgreSQL instance. You can let the `up` CLI do this for you, or install it manually.
+
+For more customization, see the [Helm chart reference][helm-chart-reference]. You can modify the number
+of PostgreSQL instances, pooling instances, storage size, and more.
+
+If you have specific requirements not addressed in the Helm chart, see below for more information on how to bring your own [PostgreSQL setup][postgresql-setup].
+
+### Using the up CLI
+
+Before you begin, make sure you have the most recent version of the [`up` CLI installed][up-cli-installed].
+
+To enable this feature, set `features.alpha.apollo.enabled=true` and `apollo.apollo.storage.postgres.create=true` when installing Spaces:
+
+```bash
+up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
+  ... \
+  --set "features.alpha.apollo.enabled=true" \
+  --set "apollo.apollo.storage.postgres.create=true"
+```
+
+`up space init` and `up space upgrade` install CloudNativePG automatically, if needed.
+
+### Helm chart
+
+If you are installing the Helm chart in some other way, you can manually install CloudNativePG in one of the [supported ways][supported-ways], for example:
+
+```shell
+kubectl apply --server-side -f \
+  https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.24.1/cnpg-1.24.1.yaml
+kubectl rollout status -n cnpg-system deployment cnpg-controller-manager -w --timeout 120s
+```
+
+Next, install the Spaces Helm chart with the necessary values, for example:
+
+```shell
+helm -n upbound-system upgrade --install spaces \
+  oci://xpkg.upbound.io/spaces-artifacts/spaces \
+  --version "${SPACES_VERSION}" \
+  ... \
+  --set "features.alpha.apollo.enabled=true" \
+  --set "apollo.apollo.storage.postgres.create=true" \
+  --wait
+```
+
+## Self-hosted PostgreSQL configuration
+
+If your workflow requires more customization, you can provide your own
+PostgreSQL instance and configure credentials manually.
+
+Using your own PostgreSQL instance requires careful architecture consideration.
+Review the architecture and requirements guidelines.
+
+### Architecture
+
+In addition to a PostgreSQL database, the Query API architecture uses the following components:
+
+* **Apollo Syncers**: Watch `etcd` for changes and sync them to PostgreSQL. One, or more, per control plane.
+* **Apollo Server**: Serves the Query API out of the data in PostgreSQL. One, or more, per Space.
+
+The default setup also uses the `PgBouncer` connection pooler to manage connections from the syncers.
+```mermaid
+graph LR
+    User[User]
+
+    subgraph Cluster["Cluster (Spaces)"]
+        direction TB
+        Apollo[apollo]
+
+        subgraph ControlPlanes["Control Planes"]
+            APIServer[API Server]
+            Syncer[apollo-syncer]
+        end
+    end
+
+    PostgreSQL[(PostgreSQL)]
+
+    User -->|requests| Apollo
+
+    Apollo -->|connects| PostgreSQL
+    Apollo -->|creates schemas & users| PostgreSQL
+
+    Syncer -->|watches| APIServer
+    Syncer -->|writes| PostgreSQL
+
+    PostgreSQL -->|data| Apollo
+
+    style PostgreSQL fill:#e1f5ff,stroke:#333,stroke-width:2px,color:#000
+    style Apollo fill:#ffe1e1,stroke:#333,stroke-width:2px,color:#000
+    style Cluster fill:#f0f0f0,stroke:#333,stroke-width:2px,color:#000
+    style ControlPlanes fill:#fff,stroke:#666,stroke-width:1px,stroke-dasharray: 5 5,color:#000
+```
+
+Each component needs to connect to the PostgreSQL database.
+
+In the event of database issues, you can provide a new database and the syncers
+automatically repopulate the data.
+
+### Requirements
+
+* A PostgreSQL 16 instance or cluster.
+* A database, for example named `upbound`.
+* **Optional**: A dedicated user for the Apollo Syncers, for example named `syncer`. Otherwise, the Spaces Controller generates a dedicated set of credentials per syncer with the necessary permissions.
+* A dedicated **superuser or admin account** for the Apollo Server.
+* **Optional**: A connection pooler, like PgBouncer, to manage connections from the Apollo Syncers. If you didn't provide the optional users, you might have to configure the pooler to allow users to connect using the same credentials as PostgreSQL.
+* **Optional**: A read replica for the Apollo Syncers to connect to, which reduces load on the primary database. This might cause a slight delay in the data being available through the Query API.
+
+Below are example setups to get you started. You can mix and match the examples to suit your needs.
+
+### In-cluster setup
+
+:::tip
+
+If you don't have strong opinions on your setup, but still want full control
+over the resources created for some unsupported customizations, Upbound
+recommends the in-cluster setup.
+
+:::
+
+For more customization than the managed setup, you can use CloudNativePG for
+PostgreSQL in the same cluster.
+
+For the in-cluster setup, manually deploy the operator in one of the [supported ways][supported-ways-1], for example:
+
+```shell
+kubectl apply --server-side -f \
+  https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.24.1/cnpg-1.24.1.yaml
+kubectl rollout status -n cnpg-system deployment cnpg-controller-manager -w --timeout 120s
+```
+
+Then create a `Cluster` and `Pooler` in the `upbound-system` namespace, for example:
+
+```shell
+kubectl create ns upbound-system
+
+kubectl apply -f - <<EOF
+# Minimal illustrative example; adjust instance counts, storage size, and
+# pooler settings to your needs.
+apiVersion: postgresql.cnpg.io/v1
+kind: Cluster
+metadata:
+  name: spaces-apollo-pg
+  namespace: upbound-system
+spec:
+  instances: 3
+  storage:
+    size: 20Gi
+---
+apiVersion: postgresql.cnpg.io/v1
+kind: Pooler
+metadata:
+  name: spaces-apollo-pg-pooler
+  namespace: upbound-system
+spec:
+  cluster:
+    name: spaces-apollo-pg
+  instances: 2
+  type: rw
+  pgbouncer:
+    poolMode: session
+EOF
+```
+
+### External setup
+
+:::tip
+
+If you want to run your PostgreSQL instance outside the cluster, but are fine with credentials being managed by the `apollo` user, this is the suggested way to proceed.
+
+:::
+
+When using this setup, you must manually create the required Secrets in the
+`upbound-system` namespace. The `apollo` user must have permissions to create
+schemas and users.
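+
+As a sketch of those grants, assuming the example `upbound` database from the
+requirements above and an admin user named `apollo`, the setup could look like:
+
+```shell
+# Illustrative example: create the apollo user with the ability to create
+# users (CREATEROLE) and schemas in the upbound database. Adjust the host,
+# admin user, and password to your environment.
+psql -h your-postgres-host -U postgres \
+  -c "CREATE ROLE apollo LOGIN PASSWORD 'supersecret' CREATEROLE;"
+psql -h your-postgres-host -U postgres \
+  -c "GRANT CREATE ON DATABASE upbound TO apollo;"
+```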
+
+```shell
+kubectl create ns upbound-system
+
+# A Secret containing the necessary credentials to connect to the PostgreSQL instance
+kubectl create secret generic spaces-apollo-pg-app -n upbound-system \
+  --from-literal=password=supersecret
+
+# A Secret containing the necessary CA certificate to verify the connection to the PostgreSQL instance
+kubectl create secret generic spaces-apollo-pg-ca -n upbound-system \
+  --from-file=ca.crt=/path/to/ca.crt
+```
+
+Next, install Spaces with the necessary settings:
+
+```shell
+export PG_URL=your-postgres-host:5432
+export PG_POOLED_URL=your-pgbouncer-host:5432 # this could be the same as above

+helm upgrade --install ... \
+  --set "features.alpha.apollo.enabled=true" \
+  --set "apollo.apollo.storage.postgres.create=false" \
+  --set "apollo.apollo.storage.postgres.connection.url=$PG_URL" \
+  --set "apollo.apollo.storage.postgres.connection.credentials.secret.name=spaces-apollo-pg-app" \
+  --set "apollo.apollo.storage.postgres.connection.credentials.format=basicauth" \
+  --set "apollo.apollo.storage.postgres.connection.ca.name=spaces-apollo-pg-ca" \
+  --set "apollo.apollo.storage.postgres.connection.syncer.url=$PG_POOLED_URL"
+```
+
+### External setup with all custom credentials
+
+For custom credentials with the Apollo Syncers or Server, create new Secrets in
+the `upbound-system` namespace:
+
+```shell
+export APOLLO_SYNCER_USER=syncer
+export APOLLO_SERVER_USER=apollo
+
+kubectl create ns upbound-system
+
+# A Secret containing the necessary credentials to connect to the PostgreSQL instance
+kubectl create secret generic spaces-apollo-pg-app -n upbound-system \
+  --from-literal=password=supersecret
+
+# A Secret containing the necessary CA certificate to verify the connection to the PostgreSQL instance
+kubectl create secret generic spaces-apollo-pg-ca -n upbound-system \
+  --from-file=ca.crt=/path/to/ca.crt
+
+# A Secret containing the necessary credentials for the Apollo Syncers to connect to the PostgreSQL instance.
+# These will be used by all Syncers in the Space.
+kubectl create secret generic spaces-apollo-pg-syncer -n upbound-system \
+  --from-literal=username=$APOLLO_SYNCER_USER \
+  --from-literal=password=supersecret
+
+# A Secret containing the necessary credentials for the Apollo Server to connect to the PostgreSQL instance.
+kubectl create secret generic spaces-apollo-pg-apollo -n upbound-system \
+  --from-literal=username=$APOLLO_SERVER_USER \
+  --from-literal=password=supersecret
+```
+
+Next, install Spaces with the necessary settings:
+
+```shell
+export PG_URL=your-postgres-host:5432
+export PG_POOLED_URL=your-pgbouncer-host:5432 # this could be the same as above
+
+helm ... \
+  --set "features.alpha.apollo.enabled=true" \
+  --set "apollo.apollo.storage.postgres.create=false" \
+  --set "apollo.apollo.storage.postgres.connection.url=$PG_URL" \
+  --set "apollo.apollo.storage.postgres.connection.credentials.secret.name=spaces-apollo-pg-app" \
+  --set "apollo.apollo.storage.postgres.connection.credentials.format=basicauth" \
+  --set "apollo.apollo.storage.postgres.connection.ca.name=spaces-apollo-pg-ca" \
+  --set "apollo.apollo.storage.postgres.connection.syncer.url=$PG_POOLED_URL" \
+
+  # Credentials for the syncers
+  --set "apollo.apollo.storage.postgres.connection.syncer.credentials.format=basicauth" \
+  --set "apollo.apollo.storage.postgres.connection.syncer.credentials.user=$APOLLO_SYNCER_USER" \
+  --set "apollo.apollo.storage.postgres.connection.syncer.credentials.secret.name=spaces-apollo-pg-syncer" \
+
+  # Credentials for the server
+  --set "apollo.apollo.storage.postgres.connection.apollo.credentials.format=basicauth" \
+  --set "apollo.apollo.storage.postgres.connection.apollo.credentials.user=$APOLLO_SERVER_USER" \
+  --set "apollo.apollo.storage.postgres.connection.apollo.credentials.secret.name=spaces-apollo-pg-apollo" \
+  --set "apollo.apollo.storage.postgres.connection.apollo.url=$PG_POOLED_URL"
+```
+
+## Using the Query API
+
+See the [Query API documentation][query-api-documentation] for more information on how to use the Query API.
+
+[postgresql-setup]: #self-hosted-postgresql-configuration
+[up-cli-installed]: /manuals/cli/overview
+[query-api-documentation]: /spaces/howtos/query-api
+[helm-chart-reference]: /reference/helm-reference
+[kubectl-conventions]: https://kubernetes.io/docs/reference/kubectl/generated/kubectl_get/
+[supported-ways]: https://cloudnative-pg.io/documentation/current/installation_upgrade/
+[supported-ways-1]: https://cloudnative-pg.io/documentation/current/installation_upgrade/
+[cloudnativepg-documentation]: https://cloudnative-pg.io/documentation/1.24/storage/#configuration-via-a-pvc-template
+[postgresql-cluster]: https://cloudnative-pg.io/documentation/1.24/resource_management/
+[pooler]: https://cloudnative-pg.io/documentation/1.24/connection_pooling/#pod-templates
+[postgresql-cluster-2]: https://cloudnative-pg.io/documentation/1.24/replication/
+[pooler-3]: https://cloudnative-pg.io/documentation/1.24/connection_pooling/#high-availability-ha
+[postgresql-cluster-4]: https://cloudnative-pg.io/documentation/1.24/operator_capability_levels/#override-of-operand-images-through-the-crd
+[pooler-5]: https://cloudnative-pg.io/documentation/1.24/connection_pooling/#pod-templates
+[cloudnativepg-documentation-6]: https://cloudnative-pg.io/documentation/1.24/postgresql_conf/
diff --git a/spaces_versioned_docs/version-v1.11/howtos/self-hosted/scaling-resources.md b/spaces_versioned_docs/version-v1.11/howtos/self-hosted/scaling-resources.md
new file mode 100644
index 000000000..7bb04d2c2
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.11/howtos/self-hosted/scaling-resources.md
@@ -0,0 +1,184 @@
+---
+title: Scaling vCluster and etcd Resources
+weight: 950
+description: A guide for scaling vCluster and etcd resources in self-hosted Spaces
+aliases:
+  - /all-spaces/self-hosted-spaces/scaling-resources
+  - /spaces/scaling-resources
+---
+
+With large workloads or during control plane migration, you may encounter
+performance-impacting resource constraints. This guide explains how to scale
+vCluster and `etcd` resources for optimal performance in your self-hosted Space.
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Scaling procedures are consistent across versions.
+
+For version-specific resource requirements and capacity planning, see the documentation for your Spaces version.
+:::
+
+## Signs of resource constraints
+
+You may need to scale your vCluster or `etcd` resources if you observe:
+
+- API server timeout errors such as `http: Handler timeout`
+- Error messages about `too many requests` and requests to `try again later`
+- Operations like provider installation failing with errors like `cannot apply provider package secret`
+- vCluster pods experiencing continuous restarts
+- API performance degrading with high resource volume
+
+## Scaling vCluster resources
+
+The vCluster component handles Kubernetes API requests for your control planes.
+Deployments with multiple control planes or providers may exceed the default resource allocations.
+
+```yaml
+# Default settings
+controlPlanes.vcluster.resources.limits.cpu: "3000m"
+controlPlanes.vcluster.resources.limits.memory: "3960Mi"
+controlPlanes.vcluster.resources.requests.cpu: "170m"
+controlPlanes.vcluster.resources.requests.memory: "1320Mi"
+```
+
+For larger workloads, like migrating from an existing control plane with several
+providers, increase these resource limits in your Spaces `values.yaml` file.
+
+```yaml
+controlPlanes:
+  vcluster:
+    resources:
+      limits:
+        cpu: "4000m" # Increase to 4 cores
+        memory: "6Gi" # Increase to 6GB memory
+      requests:
+        cpu: "500m" # Increase baseline CPU request
+        memory: "2Gi" # Increase baseline memory request
+```
+
+## Scaling `etcd` storage
+
+Kubernetes relies heavily on `etcd` performance, and `etcd` can become an IOPS
+(input/output operations per second) bottleneck. Upbound allocates `50Gi`
+volumes for `etcd` in cloud environments to ensure adequate IOPS performance.
+
+```yaml
+# Default setting
+controlPlanes.etcd.persistence.size: "5Gi"
+```
+
+For production environments or when migrating large control planes, increase the
+`etcd` volume size and specify an appropriate storage class:
+
+```yaml
+controlPlanes:
+  etcd:
+    persistence:
+      size: "50Gi" # Recommended for production
+      storageClassName: "fast-ssd" # Use a high-performance storage class
+```
+
+### Storage class considerations
+
+For AWS:
+
+- Use GP3 volumes with adequate IOPS
+- For AWS GP3 volumes, IOPS scale with volume size (3,000 IOPS baseline)
+- For optimal performance, provision at least 32Gi to support up to 16,000 IOPS
+
+For GCP and Azure:
+
+- Use SSD-based persistent disk types for optimal performance
+- Consider premium storage options for high-throughput workloads
+
+## Scaling Crossplane resources
+
+Crossplane manages provider resources in your control planes. You may need to increase provider resources for larger deployments:
+
+```yaml
+# Default settings
+controlPlanes.uxp.resourcesCrossplane.requests.cpu: "370m"
+controlPlanes.uxp.resourcesCrossplane.requests.memory: "400Mi"
+```
+
+For environments with many providers or managed resources:
+
+```yaml
+controlPlanes:
+  uxp:
+    resourcesCrossplane:
+      limits:
+        cpu: "1000m" # Add CPU limit
+        memory: "1Gi" # Add memory limit
+      requests:
+        cpu: "500m" # Increase CPU request
+        memory: "512Mi" # Increase memory request
+```
+
+## High availability configuration
+
+For production environments, enable High Availability mode to ensure resilience:
+
+```yaml
+controlPlanes:
+  ha:
+    enabled: true
+```
+
+## Best practices for migration scenarios
+
+When migrating from existing control planes into a self-hosted Space:
+
+1. **Pre-scale resources**: Scale up resources before performing the migration
+2. **Monitor resource usage**: Watch resource consumption during and after migration with `kubectl top pods`
+3. **Scale incrementally**: If issues persist, increase resources incrementally until performance stabilizes
+4. **Consider storage performance**: `etcd` is sensitive to storage I/O performance
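+
+For example, a quick way to watch consumption while a migration runs is
+`kubectl top` against the control plane's namespace; the namespace name below is
+a placeholder for your control plane's `mxp-<id>-system` namespace:
+
+```bash
+# Show per-container CPU and memory usage for a control plane's components.
+kubectl top pods -n mxp-<control-plane-id>-system --containers
+```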
+
+## Helm values configuration
+
+Apply these settings through your Spaces Helm values file:
+
+```yaml
+controlPlanes:
+  vcluster:
+    resources:
+      limits:
+        cpu: "4000m"
+        memory: "6Gi"
+      requests:
+        cpu: "500m"
+        memory: "2Gi"
+  etcd:
+    persistence:
+      size: "50Gi"
+      storageClassName: "gp3" # Use your cloud provider's fast storage class
+  uxp:
+    resourcesCrossplane:
+      limits:
+        cpu: "1000m"
+        memory: "1Gi"
+      requests:
+        cpu: "500m"
+        memory: "512Mi"
+  ha:
+    enabled: true # Recommended for production environments
+```
+
+Apply the configuration using Helm:
+
+```bash
+helm upgrade --install spaces oci://xpkg.upbound.io/spaces-artifacts/spaces \
+  -f values.yaml \
+  -n upbound-system
+```
+
+## Considerations
+
+- **Provider count**: Each provider adds resource overhead; consider using provider families to optimize resource usage
+- **Managed resources**: The number of managed resources impacts CPU usage more than memory
+- **Vertical pod autoscaling**: Consider using vertical pod autoscaling in Kubernetes to automatically adjust resources based on usage
+- **Storage performance**: Storage performance is as important as capacity for `etcd`
+- **Network latency**: Low-latency connections between components improve performance
diff --git a/spaces_versioned_docs/version-v1.11/howtos/self-hosted/self-hosted-spaces-deployment.md b/spaces_versioned_docs/version-v1.11/howtos/self-hosted/self-hosted-spaces-deployment.md
new file mode 100644
index 000000000..e549e3939
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.11/howtos/self-hosted/self-hosted-spaces-deployment.md
@@ -0,0 +1,461 @@
+---
+title: Deployment Workflow
+sidebar_position: 3
+description: A quickstart guide for Upbound Spaces
+tier: "business"
+---
+import GlobalLanguageSelector, { CodeBlock } from '@site/src/components/GlobalLanguageSelector';
+
+This guide deploys a self-hosted Upbound cluster in AWS.
+
+This guide deploys a self-hosted Upbound cluster in Azure.
+
+This guide deploys a self-hosted Upbound cluster in GCP.
+
+Disconnected Spaces allow you to host control planes in your preferred environment.
+
+## Prerequisites
+
+To get started deploying your own Disconnected Space, you need:
+
+- An Upbound organization account string, provided by your Upbound account representative
+- A `token.json` license, provided by your Upbound account representative
+
+- An AWS account and the AWS CLI
+
+- An Azure account and the Azure CLI
+
+- A GCP account and the gcloud CLI
+
+:::important
+Disconnected Spaces are a business-critical feature of Upbound and require a license token to successfully complete the installation. [Contact Upbound][contact-upbound] if you want to try out Upbound with Disconnected Spaces.
+:::
+
+## Provision the hosting environment
+
+### Create a cluster
+
+Configure the name and target region you want the EKS cluster deployed to.
+
+```ini
+export SPACES_CLUSTER_NAME=upbound-space-quickstart
+export SPACES_REGION=us-east-1
+```
+
+Provision a 3-node cluster using eksctl. The config below is a minimal,
+illustrative example; adjust the node size and IAM settings to your needs.
+
+```bash
+cat <<EOF | eksctl create cluster -f -
+apiVersion: eksctl.io/v1alpha5
+kind: ClusterConfig
+metadata:
+  name: ${SPACES_CLUSTER_NAME}
+  region: ${SPACES_REGION}
+iam:
+  withOIDC: true
+  serviceAccounts:
+  # Pre-create the service account used by the AWS Load Balancer
+  # Controller installed later in this guide.
+  - metadata:
+      name: aws-load-balancer-controller
+      namespace: kube-system
+    wellKnownPolicies:
+      awsLoadBalancerController: true
+managedNodeGroups:
+- name: spaces-nodes
+  instanceType: m5.xlarge
+  desiredCapacity: 3
+EOF
+```
+
+Configure the name and target region you want the AKS cluster deployed to.
+
+```ini
+export SPACES_RESOURCE_GROUP_NAME=upbound-space-quickstart
+export SPACES_CLUSTER_NAME=upbound-space-quickstart
+export SPACES_LOCATION=westus
+```
+
+Provision a new Azure resource group.
+
+```bash
+az group create --name ${SPACES_RESOURCE_GROUP_NAME} --location ${SPACES_LOCATION}
+```
+
+Provision a 3-node cluster.
+
+```bash
+az aks create -g ${SPACES_RESOURCE_GROUP_NAME} -n ${SPACES_CLUSTER_NAME} \
+  --enable-managed-identity \
+  --node-count 3 \
+  --node-vm-size Standard_D4s_v4 \
+  --enable-addons monitoring \
+  --enable-msi-auth-for-monitoring \
+  --generate-ssh-keys \
+  --network-plugin kubenet \
+  --network-policy calico
+```
+
+Get the kubeconfig of your AKS cluster.
+
+```bash
+az aks get-credentials --resource-group ${SPACES_RESOURCE_GROUP_NAME} --name ${SPACES_CLUSTER_NAME}
+```
+
+Configure the name and target region you want the GKE cluster deployed to.
+
+```ini
+export SPACES_PROJECT_NAME=upbound-spaces-project
+export SPACES_CLUSTER_NAME=upbound-spaces-quickstart
+export SPACES_LOCATION=us-west1-a
+```
+
+Create a new project and set it as the current project.
+
+```bash
+gcloud projects create ${SPACES_PROJECT_NAME}
+gcloud config set project ${SPACES_PROJECT_NAME}
+```
+
+Provision a 3-node cluster.
+
+```bash
+gcloud container clusters create ${SPACES_CLUSTER_NAME} \
+  --enable-network-policy \
+  --num-nodes=3 \
+  --zone=${SPACES_LOCATION} \
+  --machine-type=e2-standard-4
+```
+
+Get the kubeconfig of your GKE cluster.
+
+```bash
+gcloud container clusters get-credentials ${SPACES_CLUSTER_NAME} --zone=${SPACES_LOCATION}
+```
+
+## Configure the pre-install
+
+### Set your Upbound organization account details
+
+Set your Upbound organization account string as an environment variable for use in future steps:
+
+```ini
+export UPBOUND_ACCOUNT=<your-upbound-org>
+```
+
+### Set up pre-install configurations
+
+Export the path of the license token JSON file provided by your Upbound account representative.
+
+```ini {copy-lines="2"}
+# Change the path to where you saved the token.
+export SPACES_TOKEN_PATH="/path/to/token.json"
+```
+
+Set the version of Spaces software you want to install.
+
+```ini
+export SPACES_VERSION=<version>
+```
+
+Set the router host. The `SPACES_ROUTER_HOST` is the domain name that's used to access the control plane instances. It's used by the ingress controller to route requests.
+
+```ini
+export SPACES_ROUTER_HOST="proxy.upbound-127.0.0.1.nip.io"
+```
+
+:::important
+Make sure to replace the placeholder text in `SPACES_ROUTER_HOST` with a real domain that you own.
+:::
+
+## Install the Spaces software
+
+### Install cert-manager
+
+Install cert-manager.
+
+```bash
+kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.11.0/cert-manager.yaml
+kubectl wait deployment -n cert-manager cert-manager-webhook --for condition=Available=True --timeout=360s
+```
+
+### Install the AWS Load Balancer Controller
+
+```bash
+helm install aws-load-balancer-controller aws-load-balancer-controller --namespace kube-system \
+  --repo https://aws.github.io/eks-charts \
+  --set clusterName=${SPACES_CLUSTER_NAME} \
+  --set serviceAccount.create=false \
+  --set serviceAccount.name=aws-load-balancer-controller \
+  --wait
+```
+
+### Install ingress-nginx
+
+Starting with Spaces v1.10.0, you need to configure the ingress-nginx
+controller to allow SSL-passthrough mode. You can do so by passing the
+`--enable-ssl-passthrough=true` command-line option to the controller.
+The following Helm install command enables this with the `controller.extraArgs`
+parameter:
+
+```bash
+helm upgrade --install ingress-nginx ingress-nginx \
+  --create-namespace --namespace ingress-nginx \
+  --repo https://kubernetes.github.io/ingress-nginx \
+  --version 4.12.1 \
+  --set 'controller.service.type=LoadBalancer' \
+  --set 'controller.extraArgs.enable-ssl-passthrough=true' \
+  --set 'controller.service.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-type=external' \
+  --set 'controller.service.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-scheme=internet-facing' \
+  --set 'controller.service.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-nlb-target-type=ip' \
+  --set 'controller.service.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-healthcheck-protocol=http' \
+  --set 'controller.service.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-healthcheck-path=/healthz' \
+  --set 'controller.service.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-healthcheck-port=10254' \
+  --wait
+```
+
+```bash
+helm upgrade --install ingress-nginx ingress-nginx \
+  --create-namespace --namespace ingress-nginx \
+  --repo https://kubernetes.github.io/ingress-nginx \
+  --version 4.12.1 \
+  --set 'controller.service.type=LoadBalancer' \
+  --set 'controller.extraArgs.enable-ssl-passthrough=true' \
+  --set 'controller.service.annotations.service\.beta\.kubernetes\.io/azure-load-balancer-health-probe-request-path=/healthz' \
+  --wait
+```
+
+```bash
+helm upgrade --install ingress-nginx ingress-nginx \
+  --create-namespace --namespace ingress-nginx \
+  --repo https://kubernetes.github.io/ingress-nginx \
+  --version 4.12.1 \
+  --set 'controller.service.type=LoadBalancer' \
+  --set 'controller.extraArgs.enable-ssl-passthrough=true' \
+  --wait
+```
+
+### Install Upbound Spaces software
+
+Create an image pull secret so that the cluster can pull Upbound Spaces images.
+
+```bash
+kubectl create ns upbound-system
+kubectl -n upbound-system create secret docker-registry upbound-pull-secret \
+  --docker-server=https://xpkg.upbound.io \
+  --docker-username="$(jq -r .accessId $SPACES_TOKEN_PATH)" \
+  --docker-password="$(jq -r .token $SPACES_TOKEN_PATH)"
+```
+
+Log in with Helm to be able to pull chart images for the installation commands.
+
+```bash
+jq -r .token $SPACES_TOKEN_PATH | helm registry login xpkg.upbound.io -u $(jq -r .accessId $SPACES_TOKEN_PATH) --password-stdin
+```
+
+Install the Spaces software.
+
+```bash
+helm -n upbound-system upgrade --install spaces \
+  oci://xpkg.upbound.io/spaces-artifacts/spaces \
+  --version "${SPACES_VERSION}" \
+  --set "ingress.host=${SPACES_ROUTER_HOST}" \
+  --set "account=${UPBOUND_ACCOUNT}" \
+  --set "authentication.hubIdentities=true" \
+  --set "authorization.hubRBAC=true" \
+  --wait
+```
+
+### Create a DNS record
+
+:::important
+If you chose to create a public ingress, you also need to create a DNS record for the load balancer of the public-facing ingress. Do this before you create your first control plane.
+:::
+
+Create a DNS record for the load balancer of the public-facing ingress.
+To get the address for the Ingress, run the following:
+
+```bash
+kubectl get ingress \
+  -n upbound-system mxe-router-ingress \
+  -o jsonpath='{.status.loadBalancer.ingress[0].hostname}'
+```
+
+```bash
+kubectl get ingress \
+  -n upbound-system mxe-router-ingress \
+  -o jsonpath='{.status.loadBalancer.ingress[0].ip}'
+```
+
+```bash
+kubectl get ingress \
+  -n upbound-system mxe-router-ingress \
+  -o jsonpath='{.status.loadBalancer.ingress[0].ip}'
+```
+
+If the preceding command doesn't return a load balancer address, your provider may not have allocated it yet. Once it's available, add a DNS record for the `SPACES_ROUTER_HOST` to point to the given load balancer address. If it's an IPv4 address, add an A record. If it's a domain name, add a CNAME record.
+
+## Configure the up CLI
+
+With your kubeconfig pointed at the Kubernetes cluster where you installed
+Upbound Spaces, create a new profile in the `up` CLI. This profile interacts
+with your Space:
+
+```bash
+up profile create --use ${SPACES_CLUSTER_NAME} --type=disconnected --organization ${UPBOUND_ACCOUNT}
+```
+
+Optionally, log in to your Upbound account using the new profile so you can use the Upbound Marketplace with this profile as well:
+
+```bash
+up login
+```
+
+## Connect to your Space
+
+Use `up ctx` to create a kubeconfig context pointed at your new Space:
+
+```bash
+up ctx disconnected/$(kubectl config current-context)
+```
+
+## Create your first control plane
+
+You can now create a control plane with the `up` CLI:
+
+```bash
+up ctp create ctp1
+```
+
+You can also create a control plane with kubectl. The manifest below is a
+minimal example:
+
+```yaml
+cat <<EOF | kubectl apply -f -
+apiVersion: spaces.upbound.io/v1beta1
+kind: ControlPlane
+metadata:
+  name: ctp1
+  namespace: default
+EOF
+```
diff --git a/spaces_versioned_docs/version-v1.11/howtos/self-hosted/space-observability.md b/spaces_versioned_docs/version-v1.11/howtos/self-hosted/space-observability.md
new file mode 100644
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.11/howtos/self-hosted/space-observability.md
+```yaml
+observability:
+  spacesCollector:
+    env:
+    - name: API_KEY
+      valueFrom:
+        secretKeyRef:
+          name: my-secret
+          key: api-key
+    config:
+      exporters:
+        otlphttp:
+          endpoint: ""
+          headers:
+            api-key: ${env:API_KEY}
+      exportPipeline:
+        logs:
+        - otlphttp
+        metrics:
+        - otlphttp
+        traces:
+        - otlphttp
+```
+
+You can export metrics, logs, and traces from your Crossplane installation, Spaces
+infrastructure (controller, API, router, and so on), provider-helm, and
+provider-kubernetes.
+
+### Router metrics
+
+The Spaces router component uses Envoy as a reverse proxy and exposes detailed
+metrics about request handling, circuit breakers, and connection pooling.
+Upbound collects these metrics in your Space after you enable Space-level
+observability.
+
+Envoy metrics in Upbound include:
+
+- **Upstream cluster metrics** - Request status codes, timeouts, retries, and latency for traffic to control planes and services
+- **Circuit breaker metrics** - Connection and request circuit breaker state for both `DEFAULT` and `HIGH` priority levels
+- **Downstream listener metrics** - Client connections and requests received
+- **HTTP connection manager metrics** - End-to-end HTTP request processing and latency
+
+For a complete list of available router metrics and example PromQL queries, see the [Router metrics reference][router-ref].
+
+### Router tracing
+
+The Spaces router generates distributed traces through OpenTelemetry integration,
+providing end-to-end visibility into request flow across the system. Use these
+traces to debug latency issues, understand request paths, and correlate errors
+across services.
+
+The router uses:
+
+- **Protocol**: OTLP (OpenTelemetry Protocol) over gRPC
+- **Service name**: `spaces-router`
+- **Transport**: TLS-encrypted connection to telemetry collector
+
+#### Trace configuration
+
+Enable tracing and configure the sampling rate with the following Helm values:
+
+```yaml
+observability:
+  enabled: true
+  tracing:
+    enabled: true
+    sampling:
+      rate: 0.1 # Sample 10% of new traces (0.0-1.0)
+```
+
+The sampling behavior depends on whether a parent trace context exists:
+
+- **With parent context**: If a `traceparent` header is present, the parent's
+  sampling decision is respected, enabling proper distributed tracing across services.
+- **Root spans**: For new traces without a parent, Envoy samples based on
+  `x-request-id` hashing. The default sampling rate is 10%.
+
+#### TLS configuration for external collectors
+
+To send traces to an external OTLP collector, configure the endpoint and TLS settings:
+
+```yaml
+observability:
+  enabled: true
+  tracing:
+    enabled: true
+    endpoint: "otlp-gateway.example.com"
+    port: 443
+    tls:
+      caBundleSecretRef: "custom-ca-secret"
+```
+
+If `caBundleSecretRef` is set, the router uses the CA bundle from the referenced
+Kubernetes secret. The secret must contain a key named `ca.crt` with the
+PEM-encoded CA bundle. If not set, the router uses the Spaces CA for the
+in-cluster collector.
+
+#### Custom trace tags
+
+The router adds custom tags to every span to enable filtering and grouping by
+control plane:
+
+| Tag | Source | Description |
+|-----|--------|-------------|
+| `controlplane.id` | `x-upbound-mxp-id` header | Control plane UUID |
+| `controlplane.name` | `x-upbound-mxp-host` header | Internal vcluster hostname |
+| `hostcluster.id` | `x-upbound-hostcluster-id` header | Host cluster identifier |
+
+These tags enable queries like "show all slow requests to control plane X" or
+"find errors for control planes in host cluster Y."
+
+#### Example trace
+
+The following example shows the attributes from a successful GET request:
+
+```text
+Span: ingress
+├─ Service: spaces-router
+├─ Duration: 8.025ms
+├─ Attributes:
+│  ├─ http.method: GET
+│  ├─ http.status_code: 200
+│  ├─ upstream_cluster: ctp-b2b37aaa-ee55-492c-ba0c-4d561a6325fa-api-cluster
+│  ├─ controlplane.id: b2b37aaa-ee55-492c-ba0c-4d561a6325fa
+│  ├─ controlplane.name: vcluster.mxp-b2b37aaa-ee55-492c-ba0c-4d561a6325fa-system
+│  └─ response_size: 1827
+```
+
+## Available metrics
+
+Space-level observability collects metrics from multiple infrastructure components:
+
+### Infrastructure component metrics
+
+- Crossplane controller metrics
+- Spaces controller, API, and router metrics
+- Provider metrics (provider-helm, provider-kubernetes)
+
+### Router metrics
+
+The router component exposes Envoy proxy metrics for monitoring traffic flow and
+service health.
+Key metric categories include:
+
+- `envoy_cluster_upstream_rq_*` - Upstream request metrics (status codes, timeouts, retries, latency)
+- `envoy_cluster_circuit_breakers_*` - Circuit breaker state and capacity
+- `envoy_listener_downstream_*` - Client connection and request metrics
+- `envoy_http_downstream_*` - HTTP request processing metrics
+
+Example query to monitor total request rate:
+
+```promql
+sum(rate(envoy_cluster_upstream_rq_total{job="spaces-router-envoy"}[5m]))
+```
+
+Example query for P95 latency:
+
+```promql
+histogram_quantile(
+  0.95,
+  sum by (le) (
+    rate(envoy_cluster_upstream_rq_time_bucket{job="spaces-router-envoy"}[5m])
+  )
+)
+```
+
+For detailed router metrics documentation and more query examples, see the [Router metrics reference][router-ref].
+
+## OpenTelemetryCollector image
+
+Control plane (`SharedTelemetry`) and Space observability deploy the same custom
+OpenTelemetry Collector image. The OpenTelemetry Collector image supports the
+`otlphttp`, `datadog`, and `debug` exporters.
+
+For more information on observability configuration, review the [Helm chart reference][helm-chart-reference].
+
+## Observability in control planes
+
+Read the [observability documentation][observability-documentation] to learn
+about the features Upbound offers for collecting telemetry from control planes.
+
+## Router metrics reference {#router-ref}
+
+To avoid overwhelming observability tools with hundreds of Envoy metrics, an
+allow-list filters metrics to only the following metric families.
+
+### Upstream cluster metrics
+
+Metrics tracking requests sent from Envoy to configured upstream clusters.
+Individual control planes, spaces-api, and other services are each considered
+an upstream cluster. Use these metrics to monitor service health, identify
+upstream errors, and measure backend latency.
+
+| Metric | Description |
+|--------|-------------|
+| `envoy_cluster_upstream_rq_xx_total` | HTTP status codes (2xx, 3xx, 4xx, 5xx) with label `envoy_response_code_class` |
+| `envoy_cluster_upstream_rq_timeout_total` | Requests that timed out waiting for upstream |
+| `envoy_cluster_upstream_rq_retry_limit_exceeded_total` | Requests that exhausted retry attempts |
+| `envoy_cluster_upstream_rq_total` | Total upstream requests |
+| `envoy_cluster_upstream_rq_time_bucket` | Latency histogram (for P50/P95/P99 calculations) |
+| `envoy_cluster_upstream_rq_time_sum` | Sum of request durations |
+| `envoy_cluster_upstream_rq_time_count` | Count of requests |
+
+### Circuit breaker metrics
+
+Metrics tracking circuit breaker state and remaining capacity. Circuit breakers
+prevent cascading failures by limiting connections and concurrent requests to
+unhealthy upstreams. Two priority levels exist: `DEFAULT` for watch requests and
+`HIGH` for API requests.
+
+| Name | Description |
+|--------|-------------|
+| `envoy_cluster_circuit_breakers_default_cx_open` | `DEFAULT` priority connection circuit breaker open (gauge) |
+| `envoy_cluster_circuit_breakers_default_rq_open` | `DEFAULT` priority request circuit breaker open (gauge) |
+| `envoy_cluster_circuit_breakers_default_remaining_cx` | Available `DEFAULT` priority connections (gauge) |
+| `envoy_cluster_circuit_breakers_default_remaining_rq` | Available `DEFAULT` priority request slots (gauge) |
+| `envoy_cluster_circuit_breakers_high_cx_open` | `HIGH` priority connection circuit breaker open (gauge) |
+| `envoy_cluster_circuit_breakers_high_rq_open` | `HIGH` priority request circuit breaker open (gauge) |
+| `envoy_cluster_circuit_breakers_high_remaining_cx` | Available `HIGH` priority connections (gauge) |
+| `envoy_cluster_circuit_breakers_high_remaining_rq` | Available `HIGH` priority request slots (gauge) |
+
+### Downstream listener metrics
+
+Metrics tracking requests received from clients such as kubectl and API consumers.
+Use these metrics to monitor client connection patterns, overall request volume,
+and responses sent to external users.
+
+| Name | Description |
+|--------|-------------|
+| `envoy_listener_downstream_rq_xx_total` | HTTP status codes for responses sent to clients |
+| `envoy_listener_downstream_rq_total` | Total requests received from clients |
+| `envoy_listener_downstream_cx_total` | Total connections from clients |
+| `envoy_listener_downstream_cx_active` | Currently active client connections (gauge) |
+
+### HTTP connection manager metrics
+
+Metrics from Envoy's HTTP connection manager tracking end-to-end request
+processing. These metrics provide a comprehensive view of the HTTP request
+lifecycle including status codes and client-perceived latency.
+
+| Name | Description |
+|--------|-------------|
+| `envoy_http_downstream_rq_xx` | HTTP status codes (note: no `_total` suffix for this metric family) |
+| `envoy_http_downstream_rq_total` | Total HTTP requests received |
+| `envoy_http_downstream_rq_time_bucket` | Downstream request latency histogram |
+| `envoy_http_downstream_rq_time_sum` | Sum of downstream request durations |
+| `envoy_http_downstream_rq_time_count` | Count of downstream requests |
+
+[router-ref]: #router-ref
+[observability-documentation]: /spaces/howtos/observability
+[opentelemetry-collector]: https://opentelemetry.io/docs/collector/
+[opentelemetry-operator]: https://opentelemetry.io/docs/kubernetes/operator/
+[helm-chart-reference]: /reference/helm-reference
diff --git a/spaces_versioned_docs/version-v1.11/howtos/self-hosted/spaces-management.md b/spaces_versioned_docs/version-v1.11/howtos/self-hosted/spaces-management.md
new file mode 100644
index 000000000..3df61c306
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.11/howtos/self-hosted/spaces-management.md
@@ -0,0 +1,219 @@
+---
+title: Interacting with Disconnected Spaces
+sidebar_position: 10
+description: Common operations in Spaces
+---
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Spaces management procedures are consistent across versions.
+
+For version compatibility details, see the documentation for your Spaces version.
+:::
+
+## Spaces management
+
+### Create a Space
+
+To install an Upbound Space into a cluster, it's recommended that you dedicate an entire Kubernetes cluster to the Space. You can use [up space init][up-space-init] to install an Upbound Space.
+Below is an example:
+
+```bash
+up space init "v1.9.0"
+```
+
+:::tip
+For a full guide to get started with Spaces, read the [quickstart][quickstart] guide.
+:::
+
+You can also install the Helm chart for Spaces directly. For a Spaces install to succeed, you must first install and configure some prerequisites. These include:
+
+- UXP
+- provider-helm and provider-kubernetes
+- cert-manager
+
+Furthermore, the Spaces chart requires a pull secret, which Upbound must provide to you.
+
+```bash
+helm -n upbound-system upgrade --install spaces \
+  oci://xpkg.upbound.io/spaces-artifacts/spaces \
+  --version "v1.9.0" \
+  --set "ingress.host=your-host.com" \
+  --set "clusterType=eks" \
+  --set "account=your-upbound-account" \
+  --wait
+```
+
+For a complete tutorial of the Helm install, read one of the deployment guides for [AWS][aws], [Azure][azure], or [GCP][gcp], which cover the step-by-step process.
+
+### Upgrade a Space
+
+To upgrade a Space from one version to the next, use [up space upgrade][up-space-upgrade]. Spaces supports upgrading from version `x.N.*` to version `x.N+1.*`.
+
+```bash
+up space upgrade "v1.9.0"
+```
+
+You can also upgrade a Space by manually bumping the Helm chart version. Before
+upgrading, review the release notes for any breaking changes or
+special requirements:
+
+1. Review the release notes for the target version in the [Spaces Release Notes][spaces-release-notes]
+2. Upgrade the Space by updating the Helm chart version:
+
+```bash
+helm -n upbound-system upgrade spaces \
+  oci://xpkg.upbound.io/spaces-artifacts/spaces \
+  --version "v1.9.0" \
+  --reuse-values \
+  --wait
+```
+
+For major version upgrades or configuration changes, extract your current values
+and adjust:
+
+```bash
+# Extract current values to a file
+helm -n upbound-system get values spaces > spaces-values.yaml
+
+# Upgrade with modified values
+helm -n upbound-system upgrade spaces \
+  oci://xpkg.upbound.io/spaces-artifacts/spaces \
+  --version "v1.9.0" \
+  -f spaces-values.yaml \
+  --wait
+```
+
+### Downgrade a Space
+
+To roll back a Space from one version to the previous one, use [up space upgrade][up-space-upgrade-1]. Spaces supports downgrading from version `x.N.*` to version `x.N-1.*`.
+
+```bash
+up space upgrade --rollback
+```
+
+You can also downgrade a Space manually using Helm by specifying an earlier version:
+
+```bash
+helm -n upbound-system upgrade spaces \
+  oci://xpkg.upbound.io/spaces-artifacts/spaces \
+  --version "v1.8.0" \
+  --reuse-values \
+  --wait
+```
+
+When downgrading, make sure to:
+
+1. Check the [release notes][release-notes] for specific downgrade instructions
+2. Verify compatibility between the downgraded Space and any control planes
+3. Back up any critical data before proceeding
+
+### Uninstall a Space
+
+To uninstall a Space from a Kubernetes cluster, use [up space destroy][up-space-destroy]. A destroy operation uninstalls core components and orphans control planes and their associated resources.
+
+```bash
+up space destroy
+```
+
+## Control plane management
+
+You can manage control planes in a Space via the [up CLI][up-cli] or the Spaces-local Kubernetes API. When you install a Space, it defines a new API type, `kind: ControlPlane`, that you can use to create and manage control planes in the Space.
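+
+For example, once the Space is installed, you can confirm the control plane API
+is being served; this assumes your kubeconfig points at the Spaces cluster:
+
+```bash
+# List the APIs the Space registers in the spaces.upbound.io group.
+kubectl api-resources --api-group=spaces.upbound.io
+```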
+
+### Create a control plane
+
+To create a control plane in a Space using `up`, run the following:
+
+```bash
+up ctp create ctp1
+```
+
+You can also declare a new control plane like the example below and apply it to your Spaces cluster:
+
+```yaml
+apiVersion: spaces.upbound.io/v1beta1
+kind: ControlPlane
+metadata:
+  name: ctp1
+  namespace: default
+spec:
+  writeConnectionSecretToRef:
+    name: kubeconfig-ctp1
+    namespace: default
+```
+
+This manifest:
+
+- Creates a new control plane in the Space called `ctp1`.
+- Publishes the kubeconfig for connecting to the control plane to a secret in the Spaces cluster, called `kubeconfig-ctp1`.
+
+### Connect to a control plane
+
+To connect to a control plane in a Space using `up`, run the following:
+
+```bash
+up ctp connect new-control-plane
+```
+
+The command changes your kubeconfig's current context to the control plane you specify. If you want to change your kubeconfig back to a previous context, run:
+
+```bash
+up ctp disconnect
+```
+
+If you configured your control plane to publish connection details, you can also access it this way. Once the control plane is ready, use the secret (containing connection details) to connect to the API server of your control plane.
+
+```bash
+kubectl get secret kubeconfig-ctp1 -n default -o jsonpath='{.data.kubeconfig}' | base64 -d > /tmp/ctp1.yaml
+```
+
+Reference the kubeconfig whenever you want to interact directly with the API server of the control plane (rather than the Space's API server):
+
+```bash
+kubectl get providers --kubeconfig=/tmp/ctp1.yaml
+```
+
+### Configure a control plane
+
+Spaces offers a built-in feature that allows you to connect a control plane to a Git source. This experience is similar to when a control plane runs in [Upbound's SaaS environment][upbound-s-saas-environment]. Upbound recommends using the built-in Git integration to drive the configuration of your control planes in a Space.
+
+Learn more in the [Spaces Git integration][spaces-git-integration] documentation.
+
+### List control planes
+
+To list all control planes in a Space using `up`, run the following:
+
+```bash
+up ctp list
+```
+
+Or you can use Kubernetes-style semantics to list the control planes:
+
+```bash
+kubectl get controlplanes
+```
+
+
+### Delete a control plane
+
+To delete a control plane in a Space using `up`, run the following:
+
+```bash
+up ctp delete ctp1
+```
+
+Or you can use Kubernetes-style semantics to delete the control plane:
+
+```bash
+kubectl delete controlplane ctp1
+```
+
+
+[up-space-init]: /reference/cli-reference
+[quickstart]: /
+[aws]: /spaces/howtos/self-hosted/self-hosted-spaces-deployment
+[azure]: /spaces/howtos/self-hosted/self-hosted-spaces-deployment
+[gcp]: /spaces/howtos/self-hosted/self-hosted-spaces-deployment
+[up-space-upgrade]: /reference/cli-reference
+[spaces-release-notes]: /reference/release-notes/spaces
+[up-space-upgrade-1]: /reference/cli-reference
+[release-notes]: /reference/release-notes/spaces
+[up-space-destroy]: /reference/cli-reference
+[up-cli]: /reference/cli-reference
+[upbound-s-saas-environment]: /spaces/howtos/self-hosted/spaces-management
+[spaces-git-integration]: /spaces/howtos/self-hosted/gitops
diff --git a/spaces_versioned_docs/version-v1.11/howtos/self-hosted/troubleshooting.md b/spaces_versioned_docs/version-v1.11/howtos/self-hosted/troubleshooting.md
new file mode 100644
index 000000000..8d1ca6517
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.11/howtos/self-hosted/troubleshooting.md
@@ -0,0 +1,132 @@
+---
+title: Troubleshooting
+sidebar_position: 100
+description: A guide for troubleshooting an issue that occurs in a Space
+---
+
+The guidance below helps you find solutions for issues you encounter when deploying and using an Upbound Space. Use the tips below as a supplement to the observability metrics discussed in the [Observability][observability] page.
+
+## General tips
+
+Most issues fall into two general categories:
+
+1. issues with the Spaces management plane
+2. issues on a control plane
+
+If your control plane doesn't reach a `Ready` state, it's indicative of the former. If your control plane is in a created and running state, but resources aren't reconciling, it's indicative of the latter.
+
+### Spaces component layout
+
+Run `kubectl get pods -A` against the cluster hosting a Space. You should see a variety of pods across several namespaces.
It should look something like this: + +```bash +NAMESPACE NAME READY STATUS RESTARTS AGE +cert-manager cert-manager-6d6769565c-mc5df 1/1 Running 0 25m +cert-manager cert-manager-cainjector-744bb89575-nw4fg 1/1 Running 0 25m +cert-manager cert-manager-webhook-759d6dcbf7-ps4mq 1/1 Running 0 25m +ingress-nginx ingress-nginx-controller-7f8ccfccc6-6szlp 1/1 Running 0 25m +kube-system coredns-5d78c9869d-4p477 1/1 Running 0 26m +kube-system coredns-5d78c9869d-pdxt6 1/1 Running 0 26m +kube-system etcd-kind-control-plane 1/1 Running 0 26m +kube-system kindnet-8s7pq 1/1 Running 0 26m +kube-system kube-apiserver-kind-control-plane 1/1 Running 0 26m +kube-system kube-controller-manager-kind-control-plane 1/1 Running 0 26m +kube-system kube-proxy-l68r8 1/1 Running 0 26m +kube-system kube-scheduler-kind-control-plane 1/1 Running 0 26m +local-path-storage local-path-provisioner-6bc4bddd6b-qsdjt 1/1 Running 0 26m +mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system coredns-5dc69d6447-f56rh-x-kube-system-x-vcluster 1/1 Running 0 21m +mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system crossplane-6b6d67bc66-6b8nx-x-upbound-system-x-vcluster 1/1 Running 0 20m +mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system crossplane-rbac-manager-78f6fc7cb4-pjkhc-x-upbound-s-12253c3c4e 1/1 Running 0 20m +mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system kube-state-metrics-7f8f4dcc5b-8p8c4 1/1 Running 0 22m +mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system mxp-gateway-68f546b9c8-xnz5j-x-upbound-system-x-vcluster 1/1 Running 0 20m +mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system mxp-ksm-config-54655667bb-hv9br 1/1 Running 0 22m +mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system mxp-readyz-5f7f97d967-b98bw 1/1 Running 0 22m +mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system otlp-collector-56d7d46c8d-g5sh5-x-upbound-system-x-vcluster 1/1 Running 0 20m +mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system vcluster-67c9fb8959-ppb2m 1/1 Running 0 22m +mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system vcluster-api-6bfbccc49d-ffgpj 1/1 Running 0 22m +mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system vcluster-controller-7cc6855656-8c46b 1/1 Running 0 22m +mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system vcluster-etcd-0 1/1 Running 0 22m +mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system vector-754b494b84-wljw4 1/1 Running 0 22m +mxp-system mxp-charts-chartmuseum-7587f77558-8tltb 1/1 Running 0 23m +upbound-system crossplane-b4dc7b4c9-6hjh5 1/1 Running 0 25m +upbound-system crossplane-contrib-provider-helm-ce18dd03e6e4-7945d8985-4gcwr 1/1 Running 0 24m +upbound-system crossplane-contrib-provider-kubernetes-1f1e32c1957d-577756gs2x4 1/1 Running 0 24m +upbound-system crossplane-rbac-manager-d8cb49cbc-gbvvf 1/1 Running 0 25m +upbound-system spaces-controller-6647677cf9-5zl5q 1/1 Running 0 24m +upbound-system spaces-router-bc78c96d7-kzts2 2/2 Running 0 24m +``` + +What you are seeing is: + +- Pods in the `upbound-system` namespace are components required to run the management plane of the Space. This includes the `spaces-controller`, `spaces-router`, and install of UXP. +- Pods in the `mxp-{GUID}-system` namespace are components that collectively power a control plane. Notable call outs include pod names that look like `vcluster-api-{GUID}` and `vcluster-controller-{GUID}`, which are integral components of a control plane. +- Pods in other notable namespaces, including `cert-manager` and `ingress-nginx`, are prerequisite components that support a Space's successful operation. 
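+
+If the full listing is noisy, you can scope it to the pieces described above. These commands are a convenience, not a requirement:
+
+```bash
+# Space management components
+kubectl get pods -n upbound-system
+
+# Per-control-plane components (namespaces follow the mxp-<GUID>-system pattern)
+kubectl get pods -A | grep '^mxp-'
+```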
+
+
+
+### Troubleshooting tips for the Spaces management plane
+
+Start by getting the status of all the pods in a Space:
+
+1. Make sure the current context of your kubeconfig points at the Kubernetes cluster hosting your Space.
+2. Get the status of all the pods in the Space:
+```bash
+kubectl get pods -A
+```
+3. Scan the `Status` column to see if any of the pods report a status besides `Running`.
+4. Scan the `Restarts` column to see if any of the pods have restarted.
+5. If you notice a status other than `Running` or see pods that restarted, investigate their events:
+```bash
+kubectl describe pod <pod-name> -n <namespace>
+```
+
+Next, inspect the status of objects and releases:
+
+1. Make sure the current context of your kubeconfig points at the Kubernetes cluster hosting your Space.
+2. Inspect the objects in your Space. If any are unhealthy, describe those objects to get the events:
+```bash
+kubectl get objects
+```
+3. Inspect the releases in your Space. If any are unhealthy, describe those releases to get the events:
+```bash
+kubectl get releases
+```
+
+### Troubleshooting tips for control planes in a Space
+
+General troubleshooting in a control plane starts by fetching the events of the control plane:
+
+1. Make sure the current context of your kubeconfig points at the Kubernetes cluster hosting your Space.
+2. Run the following to fetch your control planes:
+```bash
+kubectl get ctp
+```
+3. Describe the control plane by providing its name, found in the preceding instruction:
+```bash
+kubectl describe controlplanes.spaces.upbound.io <control-plane-name>
+```
+
+## Issues
+
+
+### Your control plane is stuck in a 'creating' state
+
+#### Error: unknown field "ports" in io.k8s.api.networking.v1.NetworkPolicySpec
+
+This error occurs when the Spaces software attempts to install a Helm release named `control-plane-host-policies`. The full error is:
+
+_CannotCreateExternalResource failed to install release: unable to build kubernetes objects from release manifest: error validating "": error validating data: ValidationError(NetworkPolicy.spec): unknown field "ports" in io.k8s.api.networking.v1.NetworkPolicySpec_
+
+This error may be caused by running a Space on an earlier version of Kubernetes than is supported (`v1.26` or later). To resolve this issue, upgrade the host Kubernetes cluster to `v1.26` or later.
+
+### Your Spaces install fails
+
+#### Error: You tried to install a Space on a previous Crossplane installation
+
+If you try to install a Space on an existing cluster that previously had Crossplane or UXP on it, you may encounter errors. Due to how the Spaces installer tests for the presence of UXP, it may detect orphaned CRDs that weren't cleaned up by the previous uninstall of Crossplane. You may need to manually [remove old Crossplane CRDs][remove-old-crossplane-crds] for the installer to properly detect the UXP prerequisite.
+
+
+
+
+[observability]: /spaces/howtos/observability
+[remove-old-crossplane-crds]: https://docs.crossplane.io/latest/guides/uninstall-crossplane/
diff --git a/spaces_versioned_docs/version-v1.11/howtos/self-hosted/use-argo.md b/spaces_versioned_docs/version-v1.11/howtos/self-hosted/use-argo.md
new file mode 100644
index 000000000..d58f7db44
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.11/howtos/self-hosted/use-argo.md
@@ -0,0 +1,228 @@
+---
+title: Use ArgoCD Plugin
+sidebar_position: 15
+description: A guide for integrating Argo with control planes in a Space.
+aliases:
+  - /all-spaces/self-hosted-spaces/use-argo
+  - /deploy/disconnected-spaces/use-argo-flux
+  - /all-spaces/self-hosted-spaces/use-argo-flux
+  - /connect/use-argo
+---
+
+
+:::info API Version Information
+This guide covers the ArgoCD Plugin, a preview feature available in all supported versions (v1.9-v1.14+) for self-hosted deployments.
+
+For details on GitOps patterns and related features across versions, see [Automation and GitOps Overview](/spaces/howtos/automation-and-gitops/overview/).
+:::
+
+:::important
+This feature is in preview and is off by default. To enable it, set `features.alpha.argocdPlugin.enabled=true` when installing Spaces:
+
+```bash
+up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
+  ...
+  --set "features.alpha.argocdPlugin.enabled=true"
+```
+:::
+
+Spaces provides an optional plugin to assist with integrating a control plane in a Space with Argo CD. You must enable the plugin for the entire Space at Spaces install or upgrade time. The plugin's job is to propagate the connection details of each control plane in a Space to Argo CD. By default, Upbound stores these connection details in a Kubernetes secret named after the control plane. If you run Argo CD across multiple namespaces, Upbound recommends enabling the `features.alpha.argocdPlugin.useUIDFormatForCTPSecrets` flag, which uses a UID-based format for secret names to avoid conflicts.
+
+:::tip
+For general guidance on integrating Upbound with GitOps flows, see [GitOps with Control Planes][gitops-with-control-planes].
+:::
+
+## On cluster Argo CD
+
+If you are running Argo CD on the same cluster as the Space, run the following to enable the plugin:
+
+
+
+
+
+
+```bash {hl_lines="3-4"}
+up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
+  --set "account=${UPBOUND_ACCOUNT}" \
+  --set "features.alpha.argocdPlugin.enabled=true" \
+  --set "features.alpha.argocdPlugin.useUIDFormatForCTPSecrets=true" \
+  --set "features.alpha.argocdPlugin.target.secretNamespace=argocd"
+```
+
+
+
+
+
+```bash {hl_lines="7-8"}
+helm -n upbound-system upgrade --install spaces \
+  oci://xpkg.upbound.io/spaces-artifacts/spaces \
+  --version "${SPACES_VERSION}" \
+  --set "ingress.host=${SPACES_ROUTER_HOST}" \
+  --set "account=${UPBOUND_ACCOUNT}" \
+  --set "features.alpha.argocdPlugin.enabled=true" \
+  --set "features.alpha.argocdPlugin.useUIDFormatForCTPSecrets=true" \
+  --set "features.alpha.argocdPlugin.target.secretNamespace=argocd" \
+  --wait
+```
+
+
+
+
+
+
+The important flags are:
+
+- `features.alpha.argocdPlugin.enabled=true`
+- `features.alpha.argocdPlugin.useUIDFormatForCTPSecrets=true`
+- `features.alpha.argocdPlugin.target.secretNamespace=argocd`
+
+The first flag enables the feature, the second uses a UID-based format for the propagated secret names to avoid conflicts, and the third indicates the namespace on the cluster where you installed Argo CD.
+
+Be sure to [configure Argo][configure-argo] after it's installed.
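+
+To verify the plugin is working, check for the propagated secrets in the target namespace. This assumes the plugin registers control planes using Argo CD's standard cluster-secret labeling; adjust the selector if your setup differs:
+
+```bash
+# Connection secrets propagated for your control planes should appear here
+kubectl get secrets -n argocd -l argocd.argoproj.io/secret-type=cluster
+```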
+
+## External cluster Argo CD
+
+If you are running Argo CD on an external cluster from where you installed your Space, you need to provide some extra flags:
+
+
+
+
+
+
+```bash {hl_lines="3-7"}
+up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
+  --set "account=${UPBOUND_ACCOUNT}" \
+  --set "features.alpha.argocdPlugin.enabled=true" \
+  --set "features.alpha.argocdPlugin.useUIDFormatForCTPSecrets=true" \
+  --set "features.alpha.argocdPlugin.target.secretNamespace=argocd" \
+  --set "features.alpha.argocdPlugin.target.externalCluster.enabled=true" \
+  --set "features.alpha.argocdPlugin.target.externalCluster.secret.name=my-argo-cluster" \
+  --set "features.alpha.argocdPlugin.target.externalCluster.secret.key=kubeconfig"
+```
+
+
+
+
+
+```bash {hl_lines="7-11"}
+helm -n upbound-system upgrade --install spaces \
+  oci://xpkg.upbound.io/spaces-artifacts/spaces \
+  --version "${SPACES_VERSION}" \
+  --set "ingress.host=${SPACES_ROUTER_HOST}" \
+  --set "account=${UPBOUND_ACCOUNT}" \
+  --set "features.alpha.argocdPlugin.enabled=true" \
+  --set "features.alpha.argocdPlugin.useUIDFormatForCTPSecrets=true" \
+  --set "features.alpha.argocdPlugin.target.secretNamespace=argocd" \
+  --set "features.alpha.argocdPlugin.target.externalCluster.enabled=true" \
+  --set "features.alpha.argocdPlugin.target.externalCluster.secret.name=my-argo-cluster" \
+  --set "features.alpha.argocdPlugin.target.externalCluster.secret.key=kubeconfig" \
+  --wait
+```
+
+
+
+
+
+The extra flags are:
+
+- `features.alpha.argocdPlugin.target.externalCluster.enabled=true`
+- `features.alpha.argocdPlugin.target.externalCluster.secret.name=my-argo-cluster`
+- `features.alpha.argocdPlugin.target.externalCluster.secret.key=kubeconfig`
+
+These flags tell the plugin (running in Spaces) where your Argo CD instance is. After you've done this at install time, you also need to create a `Secret` on the Spaces cluster. This secret must contain a kubeconfig pointing to your Argo CD instance. The secret needs to be in the same namespace as the `spaces-controller`, which is `upbound-system`.
+
+Once you enable the plugin and configure it, the plugin automatically propagates connection details for your control planes to your Argo CD instance. You can then target the control plane and use Argo to sync Crossplane-related objects to it.
+
+Be sure to [configure Argo][configure-argo-1] after it's installed.
+
+## Configure Argo
+
+Argo's default configuration causes it to try to query for resource kinds that don't exist in control planes. You should configure Argo's [general configmap][general-configmap] to include the resource group/kinds which make sense in the context of control planes. For example, the concept of `nodes` isn't exposed in control planes.
+ +To configure Argo CD, connect to the cluster where you've installed it and edit the configmap: + +```bash +kubectl edit configmap argocd-cm -n argocd +``` + +Adjust the resource inclusions and exclusions under the `data` field of the configmap: + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: argocd-cm + namespace: argocd +data: + resource.exclusions: | + - apiGroups: + - "*" + kinds: + - "*" + clusters: + - "*" + resource.inclusions: | + - apiGroups: + - "*" + kinds: + - Provider + - Configuration + clusters: + - "*" +``` + +The preceding configuration causes Argo to exclude syncing **all** resource group/kinds--except Crossplane `providers` and `configurations`--for **all** control planes. You're encouraged to adjust the `resource.inclusions` to include the types that make sense for your control plane, such as an `XRD` you've built with Crossplane. You're also encouraged to customize the `clusters` pattern to selectively apply these exclusions/inclusions to control planes (for example, `control-plane-prod-*`). + +## Control plane connection secrets + +To deploy control planes through Argo CD, you need to configure the `writeConnectionSecretToRef` field in your control plane spec. This field specifies where to store the control plane's `kubeconfig` and makes connection details available to Argo CD. + +### Basic Configuration + +In your control plane manifest, include the `writeConnectionSecretToRef` field: + +```yaml +apiVersion: spaces.upbound.io/v1beta1 +kind: ControlPlane +metadata: + name: my-control-plane + namespace: my-control-plane-group +spec: + writeConnectionSecretToRef: + name: kubeconfig-my-control-plane + namespace: my-control-plane-group + # ... other control plane configuration +``` + +### Parameters + +The `writeConnectionSecretToRef` field requires two parameters: + +- `name`: A unique name for the secret containing the kubeconfig (`kubeconfig-my-control-plane`) +- `namespace`: The Kubernetes namespace where you store the secret, which must match the metadata namespace. The system copies it into the `argocd` namespace when you set the `features.alpha.argocdPlugin.target.secretNamespace=argocd` configuration parameter. + +Control plane labels automatically propagate to the connection secret, which allows you to use label selectors in Argo CD for automated discovery and management. + +This configuration enables Argo CD to automatically discover and manage resources on your control planes. 
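+
+Once the connection secret for a control plane is registered, you can point an Argo CD `Application` at it by cluster name. A minimal sketch; the repository URL, path, and cluster name are placeholders for your own setup:
+
+```yaml
+apiVersion: argoproj.io/v1alpha1
+kind: Application
+metadata:
+  name: ctp-crossplane-config
+  namespace: argocd
+spec:
+  project: default
+  source:
+    repoURL: https://github.com/your-org/control-plane-config   # placeholder
+    targetRevision: main
+    path: crossplane
+  destination:
+    # Name of the cluster secret the plugin registered for your control plane
+    name: my-control-plane
+  syncPolicy:
+    automated:
+      prune: true
+      selfHeal: true
+```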
+ + +[gitops-with-control-planes]: /spaces/howtos/cloud-spaces/gitops +[configure-argo]: #configure-argo +[configure-argo-1]: #configure-argo +[general-configmap]: https://argo-cd.readthedocs.io/en/stable/operator-manual/argocd-cm-yaml/ diff --git a/spaces_versioned_docs/version-v1.11/howtos/self-hosted/workload-id/_category_.json b/spaces_versioned_docs/version-v1.11/howtos/self-hosted/workload-id/_category_.json new file mode 100644 index 000000000..c5ecc93f6 --- /dev/null +++ b/spaces_versioned_docs/version-v1.11/howtos/self-hosted/workload-id/_category_.json @@ -0,0 +1,11 @@ +{ + "label": "Workload Identity Configuration", + "position": 2, + "collapsed": true, + "customProps": { + "plan": "business" + } + +} + + diff --git a/spaces_versioned_docs/version-v1.11/howtos/self-hosted/workload-id/backup-restore-config.md b/spaces_versioned_docs/version-v1.11/howtos/self-hosted/workload-id/backup-restore-config.md new file mode 100644 index 000000000..935ca69ec --- /dev/null +++ b/spaces_versioned_docs/version-v1.11/howtos/self-hosted/workload-id/backup-restore-config.md @@ -0,0 +1,384 @@ +--- +title: Backup and Restore Workload ID +weight: 1 +description: Configure workload identity for Spaces Backup and Restore +--- +import GlobalLanguageSelector, { CodeBlock } from '@site/src/components/GlobalLanguageSelector'; + + + + + + + +Workload-identity authentication lets you use access policies to grant temporary +AWS credentials to your Kubernetes pod with a service account. Assigning IAM roles and service accounts allows the pod to assume the IAM role dynamically and much more securely than static credentials. + +This guide walks you through creating an IAM trust role policy and applying it +to your EKS cluster to handle backup and restore storage. + + + + + +Workload-identity authentication lets you use access policies to grant your +self-hosted Space cluster access to your cloud providers. Workload identity +authentication grants temporary Azure credentials to your Kubernetes pod based on +a service account. Assigning managed identities and service accounts allows the pod to +authenticate with Azure resources dynamically and much more securely than static credentials. + +This guide walks you through creating a managed identity and federated credential for your AKS +cluster to handle backup and restore storage. + + + + + +Workload-identity authentication lets you use access policies to grant your +self-hosted Space cluster access to your cloud providers. Workload identity +authentication grants temporary GCP credentials to your Kubernetes pod based on +a service account. Assigning IAM roles and service accounts allows the pod to +access cloud resources dynamically and much more securely than static credentials. + +This guide walks you through configuring workload identity for your GKE +cluster to handle backup and restore storage. + + + +## Prerequisites + + +To set up a workload-identity, you'll need: + + +- A self-hosted Space cluster +- Administrator access in your cloud provider +- Helm and `kubectl` + +## About the backup and restore component + +The `mxp-controller` component handles backup and restore workloads. It needs to +access your cloud storage to store and retrieve backups. By default, this +component runs in each control plane's host namespace. + +## Configuration + + + +Upbound supports workload-identity configurations in AWS with IAM Roles for +Service Accounts and EKS pod identity association. 
+
+#### IAM Roles for Service Accounts (IRSA)
+
+With IRSA, you can associate a Kubernetes service account in an EKS cluster with
+an AWS IAM role. Upbound authenticates workloads with that service account as
+the IAM role using temporary credentials instead of static role credentials.
+IRSA relies on the AWS STS `AssumeRoleWithWebIdentity` operation to exchange OIDC ID tokens for
+the IAM role's temporary credentials. IRSA uses the `eks.amazonaws.com/role-arn`
+annotation to link the service account and the IAM role.
+
+First, create an IAM role with appropriate permissions to access your S3 bucket:
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Action": [
+        "s3:GetObject",
+        "s3:PutObject",
+        "s3:ListBucket",
+        "s3:DeleteObject"
+      ],
+      "Resource": [
+        "arn:aws:s3:::${YOUR_BACKUP_BUCKET}",
+        "arn:aws:s3:::${YOUR_BACKUP_BUCKET}/*"
+      ]
+    }
+  ]
+}
```
+
+Next, ensure your EKS cluster has an OIDC identity provider:
+
+```shell
+eksctl utils associate-iam-oidc-provider --cluster ${YOUR_CLUSTER_NAME} --approve
+```
+
+Configure the IAM role trust policy with the namespace for each
+provisioned control plane.
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Principal": {
+        "Federated": "arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:oidc-provider/${YOUR_OIDC_PROVIDER}"
+      },
+      "Action": "sts:AssumeRoleWithWebIdentity",
+      "Condition": {
+        "StringEquals": {
+          "${YOUR_OIDC_PROVIDER}:aud": "sts.amazonaws.com",
+          "${YOUR_OIDC_PROVIDER}:sub": "system:serviceaccount:${YOUR_NAMESPACE}:mxp-controller"
+        }
+      }
+    }
+  ]
+}
+```
+
+In your control plane, pass the `--set` flag with the Spaces Helm chart
+parameters for the Backup and Restore component:
+
+```shell
+--set controlPlanes.mxpController.serviceAccount.annotations."eks\.amazonaws\.com/role-arn"="${SPACES_BR_IAM_ROLE_ARN}"
+```
+
+This command allows the backup and restore component to authenticate with your
+dedicated IAM role in your EKS cluster environment.
+
+#### EKS pod identities
+
+Upbound also supports EKS Pod Identity configuration. EKS Pod Identities allow
+you to create a pod identity association with your Kubernetes namespace, a
+service account, and an IAM role, which allows the EKS control plane to
+automatically handle the credential exchange.
+
+First, create an IAM role with appropriate permissions to access your S3 bucket:
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Action": [
+        "s3:GetObject",
+        "s3:PutObject",
+        "s3:ListBucket",
+        "s3:DeleteObject"
+      ],
+      "Resource": [
+        "arn:aws:s3:::${YOUR_BACKUP_BUCKET}",
+        "arn:aws:s3:::${YOUR_BACKUP_BUCKET}/*"
+      ]
+    }
+  ]
+}
+```
+
+When you install or upgrade your Space with Helm, add the backup and restore values:
+
+```shell
+helm upgrade spaces spaces-helm-chart \
+  --set "backup.enabled=true" \
+  --set "backup.storage.provider=aws" \
+  --set "backup.storage.aws.region=${YOUR_AWS_REGION}" \
+  --set "backup.storage.aws.bucket=${YOUR_BACKUP_BUCKET}"
+```
+
+After Upbound provisions your control plane, create a Pod Identity Association
+with the `aws` CLI:
+
+```shell
+aws eks create-pod-identity-association \
+  --cluster-name ${YOUR_CLUSTER_NAME} \
+  --namespace ${YOUR_CONTROL_PLANE_NAMESPACE} \
+  --service-account mxp-controller \
+  --role-arn arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:role/backup-restore-role
+```
+
+
+
+
+
+Upbound supports workload-identity configurations in Azure with Azure's built-in
+workload identity feature.
+
+#### Prepare your cluster
+
+First, enable the OIDC issuer and workload identity in your AKS cluster:
+
+```shell
+az aks update --resource-group ${YOUR_RESOURCE_GROUP} --name ${YOUR_AKS_CLUSTER_NAME} --enable-oidc-issuer --enable-workload-identity
+```
+
+Next, find and store the OIDC issuer URL as an environment variable:
+
+```shell
+export AKS_OIDC_ISSUER="$(az aks show --name ${YOUR_AKS_CLUSTER_NAME} --resource-group ${YOUR_RESOURCE_GROUP} --query "oidcIssuerProfile.issuerUrl" --output tsv)"
+```
+
+#### Create a User-Assigned Managed Identity
+
+Create a new managed identity to associate with the backup and restore component:
+
+```shell
+az identity create --name backup-restore-identity --resource-group ${YOUR_RESOURCE_GROUP} --location ${YOUR_LOCATION}
+```
+
+Retrieve the client ID and store it as an environment variable:
+
+```shell
+export USER_ASSIGNED_CLIENT_ID="$(az identity show --name backup-restore-identity --resource-group ${YOUR_RESOURCE_GROUP} --query clientId -otsv)"
+```
+
+Grant the managed identity you created access to your Azure Storage account:
+
+```shell
+az role assignment create \
+  --role "Storage Blob Data Contributor" \
+  --assignee ${USER_ASSIGNED_CLIENT_ID} \
+  --scope /subscriptions/${YOUR_SUBSCRIPTION_ID}/resourceGroups/${YOUR_RESOURCE_GROUP}/providers/Microsoft.Storage/storageAccounts/${YOUR_STORAGE_ACCOUNT}
+```
+
+#### Apply the managed identity role
+
+In your control plane, pass the `--set` flag with the Spaces Helm chart
+parameters for the backup and restore component:
+
+```shell
+--set controlPlanes.mxpController.serviceAccount.annotations."azure\.workload\.identity/client-id"="${USER_ASSIGNED_CLIENT_ID}"
+--set controlPlanes.mxpController.pod.customLabels."azure\.workload\.identity/use"="true"
+```
+
+#### Create a Federated Identity credential
+
+```shell
+az identity federated-credential create \
+  --name backup-restore-federated-identity \
+  --identity-name backup-restore-identity \
+  --resource-group ${YOUR_RESOURCE_GROUP} \
+  --issuer ${AKS_OIDC_ISSUER} \
+  --subject system:serviceaccount:${YOUR_CONTROL_PLANE_NAMESPACE}:mxp-controller
+```
+
+
+
+
+
+Upbound supports workload-identity configurations in GCP with IAM principal
+identifiers and service account impersonation.
+ +#### Prepare your cluster + +First, enable Workload Identity Federation on your GKE cluster: + +```shell +gcloud container clusters update ${YOUR_CLUSTER_NAME} \ + --workload-pool=${YOUR_PROJECT_ID}.svc.id.goog \ + --region=${YOUR_REGION} +``` + +#### Create a Google Service Account + +Create a service account for the backup and restore component: + +```shell +gcloud iam service-accounts create backup-restore-sa \ + --display-name "Backup Restore Service Account" \ + --project ${YOUR_PROJECT_ID} +``` + +Grant the service account access to your Google Cloud Storage bucket: + +```shell +gcloud projects add-iam-policy-binding ${YOUR_PROJECT_ID} \ + --member "serviceAccount:backup-restore-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com" \ + --role "roles/storage.objectAdmin" +``` + +#### Configure Workload Identity + +Create an IAM binding to grant the Kubernetes service account access to the Google service account: + +```shell +gcloud iam service-accounts add-iam-policy-binding \ + backup-restore-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com \ + --role roles/iam.workloadIdentityUser \ + --member "serviceAccount:${YOUR_PROJECT_ID}.svc.id.goog[${YOUR_CONTROL_PLANE_NAMESPACE}/mxp-controller]" +``` + +#### Apply the service account configuration + +In your control plane, pass the `--set` flag with the Spaces Helm chart +parameters for the backup and restore component: + +```shell +--set controlPlanes.mxpController.serviceAccount.annotations."iam\.gke\.io/gcp-service-account"="backup-restore-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com" +``` + + + +## Verify your configuration + +After you apply the configuration use `kubectl` to verify the service account +has the correct annotation: + +```shell +kubectl get serviceaccount mxp-controller -n ${YOUR_CONTROL_PLANE_NAMESPACE} -o yaml +``` + +Verify the `mxp-controller` pod is running: + +```shell +kubectl get pods -n ${YOUR_CONTROL_PLANE_NAMESPACE} | grep mxp-controller +``` + +## Restart workload + +You must manually restart a workload's pod when you add the workload identity annotations to the running pod's service account. + + + +This restart enables the EKS pod identity webhook to inject the necessary +environment for using IRSA. + + + + + +This restart enables the workload identity webhook to inject the necessary +environment for using Azure workload identity. + + + + + +This restart enables the workload identity webhook to inject the necessary +environment for using GCP workload identity. + + + +```shell +kubectl rollout restart deployment mxp-controller -n ${YOUR_CONTROL_PLANE_NAMESPACE} +``` + +## Use cases + + +Configuring backup and restore with workload identity eliminates the need for +static credentials in your cluster and the overhead of credential rotation. +These benefits are helpful in: + +* Disaster recovery scenarios +* Control plane migration +* Compliance requirements +* Rollbacks after unsuccessful upgrades + +## Next steps + +Now that you have a workload identity configured for the backup and restore +component, visit the [Backup Configuration][backup-restore-guide] documentation. 
+
+Other workload identity guides are:
+* [Billing][billing]
+* [Shared Secrets][secrets]
+
+[backup-restore-guide]: /spaces/howtos/backup-and-restore
+[billing]: /spaces/howtos/self-hosted/workload-id/billing-config
+[secrets]: /spaces/howtos/self-hosted/workload-id/eso-config
diff --git a/spaces_versioned_docs/version-v1.11/howtos/self-hosted/workload-id/billing-config.md b/spaces_versioned_docs/version-v1.11/howtos/self-hosted/workload-id/billing-config.md
new file mode 100644
index 000000000..323a6122f
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.11/howtos/self-hosted/workload-id/billing-config.md
@@ -0,0 +1,454 @@
+---
+title: Billing Workload ID
+weight: 1
+description: Configure workload identity for Spaces Billing
+---
+import GlobalLanguageSelector, { CodeBlock } from '@site/src/components/GlobalLanguageSelector';
+
+
+
+
+
+
+
+Workload-identity authentication lets you use access policies to grant your
+self-hosted Space cluster access to your cloud providers. Workload identity
+authentication grants temporary AWS credentials to your Kubernetes pod based on
+a service account. Assigning IAM roles and service accounts allows the pod to
+assume the IAM role dynamically and much more securely than static credentials.
+
+This guide walks you through creating an IAM trust role policy and applying it to your EKS
+cluster for billing in your Space cluster.
+
+
+
+
+
+Workload-identity authentication lets you use access policies to grant your
+self-hosted Space cluster access to your cloud providers. Workload identity
+authentication grants temporary Azure credentials to your Kubernetes pod based on
+a service account. Assigning managed identities and service accounts allows the pod to
+authenticate with Azure resources dynamically and much more securely than static credentials.
+
+This guide walks you through creating a managed identity and federated credential for your AKS
+cluster for billing in your Space cluster.
+
+
+
+
+
+Workload-identity authentication lets you use access policies to grant your
+self-hosted Space cluster access to your cloud providers. Workload identity
+authentication grants temporary GCP credentials to your Kubernetes pod based on
+a service account. Assigning IAM roles and service accounts allows the pod to
+access cloud resources dynamically and much more securely than static
+credentials.
+
+This guide walks you through configuring workload identity for your GKE
+cluster's billing component.
+
+
+
+## Prerequisites
+
+
+To set up a workload-identity, you'll need:
+
+
+- A self-hosted Space cluster
+- Administrator access in your cloud provider
+- Helm and `kubectl`
+
+## About the billing component
+
+The `vector.dev` component handles billing metrics collection in Spaces. It
+stores account data in your cloud storage. By default, this component runs in
+each control plane's host namespace.
+
+## Configuration
+
+
+
+Upbound supports workload-identity configurations in AWS with IAM Roles for
+Service Accounts and EKS pod identity association.
+
+#### IAM Roles for Service Accounts (IRSA)
+
+With IRSA, you can associate a Kubernetes service account in an EKS cluster with
+an AWS IAM role. Upbound authenticates workloads with that service account as
+the IAM role using temporary credentials instead of static role credentials.
+IRSA relies on the AWS STS `AssumeRoleWithWebIdentity` operation to exchange OIDC ID tokens for
+the IAM role's temporary credentials. IRSA uses the `eks.amazonaws.com/role-arn`
+annotation to link the service account and the IAM role.
+
+**Create an IAM role and trust policy**
+
+First, create an IAM role with appropriate permissions to access your S3 bucket:
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Action": [
+        "s3:GetObject",
+        "s3:PutObject",
+        "s3:ListBucket",
+        "s3:DeleteObject"
+      ],
+      "Resource": [
+        "arn:aws:s3:::${YOUR_BILLING_BUCKET}",
+        "arn:aws:s3:::${YOUR_BILLING_BUCKET}/*"
+      ]
+    }
+  ]
+}
+```
+
+You must configure the IAM role trust policy with the exact match for each
+provisioned control plane. An example of a trust policy for a single control
+plane is below:
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Principal": {
+        "Federated": "arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:oidc-provider/${YOUR_OIDC_PROVIDER}"
+      },
+      "Action": "sts:AssumeRoleWithWebIdentity",
+      "Condition": {
+        "StringEquals": {
+          "${YOUR_OIDC_PROVIDER}:aud": "sts.amazonaws.com",
+          "${YOUR_OIDC_PROVIDER}:sub": "system:serviceaccount:${YOUR_NAMESPACE}:vector"
+        }
+      }
+    }
+  ]
+}
+```
+
+**Configure the EKS OIDC provider**
+
+Next, ensure your EKS cluster has an OIDC identity provider:
+
+```shell
+eksctl utils associate-iam-oidc-provider --cluster ${YOUR_CLUSTER_NAME} --approve
+```
+
+**Apply the IAM role**
+
+In your control plane, pass the `--set` flag with the Spaces Helm chart
+parameters for the Billing component:
+
+```shell
+--set "billing.enabled=true"
+--set "billing.storage.provider=aws"
+--set "billing.storage.aws.region=${YOUR_AWS_REGION}"
+--set "billing.storage.aws.bucket=${YOUR_BILLING_BUCKET}"
+--set "billing.storage.secretRef.name="
+--set controlPlanes.vector.serviceAccount.customAnnotations."eks\.amazonaws\.com/role-arn"="arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:role/${YOUR_BILLING_ROLE_NAME}"
+```
+
+:::important
+You **must** set the `billing.storage.secretRef.name` to an empty string to
+enable workload identity for the billing component.
+:::
+
+#### EKS pod identities
+
+Upbound also supports EKS Pod Identity configuration. EKS Pod Identities allow
+you to create a pod identity association with your Kubernetes namespace, a
+service account, and an IAM role, which allows the EKS control plane to
+automatically handle the credential exchange.
+
+**Create an IAM role**
+
+First, create an IAM role with appropriate permissions to access your S3 bucket:
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Action": [
+        "s3:GetObject",
+        "s3:PutObject",
+        "s3:ListBucket"
+      ],
+      "Resource": [
+        "arn:aws:s3:::${YOUR_BILLING_BUCKET}",
+        "arn:aws:s3:::${YOUR_BILLING_BUCKET}/*"
+      ]
+    }
+  ]
+}
+```
+
+**Configure your Space with Helm**
+
+When you install or upgrade your Space with Helm, add the billing values:
+
+```shell
+helm upgrade spaces spaces-helm-chart \
+  --set "billing.enabled=true" \
+  --set "billing.storage.provider=aws" \
+  --set "billing.storage.aws.region=${YOUR_AWS_REGION}" \
+  --set "billing.storage.aws.bucket=${YOUR_BILLING_BUCKET}" \
+  --set "billing.storage.secretRef.name="
+```
+
+**Create a Pod Identity Association**
+
+After Upbound provisions your control plane, create a Pod Identity Association
+with the `aws` CLI:
+
+```shell
+aws eks create-pod-identity-association \
+  --cluster-name ${YOUR_CLUSTER_NAME} \
+  --namespace ${YOUR_CONTROL_PLANE_NAMESPACE} \
+  --service-account vector \
+  --role-arn arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:role/${YOUR_BILLING_ROLE_NAME}
+```
+
+
+
+
+
+Upbound supports workload-identity configurations in Azure with Azure's built-in
+workload identity feature.
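+
+To double-check the trust relationship on the role, you can read the policy back. A quick check, assuming the role name used above:
+
+```shell
+# Print the role's trust (assume-role) policy document
+aws iam get-role \
+  --role-name ${YOUR_BILLING_ROLE_NAME} \
+  --query 'Role.AssumeRolePolicyDocument'
+```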
+
+First, enable the OIDC issuer and workload identity in your AKS cluster:
+
+```shell
+az aks update --resource-group ${YOUR_RESOURCE_GROUP} --name ${YOUR_AKS_CLUSTER_NAME} --enable-oidc-issuer --enable-workload-identity
+```
+
+Next, find and store the OIDC issuer URL as an environment variable:
+
+```shell
+export AKS_OIDC_ISSUER="$(az aks show --name ${YOUR_AKS_CLUSTER_NAME} --resource-group ${YOUR_RESOURCE_GROUP} --query "oidcIssuerProfile.issuerUrl" --output tsv)"
+```
+
+Create a new managed identity to associate with the billing component:
+
+```shell
+az identity create --name billing-identity --resource-group ${YOUR_RESOURCE_GROUP} --location ${YOUR_LOCATION}
+```
+
+Retrieve the client ID and store it as an environment variable:
+
+```shell
+export USER_ASSIGNED_CLIENT_ID="$(az identity show --name billing-identity --resource-group ${YOUR_RESOURCE_GROUP} --query clientId -otsv)"
+```
+
+Grant the managed identity you created access to your Azure Storage account:
+
+```shell
+az role assignment create --role "Storage Blob Data Contributor" --assignee $USER_ASSIGNED_CLIENT_ID --scope /subscriptions/${YOUR_SUBSCRIPTION_ID}/resourceGroups/${YOUR_RESOURCE_GROUP}/providers/Microsoft.Storage/storageAccounts/${YOUR_STORAGE_ACCOUNT}
+```
+
+In your control plane, pass the `--set` flag with the Spaces Helm chart
+parameters for the billing component:
+
+```shell
+--set "billing.enabled=true"
+--set "billing.storage.provider=azure"
+--set "billing.storage.azure.storageAccount=${SPACES_BILLING_STORAGE_ACCOUNT}"
+--set "billing.storage.azure.container=${SPACES_BILLING_STORAGE_CONTAINER}"
+--set "billing.storage.secretRef.name="
+--set controlPlanes.vector.serviceAccount.customAnnotations."azure\.workload\.identity/client-id"="${USER_ASSIGNED_CLIENT_ID}"
+--set controlPlanes.vector.pod.customLabels."azure\.workload\.identity/use"="true"
+```
+
+Create a federated credential to establish trust between the managed identity
+and your AKS OIDC provider:
+
+```shell
+az identity federated-credential create \
+  --name billing-federated-identity \
+  --identity-name billing-identity \
+  --resource-group ${YOUR_RESOURCE_GROUP} \
+  --issuer ${AKS_OIDC_ISSUER} \
+  --subject system:serviceaccount:${YOUR_CONTROL_PLANE_NAMESPACE}:vector
+```
+
+
+
+
+
+Upbound supports workload-identity configurations in GCP with IAM principal
+identifiers or service account impersonation.
+
+#### IAM principal identifiers
+
+IAM principal identifiers allow you to grant permissions directly to
+Kubernetes service accounts without additional annotation. Upbound recommends
+this approach for ease-of-use and flexibility.
+
+First, enable Workload Identity Federation on your GKE cluster:
+
+```shell
+gcloud container clusters update ${YOUR_CLUSTER_NAME} \
+  --workload-pool=${YOUR_PROJECT_ID}.svc.id.goog \
+  --region=${YOUR_REGION}
+```
+
+Next, configure your Spaces installation with the Spaces Helm chart parameters:
+
+```shell
+--set "billing.enabled=true"
+--set "billing.storage.provider=gcp"
+--set "billing.storage.gcp.bucket=${YOUR_BILLING_BUCKET}"
+--set "billing.storage.secretRef.name="
+```
+
+:::important
+You **must** set the `billing.storage.secretRef.name` to an empty string to
+enable workload identity for the billing component.
+::: + +Grant the necessary permissions to your Kubernetes service account: + +```shell +gcloud projects add-iam-policy-binding ${YOUR_PROJECT_ID} \ + --member="principalSet://iam.googleapis.com/projects/${YOUR_PROJECT_NUMBER}/locations/global/workloadIdentityPools/${YOUR_PROJECT_ID}.svc.id.goog/attribute.kubernetes_namespace/${YOUR_CONTROL_PLANE_NAMESPACE}/attribute.kubernetes_service_account/vector" \ + --role="roles/storage.objectAdmin" +``` + +Enable uniform bucket-level access on your storage bucket: + +```shell +gcloud storage buckets update gs://${YOUR_BILLING_BUCKET} --uniform-bucket-level-access +``` + +#### Service account impersonation + +Service account impersonation allows you to link a Kubernetes service account to +a GCP service account. The Kubernetes service account assumes the permissions of +the GCP service account you specify. + +Enable workload id federation on your GKE cluster: + +```shell +gcloud container clusters update ${YOUR_CLUSTER_NAME} \ + --workload-pool=${YOUR_PROJECT_ID}.svc.id.goog \ + --region=${YOUR_REGION} +``` + +Next, create a dedicated service account for your billing operations: + +```shell +gcloud iam service-accounts create billing-sa \ + --project=${YOUR_PROJECT_ID} +``` + +Grant storage permissions to the service account you created: + +```shell +gcloud projects add-iam-policy-binding ${YOUR_PROJECT_ID} \ + --member="serviceAccount:billing-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com" \ + --role="roles/storage.objectAdmin" +``` + +Link the Kubernetes service account to the GCP service account: + +```shell +gcloud iam service-accounts add-iam-policy-binding \ + billing-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com \ + --role="roles/iam.workloadIdentityUser" \ + --member="serviceAccount:${YOUR_PROJECT_ID}.svc.id.goog[${YOUR_CONTROL_PLANE_NAMESPACE}/vector]" +``` + +In your control plane, pass the `--set` flag with the Spaces Helm chart +parameters for the billing component: + +```shell +--set "billing.enabled=true" +--set "billing.storage.provider=gcp" +--set "billing.storage.gcp.bucket=${YOUR_BILLING_BUCKET}" +--set "billing.storage.secretRef.name=" +--set controlPlanes.vector.serviceAccount.customAnnotations."iam\.gke\.io/gcp-service-account"="billing-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com" +``` + + + +## Verify your configuration + +After you apply the configuration use `kubectl` to verify the service account +has the correct annotation: + +```shell +kubectl get serviceaccount vector -n ${YOUR_CONTROL_PLANE_NAMESPACE} -o yaml +``` + +Verify the `vector` pod is running: + +```shell +kubectl get pods -n ${YOUR_CONTROL_PLANE_NAMESPACE} | grep vector +``` + +## Restart workload + + + +You must manually restart a workload's pod when you add the +`eks.amazonaws.com/role-arn key` annotation to the running pod's service +account. + +This restart enables the EKS pod identity webhook to inject the necessary +environment for using IRSA. + + + + + +You must manually restart a workload's pod when you add the workload identity annotations to the running pod's service account. + +This restart enables the workload identity webhook to inject the necessary +environment for using Azure workload identity. + + + + + +GCP workload identity doesn't require pod restarts after configuration changes. 
+If you do need to restart the workload, use the `kubectl` command to force the +component restart: + + + +```shell +kubectl rollout restart deployment vector +``` + + +## Use cases + + +Using workload identity authentication for billing eliminates the need for static +credentials in your cluster as well as the overhead of credential rotation. +These benefits are helpful in: + +* Resource usage tracking across teams/projects +* Cost allocation for multi-tenant environments +* Financial auditing requirements +* Capacity billing and resource optimization +* Automated billing workflows + +## Next steps + +Now that you have workload identity configured for the billing component, visit +the [Billing guide][billing-guide] for more information. + +Other workload identity guides are: +* [Backup and restore][backuprestore] +* [Shared Secrets][secrets] + +[billing-guide]: /spaces/howtos/self-hosted/billing +[backuprestore]: /spaces/howtos/self-hosted/workload-id/backup-restore-config +[secrets]: /spaces/howtos/self-hosted/workload-id/eso-config diff --git a/spaces_versioned_docs/version-v1.11/howtos/self-hosted/workload-id/eso-config.md b/spaces_versioned_docs/version-v1.11/howtos/self-hosted/workload-id/eso-config.md new file mode 100644 index 000000000..c1418c171 --- /dev/null +++ b/spaces_versioned_docs/version-v1.11/howtos/self-hosted/workload-id/eso-config.md @@ -0,0 +1,503 @@ +--- +title: Shared Secrets Workload ID +weight: 1 +description: Configure workload identity for Spaces Shared Secrets +--- +import GlobalLanguageSelector, { CodeBlock } from '@site/src/components/GlobalLanguageSelector'; + + + + + + + +Workload-identity authentication lets you use access policies to grant your +self-hosted Space cluster access to your cloud providers. Workload identity +authentication grants temporary AWS credentials to your Kubernetes pod based on +a service account. Assigning IAM roles and service accounts allows the pod to +assume the IAM role dynamically and much more securely than static credentials. + +This guide walks you through creating an IAM trust role policy and applying it to your EKS +cluster for secret sharing with Kubernetes. + + + + + +Workload-identity authentication lets you use access policies to grant your +self-hosted Space cluster access to your cloud providers. Workload identity +authentication grants temporary Azure credentials to your Kubernetes pod based on +a service account. Assigning managed identities and service accounts allows the pod to +authenticate with Azure resources dynamically and much more securely than static credentials. + +This guide walks you through creating a managed identity and federated credential for your AKS +cluster for shared secrets in your Space cluster. + + + + + +Workload-identity authentication lets you use access policies to grant your +self-hosted Space cluster access to your cloud providers. Workload identity +authentication grants temporary GCP credentials to your Kubernetes pod based on +a service account. Assigning IAM roles and service accounts allows the pod to +access cloud resources dynamically and much more securely than static +credentials. + +This guide walks you through configuring workload identity for your GKE +cluster's Shared Secrets component. 
+ + + +## Prerequisites + + +To set up a workload-identity, you'll need: + + +- A self-hosted Space cluster +- Administrator access in your cloud provider +- Helm and `kubectl` + + +## About the Shared Secrets component + + + + +The External Secrets Operator (ESO) runs in each control plane's host namespace as `external-secrets-controller`. It needs to access +your external secrets management service like AWS Secrets Manager. + +To configure your shared secrets workflow controller, you must: + +* Annotate the Kubernetes service account to associate it with a cloud-side + principal (such as an IAM role, service account, or enterprise application). The workload must then + use this service account. +* Label the workload (pod) to allow the injection of a temporary credential set, + enabling authentication. + + + + + +The External Secrets Operator (ESO) component runs in each control plane's host +namespace as `external-secrets-controller`. It synchronizes secrets from +external APIs into Kubernetes secrets. Shared secrets allow you to manage +credentials outside your Kubernetes cluster while making them available to your +application + + + + + +The External Secrets Operator (ESO) component runs in each control plane's host +namespace as `external-secrets-controller`. It synchronizes secrets from +external APIs into Kubernetes secrets. Shared secrets allow you to manage +credentials outside your Kubernetes cluster while making them available to your +application + + + +## Configuration + + + +Upbound supports workload-identity configurations in AWS with IAM Roles for +Service Accounts or EKS pod identity association. + +#### IAM Roles for Service Accounts (IRSA) + +With IRSA, you can associate a Kubernetes service account in an EKS cluster with +an AWS IAM role. Upbound authenticates workloads with that service account as +the IAM role using temporary credentials instead of static role credentials. +IRSA relies on AWS `AssumeRoleWithWebIdentity` `STS` to exchange OIDC ID tokens with +the IAM role's temporary credentials. IRSA uses the `eks.amazon.aws/role-arn` +annotation to link the service account and the IAM role. + +**Create an IAM role and trust policy** + +First, create an IAM role with appropriate permissions to access AWS Secrets Manager: + +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "secretsmanager:GetSecretValue", + "secretsmanager:DescribeSecret", + "ssm:GetParameter" + ], + "Resource": [ + "arn:aws:secretsmanager:${YOUR_REGION}:${YOUR_AWS_ACCOUNT_ID}:secret:${YOUR_SECRET_PREFIX}*", + "arn:aws:ssm:${YOUR_REGION}:${YOUR_AWS_ACCOUNT_ID}:parameter/${YOUR_PARAMETER_PREFIX}*" + ] + } + ] +} +``` + +You must configure the IAM role trust policy with the exact match for each +provisioned control plane. 
An example of a trust policy for a single control
+plane is below:
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Principal": {
+        "Federated": "arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:oidc-provider/${YOUR_OIDC_PROVIDER}"
+      },
+      "Action": "sts:AssumeRoleWithWebIdentity",
+      "Condition": {
+        "StringEquals": {
+          "${YOUR_OIDC_PROVIDER}:aud": "sts.amazonaws.com"
+        },
+        "StringLike": {
+          "${YOUR_OIDC_PROVIDER}:sub": "system:serviceaccount:*:external-secrets-controller"
+        }
+      }
+    }
+  ]
+}
+```
+
+**Configure the EKS OIDC provider**
+
+Next, ensure your EKS cluster has an OIDC identity provider:
+
+```shell
+eksctl utils associate-iam-oidc-provider --cluster ${YOUR_CLUSTER_NAME} --approve
+```
+
+**Apply the IAM role**
+
+In your control plane, pass the `--set` flag with the Spaces Helm chart
+parameters for the shared secrets component:
+
+```shell
+--set controlPlanes.sharedSecrets.serviceAccount.customAnnotations."eks\.amazonaws\.com/role-arn"="arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:role/${YOUR_ESO_ROLE_NAME}"
+```
+
+This command allows the shared secrets component to authenticate with your
+dedicated IAM role in your EKS cluster environment.
+
+#### EKS pod identities
+
+Upbound also supports EKS Pod Identity configuration. EKS Pod Identities allow
+you to create a pod identity association with your Kubernetes namespace, a
+service account, and an IAM role, which allows the EKS control plane to
+automatically handle the credential exchange.
+
+**Create an IAM role**
+
+First, create an IAM role with appropriate permissions to access AWS Secrets Manager:
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Action": [
+        "secretsmanager:GetSecretValue",
+        "secretsmanager:DescribeSecret",
+        "ssm:GetParameter"
+      ],
+      "Resource": [
+        "arn:aws:secretsmanager:${YOUR_AWS_REGION}:${YOUR_AWS_ACCOUNT_ID}:secret:${YOUR_SECRET_PREFIX}*",
+        "arn:aws:ssm:${YOUR_AWS_REGION}:${YOUR_AWS_ACCOUNT_ID}:parameter/${YOUR_PARAMETER_PREFIX}*"
+      ]
+    }
+  ]
+}
+```
+
+**Configure your Space with Helm**
+
+When you install or upgrade your Space with Helm, add the shared secrets value:
+
+```shell
+helm upgrade spaces spaces-helm-chart \
+  --set "sharedSecrets.enabled=true"
+```
+
+**Create a Pod Identity Association**
+
+After Upbound provisions your control plane, create a Pod Identity Association
+with the `aws` CLI:
+
+```shell
+aws eks create-pod-identity-association \
+  --cluster-name ${YOUR_CLUSTER_NAME} \
+  --namespace ${YOUR_CONTROL_PLANE_NAMESPACE} \
+  --service-account external-secrets-controller \
+  --role-arn arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:role/${YOUR_ROLE_NAME}
+```
+
+
+
+
+
+Upbound supports workload-identity configurations in Azure with Azure's built-in
+workload identity feature.
+
+First, enable the OIDC issuer and workload identity in your AKS cluster:
+
+```shell
+az aks update --resource-group ${YOUR_RESOURCE_GROUP} --name ${YOUR_AKS_CLUSTER_NAME} --enable-oidc-issuer --enable-workload-identity
+```
+
+Next, find and store the OIDC issuer URL as an environment variable:
+
+```shell
+export AKS_OIDC_ISSUER="$(az aks show --name ${YOUR_AKS_CLUSTER_NAME} --resource-group ${YOUR_RESOURCE_GROUP} --query "oidcIssuerProfile.issuerUrl" --output tsv)"
+```
+
+Create a new managed identity to associate with the shared secrets component:
+
+```shell
+az identity create --name secrets-identity --resource-group ${YOUR_RESOURCE_GROUP} --location ${YOUR_LOCATION}
+```
+
+Retrieve the client ID and store it as an environment variable:
+
+```shell
+export USER_ASSIGNED_CLIENT_ID="$(az identity show --name secrets-identity --resource-group ${YOUR_RESOURCE_GROUP} --query clientId -otsv)"
+```
+
+Grant the managed identity you created access to your Azure Key Vault:
+
+```shell
+az keyvault set-policy --name ${YOUR_KEY_VAULT_NAME} \
+  --resource-group ${YOUR_RESOURCE_GROUP} \
+  --object-id $(az identity show --name secrets-identity --resource-group ${YOUR_RESOURCE_GROUP} --query principalId -otsv) \
+  --secret-permissions get list
+```
+
+In your control plane, pass the `--set` flag with the Spaces Helm chart
+parameters for the shared secrets component:
+
+```shell
+--set controlPlanes.sharedSecrets.serviceAccount.customAnnotations."azure\.workload\.identity/client-id"="${USER_ASSIGNED_CLIENT_ID}"
+--set controlPlanes.sharedSecrets.pod.customLabels."azure\.workload\.identity/use"="true"
+```
+
+Next, create a federated credential to establish trust between the managed identity
+and your AKS OIDC provider:
+
+```shell
+az identity federated-credential create \
+  --name secrets-federated-identity \
+  --identity-name secrets-identity \
+  --resource-group ${YOUR_RESOURCE_GROUP} \
+  --issuer ${AKS_OIDC_ISSUER} \
+  --subject system:serviceaccount:${YOUR_CONTROL_PLANE_NAMESPACE}:external-secrets-controller
+```
+
+
+
+
+
+Upbound supports workload-identity configurations in GCP with IAM principal
+identifiers or service account impersonation.
+
+#### IAM principal identifiers
+
+IAM principal identifiers allow you to grant permissions directly to
+Kubernetes service accounts without additional annotation. Upbound recommends
+this approach for ease-of-use and flexibility.
+
+First, enable Workload Identity Federation on your GKE cluster:
+
+```shell
+gcloud container clusters update ${YOUR_CLUSTER_NAME} \
+  --workload-pool=${YOUR_PROJECT_ID}.svc.id.goog \
+  --region=${YOUR_REGION}
+```
+
+Next, grant the necessary permissions to your Kubernetes service account:
+
+```shell
+gcloud projects add-iam-policy-binding ${YOUR_PROJECT_ID} \
+  --member="principalSet://iam.googleapis.com/projects/${YOUR_PROJECT_NUMBER}/locations/global/workloadIdentityPools/${YOUR_PROJECT_ID}.svc.id.goog/attribute.kubernetes_namespace/${YOUR_CONTROL_PLANE_NAMESPACE}/attribute.kubernetes_service_account/external-secrets-controller" \
+  --role="roles/secretmanager.secretAccessor"
+```
+
+#### Service account impersonation
+
+Service account impersonation allows you to link a Kubernetes service account to
+a GCP service account. The Kubernetes service account assumes the permissions of
+the GCP service account you specify.
+
+Enable workload identity federation on your GKE cluster:
+
+```shell
+gcloud container clusters update ${YOUR_CLUSTER_NAME} \
+  --workload-pool=${YOUR_PROJECT_ID}.svc.id.goog \
+  --region=${YOUR_REGION}
+```
+
+Next, create a dedicated service account for your secrets operations:
+
+```shell
+gcloud iam service-accounts create secrets-sa \
+  --project=${YOUR_PROJECT_ID}
+```
+
+Grant secret access permissions to the service account you created:
+
+```shell
+gcloud projects add-iam-policy-binding ${YOUR_PROJECT_ID} \
+  --member="serviceAccount:secrets-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com" \
+  --role="roles/secretmanager.secretAccessor"
+```
+
+Link the Kubernetes service account to the GCP service account:
+
+```shell
+gcloud iam service-accounts add-iam-policy-binding \
+  secrets-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com \
+  --role="roles/iam.workloadIdentityUser" \
+  --member="serviceAccount:${YOUR_PROJECT_ID}.svc.id.goog[${YOUR_CONTROL_PLANE_NAMESPACE}/external-secrets-controller]"
+```
+
+In your control plane, pass the `--set` flag with the Spaces Helm chart
+parameters for the shared secrets component:
+
+```shell
+--set controlPlanes.sharedSecrets.serviceAccount.customAnnotations."iam\.gke\.io/gcp-service-account"="secrets-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com"
+```
+
+
+
+## Verify your configuration
+
+After you apply the configuration, use `kubectl` to verify the service account
+has the correct annotation:
+
+```shell
+kubectl get serviceaccount external-secrets-controller -n ${YOUR_CONTROL_PLANE_NAMESPACE} -o yaml
+```
+
+
+
+Verify the `external-secrets` pod is running correctly:
+
+```shell
+kubectl get pods -n ${YOUR_CONTROL_PLANE_NAMESPACE} | grep external-secrets
+```
+
+
+
+
+
+Verify the External Secrets Operator pod is running correctly:
+
+```shell
+kubectl get pods -n ${YOUR_CONTROL_PLANE_NAMESPACE} | grep external-secrets
+```
+
+
+
+
+
+Verify the `external-secrets` pod is running correctly:
+
+```shell
+kubectl get pods -n ${YOUR_CONTROL_PLANE_NAMESPACE} | grep external-secrets
+```
+
+
+
+## Restart workload
+
+
+
+You must manually restart a workload's pod when you add the
+`eks.amazonaws.com/role-arn` annotation to the running pod's service
+account.
+
+This restart enables the EKS pod identity webhook to inject the necessary
+environment for using IRSA.
+
+
+
+
+
+You must manually restart a workload's pod when you add the workload identity annotations to the running pod's service account.
+
+This restart enables the workload identity webhook to inject the necessary
+environment for using Azure workload identity.
+
+
+
+
+
+GCP workload identity doesn't require pod restarts after configuration changes.
+If you do need to restart the workload, use the `kubectl` command to force the
+component restart:
+
+
+
+```shell
+kubectl rollout restart deployment external-secrets -n ${YOUR_CONTROL_PLANE_NAMESPACE}
+```
+
+## Use cases
+
+
+
+
+Configuring shared secrets with workload identity eliminates the need for static credentials
+in your cluster. These benefits are particularly helpful in:
+
+* Secure application credentials management
+* Database connection string storage
+* API token management
+* Compliance with secret rotation security standards
+* Multi-environment configuration with centralized secret management
+
+
+
+
+
+Using workload identity authentication for shared secrets eliminates the need for static
+credentials in your cluster as well as the overhead of credential rotation.
+These benefits are particularly helpful in:
+
+* Secure application credentials management
+* Database connection string storage
+* API token management
+* Compliance with secret rotation security standards
+
+
+
+
+
+Configuring the external secrets operator with workload identity eliminates the need for
+static credentials in your cluster and the overhead of credential rotation.
+These benefits are particularly helpful in:
+
+* Secure application credentials management
+* Database connection string storage
+* API token management
+* Compliance with secret rotation security standards
+
+
+
+## Next steps
+
+Now that you have workload identity configured for the shared secrets component, visit
+the [Shared Secrets][eso-guide] guide for more information.
+
+Other workload identity guides are:
+* [Backup and restore][backuprestore]
+* [Billing][billing]
+
+[eso-guide]: /spaces/howtos/secrets-management
+[backuprestore]: /spaces/howtos/self-hosted/workload-id/backup-restore-config
+[billing]: /spaces/howtos/self-hosted/workload-id/billing-config
diff --git a/spaces_versioned_docs/version-v1.11/howtos/simulations.md b/spaces_versioned_docs/version-v1.11/howtos/simulations.md
new file mode 100644
index 000000000..26cb0e657
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.11/howtos/simulations.md
@@ -0,0 +1,110 @@
+---
+title: Simulate changes to your Control Plane Projects
+sidebar_position: 100
+description: Use the Up CLI to mock operations before deploying to your environments.
+---
+
+:::info API Version Information
+This guide covers Simulations, available in v1.10+ (GA since v1.13). For version-specific availability and features, see the .
+
+For API specifications on the Simulations CRD, see the [Spaces API Reference](../../../reference/apis/spaces-api/).
+:::
+
+:::important
+The Simulations feature is in private preview. For more information, [reach out to Upbound][reach-out-to-upbound].
+:::
+
+Control plane simulations allow you to preview changes to your resources before
+applying them to your control planes. Like a plan or dry-run operation,
+simulations expose the impact of updates to compositions or claims without
+changing your actual resources.
+
+A control plane simulation creates a temporary copy of your control plane and
+returns a preview of the desired changes. The simulation change plan helps you
+reduce the risk of unexpected behavior based on your changes.
+
+## Simulation benefits
+
+Control planes are dynamic systems that automatically reconcile resources to
+match your desired state. Simulations provide visibility into this
+reconciliation process by showing:
+
+
+* New resources to create
+* Existing resources to change
+* Existing resources to delete
+* How configuration changes propagate through the system
+
+These insights are crucial when planning complex changes or upgrading Crossplane
+packages.
+
+## Requirements
+
+Simulations are available to select customers on Upbound Cloud with Team
+Tier or higher. For more information, [reach out to Upbound][reach-out-to-upbound-1].
+
+## How to simulate your control planes
+
+Before you start a simulation, build your project and use the `up project run`
+command to run your control plane.
+
+Use the `up project simulate` command with your control plane name to start the
+simulation:
+
+```shell {copy-lines="all"}
+up project simulate <control-plane-name> --complete-after=60s --terminate-on-finish
+```
+
+The `complete-after` flag determines how long to run the simulation before it completes and calculates the results.
Depending on the change, a simulation may not complete within your defined interval, leaving unaffected resources marked as `unchanged`.
+
+The `terminate-on-finish` flag terminates the simulation after the time
+you set, deleting the control plane that ran the simulation.
+
+At the end of your simulation, your CLI returns:
+* A summary of the resources created, modified, or deleted
+* Diffs for each resource affected
+
+## View your simulation in the Upbound Console
+You can also view your simulation results in the Upbound Console:
+
+1. Navigate to your base control plane in the Upbound Console
+2. Select the "Simulations" tab in the menu
+3. Select a simulation object to view a change list of all
+   affected resources.
+
+The Console provides visual indications of changes:
+
+- Created Resources: Marked with green
+- Modified Resources: Marked with yellow
+- Deleted Resources: Marked with red
+- Unchanged Resources: Displayed in gray
+
+![Upbound Console Simulation](/img/simulations.png)
+
+## Considerations
+
+Simulations are a **private preview** feature.
+
+Be aware of the following limitations:
+
+- Simulations can't predict the exact behavior of external systems due to the
+  complexity and non-deterministic reconciliation pattern in Crossplane.
+
+- The only completion criterion for a simulation is time. Your simulation may not
+  receive a conclusive result within that interval. Upbound recommends the
+  default `60s` value.
+
+- Providers don't run in simulations. Simulations can't compose resources that
+  rely on the status of Managed Resources.
+
+
+The Upbound team is working to improve these limitations. Your feedback is always appreciated.
+
+## Next steps
+
+For more information, follow the [tutorial][tutorial] on Simulations.
+
+
+[tutorial]: /manuals/cli/howtos/simulations
+[reach-out-to-upbound]: https://www.upbound.io/contact-us
+[reach-out-to-upbound-1]: https://www.upbound.io/contact-us
diff --git a/spaces_versioned_docs/version-v1.11/overview/_category_.json b/spaces_versioned_docs/version-v1.11/overview/_category_.json
new file mode 100644
index 000000000..54bb16430
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.11/overview/_category_.json
@@ -0,0 +1,4 @@
+{
+  "label": "Overview",
+  "position": 0
+}
diff --git a/spaces_versioned_docs/version-v1.11/overview/index.md b/spaces_versioned_docs/version-v1.11/overview/index.md
new file mode 100644
index 000000000..7b79f6e44
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.11/overview/index.md
@@ -0,0 +1,14 @@
+---
+title: Spaces Overview
+sidebar_position: 0
+---
+
+# Upbound Spaces
+
+Welcome to the Upbound Spaces documentation. This section contains comprehensive documentation for the Spaces API and Spaces operations across all supported versions (v1.9 through v1.15).
+
+## Get Started
+
+- **[Concepts](/spaces/concepts/control-planes)** - Core concepts for Spaces
+- **[How-To Guides](/spaces/howtos/auto-upgrade)** - Step-by-step guides for operating Spaces
+- **[API Reference](/spaces/reference/)** - API specifications and resources
diff --git a/spaces_versioned_docs/version-v1.11/reference/_category_.json b/spaces_versioned_docs/version-v1.11/reference/_category_.json
new file mode 100644
index 000000000..4a6a139c4
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.11/reference/_category_.json
@@ -0,0 +1,5 @@
+{
+  "label": "Spaces API",
+  "position": 1,
+  "collapsed": true
+}
diff --git a/spaces_versioned_docs/version-v1.11/reference/index.md b/spaces_versioned_docs/version-v1.11/reference/index.md
new file mode 100644
index 000000000..5e68b0768
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.11/reference/index.md
@@ -0,0 +1,72 @@
+---
+title: Spaces API Reference
+description: Documentation for the Spaces API resources (v1.11)
+sidebar_position: 1
+---
+import CrdDocViewer from '@site/src/components/CrdViewer';
+
+
+This page documents the Custom Resource Definitions (CRDs) for the Spaces API.
+
+
+## Control Planes
+### Control Planes
+
+
+## Observability
+### Shared Telemetry Configs
+
+
+## `pkg`
+### Controller Revisions
+
+
+### Controller Runtime Configs
+
+
+### Controllers
+
+
+### Remote Configuration Revisions
+
+
+### Remote Configurations
+
+
+## Policy
+### Shared Upbound Policies
+
+
+## References
+### Referenced Objects
+
+
+## Scheduling
+### Environments
+
+
+## Secrets
+### Shared External Secrets
+
+
+### Shared Secret Stores
+
+
+## Simulations
+
+
+## Spaces Backups
+### Backups
+
+
+### Backup Schedules
+
+
+### Shared Backup Configs
+
+
+### Shared Backups
+
+
+### Shared Backup Schedules
+
diff --git a/spaces_versioned_docs/version-v1.12/concepts/_category_.json b/spaces_versioned_docs/version-v1.12/concepts/_category_.json
new file mode 100644
index 000000000..4b8667e29
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.12/concepts/_category_.json
@@ -0,0 +1,7 @@
+{
+  "label": "Concepts",
+  "position": 2,
+  "collapsed": true
+}
+
+
diff --git a/spaces_versioned_docs/version-v1.12/concepts/control-planes.md b/spaces_versioned_docs/version-v1.12/concepts/control-planes.md
new file mode 100644
index 000000000..7066343de
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.12/concepts/control-planes.md
@@ -0,0 +1,227 @@
+---
+title: Control Planes
+weight: 1
+description: An overview of control planes in Upbound
+---
+
+
+Control planes in Upbound are fully isolated Crossplane control plane instances that Upbound manages for you. This means Upbound manages:
+
+- the underlying lifecycle of infrastructure (compute, memory, and storage) required to power your instance.
+- scaling of the infrastructure.
+- the maintenance of the core Crossplane components that make up a control plane.
+
+This lets users focus on building their APIs and operating their control planes, while Upbound handles the rest. Each control plane has its own dedicated API server connecting users to their control plane.
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+).
+
+For detailed information about Control Plane API specifications and CRD fields, see the [Spaces API Reference](../../../reference/apis/spaces-api/). For version compatibility details, see the .
+
+:::
+
+## Control plane architecture
+
+![Managed Control Plane Architecture](/img/mcp.png)
+
+Along with underlying infrastructure, Upbound manages the Crossplane system components. You don't need to manage the Crossplane API server or core resource controllers because Upbound manages your control plane lifecycle from creation to deletion.
+
+### Crossplane API
+
+Each control plane offers a unified endpoint. You interact with your control plane through Kubernetes and Crossplane API calls. Each control plane runs a Kubernetes API server to handle API requests. You can make API calls in the following ways:
+
+- Direct calls: HTTP/gRPC
+- Indirect calls: the up CLI, Kubernetes clients such as kubectl, or the Upbound Console.
+
+Like in Kubernetes, the API server is the hub for all communication for the control plane. All internal components such as system processes and provider controllers act as clients of the API server.
+
+Your API requests tell Crossplane your desired state for the resources your control plane manages. Crossplane attempts to constantly maintain that state. Crossplane lets you configure objects in the API either imperatively or declaratively.
+
+### Crossplane versions and features
+
+Upbound automatically upgrades Crossplane system components on control planes to new Crossplane versions for updated features and improvements in the open source project. With [automatic upgrades][automatic-upgrades], you choose the cadence that Upbound automatically upgrades the system components in your control plane. You can also choose to manually upgrade your control plane to a different Crossplane version.
+
+For detailed information on versions and upgrades, refer to the [release notes][release-notes] and the automatic upgrade documentation. If you don't enroll a control plane in a release channel, Upbound doesn't apply automatic upgrades.
+
+Features considered "alpha" in Crossplane are by default not supported in a control plane unless otherwise specified.
+
+### Hosting environments
+
+Every control plane in Upbound belongs to a [control plane group][control-plane-group]. Control plane groups are a logical grouping of one or more control planes with shared objects (such as secrets or backup configuration). Every group resides in a [Space][space] in Upbound, which are hosting environments for control planes.
+
+Think of a Space as being conceptually the same as an AWS, Azure, or GCP region. Regardless of the Space type you run a control plane in, the core experience is identical.
+
+## Management
+
+### Create a control plane
+
+You can create a new control plane from the Upbound Console, [up CLI][up-cli], or with Kubernetes clients such as `kubectl`.
+
+
+
+
+
+To use the CLI, run the following:
+
+```shell
+up ctp create <control-plane-name>
+```
+
+To learn more about control plane-related commands in `up`, go to the [CLI reference][cli-reference] documentation.
+
+
+
+You can create and manage control planes declaratively in Upbound. Before you
+begin, ensure you're logged into Upbound and set the correct context:
+
+```bash
+up login
+# Example: acmeco/upbound-gcp-us-west-1/default
+up ctx ${yourOrganization}/${yourSpace}/${yourGroup}
+```
+
+```yaml
+#controlplane-a.yaml
+apiVersion: spaces.upbound.io/v1beta1
+kind: ControlPlane
+metadata:
+  name: controlplane-a
+spec:
+  crossplane:
+    autoUpgrade:
+      channel: Rapid
+```
+
+```bash
+kubectl apply -f controlplane-a.yaml
+```
+
+
+
+
+
+### Connect directly to your control plane
+
+Each control plane offers a unified endpoint.
You interact with your control plane through Kubernetes and Crossplane API calls. Each control plane runs a Kubernetes API server to handle API requests.
+
+You can connect to a control plane's API server directly via the up CLI. Use the [`up ctx`][up-ctx] command to set your kubeconfig's current context to a control plane:
+
+```shell
+# Example: acmeco/upbound-gcp-us-west-1/default/ctp1
+up ctx ${yourOrganization}/${yourSpace}/${yourGroup}/${yourControlPlane}
+```
+
+To disconnect from your control plane and revert your kubeconfig's current context to the previous entry, run the following:
+
+```shell
+up ctx ..
+```
+
+You can also generate a `kubeconfig` file for a control plane with [`up ctx -f`][up-ctx-f].
+
+```shell
+up ctx ${yourOrganization}/${yourSpace}/${yourGroup}/${yourControlPlane} -f - > ctp-kubeconfig.yaml
+```
+
+:::tip
+To learn more about how to use `up ctx` to navigate different contexts in Upbound, read the [CLI documentation][cli-documentation].
+:::
+
+## Configuration
+
+When you create a new control plane, Upbound provides you with a fully isolated instance of Crossplane. Configure your control plane by installing packages that extend its capabilities, such as creating and managing the lifecycle of new types of infrastructure resources.
+
+You're encouraged to install any Crossplane package type (Providers, Configurations, Functions) available in the [Upbound Marketplace][upbound-marketplace] on your control planes.
+
+### Install packages
+
+Below are a couple of ways to install Crossplane packages on your control plane.
+
+
+
+
+
+
+Use the `up` CLI to install Crossplane packages from the [Upbound Marketplace][upbound-marketplace-1] on your control planes. Connect directly to your control plane via `up ctx`. Then, to install a provider:
+
+```shell
+up ctp provider install xpkg.upbound.io/upbound/provider-family-aws
+```
+
+To install a Configuration:
+
+```shell
+up ctp configuration install xpkg.upbound.io/upbound/platform-ref-aws
+```
+
+To install a Function:
+
+```shell
+up ctp function install xpkg.upbound.io/crossplane-contrib/function-kcl
+```
+
+
+You can use kubectl to directly apply any Crossplane manifest. Below is an example for installing a Crossplane provider:
+
+```shell
+cat <<EOF | kubectl apply -f -
+apiVersion: pkg.crossplane.io/v1
+kind: Provider
+metadata:
+  name: provider-family-aws
+spec:
+  package: xpkg.upbound.io/upbound/provider-family-aws
+EOF
+```
+
+
+
+For production-grade scenarios, it's recommended that you configure your control plane declaratively via Git plus a Continuous Delivery (CD) engine such as Argo. For guidance on this topic, read [GitOps with control planes][gitops-with-control-planes].
+
+
+
+
+
+
+### Configure Crossplane ProviderConfigs
+
+#### ProviderConfigs with OpenID Connect
+
+Use OpenID Connect (`OIDC`) to authenticate to Upbound control planes without credentials. OIDC lets your control plane exchange short-lived tokens directly with your cloud provider. Read how to [connect control planes to external services][connect-control-planes-to-external-services] to learn more.
+
+#### Generic ProviderConfigs
+
+The Upbound Console doesn't allow direct editing of ProviderConfigs that don't support `Upbound` authentication. To edit these ProviderConfigs on your control plane, connect to the control plane directly by following the instructions in the previous section and using `kubectl`.
+
+### Configure secrets
+
+Upbound gives users the ability to configure the synchronization of secrets from external stores into control planes. Configure this capability at the group-level, explained in the [Spaces documentation][spaces-documentation].
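+
+As a minimal sketch, assuming a Vault server at a hypothetical address and a
+hypothetical `vault-token` secret holding its access token, a group-scoped
+store for shared secrets might look like the following; see the Spaces
+documentation linked above for the full `SharedSecretStore` schema:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: vault-store
+  namespace: default # the control plane group
+spec:
+  # The provider block follows the External Secrets Operator
+  # SecretStore format.
+  provider:
+    vault:
+      server: https://vault.example.com # hypothetical address
+      path: secrets
+      version: v2
+      auth:
+        tokenSecretRef:
+          name: vault-token # hypothetical secret
+          key: token
+  controlPlaneSelector:
+    labelSelectors:
+      - matchLabels:
+          environment: production
+```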
+
+### Configure backups
+
+Upbound gives users the ability to configure backup schedules, take impromptu backups, and conduct self-service restore operations. Configure this capability at the group-level, explained in the [Spaces documentation][spaces-documentation-1].
+
+### Configure telemetry
+
+
+Upbound gives users the ability to configure the collection of telemetry (logs, metrics, and traces) in their control planes. Using Upbound's built-in [OTEL][otel] support, you can stream this data out to your preferred observability solution. Configure this capability at the group-level, explained in the [Spaces documentation][spaces-documentation-2].
+
+
+
+[automatic-upgrades]: /spaces/howtos/auto-upgrade
+[release-notes]: https://github.com/upbound/universal-crossplane/releases
+[control-plane-group]: /spaces/concepts/groups
+[space]: /spaces/overview
+[up-cli]: /reference/cli-reference
+[cli-reference]: /reference/cli-reference
+[up-ctx]: /reference/cli-reference
+[up-ctx-f]: /reference/cli-reference
+[cli-documentation]: /manuals/cli/concepts/contexts
+[upbound-marketplace]: https://marketplace.upbound.io
+[upbound-marketplace-1]: https://marketplace.upbound.io
+[gitops-with-control-planes]: /spaces/howtos/cloud-spaces/gitops
+[connect-control-planes-to-external-services]: /manuals/platform/howtos/oidc
+[spaces-documentation]: /spaces/howtos/secrets-management
+[spaces-documentation-1]: /spaces/howtos/backup-and-restore
+[otel]: https://opentelemetry.io
+[spaces-documentation-2]: /spaces/howtos/observability
diff --git a/spaces_versioned_docs/version-v1.12/concepts/deployment-modes.md b/spaces_versioned_docs/version-v1.12/concepts/deployment-modes.md
new file mode 100644
index 000000000..f5e718f88
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.12/concepts/deployment-modes.md
@@ -0,0 +1,53 @@
+---
+title: Deployment Modes
+sidebar_position: 10
+description: An overview of deployment modes for Spaces
+---
+
+Upbound Spaces can be deployed and used in a variety of modes:
+
+- **Cloud Spaces:** Multi-tenant Upbound-hosted, Upbound-managed Space environment. Cloud Spaces provide a typical SaaS experience.
+- **[Dedicated Spaces][dedicated-spaces]:** Single-tenant Upbound-hosted, Upbound-managed Space environment. Dedicated Spaces provide a SaaS experience, with additional isolation guarantees that your workloads run in a fully isolated context.
+- **[Managed Spaces][managed-spaces]:** Single-tenant customer-hosted, Upbound-managed Space environment. Managed Spaces provide a SaaS-like experience, with additional guarantees of all hosting infrastructure being served from your own cloud account.
+- **[Self-Hosted Spaces][self-hosted-spaces]:** Single-tenant customer-hosted, customer-managed Space environment. This is a fully self-hosted, self-managed software experience for using Spaces. Upbound delivers the Spaces software and you run it yourself.
+
+The Upbound platform uses a federated model to connect each Space back to a
+central service called the [Upbound Console][console], which is deployed and
+managed by Upbound.
+
+By default, customers have access to a set of Cloud Spaces.
+
+## Supported clouds
+
+You can host Upbound Spaces on Amazon Web Services (AWS), Microsoft Azure,
+and Google Cloud Platform (GCP). Regardless of the hosting platform, you can use
+Spaces to deploy control planes that manage the lifecycle of your resources.
+
+## Supported regions
+
+This table lists the cloud service provider regions supported by Upbound.
+
+### GCP
+
+| Region | Location |
+| --- | --- |
+| `us-west-1` | Western US (Oregon) |
+| `us-central-1` | Central US (Iowa) |
+| `eu-west-3` | Western Europe (Frankfurt) |
+
+### AWS
+
+| Region | Location |
+| --- | --- |
+| `us-east-1` | Eastern US (Northern Virginia) |
+
+### Azure
+
+| Region | Location |
+| --- | --- |
+| `us-east-1` | Eastern US (Virginia) |
+
+[dedicated-spaces]: /spaces/howtos/cloud-spaces/dedicated-spaces-deployment
+[managed-spaces]: /spaces/howtos/self-hosted/managed-spaces-deployment
+[self-hosted-spaces]: /spaces/howtos/self-hosted/self-hosted-spaces-deployment
+[console]: /manuals/console/upbound-console/
diff --git a/spaces_versioned_docs/version-v1.12/concepts/groups.md b/spaces_versioned_docs/version-v1.12/concepts/groups.md
new file mode 100644
index 000000000..d2ccacdb3
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.12/concepts/groups.md
@@ -0,0 +1,115 @@
+---
+title: Control Plane Groups
+sidebar_position: 2
+description: An introduction to Control Plane Groups in Upbound
+plan: "enterprise"
+---
+
+
+
+In Upbound, Control Plane Groups (or just, 'groups') are a logical grouping of one or more control planes with shared resources like [secrets][secrets] or [backups][backups]. It's a mechanism for isolating these groups of resources within a single [Space][space]. All role-based access control in Upbound happens at the control plane group-level.
+
+## When to use multiple groups
+
+You should use groups in environments where there's a need to have Crossplane manage infrastructure across multiple cloud accounts or projects. For users who only need to deploy and manage resources in a couple of cloud accounts, you shouldn't need to think about groups at all.
+
+Groups are a way to divide access in Upbound between multiple teams. Think of a group as being analogous to a Kubernetes _namespace_.
+
+## The 'default' group
+
+Every Cloud Space in Upbound has a group named _default_ available.
+
+## Working with groups
+
+### View groups
+
+You can list groups in a Space using:
+
+```shell
+up group list
+```
+
+If you're operating in a single-tenant Space and have access to the underlying cluster, you can list namespaces that have the group label:
+
+```shell
+kubectl get namespaces -l spaces.upbound.io/group=true
+```
+
+### Set the group for a request
+
+Several commands in _up_ have a group context. To set the group for a request, use the `--group` flag:
+
+```shell
+up ctp list --group=team1
+```
+```shell
+up ctp create new-ctp --group=team2
+```
+
+### Set the group preference
+
+The _up_ CLI operates upon a single [Upbound context][upbound-context]. Whatever context gets set is then used as the preference for other commands. An Upbound context is capable of pointing at a variety of altitudes:
+
+1. A Space in Upbound
+2. A group within a Space
+3. A control plane within a group
+
+To set the group preference, use `up ctx` to choose a group as your preferred Upbound context.
For example:
+
+```shell
+# This sets the context for the up CLI to the default group in an Upbound-managed Cloud Space (gcp-us-west-1) for an organization called 'acmeco'
+up ctx acmeco/upbound-gcp-us-west-1/default/
+```
+
+### Create a group
+
+To create a group, log in to Upbound and set your context to your desired Space:
+
+```shell
+up login
+up ctx '<organization>/<space>'
+# Example: up ctx acmeco/upbound-gcp-us-west-1
+```
+
+
+Create a group:
+
+```shell
+up group create my-new-group
+```
+
+### Delete a group
+
+To delete a group, log in to Upbound and set your context to your desired Space:
+
+```shell
+up login
+up ctx '<organization>/<space>'
+# Example: up ctx acmeco/upbound-gcp-us-west-1
+```
+
+Delete a group:
+
+```shell
+up group delete my-new-group
+```
+
+### Protected groups
+
+Once a control plane gets created in a group, Upbound enforces a protection policy that prevents accidental deletion of the group. To delete a group that has control planes in it, first delete all control planes in the group.
+
+## Groups in the context of single-tenant Spaces
+
+Upbound offers a variety of deployment models to use the product. If you deploy your own single-tenant Upbound Space (whether connected or disconnected), you're self-hosting Upbound software in a Kubernetes cluster. In these environments, a control plane group maps to a corresponding namespace in the cluster which hosts the Space.
+
+Most Kubernetes clusters come with some set of predefined namespaces. Because a group maps to a corresponding Kubernetes namespace, every group must have a matching namespace in the cluster. When the Spaces software is newly installed, no groups exist. You _can_ elevate a Kubernetes namespace to become a group by doing the following:
+
+1. Creating a group with the same name as a preexisting Kubernetes namespace
+2. Creating a control plane in a preexisting Kubernetes namespace
+3. Labeling a Kubernetes namespace with the label `spaces.upbound.io/group=true`
+
+
+[secrets]: /spaces/howtos/secrets-management
+[backups]: /spaces/howtos/self-hosted/workload-id/backup-restore-config/
+[space]: /spaces/overview
+[upbound-context]: /manuals/cli/concepts/contexts
diff --git a/spaces_versioned_docs/version-v1.12/howtos/_category_.json b/spaces_versioned_docs/version-v1.12/howtos/_category_.json
new file mode 100644
index 000000000..d3a8547aa
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.12/howtos/_category_.json
@@ -0,0 +1,7 @@
+{
+  "label": "How-tos",
+  "position": 3,
+  "collapsed": true
+}
+
+
diff --git a/spaces_versioned_docs/version-v1.12/howtos/api-connector.md b/spaces_versioned_docs/version-v1.12/howtos/api-connector.md
new file mode 100644
index 000000000..a14468f52
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.12/howtos/api-connector.md
@@ -0,0 +1,413 @@
+---
+title: API Connector
+weight: 90
+description: Connect Kubernetes clusters to remote Crossplane control planes for resource synchronization
+aliases:
+  - /api-connector
+  - /concepts/api-connector
+---
+:::info API Version Information
+This guide covers API Connector, a preview feature available in all supported versions (v1.9-v1.14+).
+
+For related API specifications and available resources, see the [Spaces API Reference](../../../reference/apis/spaces-api/). For version compatibility details, see the .
+:::
+
+:::warning
+API Connector is currently in **Preview**. The feature is under active
+development and subject to breaking changes. Use for testing and evaluation
+purposes only.
+
+:::
+
+API Connector enables seamless integration between Kubernetes application
+clusters consuming APIs and remote Crossplane control planes providing and
+reconciling APIs.
+
+You can use the API Connector to decouple where Crossplane is running (for
+example in an Upbound control plane), and where APIs are consumed
+(for example in an existing Kubernetes cluster). This gives you flexibility and
+consistency in your control plane operations.
+
+
+
+Unlike the [Control Plane Connector](ctp-connector.md) which offers only
+coarse-grained connectivity between app clusters and a control plane, API
+connector offers fine-grained configuration of which APIs get offered along with
+multi-cluster connectivity.
+
+## Architecture overview
+
+![API Connector Architecture](/img/api-connector.png)
+
+API Connector uses a **provider-consumer** model:
+
+- **Provider control plane**: The Upbound control plane that provides APIs and manages infrastructure.
+- **Consumer cluster**: Any Kubernetes cluster whose users want to use APIs provided by the provider control plane, without having to run Crossplane. API connector gets installed in the consumer cluster, and bidirectionally syncs API objects to the provider.
+
+### Key components
+
+**Custom Resource Definitions (CRDs)**:
+
+
+- `ClusterConnection`: Establishes a connection from the consumer to the provider cluster. Pulls bindable CRD APIs from the provider into the consumer cluster for use.
+
+- `ClusterAPIBinding`: Instructs API connector to sync all API objects cluster-wide with a given API group to a given provider cluster.
+- `APIBinding`: Namespaced version of `ClusterAPIBinding`. Instructs API connector to sync API objects within a given namespace and with a given API group to a given provider cluster.
+
+
+## Prerequisites
+
+Before using API Connector, ensure:
+
+1. **Consumer cluster** has network access to the provider control plane
+1. You have a license to use API connector. If you are unsure, [contact Upbound][contact] or your sales representative.
+
+This guide walks through how to automate connecting your cluster to an Upbound
+control plane. You can also manually configure the API Connector.
+
+## Publishing APIs in the provider cluster
+
+
+
+
+First, log in to your provider control plane, and choose which CRD APIs you want
+to make accessible to consumer clusters. API connector only syncs
+these "bindable" CRDs.
+
+
+
+
+
+
+Use the `up` CLI to log in:
+
+```bash
+up login
+```
+
+Connect to your control plane:
+
+```bash
+up ctx <organization>/<space>/<group>/<control-plane>
+```
+
+Check what CRDs are available:
+
+```bash
+kubectl get crds
+```
+
+
+Label all CRDs you want to publish with the bindable label:
+
+
+```bash
+kubectl label crd <crd-name> 'connect.upbound.io/bindable'='true' --overwrite
+```
+
+
+
+
+Change context to the provider cluster:
+```bash
+kubectl config set-context <provider-context>
+```
+
+Check what CRDs are available:
+```bash
+kubectl get crds
+```
+
+
+Label all CRDs you want to publish with the bindable label:
+
+```bash
+kubectl label crd <crd-name> 'connect.upbound.io/bindable'='true' --overwrite
+```
+
+
+
+## Installation
+
+
+
+
+The up CLI provides the simplest installation method with automatic
+configuration:
+
+Make sure the current Kubeconfig context is set to the **provider control plane**:
+```bash
+up ctx <organization>/<space>/<group>/<control-plane>
+
+up controlplane api-connector install --consumer-kubeconfig <path-to-kubeconfig> [OPTIONS]
+```
+
+The command:
+1. Creates a Robot account (named `<name>`) in the Upbound Cloud organization `<organization>`,
+1. 
Gives the created robot account `admin` permissions to the provider control plane `<control-plane>`
+1. Generates a JWT token for the robot account, and stores it in a Kubernetes Secret in the consumer cluster.
+1. Installs the API connector Helm chart in the consumer cluster.
+1. Creates a `ClusterConnection` object in the consumer cluster, referring to the newly generated Secret, so that API connector can authenticate successfully to the provider control plane.
+1. API connector pulls all published CRDs from the previous step into the consumer cluster.
+
+**Example**:
+```bash
+up controlplane api-connector install \
+  --consumer-kubeconfig ~/.kube/config \
+  --consumer-context my-cluster \
+  --upbound-token <token>
+```
+
+This command uses the provided token to authenticate with the **Provider control plane**
+and create a `ClusterConnection` resource in the **Consumer cluster** to connect to the
+**Provider control plane**.
+
+**Key Options**:
+- `--consumer-kubeconfig`: Path to consumer cluster kubeconfig (required)
+- `--consumer-context`: Context name for consumer cluster (required)
+- `--name`: Custom name for connection resources (optional)
+- `--upbound-token`: API token for authentication (optional)
+- `--upgrade`: Upgrade existing installation (optional)
+- `--version`: Specific version to install (optional)
+
+
+
+
+For manual installation or custom configurations:
+
+```bash
+helm upgrade --install api-connector oci://xpkg.upbound.io/spaces-artifacts/api-connector \
+  --namespace upbound-system \
+  --create-namespace \
+  --version <version> \
+  --set consumerClusterDisplayName=<display-name>
+```
+
+### Authentication methods
+
+API Connector supports two authentication methods:
+
+
+
+For Upbound Spaces integration:
+
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  name: spaces-secret
+  namespace: upbound-system
+type: Opaque
+stringData:
+  token: <upbound-robot-token>
+  organization: <organization>
+  spacesBaseURL: <spaces-base-url>
+  controlPlaneGroupName: <group>
+  controlPlaneName: <control-plane>
+```
+
+
+
+For direct cluster access:
+
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  name: provider-kubeconfig
+  namespace: upbound-system
+type: Opaque
+data:
+  kubeconfig: <base64-encoded-kubeconfig>
+```
+
+
+
+
+### Connection setup
+
+Create a `ClusterConnection` to establish connectivity:
+
+
+
+
+```yaml
+apiVersion: connect.upbound.io/v1alpha1
+kind: ClusterConnection
+metadata:
+  name: spaces-connection
+  namespace: upbound-system
+spec:
+  secretRef:
+    kind: UpboundRobotToken
+    name: spaces-secret
+    namespace: upbound-system
+  crdManagement:
+    pullBehavior: Pull
+```
+
+
+
+
+```yaml
+apiVersion: connect.upbound.io/v1alpha1
+kind: ClusterConnection
+metadata:
+  name: provider-connection
+  namespace: upbound-system
+spec:
+  secretRef:
+    kind: KubeConfig
+    name: provider-kubeconfig
+    namespace: upbound-system
+  crdManagement:
+    pullBehavior: Pull
+```
+
+
+
+
+
+
+
+### Configuration
+
+Bind APIs to make them available in your consumer cluster:
+
+```yaml
+apiVersion: connect.upbound.io/v1alpha1
+kind: ClusterAPIBinding
+metadata:
+  name: <resource-group>
+spec:
+  connectionRef:
+    kind: ClusterConnection
+    name: <connection-name> # Or --name value
+```
+
+
+
+
+The `ClusterAPIBinding` name must match the **Resource.Group** (name of the CustomResourceDefinition) of the CRD you want to bind.
+
+
+
+
+## Usage example
+
+After configuration, you can create API objects (in the consumer cluster) that
+will be synchronized to the provider cluster:
+
+```yaml
+apiVersion: nop.example.org/v1alpha1
+kind: NopResource
+metadata:
+  name: my-resource
+  namespace: default
+spec:
+  coolField: "Synchronized resource"
+  compositeDeletePolicy: Foreground
+```
+
+Verify the resource status:
+
+```bash
+kubectl get nopresource my-resource -o yaml
+```
+
+When the `APIBound=True` condition is present, it means that the API object has
+been synced to the provider cluster, and is being reconciled there. Whenever the
+API object in the provider cluster gets status updates (for example
+`Ready=True`), that status is synced back to the consumer cluster.
+
+Switch contexts to the provider cluster to see the API object being created:
+
+```bash
+up ctx <organization>/<space>/<group>/<control-plane>
+# or kubectl config set-context <provider-context>
+```
+
+```bash
+kubectl get nopresource my-resource -o yaml
+```
+
+Note that in the provider cluster, the API object is labeled with information on
+where the API object originates from, and `connect.upbound.io/managed=true`.
+
+## Monitoring and troubleshooting
+
+### Check connection status
+
+```bash
+kubectl get clusterconnection
+```
+
+Expected output:
+```
+NAME                STATUS   MESSAGE
+spaces-connection   Ready    Provider controlplane is available
+```
+
+### View available APIs
+
+```bash
+kubectl get clusterconnection spaces-connection -o jsonpath='{.status.offeredAPIs[*].name}'
+```
+
+### Check API binding status
+
+```bash
+kubectl get clusterapibinding
+```
+
+### Debug resource synchronization
+
+```bash
+kubectl describe <resource-kind> <resource-name>
+```
+
+## Removal
+
+### Using the up CLI
+
+```bash
+up controlplane api-connector uninstall \
+  --consumer-kubeconfig ~/.kube/config \
+  --all
+```
+
+The `--all` flag removes all resources including connections and secrets.
+Without the flag, only the runtime components are removed; the connection
+resources and secrets remain.
+
+:::note
+Uninstall doesn't remove any API objects in the provider control plane. If you
+want to clean up all API objects there, delete all API objects from the consumer
+cluster before API connector uninstallation, and wait for the objects to get
+deleted.
+:::
+
+
+### Using Helm
+
+```bash
+helm uninstall api-connector -n upbound-system
+```
+
+## Limitations
+
+- **Preview feature**: Subject to breaking changes. Not yet production grade.
+- **CRD updates**: CRDs are pulled once but not automatically updated. If multiple Crossplane clusters offer the same CRD API, API changes must be synchronized out of band, for example using a [Crossplane Configuration](https://docs.crossplane.io/latest/packages/).
+- **Network requirements**: Consumer cluster must have direct network access to provider cluster.
+- **Wide permissions needed in consumer cluster**: Because the API connector doesn't know up front the names of the APIs it needs to reconcile, it currently runs with full "root" privileges in the consumer cluster.
+
+- **Connector polling**: API Connector checks for drift between the consumer and provider cluster
+  periodically through polling. The poll interval can be changed with the `pollInterval` Helm value.
+
+
+## Advanced configuration
+
+### Multiple connections
+
+You can connect to multiple provider clusters simultaneously by creating multiple `ClusterConnection` resources with different names and configurations.
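+
+As a minimal sketch, assuming two provider kubeconfig Secrets named
+`provider-a-kubeconfig` and `provider-b-kubeconfig` (both hypothetical names),
+two independent connections might look like:
+
+```yaml
+# Each ClusterConnection authenticates with its own kubeconfig Secret and
+# pulls bindable CRDs from its own provider cluster.
+apiVersion: connect.upbound.io/v1alpha1
+kind: ClusterConnection
+metadata:
+  name: provider-a
+  namespace: upbound-system
+spec:
+  secretRef:
+    kind: KubeConfig
+    name: provider-a-kubeconfig
+    namespace: upbound-system
+  crdManagement:
+    pullBehavior: Pull
+---
+apiVersion: connect.upbound.io/v1alpha1
+kind: ClusterConnection
+metadata:
+  name: provider-b
+  namespace: upbound-system
+spec:
+  secretRef:
+    kind: KubeConfig
+    name: provider-b-kubeconfig
+    namespace: upbound-system
+  crdManagement:
+    pullBehavior: Pull
+```
+
+An `APIBinding` or `ClusterAPIBinding` then selects which connection serves a
+given API group through its `connectionRef`.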
+
+[contact]: https://www.upbound.io/contact-us
diff --git a/spaces_versioned_docs/version-v1.12/howtos/auto-upgrade.md b/spaces_versioned_docs/version-v1.12/howtos/auto-upgrade.md
new file mode 100644
index 000000000..249056fb4
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.12/howtos/auto-upgrade.md
@@ -0,0 +1,131 @@
+---
+title: Automatically upgrade control planes
+sidebar_position: 50
+description: How to configure automatic upgrades of Crossplane in a control plane
+plan: "standard"
+---
+
+
+
+Upbound Spaces can automatically upgrade the version of Upbound Crossplane in managed control plane instances. You can edit the `spec.crossplane.autoUpgrade` field in your `ControlPlane` specification with the available release channels below.
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+). The `spec.crossplane.autoUpgrade` field has been available since v1.9.
+
+For ControlPlane API specifications, see the [Spaces API Reference](../../../reference/apis/spaces-api/). For version compatibility details, see the .
+:::
+
+| Channel    | Description                       | Example                                                                                                                                        |
+|------------|-----------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------|
+| **None**   | Disables auto upgrades.           | _Uses version specified in `spec.crossplane.version`._                                                                                         |
+| **Patch**  | Upgrades to the latest supported patch release. | _Control plane version 1.12.2-up.2 auto upgrades to 1.12.3-up.1 upon release._                                                  |
+| **Stable** | Default setting. Upgrades to the latest supported patch release on minor version _N-1_ where N is the latest supported minor version. | _If the latest supported minor version is 1.14, auto upgrades to the latest patch of 1.13, such as 1.13.2-up.3._ |
+| **Rapid**  | Upgrades to the latest supported patch release on the latest supported minor version. | _If the latest supported minor version is 1.14, auto upgrades to the latest patch of 1.14, such as 1.14.5-up.1._ |
+
+
+:::warning
+
+The `Rapid` channel is only recommended for users willing to accept the risk of new features and potentially breaking changes.
+
+:::
+
+## Examples
+
+The specs below are examples of how to edit the `autoUpgrade` channel in your `ControlPlane` specification.
+
+To run a control plane with the `Rapid` auto upgrade channel, your spec should look like this:
+
+```yaml
+apiVersion: spaces.upbound.io/v1beta1
+kind: ControlPlane
+metadata:
+  name: example-ctp
+spec:
+  crossplane:
+    autoUpgrade:
+      channel: Rapid
+  writeConnectionSecretToRef:
+    name: kubeconfig-example-ctp
+```
+
+To run a control plane with a pinned version of Crossplane, specify in the `version` field:
+
+```yaml
+apiVersion: spaces.upbound.io/v1beta1
+kind: ControlPlane
+metadata:
+  name: example-ctp
+spec:
+  crossplane:
+    version: 1.14.3-up.1
+    autoUpgrade:
+      channel: None
+  writeConnectionSecretToRef:
+    name: kubeconfig-example-ctp
+```
+
+## Supported Crossplane versions
+
+Spaces supports the last supported minor version and the two [preceding minor versions][preceding-minor-versions]. For example, if the last supported minor version is `1.14`, minor versions `1.13` and `1.12` are also supported. Versions older than the three most recent minor versions aren't supported. Only supported Crossplane versions are valid specifications for new control planes.
+
+Current Crossplane version support by Spaces version:
+
+| Spaces Version | Crossplane Version Min | Crossplane Version Max |
+|:--------------:|:----------------------:|:----------------------:|
+| 1.2 | 1.13 | 1.15 |
+| 1.3 | 1.13 | 1.15 |
+| 1.4 | 1.14 | 1.16 |
+| 1.5 | 1.14 | 1.16 |
+| 1.6 | 1.14 | 1.16 |
+| 1.7 | 1.14 | 1.16 |
+| 1.8 | 1.15 | 1.17 |
+| 1.9 | 1.16 | 1.18 |
+| 1.10 | 1.16 | 1.18 |
+| 1.11 | 1.16 | 1.18 |
+| 1.12 | 1.17 | 1.19 |
+
+
+Upbound offers extended support for all installed Crossplane versions released within a 12-month window of the last Spaces release. Contact your Upbound sales representative for more information on version support.
+
+
+:::warning
+
+If the auto upgrade channel is `Stable` or `Rapid`, the Crossplane version will always stay within the support window after auto upgrade. If set to `Patch` or `None`, the minor version may be outside the support window. You are responsible for upgrading to a supported version.
+
+:::
+
+To view the support status of a control plane instance, use `kubectl get ctp`.
+
+```bash
+kubectl get ctp
+NAME          CROSSPLANE VERSION   SUPPORTED   READY   MESSAGE   AGE
+example-ctp   1.13.2-up.3          True        True              31m
+
+```
+
+Unsupported versions return `SUPPORTED: False`.
+
+```bash
+kubectl get ctp
+NAME          CROSSPLANE VERSION   SUPPORTED   READY   MESSAGE   AGE
+example-ctp   1.11.5-up.1          False       True              31m
+
+```
+
+To return more detail, use the `-o yaml` flag.
+
+```bash
+kubectl get controlplanes.spaces.upbound.io example-ctp -o yaml
+status:
+  conditions:
+  ...
+  - lastTransitionTime: "2024-01-23T06:36:10Z"
+    message: Crossplane version 1.11.5-up.1 is outside of the support window.
+      Oldest supported minor version is 1.12.
+    reason: UnsupportedCrossplaneVersion
+    status: "False"
+    type: Supported
+```
+
+
+[preceding-minor-versions]: /reference/usage/lifecycle/#maintenance-and-updates
diff --git a/spaces_versioned_docs/version-v1.12/howtos/automation-and-gitops/_category_.json b/spaces_versioned_docs/version-v1.12/howtos/automation-and-gitops/_category_.json
new file mode 100644
index 000000000..b65481af6
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.12/howtos/automation-and-gitops/_category_.json
@@ -0,0 +1,8 @@
+{
+  "label": "Automation & GitOps",
+  "position": 11,
+  "collapsed": true,
+  "customProps": {
+    "plan": "business"
+  }
+}
diff --git a/spaces_versioned_docs/version-v1.12/howtos/automation-and-gitops/overview.md b/spaces_versioned_docs/version-v1.12/howtos/automation-and-gitops/overview.md
new file mode 100644
index 000000000..57eeb15fc
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.12/howtos/automation-and-gitops/overview.md
@@ -0,0 +1,138 @@
+---
+title: Automation and GitOps Overview
+sidebar_label: Overview
+sidebar_position: 1
+description: Guide to automating control plane deployments with GitOps and Argo CD
+plan: "business"
+---
+
+Automating control plane deployments with GitOps enables declarative, version-controlled infrastructure management. This section covers integrating GitOps workflows with Upbound control planes using Argo CD and related tools.
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+). GitOps patterns are compatible across all versions of Upbound Spaces.
+
+For details on deployment models and their differences, see the [Deployment Modes](../../concepts/deployment-modes.md) concept guide. For version-specific features, see the .
+:::
+
+## What is GitOps?
+
+GitOps is an approach for managing infrastructure by:
+- **Declaratively describing** desired system state in Git
+- **Using controllers** to continuously reconcile actual state with desired state
+- **Treating Git as the source of truth** for all configuration and deployments
+
+Upbound control planes are fully compatible with GitOps patterns and we strongly recommend integrating GitOps in the platforms you build on Upbound.
+
+## Key Concepts
+
+### Argo CD
+[Argo CD](https://argo-cd.readthedocs.io/) is a popular Kubernetes-native GitOps controller. It continuously monitors Git repositories and automatically applies changes to your infrastructure when commits are detected.
+
+### Deployment Models
+
+The way you configure GitOps depends on your deployment model:
+
+| Aspect | Cloud Spaces | Self-Hosted Spaces |
+|--------|--------------|-------------------|
+| **Access Method** | Upbound API with tokens | Kubernetes native (secrets/kubeconfig) |
+| **Configuration** | Kubeconfig via `up` CLI | Control plane connection secrets |
+| **Setup Complexity** | More involved (API integration) | Simpler (native Kubernetes) |
+| **Typical Use Case** | Managing Upbound resources | Managing workloads on control planes |
+
+## Getting Started
+
+**Choose your path based on your deployment model:**
+
+### Cloud Spaces
+If you're using Upbound Cloud Spaces (Dedicated or Managed):
+1. Start with [GitOps with Upbound Control Planes](../cloud-spaces/gitops-on-upbound.md)
+2. Learn how to integrate Argo CD with Cloud Spaces
+3. Manage both control plane infrastructure and Upbound resources declaratively
+
+### Self-Hosted Spaces
+If you're running self-hosted Spaces:
+1. Start with [GitOps with ArgoCD in Self-Hosted Spaces](../self-hosted/gitops-with-argocd.md)
+2. Learn how to configure control plane connection secrets
+3. Manage workloads deployed to your control planes
+
+## Common Workflows
+
+### Workflow 1: Managing Control Planes with GitOps
+Create and manage control planes themselves declaratively using provider-kubernetes:
+
+```yaml
+apiVersion: kubernetes.crossplane.io/v1alpha2
+kind: Object
+metadata:
+  name: my-controlplane
+spec:
+  forProvider:
+    manifest:
+      apiVersion: spaces.upbound.io/v1beta1
+      kind: ControlPlane
+      # ... control plane configuration
+```
+
+### Workflow 2: Managing Workloads on Control Planes
+Deploy applications and resources to control planes using standard Kubernetes GitOps patterns (see the Argo CD sketch after the Advanced Topics section below):
+
+```yaml
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: my-app
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: my-app
+  namespace: my-app
+# ... deployment configuration
+```
+
+### Workflow 3: Managing Upbound Resources
+Use provider-upbound to manage Upbound IAM and repository resources:
+
+- Teams
+- Robots and their team memberships
+- Repositories and permissions
+
+## Advanced Topics
+
+### Argo CD Plugin for Upbound
+Learn more in the [ArgoCD Plugin guide](../self-hosted/use-argo.md) for enhanced integration with self-hosted Spaces.
+
+### Declarative Control Plane Creation
+See [Declaratively create control planes](../self-hosted/declarative-ctps.md) for advanced automation patterns.
+
+### Consuming Control Plane APIs
+Understand how to [consume control plane APIs in your app cluster](../mcp-connector-guide.md) with Argo CD.
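+
+As a hedged sketch of Workflow 2, the Argo CD `Application` below syncs
+manifests from a Git repository to a control plane registered as a cluster in
+Argo CD. The repository URL, path, and destination server are hypothetical
+placeholders:
+
+```yaml
+apiVersion: argoproj.io/v1alpha1
+kind: Application
+metadata:
+  name: my-app
+  namespace: argocd
+spec:
+  project: default
+  source:
+    repoURL: https://github.com/example-org/platform-config.git # hypothetical repo
+    targetRevision: main
+    path: claims
+  destination:
+    server: https://my-controlplane.example.com # control plane API endpoint registered with Argo CD
+    namespace: default
+  syncPolicy:
+    automated:
+      prune: true # remove resources deleted from Git
+      selfHeal: true # revert out-of-band changes
+```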
+
+## Prerequisites
+
+Before implementing GitOps with control planes, ensure you have:
+
+**For Cloud Spaces:**
+- Access to Upbound Cloud Spaces
+- `up` CLI installed and configured
+- API token with appropriate permissions
+- Argo CD or similar GitOps controller running
+- Familiarity with Kubernetes RBAC
+
+**For Self-Hosted Spaces:**
+- Self-hosted Spaces deployed and running
+- Argo CD deployed in your infrastructure
+- Kubectl access to the cluster hosting Spaces
+- Understanding of control plane architecture
+
+## Next Steps
+
+1. **Choose your deployment model** above
+2. **Review the relevant getting started guide**
3. **Set up your GitOps controller** (Argo CD)
+4. **Deploy your first automated control plane**
+5. **Explore advanced topics** as needed
+
+:::tip
+Start with simple deployments to test your GitOps workflow before moving to production. Use [simulations](../simulations.md) to preview changes before applying them.
+:::
diff --git a/spaces_versioned_docs/version-v1.12/howtos/backup-and-restore.md b/spaces_versioned_docs/version-v1.12/howtos/backup-and-restore.md
new file mode 100644
index 000000000..3b8d026cb
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.12/howtos/backup-and-restore.md
@@ -0,0 +1,530 @@
+---
+title: Backup and restore
+sidebar_position: 13
+description: Configure and manage backups in your Upbound Space.
+plan: "enterprise"
+---
+
+
+
+Upbound's _Shared Backups_ is a built-in backup and restore feature. Shared Backups lets you configure automatic schedules for taking snapshots of your control planes. You can restore data from these backups by making new control planes. This guide explains how to use Shared Backups for disaster recovery or upgrade scenarios.
+
+:::info API Version Information & Available Versions
+This guide applies to **all supported versions** (v1.9-v1.15+).
+
+**Select your API version**:
+- [Spaces API v1.15 (Latest)](../../../next/reference/apis/spaces-api/)
+- [Spaces API v1.14](../../../1.14/reference/apis/spaces-api/)
+- [Spaces API v1.13](../../../1.13/reference/apis/spaces-api/)
+- [Spaces API v1.12](../../../1.12/reference/apis/spaces-api/)
+- [Spaces API v1.11](../../../1.11/reference/apis/spaces-api/)
+- [Spaces API v1.10](../../../1.10/reference/apis/spaces-api/)
+- [Spaces API v1.9](../../../1.9/reference/apis/spaces-api/)
+
+For version support policy, see . For version compatibility details, see the .
+:::
+
+## Benefits
+
+The Shared Backups feature provides the following benefits:
+
+* Automatic backups for control planes without any operational overhead
+* Backup schedules for multiple control planes in a group
+* Shared Backups are available across all hosting environments of Upbound (Disconnected, Connected, or Cloud Spaces)
+
+
+## Configure a Shared Backup Config
+
+
+[SharedBackupConfig][sharedbackupconfig] is a [group-scoped][group-scoped] resource. You should create them in a group containing one or more control planes. This resource configures the storage details and provider. Whenever a backup executes (either by schedule or manually initiated), it references a SharedBackupConfig to tell it where to store the snapshot.
+
+
+### Backup config provider
+
+
+The `spec.objectStorage.provider` and `spec.objectStorage.config` fields configure:
+
+* The object storage provider
+* The path to the provider
+* The credentials needed to communicate with the provider
+
+You can only set one provider. Upbound currently supports AWS, Azure, and GCP as providers.
+
+
+`spec.objectStorage.config` is a freeform map of configuration options for the object storage provider. See [Thanos object storage][thanos-object-storage] for more information on the formats for each supported cloud provider. `spec.bucket` and `spec.provider` override the required values in the config.
+
+
+
+#### AWS as a storage provider
+
+:::important
+For Cloud Spaces, static credentials are currently the only supported auth method.
+:::
+
+This example demonstrates how to use AWS as a storage provider for your backups:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackupConfig
+metadata:
+  name: default
+  namespace: default
+spec:
+  objectStorage:
+    provider: AWS
+    bucket: spaces-backup-bucket
+    config:
+      endpoint: s3.eu-west-2.amazonaws.com
+      region: eu-west-2
+    credentials:
+      source: Secret
+      secretRef:
+        name: bucket-creds
+        key: creds
+```
+
+
+This example assumes you've already created an S3 bucket called "spaces-backup-bucket" in the AWS `eu-west-2` region. The account credentials to access the bucket should exist in a secret in the same namespace as the Shared Backup Config.
+
+#### Azure as a storage provider
+
+:::important
+For Cloud Spaces, static credentials are currently the only supported auth method.
+:::
+
+This example demonstrates how to use Azure as a storage provider for your backups:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackupConfig
+metadata:
+  name: default
+  namespace: default
+spec:
+  objectStorage:
+    provider: Azure
+    bucket: upbound-backups
+    config:
+      storage_account: upbackupstore
+      container: upbound-backups
+      endpoint: blob.core.windows.net
+    credentials:
+      source: Secret
+      secretRef:
+        name: bucket-creds
+        key: creds
+```
+
+
+This example assumes you've already created an Azure storage account called `upbackupstore` and blob `upbound-backups`. The storage account key to access the blob should exist in a secret in the same namespace as the Shared Backup Config.
+
+
+#### GCP as a storage provider
+
+:::important
+For Cloud Spaces, static credentials are currently the only supported auth method.
+:::
+
+This example demonstrates how to use Google Cloud Storage as a storage provider for your backups:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackupConfig
+metadata:
+  name: default
+  namespace: default
+spec:
+  objectStorage:
+    provider: GCP
+    bucket: spaces-backup-bucket
+    credentials:
+      source: Secret
+      secretRef:
+        name: bucket-creds
+        key: creds
+```
+
+
+This example assumes you've already created a Cloud Storage bucket called "spaces-backup-bucket" and a service account with access to this bucket. The key file should exist in a secret in the same namespace as the Shared Backup Config.
+
+
+## Configure a Shared Backup Schedule
+
+
+[SharedBackupSchedule][sharedbackupschedule] is a [group-scoped][group-scoped-1] resource. You should create them in a group containing one or more control planes. This resource defines a backup schedule for control planes within its corresponding group.
+
+Below is an example of a Shared Backup Schedule that takes backups every day of all control planes having `environment: production` labels:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackupSchedule
+metadata:
+  name: daily-schedule
+  namespace: default
+spec:
+  schedule: "@daily"
+  configRef:
+    kind: SharedBackupConfig
+    name: default
+  controlPlaneSelector:
+    labelSelectors:
+      - matchLabels:
+          environment: production
+```
+
+### Define a schedule
+
+The `spec.schedule` field is a [Cron-formatted][cron-formatted] string. Some common examples are below:
+
+
+| Entry | Description |
+| ----------------- | ------------------------------------------------------------------------------------------------- |
+| `@hourly` | Run once an hour. |
+| `@daily` | Run once a day. |
+| `@weekly` | Run once a week. |
+| `0 0/4 * * *` | Run every 4 hours. |
+| `0/15 * * * 1-5` | Run every fifteen minutes, Monday through Friday. |
+| `@every 1h30m10s` | Run every 1 hour, 30 minutes, and 10 seconds. Hour is the largest measurement of time for @every. |
+
+
+### Exclude resources from the backup
+
+The `spec.excludedResources` field is an array of resource names to exclude from each backup.
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackupSchedule
+metadata:
+  name: daily-schedule
+spec:
+  excludedResources:
+  - "xclusters.aws.platformref.upbound.io"
+  - "xdatabase.aws.platformref.upbound.io"
+  - "xrolepolicyattachment.iam.aws.crossplane.io"
+```
+
+:::warning
+You must specify resource names in lowercase "resource.group" format (for example, `xclusters.aws.platformref.upbound.io`). Using only the resource kind (for example, `XCluster`) isn't supported.
+:::
+
+### Suspend a schedule
+
+Use the `spec.suspend` field to suspend the schedule. While suspended, the schedule creates no new backups, but allows running backups to complete.
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackupSchedule
+metadata:
+  name: daily-schedule
+spec:
+  suspend: true
+```
+
+### Set the time to live
+
+Set the `spec.ttl` field to define the time to live for the backup. After this time, the backup is eligible for garbage collection. If this field isn't set, the backup isn't garbage collected. The time to live is a duration, for example, `168h` for 7 days.
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackupSchedule
+metadata:
+  name: daily-schedule
+spec:
+  ttl: 168h # Backup is garbage collected after 7 days
+```
+:::tip
+By default, this setting doesn't delete uploaded files. Review the next section to define
+the deletion policy.
+:::
+
+### Define the deletion policy
+
+Set the `spec.deletionPolicy` to define backup deletion actions, including the
+deletion of the backup file from the bucket. The Deletion Policy value defaults
+to `Orphan`. Set it to `Delete` to remove uploaded files in the bucket. For more
+information on the backup and restore process, review the [Spaces API
+documentation][spaces-api-documentation].
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackupSchedule
+metadata:
+  name: daily-schedule
+spec:
+  ttl: 168h # Backup is garbage collected after 7 days
+  deletionPolicy: Delete # Defaults to Orphan
+```
+
+### Garbage collect backups when the schedule gets deleted
+
+Set the `spec.useOwnerReferencesInBackup` field to garbage collect associated backups when a shared schedule gets deleted. If set to true, backups are garbage collected when the schedule gets deleted.
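+
+Following the pattern of the earlier examples:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackupSchedule
+metadata:
+  name: daily-schedule
+spec:
+  useOwnerReferencesInBackup: true # Deleting the schedule garbage collects its backups
+```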
+
+### Control plane selection
+
+To configure which control planes in a group you want to create a backup schedule for, use the `spec.controlPlaneSelector` field. You can either use `labelSelectors` or the `names` of a control plane directly. A control plane matches if any of the label selectors match.
+
+This example matches all control planes in the group that have `environment: production` as a label:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackupSchedule
+metadata:
+  name: my-backup-schedule
+spec:
+  controlPlaneSelector:
+    labelSelectors:
+      - matchLabels:
+          environment: production
+```
+
+You can use the more complex `matchExpressions` to match labels based on an expression. This example matches control planes that have label `environment: production` or `environment: staging`:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackupSchedule
+metadata:
+  name: my-backup-schedule
+spec:
+  controlPlaneSelector:
+    labelSelectors:
+      - matchExpressions:
+          - { key: environment, operator: In, values: [production,staging] }
+```
+
+You can also specify the names of control planes directly:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackupSchedule
+metadata:
+  name: my-backup-schedule
+spec:
+  controlPlaneSelector:
+    names:
+      - controlplane-dev
+      - controlplane-staging
+      - controlplane-prod
+```
+
+
+## Configure a Shared Backup
+
+
+
+[SharedBackup][sharedbackup] is a [group-scoped][group-scoped-2] resource. You should create it in a group containing one or more control planes. This resource causes a backup to occur for control planes within its corresponding group.
+
+Below is an example of a Shared Backup that takes a backup of all control planes with the `environment: production` label:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackup
+metadata:
+  name: my-backup
+  namespace: default
+spec:
+  configRef:
+    kind: SharedBackupConfig
+    name: default
+  controlPlaneSelector:
+    labelSelectors:
+      - matchLabels:
+          environment: production
+```
+
+### Exclude resources from the backup
+
+The `spec.excludedResources` field is an array of resource names to exclude from each backup.
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackup
+metadata:
+  name: my-backup
+spec:
+  excludedResources:
+    - "xclusters.aws.platformref.upbound.io"
+    - "xdatabase.aws.platformref.upbound.io"
+    - "xrolepolicyattachment.iam.aws.crossplane.io"
+```
+
+:::warning
+You must specify resource names in lowercase "resource.group" format (for example, `xclusters.aws.platformref.upbound.io`). Using only the resource kind (for example, `XCluster`) isn't supported.
+:::
+
+### Set the time to live
+
+Set the `spec.ttl` field to define the time to live for the backup. After this time, the backup is eligible for garbage collection. If this field isn't set, the backup isn't garbage collected. The time to live is a duration, for example, `168h` for 7 days.
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackup
+metadata:
+  name: my-backup
+spec:
+  ttl: 168h # Backup is garbage collected after 7 days
+```
+
+
+### Garbage collect backups on Shared Backup deletion
+
+
+
+Set the `spec.useOwnerReferencesInBackup` field to `true` to garbage collect associated backups when the shared backup gets deleted, as shown below.
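+A minimal sketch:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackup
+metadata:
+  name: my-backup
+spec:
+  useOwnerReferencesInBackup: true # Backups get garbage collected with this shared backup
+```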
+
+### Control plane selection
+
+To configure which control planes in a group you want to create a backup for, use the `spec.controlPlaneSelector` field. You can either use `labelSelectors` or the `names` of a control plane directly. A control plane matches if any of the label selectors match.
+
+This example matches all control planes in the group that have `environment: production` as a label:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackup
+metadata:
+  name: my-backup
+spec:
+  controlPlaneSelector:
+    labelSelectors:
+      - matchLabels:
+          environment: production
+```
+
+You can use the more complex `matchExpressions` to match labels based on an expression. This example matches control planes that have label `environment: production` or `environment: staging`:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackup
+metadata:
+  name: my-backup
+spec:
+  controlPlaneSelector:
+    labelSelectors:
+      - matchExpressions:
+          - { key: environment, operator: In, values: [production,staging] }
+```
+
+You can also specify the names of control planes directly:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackup
+metadata:
+  name: my-backup
+spec:
+  controlPlaneSelector:
+    names:
+      - controlplane-dev
+      - controlplane-staging
+      - controlplane-prod
+```
+
+## Create a manual backup
+
+[Backup][backup] is a [group-scoped][group-scoped-3] resource that causes a single backup to occur for a control plane in its corresponding group.
+
+Below is an example of a manual Backup of a control plane:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: Backup
+metadata:
+  name: my-backup
+  namespace: default
+spec:
+  configRef:
+    kind: SharedBackupConfig
+    name: default
+  controlPlane: my-awesome-ctp
+  deletionPolicy: Delete
+```
+
+The `spec.deletionPolicy` field defines backup deletion actions,
+including the deletion of the backup file from the bucket. The `deletionPolicy`
+value defaults to `Orphan`. Set it to `Delete` to remove uploaded files
+from the bucket.
+For more information on the backup and restore process, review the [Spaces API documentation][spaces-api-documentation-1].
+
+
+### Choose a control plane to back up
+
+The `spec.controlPlane` field defines which control plane to execute a backup against.
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: Backup
+metadata:
+  name: my-backup
+  namespace: default
+spec:
+  controlPlane: my-awesome-ctp
+```
+
+If the control plane doesn't exist, the backup fails after multiple failed retry attempts.
+
+### Exclude resources from the backup
+
+The `spec.excludedResources` field is an array of resource names to exclude from the manual backup.
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: Backup
+metadata:
+  name: my-backup
+spec:
+  excludedResources:
+    - "xclusters.aws.platformref.upbound.io"
+    - "xdatabase.aws.platformref.upbound.io"
+    - "xrolepolicyattachment.iam.aws.crossplane.io"
+```
+
+:::warning
+You must specify resource names in lowercase "resource.group" format (for example, `xclusters.aws.platformref.upbound.io`). Using only the resource kind (for example, `XCluster`) isn't supported.
+:::
+
+### Set the time to live
+
+Set the `spec.ttl` field to define the time to live for the backup. After this time, the backup is eligible for garbage collection. If this field isn't set, the backup isn't garbage collected. The time to live is a duration, for example, `168h` for 7 days.
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: Backup
+metadata:
+  name: my-backup
+spec:
+  ttl: 168h # Backup is garbage collected after 7 days
+```
+
+## Restore a control plane from a backup
+
+You can restore a control plane's state from a backup. Below is an example of creating a new control plane from a previous backup called `restore-me`:
+
+
+```yaml
+apiVersion: spaces.upbound.io/v1beta1
+kind: ControlPlane
+metadata:
+  name: my-awesome-restored-ctp
+  namespace: default
+spec:
+  restore:
+    source:
+      kind: Backup
+      name: restore-me
+```
+
+
+[group-scoped]: /spaces/concepts/groups
+[group-scoped-1]: /spaces/concepts/groups
+[group-scoped-2]: /spaces/concepts/groups
+[group-scoped-3]: /spaces/concepts/groups
+[sharedbackupconfig]: /reference/apis/spaces-api/latest
+[thanos-object-storage]: https://thanos.io/tip/thanos/storage.md/
+[sharedbackupschedule]: /reference/apis/spaces-api/latest
+[cron-formatted]: https://en.wikipedia.org/wiki/Cron
+[spaces-api-documentation]: /reference/apis/spaces-api/v1_9
+[sharedbackup]: /reference/apis/spaces-api/latest
+[backup]: /reference/apis/spaces-api/latest
+[spaces-api-documentation-1]: /reference/apis/spaces-api/v1_9
+
+
+
diff --git a/spaces_versioned_docs/version-v1.12/howtos/cloud-spaces/_category_.json b/spaces_versioned_docs/version-v1.12/howtos/cloud-spaces/_category_.json
new file mode 100644
index 000000000..1e1869a38
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.12/howtos/cloud-spaces/_category_.json
@@ -0,0 +1,10 @@
+{
+  "label": "Cloud Spaces",
+  "position": 1,
+  "collapsed": true,
+  "customProps": {
+    "plan": "standard"
+  }
+}
+
+
diff --git a/spaces_versioned_docs/version-v1.12/howtos/cloud-spaces/dedicated-spaces-deployment.md b/spaces_versioned_docs/version-v1.12/howtos/cloud-spaces/dedicated-spaces-deployment.md
new file mode 100644
index 000000000..ebad9493e
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.12/howtos/cloud-spaces/dedicated-spaces-deployment.md
@@ -0,0 +1,33 @@
+---
+title: Dedicated Spaces
+sidebar_position: 4
+description: A guide to Upbound Dedicated Spaces
+plan: business
+---
+
+
+## Benefits
+
+Dedicated Spaces offer the following benefits:
+
+- **Single-tenancy.** A control plane space where Upbound guarantees you're the only tenant operating in the environment.
+- **Connectivity to your private network.** Establish secure network connections between your Dedicated Cloud Space running in Upbound and your own resources behind your private network.
+- **Reduced Overhead.** Offload day-to-day operational burdens to Upbound while focusing on your job of building your platform.
+
+## Architecture
+
+A Dedicated Space is a deployment of the Upbound Spaces software inside an
+Upbound-controlled cloud account and network. Upbound operates the
+infrastructure, and the control planes you run in it are dedicated to your
+organization.
+
+The diagram below illustrates the high-level architecture of Upbound Dedicated Spaces:
+
+![Upbound Managed Spaces arch](/img/managed-arch-gcp.png)
+
+## How to get access to Dedicated Spaces
+
+If you have an interest in Upbound Dedicated Spaces, contact
+[Upbound][contact-us]. We can chat more about your
+requirements and see if Dedicated Spaces are a good fit for you.
+
+[contact-us]: https://www.upbound.io/contact-us
+[managed-space]: /spaces/howtos/self-hosted/managed-spaces-deployment
diff --git a/spaces_versioned_docs/version-v1.12/howtos/cloud-spaces/gitops-on-upbound.md b/spaces_versioned_docs/version-v1.12/howtos/cloud-spaces/gitops-on-upbound.md
new file mode 100644
index 000000000..fa59a8dce
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.12/howtos/cloud-spaces/gitops-on-upbound.md
@@ -0,0 +1,318 @@
+---
+title: GitOps with Upbound Control Planes
+sidebar_position: 80
+description: An introduction to doing GitOps with control planes on Upbound Cloud Spaces
+tier: "business"
+---
+
+:::info Deployment Model
+This guide applies to **Upbound Cloud Spaces** (Dedicated and Managed Spaces). For self-hosted Spaces deployments, see [GitOps with ArgoCD in Self-Hosted Spaces](/spaces/howtos/self-hosted/gitops-with-argocd/).
+:::
+
+GitOps is an approach for managing a system by declaratively describing desired resources' configurations in Git and using controllers to realize the desired state. Upbound's control planes are compatible with this pattern and it's strongly recommended you integrate GitOps into the platforms you build on Upbound.
+
+
+## Integrate with Argo CD
+
+
+[Argo CD][argo-cd] is a project in the Kubernetes ecosystem commonly used for GitOps. You can use it in tandem with Upbound control planes to achieve GitOps flows. The sections below explain how to integrate these tools with Upbound.
+
+### Generate a kubeconfig for your control plane
+
+Use the up CLI to [generate a kubeconfig][generate-a-kubeconfig] for your control plane.
+
+```bash
+up ctx <org>/<space>/<group>/<control-plane> -f - > context.yaml
+```
+
+### Create an API token
+
+
+You need a personal access token (PAT). You create PATs on a per-user basis in the Upbound Console. Go to [My Account - API tokens][my-account-api-tokens] and select Create New Token. Give the token a name and save the secret value somewhere safe.
+
+
+### Add the up CLI init container to Argo
+
+Create a new file called `up-plugin-values.yaml` and paste the following YAML:
+
+```yaml
+controller:
+  volumes:
+    - name: up-plugin
+      emptyDir: {}
+    - name: up-home
+      emptyDir: {}
+
+  volumeMounts:
+    - name: up-plugin
+      mountPath: /usr/local/bin/up
+      subPath: up
+    - name: up-home
+      mountPath: /home/argocd/.up
+
+  initContainers:
+    - name: up-plugin
+      image: xpkg.upbound.io/upbound/up-cli:v0.39.0
+      command: ["cp"]
+      args:
+        - /usr/local/bin/up
+        - /plugin/up
+      volumeMounts:
+        - name: up-plugin
+          mountPath: /plugin
+
+server:
+  volumes:
+    - name: up-plugin
+      emptyDir: {}
+    - name: up-home
+      emptyDir: {}
+
+  volumeMounts:
+    - name: up-plugin
+      mountPath: /usr/local/bin/up
+      subPath: up
+    - name: up-home
+      mountPath: /home/argocd/.up
+
+  initContainers:
+    - name: up-plugin
+      image: xpkg.upbound.io/upbound/up-cli:v0.39.0
+      command: ["cp"]
+      args:
+        - /usr/local/bin/up
+        - /plugin/up
+      volumeMounts:
+        - name: up-plugin
+          mountPath: /plugin
+```
+
+### Install or upgrade Argo using the values file
+
+Install or upgrade Argo via Helm, including the values from the `up-plugin-values.yaml` file:
+
+```bash
+helm upgrade --install -n argocd -f up-plugin-values.yaml --reuse-values argocd argo/argo-cd
+```
+
+
+### Configure Argo CD
+
+
+To configure Argo CD for Annotation resource tracking, edit the Argo CD ConfigMap in the Argo CD namespace.
+Add `application.resourceTrackingMethod: annotation` to the data section as below.
+This configuration turns off Argo CD auto pruning, preventing the deletion of Crossplane resources.
+
+Next, configure the [auto respect RBAC for the Argo CD controller][auto-respect-rbac-for-the-argo-cd-controller].
+By default, Argo CD attempts to discover some Kubernetes resource types that don't exist in a control plane.
+You must configure Argo CD to respect the cluster's RBAC rules so that Argo CD can sync.
+Add `resource.respectRBAC: normal` to the data section as below.
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: argocd-cm
+data:
+  ...
+  application.resourceTrackingMethod: annotation
+  resource.respectRBAC: normal
+```
+
+:::tip
+The `resource.respectRBAC` configuration above tells Argo to respect RBAC for _all_ cluster contexts. If you're using an Argo CD instance to manage more than only control planes, you should consider changing the `clusters` string match for the configuration to apply only to control planes. For example, if every control plane context name followed the convention of being named `controlplane-<name>`, you could set the string match to be `controlplane-*`
+:::
+
+
+### Create a cluster context definition
+
+
+Replace the variables and apply the following manifest to configure a new Argo cluster context definition.
+
+To configure Argo for a control plane in a Connected Space, replace `stringData.server` with the ingress URL of the control plane. This URL is what's outputted when using `up ctx`.
+
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  name: my-control-plane
+  namespace: argocd
+  labels:
+    argocd.argoproj.io/secret-type: cluster
+type: Opaque
+stringData:
+  name: my-control-plane-context
+  server: https://<space-host>.spaces.upbound.io/apis/spaces.upbound.io/v1beta1/namespaces/<group>/controlplanes/<control-plane>/k8s
+  config: |
+    {
+      "execProviderConfig": {
+        "apiVersion": "client.authentication.k8s.io/v1",
+        "command": "up",
+        "args": [ "org", "token" ],
+        "env": {
+          "ORGANIZATION": "<your-org>",
+          "UP_TOKEN": "<your-token>"
+        }
+      },
+      "tlsClientConfig": {
+        "insecure": false,
+        "caData": "<base64-encoded-ca-data>"
+      }
+    }
+```
+
+
+## GitOps for Upbound resources
+
+
+Like any other cloud service, you can drive the lifecycle of Upbound Cloud resources with Crossplane. This lets you establish GitOps flows to declaratively create and manage:
+
+- [control plane groups][control-plane-groups]
+- [control planes][control-planes]
+- [Upbound IAM resources][upbound-iam-resources]
+
+Use a control plane installed with [provider-upbound][provider-upbound] and [provider-kubernetes][provider-kubernetes] to achieve this.
+
+### Provider-upbound
+
+[Provider-upbound][provider-upbound-2] is a Crossplane provider built by Upbound to interact with Upbound resources. Use _provider-upbound_ to declaratively create and manage the lifecycle of IAM resources and repositories:
+
+- [Robots][robots] and their membership to teams
+- [Teams][teams]
+- [Repositories][repositories] and [permissions][permissions] on those repositories.
+
+:::tip
+This provider defines managed resources for control planes, their auth, and permissions. These resources are only applicable to customers who run in Upbound's **Legacy Spaces** control plane hosting environments. Other customers should use _provider-kubernetes_, explained below, to manage the lifecycle of control planes with Crossplane.
+:::
+
+### Provider-kubernetes
+
+[Provider-kubernetes][provider-kubernetes-3] is a Crossplane provider that defines an [Object][object] resource. Use _Objects_ as general-purpose resources to wrap _any_ Kubernetes resource for Crossplane to manage.
+
+Upbound [Space APIs][space-apis] are Kube-like APIs and support most Kubernetes-style API concepts.
+You can use kubectl or any other Kubernetes-compatible tooling to interact with the API. This means you can use _provider-kubernetes_ to drive interactions with Space APIs.
+
+:::warning
+When interacting with a Cloud Space's API, the Kubernetes [watch][watch] feature **isn't implemented.** Argo CD requires _watch_ support to function as expected, meaning you can't point Argo directly at a Cloud Space until it's implemented.
+:::
+
+Use _provider-kubernetes_ to declaratively drive interactions with all [Space APIs][space-apis-1]. Wrap the desired API resource in an _Object_. See the example below for a control plane:
+
+```yaml
+apiVersion: kubernetes.crossplane.io/v1alpha2
+kind: Object
+metadata:
+  name: my-controlplane
+spec:
+  forProvider:
+    manifest:
+      apiVersion: spaces.upbound.io/v1beta1
+      kind: ControlPlane
+      metadata:
+        name: my-controlplane
+        namespace: default
+      spec:
+        crossplane:
+          autoUpgrade:
+            channel: Rapid
+```
+
+[Control plane groups][control-plane-groups-2] are a special case because they technically map to an underlying Kubernetes namespace. To create a control plane group in a Space, create a `kind: Namespace` object with the `spaces.upbound.io/group` label. See the example below:
+
+```yaml
+apiVersion: kubernetes.crossplane.io/v1alpha2
+kind: Object
+metadata:
+  name: group1
+spec:
+  forProvider:
+    manifest:
+      apiVersion: v1
+      kind: Namespace
+      metadata:
+        name: group1
+        labels:
+          spaces.upbound.io/group: "true"
+      spec: {}
+```
+
+### Configure auth for provider-kubernetes
+
+Like any other Crossplane provider, _provider-kubernetes_ requires a valid [ProviderConfig][providerconfig] to authenticate with Upbound before interacting with its APIs. Follow the steps below to configure auth for a ProviderConfig on a control plane that you want to use to interact with Upbound resources.
+
+1. Define an environment variable for the name of your Upbound org account. Use `up org list` to retrieve this value.
+```shell
+export UPBOUND_ACCOUNT="<your-org>"
+```
+
+2. Create a [personal access token][personal-access-token] and store it as an environment variable.
+```shell
+export UPBOUND_TOKEN="<your-token>"
+```
+
+3. Log in to Upbound.
+```shell
+up login
+```
+
+4. Create a kubeconfig for the desired Cloud Space instance you want to interact with.
+```shell
+export CONTROLPLANE_CONFIG=/tmp/controlplane-kubeconfig
+KUBECONFIG=$CONTROLPLANE_CONFIG up ctx $UPBOUND_ACCOUNT/upbound-gcp-us-west-1 # Replace this path with whichever Cloud Space you want to communicate with.
+```
+
+5. On the control plane you want to use to interact with Upbound resources, create a secret containing the credentials:
+```shell
+kubectl -n crossplane-system create secret generic cluster-config --from-file=kubeconfig=$CONTROLPLANE_CONFIG
+kubectl -n crossplane-system create secret generic upbound-credentials --from-literal=token=$UPBOUND_TOKEN
+```
+
+6. Create a ProviderConfig that references the credentials created in the prior step. Create this resource in your control plane:
+```yaml
+apiVersion: kubernetes.crossplane.io/v1alpha1
+kind: ProviderConfig
+metadata:
+  name: default
+spec:
+  credentials:
+    source: Secret
+    secretRef:
+      namespace: crossplane-system
+      name: cluster-config
+      key: kubeconfig
+  identity:
+    type: UpboundTokens
+    source: Secret
+    secretRef:
+      name: upbound-credentials
+      namespace: crossplane-system
+      key: token
+```
+
+You can now create _Objects_ in the control plane that wrap Space APIs.
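+As a quick usage check, you can apply the manifest and list provider-kubernetes _Objects_ to confirm they become synced and ready. The filename below is hypothetical:
+
+```bash
+# Apply the Object wrapping the ControlPlane, then inspect its status
+kubectl apply -f my-controlplane-object.yaml
+kubectl get objects.kubernetes.crossplane.io
+```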
+
+[generate-a-kubeconfig]: /manuals/cli/concepts/contexts
+[control-plane-groups]: /spaces/concepts/groups
+[control-planes]: /spaces/concepts/control-planes
+[upbound-iam-resources]: /manuals/platform/concepts/identity-management
+[space-apis]: /reference/apis/spaces-api/v1_9
+[space-apis-1]: /reference/apis/spaces-api/v1_9
+[control-plane-groups-2]: /spaces/concepts/groups
+
+
+[argo-cd]: https://argo-cd.readthedocs.io/en/stable/
+[my-account-api-tokens]: https://accounts.upbound.io/settings/tokens
+[auto-respect-rbac-for-the-argo-cd-controller]: https://argo-cd.readthedocs.io/en/stable/operator-manual/declarative-setup/#auto-respect-rbac-for-controller
+[spec-writeconnectionsecrettoref]: /reference/apis/spaces-api/latest
+[auto-respect-rbac-for-the-argo-cd-controller-1]: https://argo-cd.readthedocs.io/en/stable/operator-manual/declarative-setup/#auto-respect-rbac-for-controller
+[provider-upbound]: https://marketplace.upbound.io/providers/upbound/provider-upbound
+[provider-kubernetes]: https://marketplace.upbound.io/providers/upbound/provider-kubernetes
+[provider-upbound-2]: https://marketplace.upbound.io/providers/upbound/provider-upbound
+[robots]: https://marketplace.upbound.io/providers/upbound/provider-upbound/v0.8.0/resources/iam.upbound.io/Robot/v1alpha1
+[teams]: https://marketplace.upbound.io/providers/upbound/provider-upbound/v0.8.0/resources/iam.upbound.io/Team/v1alpha1
+[repositories]: https://marketplace.upbound.io/providers/upbound/provider-upbound/v0.8.0/resources/repository.upbound.io/Repository/v1alpha1
+[permissions]: https://marketplace.upbound.io/providers/upbound/provider-upbound/v0.8.0/resources/repository.upbound.io/Permission/v1alpha1
+[provider-kubernetes-3]: https://marketplace.upbound.io/providers/upbound/provider-kubernetes
+[object]: https://marketplace.upbound.io/providers/upbound/provider-kubernetes/v0.17.0/resources/kubernetes.crossplane.io/Object/v1alpha2
+[watch]: https://kubernetes.io/docs/reference/using-api/api-concepts/#watch-bookmarks
+[providerconfig]: https://marketplace.upbound.io/providers/upbound/provider-kubernetes/v0.17.0/resources/kubernetes.crossplane.io/ProviderConfig/v1alpha1
+[personal-access-token]: https://accounts.upbound.io/settings/tokens
diff --git a/spaces_versioned_docs/version-v1.12/howtos/control-plane-topologies.md b/spaces_versioned_docs/version-v1.12/howtos/control-plane-topologies.md
new file mode 100644
index 000000000..9020e5a41
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.12/howtos/control-plane-topologies.md
@@ -0,0 +1,566 @@
+---
+title: Control Plane Topologies
+sidebar_position: 15
+description: Configure scheduling of composites to remote control planes
+---
+
+:::info API Version Information
+This guide is for the Control Plane Topology feature, which is in **private preview**. For interested customers with access to this feature, it applies to v1.12+.
+
+For related API specifications, see the [Spaces API Reference](../../../reference/apis/spaces-api/).
+:::
+
+:::important
+This feature is in private preview for select customers in Upbound Spaces. If you're interested in this deployment mode, please [contact us](https://www.upbound.io/support/contact).
+:::
+
+Upbound's _Control Plane Topology_ feature lets you build and deploy a platform
+of multiple control planes. These control planes work together for a unified platform
+experience.
+
+
+With the _Topology_ feature, you can install resource APIs that are
+reconciled by other control planes and configure the routing that occurs between
+control planes. You can also build compositions that reference other resources
+running on your control plane or elsewhere in Upbound.
+
+This guide explains how to use Control Plane Topology APIs to install and configure
+remote APIs, and build powerful compositions that reference other resources.
+
+## Benefits
+
+The Control Plane Topology feature provides the following benefits:
+
+* Decouple your platform architecture into independent offerings to improve your platform's software development lifecycle.
+* Install composite APIs from Configurations as CRDs which are fulfilled and reconciled by other control planes.
+* Route APIs to other control planes by configuring an _Environment_ resource, which defines a set of routable dimensions.
+
+## How it works
+
+
+Imagine the scenario where you want to let a user reference a subnet when creating a database instance. To your control plane, the `kind: database` and `kind: subnet` are independent resources. To you as the composition author, these resources have an important relationship. It may be that:
+
+- you don't want your user to ever be able to create a database without specifying a subnet.
+- you want to let them create a subnet when they create the database, if it doesn't exist.
+- you want to allow them to reuse a subnet that got created elsewhere or gets shared by another user.
+
+In each of these scenarios, you must resort to writing complex composition logic
+to handle each case. The problem is compounded when the resource exists in a
+context separate from the current control plane's context. Imagine a scenario
+where one control plane manages Database resources and a second control plane
+manages networking resources. With the _Topology_ feature, you can offload these
+concerns to Upbound machinery.
+
+
+![Control Plane Topology feature arch](/img/topology-arch.png)
+
+## Prerequisites
+
+Enable the Control Plane Topology feature in the Space you plan to run your control plane in:
+
+- Cloud Spaces: Not available yet
+- Connected Spaces: Space administrator must enable this feature
+- Disconnected Spaces: Space administrator must enable this feature
+
+
+
+## Compose resources with _ReferencedObjects_
+
+
+
+_ReferencedObject_ is a resource type available in an Upbound control plane that lets you reference other Kubernetes resources in Upbound.
+
+:::tip
+This feature is useful for composing resources that exist in a
+remote context, like another control plane. You can also use
+_ReferencedObjects_ to resolve references to any other Kubernetes object
+in the current control plane context. This could be a secret, another Crossplane
+resource, or more.
+:::
+
+### Declare the resource reference in your XRD
+
+To compose a _ReferencedObject_, you should start by adding a resource reference
+in your Composite Resource Definition (XRD). The convention for the resource
+reference follows the shape shown below:
+
+```yaml
+<kind>Ref:
+  type: object
+  properties:
+    apiVersion:
+      type: string
+      default: "<apiVersion>"
+      enum: [ "<apiVersion>" ]
+    kind:
+      type: string
+      default: "<kind>"
+      enum: [ "<kind>" ]
+    grants:
+      type: array
+      default: [ "Observe" ]
+      items:
+        type: string
+        enum: [ "Observe", "Create", "Update", "Delete", "*" ]
+    name:
+      type: string
+    namespace:
+      type: string
+  required:
+    - name
+```
+
+The `<kind>Ref` property name should reflect the kind of resource you want to reference.
+The `apiVersion` and `kind` should be the associated API version and kind of the resource you want to reference.
+
+The `name` and `namespace` strings are inputs that let your users specify the resource instance.
+
+#### Grants
+
+The `grants` field is a special array that lets you give users the power to influence the behavior of the referenced resource. You can configure which of the available grants you let your user select and which one is the default. Similar in behavior to [Crossplane management policies][crossplane-management-policies], each grant value does the following:
+
+- **Observe:** The composite may observe the state of the referenced resource.
+- **Create:** The composite may create the referenced resource if it doesn't exist.
+- **Update:** The composite may update the referenced resource.
+- **Delete:** The composite may delete the referenced resource.
+- **\*:** The composite has full control over the referenced resource.
+
+Here are some examples that show how it looks in practice:
+
+<details>
+<summary>Show example for defining the reference to another composite resource</summary>
+
+```yaml
+apiVersion: apiextensions.crossplane.io/v1
+kind: CompositeResourceDefinition
+metadata:
+  name: xsqlinstances.database.platform.upbound.io
+spec:
+  type: object
+  properties:
+    parameters:
+      type: object
+      properties:
+        networkRef:
+          type: object
+          properties:
+            apiVersion:
+              type: string
+              default: "networking.platform.upbound.io"
+              enum: [ "networking.platform.upbound.io" ]
+            grants:
+              type: array
+              default: [ "Observe" ]
+              items:
+                type: string
+                enum: [ "Observe" ]
+            kind:
+              type: string
+              default: "Network"
+              enum: [ "Network" ]
+            name:
+              type: string
+            namespace:
+              type: string
+          required:
+            - name
+```
+
+</details>
+ + +
+<details>
+<summary>Show example for defining the reference to a secret</summary>
+
+```yaml
+apiVersion: apiextensions.crossplane.io/v1
+kind: CompositeResourceDefinition
+metadata:
+  name: xsqlinstances.database.platform.upbound.io
+spec:
+  type: object
+  properties:
+    parameters:
+      type: object
+      properties:
+        secretRef:
+          type: object
+          properties:
+            apiVersion:
+              type: string
+              default: "v1"
+              enum: [ "v1" ]
+            grants:
+              type: array
+              default: [ "Observe" ]
+              items:
+                type: string
+                enum: [ "Observe", "Create", "Update", "Delete", "*" ]
+            kind:
+              type: string
+              default: "Secret"
+              enum: [ "Secret" ]
+            name:
+              type: string
+            namespace:
+              type: string
+          required:
+            - name
+```
+
+</details>
+ +### Manually add the jsonPath + +:::important +This step is a known limitation of the preview. We're working on tooling that +removes the need for authors to do this step. +::: + +During the preview timeframe of this feature, you must add an annotation by hand +to the XRD. In your XRD's `metadata.annotations`, set the +`references.upbound.io/schema` annotation. It should be a JSON string in the +following format: + +```json +{ + "apiVersion": "references.upbound.io/v1alpha1", + "kind": "ReferenceSchema", + "references": [ + { + "jsonPath": ".spec.parameters.secretRef", + "kinds": [ + { + "apiVersion": "v1", + "kind": "Secret" + } + ] + } + ] +} +``` + +Flatten this JSON into a string and set the annotation on your XRD. View the +example below for an illustration: + +
+<details>
+<summary>Show example setting the references.upbound.io/schema annotation</summary>
+
+```yaml
+apiVersion: apiextensions.crossplane.io/v1
+kind: CompositeResourceDefinition
+metadata:
+  name: xthings.networking.acme.com
+  annotations:
+    references.upbound.io/schema: '{"apiVersion":"references.upbound.io/v1alpha1","kind":"ReferenceSchema","references":[{"jsonPath":".spec.secretRef","kinds":[{"apiVersion":"v1","kind":"Secret"}]},{"jsonPath":".spec.configMapRef","kinds":[{"apiVersion":"v1","kind":"ConfigMap"}]}]}'
+```
+
+</details>
+ +
+<details>
+<summary>Show example for setting multiple references in the references.upbound.io/schema annotation</summary>
+
+```yaml
+apiVersion: apiextensions.crossplane.io/v1
+kind: CompositeResourceDefinition
+metadata:
+  name: xthings.networking.acme.com
+  annotations:
+    references.upbound.io/schema: '{"apiVersion":"references.upbound.io/v1alpha1","kind":"ReferenceSchema","references":[{"jsonPath":".spec.parameters.secretRef","kinds":[{"apiVersion":"v1","kind":"Secret"}]},{"jsonPath":".spec.parameters.configMapRef","kinds":[{"apiVersion":"v1","kind":"ConfigMap"}]}]}'
+```
+
+</details>
+
+
+You can use a VSCode extension like [vscode-pretty-json][vscode-pretty-json] to make this task easier.
+
+
+### Compose a _ReferencedObject_
+
+To pair with the resource reference declared in your XRD, you must compose the referenced resource. Use the _ReferencedObject_ resource type to bring the resource into your composition. _ReferencedObject_ has the following schema:
+
+```yaml
+apiVersion: references.upbound.io/v1alpha1
+kind: ReferencedObject
+spec:
+  managementPolicies:
+    - Observe
+  deletionPolicy: Orphan
+  composite:
+    apiVersion: <composite-apiVersion>
+    kind: <composite-kind>
+    name: <composite-name>
+    jsonPath: .spec.parameters.secretRef
+```
+
+The `spec.composite.apiVersion` and `spec.composite.kind` should match the API version and kind of the `compositeTypeRef` declared in your composition. The `spec.composite.name` should be the name of the composite resource instance.
+
+The `spec.composite.jsonPath` should be the path to the root of the resource ref you declared in your XRD.
+
+<details>
+<summary>Show example for composing a resource reference to a secret</summary>
+
+```yaml
+apiVersion: apiextensions.crossplane.io/v1
+kind: Composition
+metadata:
+  name: demo-composition
+spec:
+  compositeTypeRef:
+    apiVersion: networking.acme.com/v1alpha1
+    kind: XThing
+  mode: Pipeline
+  pipeline:
+    - step: patch-and-transform
+      functionRef:
+        name: crossplane-contrib-function-patch-and-transform
+      input:
+        apiVersion: pt.fn.crossplane.io/v1beta1
+        kind: Resources
+        resources:
+          - name: secret-ref-object
+            base:
+              apiVersion: references.upbound.io/v1alpha1
+              kind: ReferencedObject
+              spec:
+                managementPolicies:
+                  - Observe
+                deletionPolicy: Orphan
+                composite:
+                  apiVersion: networking.acme.com/v1alpha1
+                  kind: XThing
+                  name: TO_BE_PATCHED
+                  jsonPath: .spec.parameters.secretRef
+            patches:
+              - type: FromCompositeFieldPath
+                fromFieldPath: metadata.name
+                toFieldPath: spec.composite.name
+```
+
+</details>
+
+By declaring a resource reference in your XRD, Upbound handles resolution of the desired resource.
+
+## Deploy APIs
+
+To configure routing resource requests between control planes, you need to deploy APIs in at least two control planes.
+
+### Deploy into a service-level control plane
+
+Package the APIs you build into a Configuration package and deploy it on a
+control plane in an Upbound Space. In Upbound, it's common to refer to the
+control plane where the Configuration package is deployed as a **service-level
+control plane**. This control plane runs the controllers that process the API
+requests and provision underlying resources. In a later section, you learn how
+you can use _Topology_ features to [configure routing][configure-routing].
+
+### Deploy as Remote APIs on a platform control plane
+
+You should use the same package source as deployed in the **service-level
+control planes**, but this time deploy the Configuration in a separate control
+plane as a _RemoteConfiguration_. The _RemoteConfiguration_ installs Kubernetes
+CustomResourceDefinitions for the APIs defined in the Configuration package, but
+no controllers get deployed.
+
+### Install a _RemoteConfiguration_
+
+_RemoteConfiguration_ is a resource type available in Upbound managed control
+planes that acts like a Crossplane [Configuration][configuration]
+package. Unlike standard Crossplane Configurations, which install XRDs,
+compositions, and functions into a desired control plane, _RemoteConfigurations_
+install only the CRDs for claimable composite resource types.
+
+#### Install directly
+
+Install a _RemoteConfiguration_ by defining the following and applying it to
+your control plane:
+
+```yaml
+apiVersion: pkg.upbound.io/v1alpha1
+kind: RemoteConfiguration
+metadata:
+  name: <name>
+spec:
+  package: <package-source>
+```
+
+#### Declare as a project dependency
+
+You can declare _RemoteConfigurations_ as dependencies in your control plane's
+[project file][project-file]. Use the up CLI to add the dependency, providing
+the `--remote` flag:
+
+```bash
+up dep add <package> --remote
+```
+
+This command adds a declaration in the `spec.apiDependencies` stanza of your
+project's `upbound.yaml` as demonstrated below:
+
+```yaml
+apiVersion: meta.dev.upbound.io/v1alpha1
+kind: Project
+metadata:
+  name: service-controlplane
+spec:
+  apiDependencies:
+    - configuration: xpkg.upbound.io/upbound/remote-configuration
+      version: '>=v0.0.0'
+  dependsOn:
+    - provider: xpkg.upbound.io/upbound/provider-kubernetes
+      version: '>=v0.0.0'
+```
+
+Like a Configuration, a _RemoteConfigurationRevision_ gets created when the
+package gets installed on a control plane. Unlike Configurations, XRDs and
+compositions **don't** get installed by a _RemoteConfiguration_. Only the CRDs
+for claimable composite types get installed and Crossplane thereafter manages
+their lifecycle. You can tell when a CRD gets installed by a
+_RemoteConfiguration_ because it has the `internal.scheduling.upbound.io/remote:
+true` label:
+
+```yaml
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  name: things.networking.acme.com
+  labels:
+    internal.scheduling.upbound.io/remote: "true"
+```
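+To see which CRDs on a control plane came from a _RemoteConfiguration_, you can filter on that label; a quick check:
+
+```bash
+# List only the CRDs installed by RemoteConfigurations
+kubectl get crds -l internal.scheduling.upbound.io/remote=true
+```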
+
+## Use an _Environment_ to route resources
+
+_Environment_ is a resource type available in Upbound control planes that works
+in tandem with resources installed by _RemoteConfigurations_. _Environment_ is a
+namespace-scoped resource that lets you configure how to route remote resources
+to other control planes by a set of user-defined dimensions.
+
+### Define a routing dimension
+
+To establish a routing dimension between two control planes, you must do two
+things:
+
+1. Label the service control plane with the name and value of a dimension.
+2. Configure an environment on another control plane with a dimension matching the field and value of the service control plane.
+
+The example below demonstrates the creation of a service control plane with a
+`region` dimension:
+
+```yaml
+apiVersion: spaces.upbound.io/v1beta1
+kind: ControlPlane
+metadata:
+  labels:
+    dimension.scheduling.upbound.io/region: "us-east-1"
+  name: prod-1
+  namespace: default
+spec:
+```
+
+Upbound's Spaces controller keeps an inventory of all declared dimensions and
+listens for control planes to route to them.
+
+### Create an _Environment_
+
+Next, create an _Environment_ on a separate control plane, referencing the
+dimension from before. The example below demonstrates routing all remote
+resource requests in the `default` namespace of the control plane based on a
+single `region` dimension:
+
+```yaml
+apiVersion: scheduling.upbound.io/v1alpha1
+kind: Environment
+metadata:
+  name: default
+  namespace: default
+spec:
+  dimensions:
+    region: us-east-1
+```
+
+You can specify whichever dimensions you want. The example below demonstrates
+multiple dimensions:
+
+```yaml
+apiVersion: scheduling.upbound.io/v1alpha1
+kind: Environment
+metadata:
+  name: default
+  namespace: default
+spec:
+  dimensions:
+    region: us-east-1
+    env: prod
+    offering: databases
+```
+
+In order for the routing controller to match, _all_ dimensions must match for a
+given service control plane.
+
+You can specify dimension overrides on a per-resource group basis. This lets you
+configure default routing rules for a given _Environment_ and override routing
+on a per-offering basis.
+
+```yaml
+apiVersion: scheduling.upbound.io/v1alpha1
+kind: Environment
+metadata:
+  name: default
+  namespace: default
+spec:
+  dimensions:
+    region: us-east-1
+  resourceGroups:
+    - name: database.platform.upbound.io # database
+      dimensions:
+        region: "us-east-1"
+        env: "prod"
+        offering: "databases"
+    - name: networking.platform.upbound.io # networks
+      dimensions:
+        region: "us-east-1"
+        env: "prod"
+        offering: "networks"
+```
+
+### Confirm the configured route
+
+After you create an _Environment_ on a control plane, the routes selected get
+reported in the _Environment's_ `.status.resourceGroups`. This is illustrated
+below:
+
+```yaml
+apiVersion: scheduling.upbound.io/v1alpha1
+kind: Environment
+metadata:
+  name: default
+...
+status:
+  resourceGroups:
+    - name: database.platform.upbound.io # database
+      proposed:
+        controlPlane: ctp-1
+        group: default
+        space: upbound-gcp-us-central1
+        dimensions:
+          region: "us-east-1"
+          env: "prod"
+          offering: "databases"
+```
+
+If you don't see a response in the `.status.resourceGroups`, this indicates a
+match wasn't found or an error occurred while establishing routing.
+
+:::tip
+There's no limit to the number of control planes you can route to. You can also
+stack routing and form your own topology of control planes, with multiple layers
+of routing.
+:::
+
+### Limitations
+
+
+Routing from one control plane to another is currently scoped to control planes
+that exist in a single Space. You can't route resource requests to control
+planes that exist on a cross-Space boundary.
+
+
+[project-file]: /manuals/cli/howtos/project
+[contact-us]: https://www.upbound.io/usage/support/contact
+[crossplane-management-policies]: https://docs.crossplane.io/latest/managed-resources/managed-resources/#managementpolicies
+[vscode-pretty-json]: https://marketplace.visualstudio.com/items?itemName=chrismeyers.vscode-pretty-json
+[configure-routing]: #use-an-environment-to-route-resources
+[configuration]: https://docs.crossplane.io/latest/packages/providers
diff --git a/spaces_versioned_docs/version-v1.12/howtos/ctp-connector.md b/spaces_versioned_docs/version-v1.12/howtos/ctp-connector.md
new file mode 100644
index 000000000..b2cc48c49
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.12/howtos/ctp-connector.md
@@ -0,0 +1,508 @@
+---
+title: Control Plane Connector
+weight: 80
+description: A guide for how to connect a Kubernetes app cluster to a control plane in Upbound using the Control Plane connector feature
+plan: "standard"
+---
+
+
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+). Control Plane Connector functionality is consistent across versions.
+
+For related API specifications and resources, see the [Spaces API Reference](../../../reference/apis/spaces-api/).
+:::
+
+Control Plane Connector connects arbitrary Kubernetes application clusters outside of
+Upbound Spaces to your control planes running in Upbound Spaces.
+This lets you interact with your control plane's API from the app cluster. The claim APIs and the namespaced XR APIs
+you define via CompositeResourceDefinitions (XRDs) in the control plane are available in
+your app cluster alongside Kubernetes workload APIs like Pod. Control Plane Connector
+enables the same experience as a locally installed Crossplane.
+
+![control plane connector operations flow](/img/ConnectorFlow.png)
+
+### Control Plane Connector operations
+
+Control Plane Connector leverages the [Kubernetes API AggregationLayer][kubernetes-api-aggregationlayer]
+to create an extension API server and serve the claim APIs and the namespaced XR APIs in the control plane. It
+discovers the claim APIs and the namespaced XR APIs available in the control plane and registers corresponding
+APIService resources on the app cluster. Those APIService resources refer to the
+extension API server of Control Plane Connector.
+
+The claim APIs and the namespaced XR APIs are available in your Kubernetes cluster, just like all native
+Kubernetes APIs.
+
+The Control Plane Connector processes every request targeting the claim APIs and the namespaced XR APIs and makes the
+relevant requests to the connected control plane.
+
+Only the connected control plane stores and processes all claims and namespaced XRs created in the app
+cluster, eliminating any storage use on the application cluster. The Control Plane
+Connector provisions a target namespace in the control plane for the app cluster and stores
+all claims and namespaced XRs in this target namespace.
+
+For managing the claims and namespaced XRs, the Control Plane Connector creates a unique identifier for a
+resource by combining input parameters from claims, including:
+- `metadata.name`
+- `metadata.namespace`
+- your cluster name
+
+It employs SHA-256 hashing to generate a hash value and then extracts the first
+16 characters of that hash. This ensures the resulting identifier remains within
+the 64-character limit in Kubernetes.
+
+For instance, if a claim named `my-bucket` exists in the `test` namespace in
+`cluster-dev`, the system calculates the SHA-256 hash from
+`my-bucket-x-test-x-00000000-0000-0000-0000-000000000000` and takes the first 16
+characters. The control plane side then names the claim `claim-c603e518969b413e`.
+
+For namespaced XRs, the process is similar, only the prefix is different.
+The name becomes `nxr-c603e518969b413e`.
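+As an illustration, assuming the connector hashes exactly the concatenated string shown above, you can reproduce the 16-character suffix from a shell:
+
+```bash
+# Hash "<name>-x-<namespace>-x-<cluster-id>" and keep the first 16 hex characters
+echo -n "my-bucket-x-test-x-00000000-0000-0000-0000-000000000000" | sha256sum | cut -c1-16
+```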
+ + + +For instance, if a claim named `my-bucket` exists in the test namespace in +`cluster-dev`, the system calculates the SHA-256 hash from +`my-bucket-x-test-x-00000000-0000-0000-0000-000000000000` and takes the first 16 +characters. The control plane side then names the claim `claim-c603e518969b413e`. + +For namespaced XRs, the process is similar, only the prefix is different. +The name becomes `nxr-c603e518969b413e`. + + +### Installation + + + + + +Log in with the up CLI: + +```bash +up login +``` + +Connect your app cluster to a namespace in an Upbound control plane with `up controlplane connector install `. This command creates a user token and installs the Control Plane Connector to your cluster. It's recommended you create a values file called `connector-values.yaml` and provide the following below. Select the tab according to which environment your control plane is running in. + + + + + + +```yaml +upbound: + # This is your org account in Upbound e.g. the name displayed after executing `up org list` + account: + # This is a personal access token generated in the Upbound Console + token: + +spaces: + # If your control plane is running in Upbound's GCP Cloud Space, else use upbound-aws-us-east-1.spaces.upbound.io + host: "upbound-gcp-us-west-1.spaces.upbound.io" + insecureSkipTLSVerify: true + controlPlane: + # The name of the control plane you want the Connector to attach to + name: + # The control plane group the control plane resides in + group: + # The namespace within the control plane to sync claims from the app cluster to. NOTE: This must be created before you install the connector. + claimNamespace: +``` + + + + + +1. Create a [kubeconfig][kubeconfig] for the control plane. Update your Upbound context to the path for your desired control plane. +```ini +up login +up ctx /upbound-gcp-us-central-1/default/your-control-plane +up ctx . -f - > context.yaml +``` + +2. Write it to a secret in the cluster where you plan to +install the Control Plane Connector to. +```ini +kubectl create secret generic my-controlplane-kubeconfig --from-file=context.yaml +``` + +3. Reference this secret in the +`spaces.controlPlane.kubeconfigSecret` field below. + +```yaml +spaces: + controlPlane: + # The namespace within the control plane to sync claims from the app cluster to. NOTE: This must be created before you install the connector. + claimNamespace: + kubeconfigSecret: + name: my-controlplane-kubeconfig + key: kubeconfig +``` + + + + + + +Provide the values file above when you run the CLI command: + + +```bash {copy-lines="3"} +up controlplane connector install my-control-plane my-app-ns-1 --file=connector-values.yaml +``` + +The Claim APIs and the namespaced XR APIs from your control plane are now visible in the cluster. +You can verify this with `kubectl api-resources`. + +```bash +kubectl api-resources +``` + +### Uninstall + +Disconnect an app cluster that you prior installed the Control Plane Connector on by +running the following: + +```bash +up ctp connector uninstall +``` + +This command uninstalls the helm chart for the Control Plane Connector from an app +cluster. It moves any claims in the app cluster into the control plane +at the specified namespace. + +:::tip +Make sure your kubeconfig's current context is pointed at the app cluster where +you want to uninstall Control Plane Connector from. +::: + + + + +It's recommended you create a values file called `connector-values.yaml` and +provide the following below. 
Select the tab according to which environment your +control plane is running in. + + + + + + +```yaml +upbound: + # This is your org account in Upbound e.g. the name displayed after executing `up org list` + account: + # This is a personal access token generated in the Upbound Console + token: + +spaces: + # Upbound GCP US-West-1 upbound-gcp-us-west-1.spaces.upbound.io + # Upbound AWS US-East-1 upbound-aws-us-east-1.spaces.upbound.io + # Upbound GCP US-Central-1 upbound-gcp-us-central-1.spaces.upbound.io + host: "" + insecureSkipTLSVerify: true + controlPlane: + # The name of the control plane you want the Connector to attach to + name: + # The control plane group the control plane resides in + group: + # The namespace within the control plane to sync claims from the app cluster to. + # NOTE: This must be created before you install the connector. + claimNamespace: +``` + + + + +Create a [kubeconfig][kubeconfig-1] for the +control plane. Write it to a secret in the cluster where you plan to +install the Control Plane Connector to. Reference this secret in the +`spaces.controlPlane.kubeconfigSecret` field below. + +```yaml +spaces: + controlPlane: + # The namespace within the control plane to sync claims from the app cluster to. NOTE: This must be created before you install the connector. + claimNamespace: + kubeconfigSecret: + name: my-controlplane-kubeconfig + key: kubeconfig +``` + + + + + + +Provide the values file above when you `helm install` the Control Plane Connector: + + +```bash +helm install --wait mcp-connector oci://xpkg.upbound.io/spaces-artifacts/mcp-connector -n kube-system -f connector-values.yaml +``` +:::tip +Create an API token from the Upbound user account settings page in the console by following [these instructions][these-instructions]. +::: + +### Uninstall + +You can uninstall Control Plane Connector with Helm by running the following: + +```bash +helm uninstall mcp-connector +``` + + + + + +### Example usage + +This example creates a control plane using [Configuration +EKS][configuration-eks]. `KubernetesCluster` is +available as a claim API in your control plane. The following is [an +example][an-example] +object you can create in your control plane. + +```yaml +apiVersion: k8s.starter.org/v1alpha1 +kind: KubernetesCluster +metadata: + name: my-cluster + namespace: default +spec: + id: my-cluster + parameters: + nodes: + count: 3 + size: small + services: + operators: + prometheus: + version: "34.5.1" + writeConnectionSecretToRef: + name: my-cluster-kubeconfig +``` + +After connecting your Kubernetes app cluster to the control plane, you +can create the `KubernetesCluster` object in your app cluster. Although your +local cluster has an Object, the actual resources is in your managed control +plane inside Upbound. + +```bash {copy-lines="3"} +# Applying the claim YAML above. +# kubectl is set up to talk with your Kubernetes cluster. +kubectl apply -f claim.yaml + + +kubectl get claim -A +NAME SYNCED READY CONNECTION-SECRET AGE +my-cluster True True my-cluster-kubeconfig 2m +``` + +Once Kubernetes creates the object, view the console to see your object. + +![Claim by connector in console](/img/ClaimInConsole.png) + +You can interact with the object through your cluster just as if it +lives in your cluster. + +### Migration to control planes + +This guide details the migration of a Crossplane installation to Upbound-managed +control planes using the Control Plane Connector to manage claims on an application +cluster. 
+
+### Migration to control planes
+
+This guide details the migration of a Crossplane installation to Upbound-managed
+control planes using the Control Plane Connector to manage claims on an application
+cluster.
+
+![migration flow application cluster to control plane](/img/ConnectorMigration.png)
+
+#### Export all resources
+
+Before proceeding, ensure that you have set the correct kubecontext for your application
+cluster.
+
+```bash
+up controlplane migration export --pause-before-export --output=my-export.tar.gz --yes
+```
+
+This command performs the following:
+- Pauses all claim, composite, and managed resources before export.
+- Scans the control plane for resource types.
+- Exports Crossplane and native resources.
+- Archives the exported state into `my-export.tar.gz`.
+
+Example output:
+```bash
+Exporting control plane state...
+  ✓ Pausing all claim resources before export... 1 resources paused! ⏸️
+  ✓ Pausing all composite resources before export... 7 resources paused! ⏸️
+  ✓ Pausing all managed resources before export... 34 resources paused! ⏸️
+  ✓ Scanning control plane for types to export... 231 types found! 👀
+  ✓ Exporting 231 Crossplane resources...125 resources exported! 📤
+  ✓ Exporting 3 native resources...19 resources exported! 📤
+  ✓ Archiving exported state... archived to "my-export.tar.gz"! 📦
+
+Successfully exported control plane state!
+```
+
+#### Import all resources
+
+The exported resources get restored into the target control plane, which serves
+as the destination for the Control Plane Connector.
+
+
+Log into Upbound and select the correct context:
+
+```bash
+up login
+up ctx <org>/<space>/<group>
+up ctp create ctp-a
+```
+
+Output:
+```bash
+ctp-a created
+```
+
+Verify that the core Crossplane version on the application cluster matches the
+version on the new managed control plane.
+
+Use the following command to import the resources:
+```bash
+up controlplane migration import -i my-export.tar.gz \
+  --unpause-after-import \
+  --mcp-connector-cluster-id=my-appcluster \
+  --mcp-connector-claim-namespace=my-appcluster
+```
+
+This command:
+- Restores base resources
+- Waits for XRDs and packages to establish
+- Imports claim and XR resources
+- Finalizes the import and resumes managed resources
+
+Note: `--mcp-connector-cluster-id` needs to be unique per application cluster,
+and `--mcp-connector-claim-namespace` is the namespace the system creates
+during the import.
+
+Example output:
+```bash
+Importing control plane state...
+  ✓ Reading state from the archive... Done! 👀
+  ✓ Importing base resources... 56 resources imported!📥
+  ✓ Waiting for XRDs... Established! ⏳
+  ✓ Waiting for Packages... Installed and Healthy! ⏳
+  ✓ Importing remaining resources... 88 resources imported! 📥
+  ✓ Finalizing import... Done! 🎉
+  ✓ Unpausing managed resources ... Done! ▶️
+
+Successfully imported control plane state!
+```
+
+#### Verify imported claims
+
+The Control Plane Connector renames all claims and adds additional labels to them.
+
+```bash
+kubectl get claim -A
+```
+
+Example output:
+```bash
+NAMESPACE       NAME                                                         SYNCED   READY   CONNECTION-SECRET             AGE
+my-appcluster   cluster.aws.platformref.upbound.io/claim-e708ff592b974f51   True     True    platform-ref-aws-kubeconfig   3m17s
+```
+
+Inspect the labels:
+```bash
+kubectl get -n my-appcluster cluster.aws.platformref.upbound.io/claim-e708ff592b974f51 -o yaml | yq .metadata.labels
+```
+
+Example output:
+```bash
+mcp-connector.upbound.io/app-cluster: my-appcluster
+mcp-connector.upbound.io/app-namespace: default
+mcp-connector.upbound.io/app-resource-name: example
+```
+
+#### Clean up the app cluster
+
+Remove all Crossplane-related resources from the application cluster, including:
+
+- Managed Resources
+- Claims
+- Compositions
+- XRDs
+- Packages (Functions, Configurations, Providers)
+- Crossplane and all associated CRDs
+
+
+#### Install Control Plane Connector
+
+
+Follow the preceding installation guide and configure the `connector-values.yaml`:
+
+```yaml
+# NOTE: clusterID needs to match --mcp-connector-cluster-id used in the import on the managed control plane
+clusterID: my-appcluster
+upbound:
+  account: <org-account>
+  token: <api-token>
+
+spaces:
+  host: "<spaces-host>"
+  insecureSkipTLSVerify: true
+  controlPlane:
+    name: <control-plane-name>
+    group: <group>
+    # NOTE: This is the --mcp-connector-claim-namespace used during the import to the control plane
+    claimNamespace: <claim-namespace>
+```
+Once the Control Plane Connector installs, verify that resources exist in the application
+cluster:
+
+```bash
+kubectl api-resources | grep platform
+```
+
+Example output:
+```bash
+awslbcontrollers   aws.platform.upbound.io/v1alpha1       true   AWSLBController
+podidentities      aws.platform.upbound.io/v1alpha1       true   PodIdentity
+sqlinstances       aws.platform.upbound.io/v1alpha1       true   SQLInstance
+clusters           aws.platformref.upbound.io/v1alpha1    true   Cluster
+osss               observe.platform.upbound.io/v1alpha1   true   Oss
+apps               platform.upbound.io/v1alpha1           true   App
+```
+
+The claims from the control plane get restored to the application cluster:
+
+```bash
+kubectl get claim -A
+```
+
+Example output:
+```bash
+NAMESPACE   NAME                                         SYNCED   READY   CONNECTION-SECRET             AGE
+default     cluster.aws.platformref.upbound.io/example   True     True    platform-ref-aws-kubeconfig   127m
+```
+
+With this guide, you migrated your Crossplane installation to
+Upbound managed control planes. This ensures seamless integration with your
+application cluster using the Control Plane Connector.
+
+### Connect multiple app clusters to a control plane
+
+Claims are stored in a unique namespace in the Upbound control plane.
+Each connected cluster gets its own namespace in the control plane.
+
+![Multi-cluster architecture with control plane connector](/img/ConnectorMulticlusterArch.png)
+
+There's no limit on the number of clusters connected to a single control plane.
+Control plane operators can see all their infrastructure in a central control
+plane.
+
+Without using control planes and Control Plane Connector, users have to install
+Crossplane and providers on each cluster. Each cluster requires provider
+configuration with the necessary credentials. With a single control plane and
+multiple clusters connected through Upbound tokens, you don't need to give out
+any cloud credentials to the clusters.
+
+
+[kubeconfig]: /manuals/cli/howtos/context-config/#generate-a-kubeconfig-for-a-control-plane-in-a-group
+[kubeconfig-1]: /spaces/concepts/control-planes/#connect-directly-to-your-control-plane
+[these-instructions]: /manuals/console/#create-a-personal-access-token
+[kubernetes-api-aggregationlayer]: https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation/
+[configuration-eks]: https://github.com/upbound/configuration-eks
+[an-example]: https://github.com/upbound/configuration-eks/blob/9f86b6d/.up/examples/cluster.yaml
diff --git a/spaces_versioned_docs/version-v1.12/howtos/debugging-a-ctp.md b/spaces_versioned_docs/version-v1.12/howtos/debugging-a-ctp.md
new file mode 100644
index 000000000..521271e40
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.12/howtos/debugging-a-ctp.md
@@ -0,0 +1,128 @@
+---
+title: Debugging issues on a control plane
+sidebar_position: 70
+description: A guide for how to debug resources on a control plane running in Upbound.
+---
+
+This guide provides troubleshooting guidance for how to identify and fix issues on a control plane.
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+). Debugging methods are consistent across versions.
+
+For related control plane specifications, see the [Spaces API Reference](../../../reference/apis/spaces-api/).
+:::
+
+## Start from Upbound Console
+
+The Upbound [Console][console] has a built-in control plane explorer experience
+that surfaces status and events for the resources on your control plane. The
+explorer is claim-based. Resources in this view exist only if they exist in the
+reference chain originating from a claim. This view is a helpful starting point
+if you are attempting to debug an issue originating from a claim.
+
+:::tip
+If you directly create Crossplane Managed Resources (`MR`s) or Composite
+Resources (`XR`s), they won't render in the explorer.
+:::
+
+### Example
+
+The example below uses the control plane explorer view to inspect why a claim for an EKS Cluster isn't healthy.
+
+#### Check the health status of claims
+
+From the API type card, two claims branch from it: one shows a healthy green icon, while the other shows an unhealthy red icon.
+
+![Use control plane explorer view to see status of claims](/img/debug-overview.png)
+
+Select `More details` on the unhealthy claim card and Upbound shows details for the claim.
+
+![Use control plane explorer view to see details of claims](/img/debug-claim-more-details.png)
+
+Looking at the three events for this claim:
+
+- **ConfigureCompositeResource**: this event indicates Upbound created the claimed Composite Resource (`XR`).
+
+- **BindCompositeResource**: this indicates the Composite Resource (`XR`) that's being "claimed" isn't ready yet. A claim doesn't show `HEALTHY` until the XR it references is ready.
+
+- **ConfigureCompositeResource**: the error saying, `cannot apply composite resource...the object has been modified; please apply your changes to the latest version and try again` is a generic event from Crossplane resources. It's safe to ignore this error.
+
+Next, look at the `status` field of the rendered YAML for the resource.
+
+![Use control plane explorer view to see status details of claims](/img/debug-claim-status.png)
+
+The status reports a similar message as the event stream: this claim is waiting for a Composite Resource to be ready. Based on this, investigate the Composite Resource referenced by this claim next.
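+If you prefer a terminal, you can perform the same inspection with `kubectl`
+against the control plane. A minimal sketch, using a hypothetical claim kind
+`cluster.aws.platformref.upbound.io` named `example` in the `default` namespace:
+
+```bash
+# Show the claim's conditions and recent events, as the explorer does
+kubectl describe cluster.aws.platformref.upbound.io/example -n default
+
+# Or pull just the status conditions from the rendered YAML
+kubectl get cluster.aws.platformref.upbound.io/example -n default -o yaml | yq .status.conditions
+```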
+
+#### Check the health status of the Composite Resource
+
+The control plane explorer only shows the claim cards by default. Selecting the claim card renders the rest of the Crossplane resource tree associated with the selected claim.
+
+The previous claim expands into this screenshot:
+
+![Use control plane explorer view to expand tree of claim](/img/debug-claim-expansion.png)
+
+This renders the XR referenced by the claim (along with all its references). You can see the XR is showing the same unhealthy status icon in its card. Notice the XR itself has two nested XRs. One of the nested XRs shows a healthy green icon on its card, while the other shows an unhealthy red icon. Like the claim, a Composite Resource doesn't show healthy until all referenced resources also show healthy.
+
+#### Inspecting Managed Resources
+
+Selecting `more details` on one of the unhealthy Managed Resources shows the following:
+
+![Use control plane explorer view to view events for an MR](/img/debug-mr-event.png)
+
+This event reveals it's unhealthy because it's waiting on a reference to another Managed Resource. Searching the rendered YAML of the MR for this resource shows the following:
+
+![Use control plane explorer view to view status for an MR](/img/debug-mr-status.png)
+
+The rendered YAML shows this MR is referencing a sibling MR that shares the same controller. The same parent XR created both of these managed resources. Inspect the sibling MR to see what its status is.
+
+![Use control plane explorer view to view status for a sibling MR](/img/debug-mr-dependency-status.png)
+
+The sibling MR event stream shows the Provider processed the resource create request. Ignore the `CannotInitializeManagedResource` event. EKS clusters can take 15 minutes or more to provision in AWS. There's no root cause issue here: all the resources are still provisioning. Waiting longer and then looking at the control plane explorer again shows all resources are healthy. For reference, below is an example status field for a resource that's healthy and provisioned.
+
+```yaml
+...
+status:
+  atProvider:
+    id: team-b-app-cluster-bhwfb-hwtgs-20230403135452772300000008
+  conditions:
+    - lastTransitionTime: '2023-04-03T13:56:35Z'
+      reason: Available
+      status: 'True'
+      type: Ready
+    - lastTransitionTime: '2023-04-03T13:54:02Z'
+      reason: ReconcileSuccess
+      status: 'True'
+      type: Synced
+    - lastTransitionTime: '2023-04-03T13:54:53Z'
+      reason: Success
+      status: 'True'
+      type: LastAsyncOperation
+    - lastTransitionTime: '2023-04-03T13:54:53Z'
+      reason: Finished
+      status: 'True'
+      type: AsyncOperation
+```
+
+### Control plane explorer limitations
+
+The control plane explorer view is currently designed around claims. The control plane explorer doesn't inspect other Crossplane resources. To inspect other Crossplane resources, use the `up` CLI.
+
+Some examples of Crossplane resources that require the `up` CLI:
+
+- Managed Resources that aren't associated with a claim
+- Composite Resources that aren't associated with a claim
+- The status of _deleting_ resources
+- ProviderConfigs
+- Provider events
+
+## Use direct CLI access
+
+If your preference is to use a terminal instead of a GUI, Upbound supports direct access to the API server of the control plane. Use [`up ctx`][up-ctx] to connect directly to your control plane.
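+As a sketch of that flow, with the path segments as placeholders for your own
+organization, Space, group, and control plane:
+
+```bash
+# Point kubectl at the control plane's API server
+up ctx "<org>/<space>/<group>/<control-plane>"
+
+# Inspect resources the explorer doesn't show, using Crossplane's
+# resource categories
+kubectl get managed      # all Managed Resources, claimed or not
+kubectl get composite    # all Composite Resources
+kubectl describe providers.pkg.crossplane.io   # Provider status and events
+```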
+
+
+[console]: /manuals/console/upbound-console
+[up-ctx]: /reference/cli-reference
diff --git a/spaces_versioned_docs/version-v1.12/howtos/managed-service.md b/spaces_versioned_docs/version-v1.12/howtos/managed-service.md
new file mode 100644
index 000000000..40b983a76
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.12/howtos/managed-service.md
@@ -0,0 +1,23 @@
+---
+title: Managed Upbound control planes
+description: "Learn about the managed service capabilities of a Space"
+sidebar_position: 10
+---
+
+Control planes in Upbound are fully isolated [Upbound Crossplane][uxp] instances
+that Upbound manages for you. This means Upbound handles:
+
+- the underlying lifecycle of infrastructure (compute, memory, and storage) required to power your instance.
+- scaling of the infrastructure.
+- the maintenance of the core Upbound Crossplane components that make up a control plane.
+
+This lets users focus on building their APIs and operating their control planes,
+while Upbound handles the rest. Each control plane has its own dedicated API
+server connecting users to their control plane.
+
+## Learn about Upbound control planes
+
+Read the [concept][ctp-concept] documentation to learn about Upbound control planes.
+
+[uxp]: /manuals/uxp/overview
+[ctp-concept]: /spaces/concepts/control-planes
\ No newline at end of file
diff --git a/spaces_versioned_docs/version-v1.12/howtos/mcp-connector-guide.md b/spaces_versioned_docs/version-v1.12/howtos/mcp-connector-guide.md
new file mode 100644
index 000000000..8a3866d07
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.12/howtos/mcp-connector-guide.md
@@ -0,0 +1,169 @@
+---
+title: Consume control plane APIs in an app cluster with control plane connector
+sidebar_position: 99
+description: A tutorial to configure a Kubernetes app cluster to consume control
+  plane APIs with the control plane connector
+---
+
+In this tutorial, you learn how to configure a Kubernetes app cluster to communicate with a control plane in an Upbound self-hosted Space.
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Control plane connector functionality is consistent across versions.
+
+For related API specifications and resources, see the [Spaces API Reference](../../../../../reference/apis/spaces-api/).
+:::
+
+The [control plane connector][control-plane-connector] bridges your Kubernetes application clusters, running outside of Upbound, to your control planes running in Upbound. This allows you to interact with your control plane's API right from the app cluster. The claim APIs you define via `CompositeResourceDefinitions` are available alongside Kubernetes workload APIs like `Pod`. In effect, control plane connector provides the same experience as a locally installed Crossplane.
+
+## Prerequisites
+
+To complete this tutorial, you need the following:
+
+- Have already deployed an Upbound Space.
+- Have already deployed a Kubernetes cluster (referred to as `app cluster`).
+
+## Create a control plane
+
+Create a new control plane in your self-hosted Space. Run the following command in a terminal:
+
+```bash
+up ctp create my-control-plane
+```
+
+Once the control plane is ready, connect to it.
+
+```bash
+up ctp connect my-control-plane
+```
+
+For convenience, install an Upbound [platform reference Configuration][platform-reference-configuration] from the Marketplace. For production scenarios, replace this with your own Crossplane Configurations or Compositions.
+
+```bash
+up ctp configuration install xpkg.upbound.io/upbound/platform-ref-aws:v1.4.0
+```
+
+## Fetch the control plane's connection details
+
+Run the following command in a terminal:
+
+```shell
+kubectl get secret kubeconfig-my-control-plane -n default -o jsonpath='{.data.kubeconfig}' | base64 -d > kubeconfig-my-control-plane.yaml
+```
+
+This command saves the kubeconfig for the control plane to a file in your working directory.
+
+## Install control plane connector in your app cluster
+
+Switch contexts to your Kubernetes app cluster. To install the control plane connector in your app cluster, you must first provide a secret containing your control plane's kubeconfig at install-time. Run the following command in a terminal:
+
+:::important
+Make sure the following commands are executed against your **app cluster**, not your control plane.
+:::
+
+```bash
+kubectl create secret generic kubeconfig-my-control-plane -n kube-system --from-file=kubeconfig=./kubeconfig-my-control-plane.yaml
+```
+
+Set the environment variable below to configure which namespace _in your control plane_ you wish to sync the app cluster's claims to.
+
+```shell
+export CONNECTOR_CTP_NAMESPACE=app-cluster-1
+```
+
+Install the Control Plane Connector in the app cluster and point it to your control plane.
+
+```bash
+up ctp connector install my-control-plane $CONNECTOR_CTP_NAMESPACE --control-plane-secret=kubeconfig-my-control-plane
+```
+
+## Inspect your app cluster
+
+After you install the Control Plane Connector in the app cluster, you can see the APIs that live on the control plane. You can confirm this is the case by running the following command on your app cluster:
+
+```bash {copy-lines="1"}
+kubectl api-resources | grep upbound
+
+# The output should look like this:
+sqlinstances   aws.platform.upbound.io/v1alpha1       true   SQLInstance
+clusters       aws.platformref.upbound.io/v1alpha1    true   Cluster
+osss           observe.platform.upbound.io/v1alpha1   true   Oss
+apps           platform.upbound.io/v1alpha1           true   App
+```
+
+## Claim a database instance on your app cluster
+
+Create a database claim against the `SQLInstance` API and observe the resources your control plane creates. Apply the following resources to your app cluster:
+
+```yaml
+cat < --output
+   ```
+
+   The command exports your existing Crossplane control plane configuration/state into an archive file.
+
+:::note
+By default, the export command doesn't make any changes to your existing Crossplane control plane state, leaving it intact. Use the `--pause-before-export` flag to pause the reconciliation on managed resources before exporting the archive file.
+
+This safety mechanism ensures the control plane you migrate state to doesn't assume ownership of resources before you're ready.
+:::
+
+2. Use the control plane [create command][create-command] to create a managed
+control plane in Upbound:
+
+   ```bash
+   up controlplane create my-controlplane
+   ```
+
+3. Use [`up ctx`][up-ctx] to connect to the control plane created in the previous step:
+
+   ```bash
+   up ctx "///my-controlplane"
+   ```
+
+   The command configures your local `kubeconfig` to connect to the control plane.
+
+4. Run the following command to import the archive file into the control plane:
+
+   ```bash
+   up controlplane migration import --input
+   ```
+
+:::note
+By default, the import command leaves the control plane in an inactive state by pausing the reconciliation on managed
+resources. This pause gives you an opportunity to review the imported configuration/state before activating the control plane.
+Use the `--unpause-after-import` flag to change the default behavior and activate the control plane immediately after
+importing the archive file.
+:::
+
+5. Review and validate the imported configuration/state. When you are ready, activate your managed
+   control plane by running the following command:
+
+   ```bash
+   kubectl annotate managed --all crossplane.io/paused-
+   ```
+
+   At this point, you can delete the source Crossplane control plane.
+
+## CLI options
+
+### Filtering
+
+The migration tool captures the state of a Control Plane. The only filtering
+supported is by Kubernetes namespace and Kubernetes resource type.
+
+You can exclude namespaces using the `--exclude-namespaces` CLI option. This prevents the CLI from including unwanted resources in the export.
+
+```bash
+--exclude-namespaces=kube-system,kube-public,kube-node-lease,local-path-storage,...
+
+# A list of specific namespaces to exclude from the export. Defaults to 'kube-system', 'kube-public','kube-node-lease', and 'local-path-storage'.
+```
+
+You can exclude Kubernetes resource types by using the `--exclude-resources` CLI option:
+
+```bash
+--exclude-resources=EXCLUDE-RESOURCES,...
+
+# A list of resource types to exclude from the export in "resource.group" format. No resources are excluded by default.
+```
+
+For example, to exclude the CRDs installed by Crossplane functions (since they're not needed):
+
+```bash
+up controlplane migration export \
+  --exclude-resources=gotemplates.gotemplating.fn.crossplane.io,kclinputs.template.fn.crossplane.io
+```
+
+:::warning
+You must specify resource names in lowercase "resource.group" format (for example, `gotemplates.gotemplating.fn.crossplane.io`). Using only the resource kind (for example, `GoTemplate`) isn't supported.
+:::
+
+:::tip Function Input CRDs
+
+Exclude function input CRDs (`inputs.template.fn.crossplane.io`, `resources.pt.fn.crossplane.io`, `gotemplates.gotemplating.fn.crossplane.io`, `kclinputs.template.fn.crossplane.io`) from migration exports. Upbound automatically recreates these resources during import. Function input CRDs typically have owner references to function packages and may have restricted RBAC access. Upbound installs these CRDs during the import when function packages are restored.
+
+:::
+
+After export, you can also edit the archive file to only include the necessary resources.
+
+### Export non-Crossplane resources
+
+Use the `--include-extra-resources=` CLI option to select other CRD types to include in the export.
+
+### Set the kubecontext
+
+Currently `--context` isn't supported in the migration CLI. You should be able to use the `--kubeconfig` CLI option to use a file that's set to the correct context. For example:
+
+```bash
+up controlplane migration export --kubeconfig
+```
+
+Use this in tandem with `up ctx` to export a control plane's kubeconfig:
+
+```bash
+up ctx --kubeconfig ~/.kube/config
+
+# To list the current context
+up ctx . --kubeconfig ~/.kube/config
+```
+
+## Export archive
+
+The migration CLI exports an archive upon successful completion. Below is an example export of a control plane that excludes several CRD types and skips the confirmation prompt. A file gets written to the working directory, unless you select another output file:
+
+<details>
+
+<summary>View the example export</summary>
+
+```bash
+$ up controlplane migration export --exclude-resources=gotemplates.gotemplating.fn.crossplane.io,kclinputs.template.fn.crossplane.io --yes
+Exporting control plane state...
+✓ Scanning control plane for types to export... 121 types found! 👀
+✓ Exporting 121 Crossplane resources...60 resources exported! 📤
+✓ Exporting 3 native resources...8 resources exported! 📤
+✓ Archiving exported state... archived to "xp-state.tar.gz"! 📦
+```
+
+</details>
+
+
+When an export occurs, a file named `xp-state.tar.gz` gets created in the working directory by default. You can unzip the file; all the contents of the export are plain-text YAML files.
+
+- Each CRD (for example `vpcs.ec2.aws.upbound.io`) gets its own directory
+which contains:
+  - A `metadata.yaml` file that contains Kubernetes Object Metadata and
+a list of Kubernetes Categories the resource belongs to
+  - A `cluster` directory that contains YAML manifests for all resources provisioned
+using the CRD.
+
+Sample contents for a Cluster with a single `XNetwork` Composite from
+[configuration-aws-network][configuration-aws-network] is shown below:
+
+<details>
+
+<summary>View the example cluster content</summary>
+
+```bash
+├── compositionrevisions.apiextensions.crossplane.io
+│   ├── cluster
+│   │   ├── kcl.xnetworks.aws.platform.upbound.io-4ca6a8a.yaml
+│   │   └── xnetworks.aws.platform.upbound.io-9859a34.yaml
+│   └── metadata.yaml
+├── configurations.pkg.crossplane.io
+│   ├── cluster
+│   │   └── configuration-aws-network.yaml
+│   └── metadata.yaml
+├── deploymentruntimeconfigs.pkg.crossplane.io
+│   ├── cluster
+│   │   └── default.yaml
+│   └── metadata.yaml
+├── export.yaml
+├── functions.pkg.crossplane.io
+│   ├── cluster
+│   │   ├── crossplane-contrib-function-auto-ready.yaml
+│   │   ├── crossplane-contrib-function-go-templating.yaml
+│   │   └── crossplane-contrib-function-kcl.yaml
+│   └── metadata.yaml
+├── internetgateways.ec2.aws.upbound.io
+│   ├── cluster
+│   │   └── borrelli-backup-test-xgl4q.yaml
+│   └── metadata.yaml
+├── mainroutetableassociations.ec2.aws.upbound.io
+│   ├── cluster
+│   │   └── borrelli-backup-test-t2qh7.yaml
+│   └── metadata.yaml
+├── namespaces
+│   └── cluster
+│       ├── crossplane-system.yaml
+│       ├── default.yaml
+│       └── upbound-system.yaml
+├── providerconfigs.aws.upbound.io
+│   ├── cluster
+│   │   └── default.yaml
+│   └── metadata.yaml
+├── providerconfigusages.aws.upbound.io
+│   ├── cluster
+│   │   ├── 0a2a3ec6-ef13-45f9-9cf0-63af7f4a6b6b.yaml
+...redacted
+│   │   └── f7092b0f-3a78-4bfe-82c8-57e5085a9b11.yaml
+│   └── metadata.yaml
+├── providers.pkg.crossplane.io
+│   ├── cluster
+│   │   ├── upbound-provider-aws-ec2.yaml
+│   │   └── upbound-provider-family-aws.yaml
+│   └── metadata.yaml
+├── routes.ec2.aws.upbound.io
+│   ├── cluster
+│   │   └── borrelli-backup-test-dt9cj.yaml
+│   └── metadata.yaml
+├── routetableassociations.ec2.aws.upbound.io
+│   ├── cluster
+│   │   ├── borrelli-backup-test-mr2sd.yaml
+│   │   ├── borrelli-backup-test-ngq5h.yaml
+│   │   ├── borrelli-backup-test-nrkgg.yaml
+│   │   └── borrelli-backup-test-wq752.yaml
+│   └── metadata.yaml
+├── routetables.ec2.aws.upbound.io
+│   ├── cluster
+│   │   └── borrelli-backup-test-dv4mb.yaml
+│   └── metadata.yaml
+├── secrets
+│   └── namespaces
+│       ├── crossplane-system
+│       │   ├── cert-token-signing-gateway-pub.yaml
+│       │   ├── mxp-hostcluster-certs.yaml
+│       │   ├── package-pull-secret.yaml
+│       │   └── xgql-tls.yaml
+│       └── upbound-system
+│           └── aws-creds.yaml
+├── securitygrouprules.ec2.aws.upbound.io
+│   ├── cluster
+│   │   ├── borrelli-backup-test-472f4.yaml
+│   │   └── borrelli-backup-test-qftmw.yaml
+│   └── metadata.yaml
+├── securitygroups.ec2.aws.upbound.io
+│   ├── cluster
+│   │   └── borrelli-backup-test-w5jch.yaml
+│   └── metadata.yaml
+├── storeconfigs.secrets.crossplane.io
+│   ├── cluster
+│   │   └── default.yaml
+│   └── metadata.yaml
+├── subnets.ec2.aws.upbound.io
+│   ├── cluster
+│   │   ├── borrelli-backup-test-8btj6.yaml
+│   │   ├── borrelli-backup-test-gbmrm.yaml
+│   │   ├── borrelli-backup-test-m7kh7.yaml
+│   │   └── borrelli-backup-test-nttt5.yaml
+│   └── metadata.yaml
+├── vpcs.ec2.aws.upbound.io
+│   ├── cluster
+│   │   └── borrelli-backup-test-7hwgh.yaml
+│   └── metadata.yaml
+└── xnetworks.aws.platform.upbound.io
+    ├── cluster
+    │   └── borrelli-backup-test.yaml
+    └── metadata.yaml
+
+43 directories, 87 files
+```
+
+</details>
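+To browse an archive like this yourself, extract it into a scratch directory.
+A minimal sketch, assuming the default `xp-state.tar.gz` output name:
+
+```bash
+# Unpack the export archive
+mkdir xp-state && tar -xzf xp-state.tar.gz -C xp-state
+
+# Reproduce a listing like the one above (requires the tree utility)
+tree xp-state
+```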
+
+
+The `export.yaml` file contains metadata about the export, including the configuration of the export, Crossplane information, and what's included in the export bundle.
+
+<details>
+
+<summary>View the export</summary>
+
+```yaml
+version: v1alpha1
+exportedAt: 2025-01-06T17:39:53.173222Z
+options:
+  excludedNamespaces:
+    - kube-system
+    - kube-public
+    - kube-node-lease
+    - local-path-storage
+  includedResources:
+    - namespaces
+    - configmaps
+    - secrets
+  excludedResources:
+    - gotemplates.gotemplating.fn.crossplane.io
+    - kclinputs.template.fn.crossplane.io
+crossplane:
+  distribution: universal-crossplane
+  namespace: crossplane-system
+  version: 1.17.3-up.1
+  featureFlags:
+    - --enable-provider-identity
+    - --enable-environment-configs
+    - --enable-composition-functions
+    - --enable-usages
+stats:
+  total: 68
+  nativeResources:
+    configmaps: 0
+    namespaces: 3
+    secrets: 5
+  customResources:
+    amicopies.ec2.aws.upbound.io: 0
+    amilaunchpermissions.ec2.aws.upbound.io: 0
+    amis.ec2.aws.upbound.io: 0
+    availabilityzonegroups.ec2.aws.upbound.io: 0
+    capacityreservations.ec2.aws.upbound.io: 0
+    carriergateways.ec2.aws.upbound.io: 0
+    compositeresourcedefinitions.apiextensions.crossplane.io: 0
+    compositionrevisions.apiextensions.crossplane.io: 2
+    compositions.apiextensions.crossplane.io: 0
+    configurationrevisions.pkg.crossplane.io: 0
+    configurations.pkg.crossplane.io: 1
+...redacted
+```
+
+</details>
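+If you only need this metadata, you can read `export.yaml` straight out of the
+archive without unpacking everything. A minimal sketch, assuming GNU or BSD tar
+and the `yq` tool:
+
+```bash
+# Stream export.yaml from the archive and show the export statistics
+tar -xOzf xp-state.tar.gz export.yaml | yq .stats
+```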
+
+### Skipped resources
+
+Along with the resources excluded via CLI options, the following resources aren't
+included in the backup:
+
+- The `kube-root-ca.crt` ConfigMap, since this is cluster-specific
+- Resources directly managed via Helm (resources from ArgoCD's Helm implementation, which templates
+Helm resources and then applies them, do get included in the backup). The migration creates the exclusion list by looking for:
+  - Any Resource with the label `"app.kubernetes.io/managed-by" == "Helm"`
+  - Kubernetes Secrets with the label prefix `helm.sh/release`. For example, `helm.sh/release.v1`
+- Resources installed via a Crossplane package. These have an `ownerReference` with
+a prefix `pkg.crossplane.io`. The expectation is that during import, the Crossplane Package Manager bears responsibility for installing the resources.
+- Crossplane Locks: Any `Lock.pkg.crossplane.io` resource isn't included in the
+export.
+
+## Restore
+
+The following is an example of a successful import run. At the end of the import, all Managed Resources are in a paused state.
+
+<details>
+
+<summary>View the migration import</summary>
+
+```bash
+$ up controlplane migration import
+Importing control plane state...
+✓ Reading state from the archive... Done! 👀
+✓ Importing base resources... 18 resources imported! 📥
+✓ Waiting for XRDs... Established! ⏳
+✓ Waiting for Packages... Installed and Healthy! ⏳
+✓ Importing remaining resources... 50 resources imported! 📥
+✓ Finalizing import... Done! 🎉
+```
+
+</details>
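+Before reviewing the imported state, you can confirm which Managed Resources are
+still paused. A minimal sketch using standard `kubectl` output formatting:
+
+```bash
+# List managed resources with the value of their crossplane.io/paused annotation
+kubectl get managed \
+  -o custom-columns='NAME:.metadata.name,PAUSED:.metadata.annotations.crossplane\.io/paused'
+```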
+
+Your scenario may involve migrating resources which already exist through other automation on the platform. When executing an import in these circumstances, the importer applies the new manifests to the cluster. If the resource already exists, the restore sets fields to what's in the backup.
+
+The importer restores all resources in the export archive. Managed Resources get imported with the `crossplane.io/paused: "true"` annotation set. Use the `--unpause-after-import` CLI argument to automatically un-pause resources that got
+paused during backup, or remove the annotation manually.
+
+### Restore order
+
+The importer restores resources based on their Kubernetes types; the restore order doesn't account for parent/child relationships.
+
+Because Crossplane Composites create new Managed Resources if not present on the cluster, all
+Claims, Composites, and Managed Resources get imported in a paused state. You can un-pause them after the restore completes.
+
+The first step of import is installing Base Resources into the cluster. These resources (such as
+packages and XRDs) must be ready before proceeding with the import.
+Base Resources are:
+
+- Kubernetes Resources
+  - ConfigMaps
+  - Namespaces
+  - Secrets
+- Crossplane Resources
+  - ControllerConfigs: `controllerconfigs.pkg.crossplane.io`
+  - DeploymentRuntimeConfigs: `deploymentruntimeconfigs.pkg.crossplane.io`
+  - StoreConfigs: `storeconfigs.secrets.crossplane.io`
+- Crossplane Packages
+  - Providers: `providers.pkg.crossplane.io`
+  - Functions: `functions.pkg.crossplane.io`
+  - Configurations: `configurations.pkg.crossplane.io`
+
+Restore waits for the base resources to be `Ready` before moving on to the next step. Next, restore walks through the archive and restores all the manifests present.
+
+During import, the `crossplane.io/paused` annotation gets added to Managed Resources, Claims,
+and Composites.
+
+To manually un-pause managed resources after an import, remove the annotation by running:
+
+```bash
+kubectl annotate managed --all crossplane.io/paused-
+```
+
+You can also run import again with the `--unpause-after-import` flag to remove the annotations.
+
+```bash
+up controlplane migration import --unpause-after-import
+```
+
+### Restoring resource status
+
+The importer applies the status of all resources during import. The importer determines if the CRD version has a status field defined based on the stored CRD version.
+
+
+[cli-command]: /reference/cli-reference
+[up-cli]: /reference/cli-reference
+[up-cli-1]: /manuals/cli/overview
+[create-command]: /reference/cli-reference
+[up-ctx]: /reference/cli-reference
+[configuration-aws-network]: https://marketplace.upbound.io/configurations/upbound/configuration-aws-network
diff --git a/spaces_versioned_docs/version-v1.12/howtos/observability.md b/spaces_versioned_docs/version-v1.12/howtos/observability.md
new file mode 100644
index 000000000..8fc5c3278
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.12/howtos/observability.md
@@ -0,0 +1,395 @@
+---
+title: Observability
+sidebar_position: 50
+description: A guide for how to use the integrated observability pipeline feature
+  in a Space.
+plan: "enterprise"
+---
+
+
+
+This guide explains how to configure observability in Upbound Spaces. Upbound
+provides integrated observability features built on
+[OpenTelemetry][opentelemetry] to collect, process, and export logs, metrics,
+and traces.
+
+Upbound Spaces offers two levels of observability:
+
+1. **Space-level observability** - Observes the cluster infrastructure where Spaces software is installed (Self-Hosted only)
+2. **Control plane observability** - Observes workloads running within individual control planes
+
+
+
+:::info API Version Information & Version Selector
+This guide applies to all supported versions (v1.9-v1.15+). Observability features evolved:
+
+- **v1.9-v1.12**: SharedTelemetryConfig available (alpha until v1.11)
+- **v1.11+**: Observability promoted to stable with logs export support
+- **v1.14+**: Both space-level and control-plane observability GA
+
+**View API Reference for Your Version**:
+
+| Version | Status | Link |
+|---------|--------|------|
+| v1.15 | Latest | [Spaces API v1.15](../../../next/reference/apis/spaces-api/) |
+| v1.14 | Supported | [Spaces API v1.14](../../../1.14/reference/apis/spaces-api/) |
+| v1.13 | Supported | [Spaces API v1.13](../../../1.13/reference/apis/spaces-api/) |
+| v1.12 | Supported | [Spaces API v1.12](../../../1.12/reference/apis/spaces-api/) |
+| v1.11 | Supported | [Spaces API v1.11](../../../1.11/reference/apis/spaces-api/) |
+| v1.10 | Limited | [Spaces API v1.10](../../../1.10/reference/apis/spaces-api/) |
+| v1.9 | EOL | [Spaces API v1.9](../../../1.9/reference/apis/spaces-api/) |
+:::
+
+:::important
+**Space-level observability** (available since v1.6.0, GA in v1.14.0):
+- Disabled by default
+- Requires manual enablement and configuration
+- Self-Hosted Spaces only
+
+**Control plane observability** (available since v1.13.0, GA in v1.14.0):
+- Enabled by default
+- No additional configuration required
+:::
+
+
+
+## Prerequisites
+
+**Control plane observability** is enabled by default. No additional setup is
+required.
+
+### Self-hosted Spaces
+
+1. **Enable the observability feature** when installing Spaces:
+
+   ```bash
+   up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
+     ...
+     --set "observability.enabled=true"
+   ```
+
+   Set `features.alpha.observability.enabled=true` instead if you're using a Spaces version
+   before `v1.14.0`.
+
+2. **Install OpenTelemetry Operator** (required for Space-level observability):
+
+   ```bash
+   kubectl apply -f https://github.com/open-telemetry/opentelemetry-operator/releases/download/v0.116.0/opentelemetry-operator.yaml
+   ```
+
+   :::important
+   If running Spaces `v1.11` or later, use OpenTelemetry Operator `v0.110.0` or later due to breaking changes.
+   :::
+
+## Space-level observability
+
+Space-level observability is only available for self-hosted Spaces and allows
+administrators to observe the cluster infrastructure.
+
+### Configuration
+
+Configure Space-level observability using the `spacesCollector` value in your
+Spaces Helm chart:
+
+```yaml
+observability:
+  spacesCollector:
+    config:
+      exporters:
+        otlphttp:
+          endpoint: ""
+          headers:
+            api-key: YOUR_API_KEY
+      exportPipeline:
+        logs:
+          - otlphttp
+        metrics:
+          - otlphttp
+```
+
+This configuration exports metrics and logs from:
+
+- Crossplane installation
+- Spaces infrastructure (controller, API, router, etc.)
+
+### Router metrics
+
+The Spaces router uses Envoy as a reverse proxy and automatically exposes
+metrics when you enable Space-level observability. These metrics provide
+visibility into:
+
+- Traffic routing to control planes and services
+- Request status codes, timeouts, and retries
+- Circuit breaker state preventing cascading failures
+- Client connection patterns and request volume
+- Request latency (P50, P95, P99)
+
+For more information about available metrics, example queries, and how to enable
+this feature, see the [Space-level observability guide][space-level-o11y].
+
+## Control plane observability
+
+Control plane observability collects telemetry data from workloads running
+within individual control planes using `SharedTelemetryConfig` resources.
+
+The pipeline deploys [OpenTelemetry Collectors][opentelemetry-collectors] per
+control plane, defined by a `SharedTelemetryConfig` at the group level.
+Collectors pass data to external observability backends.
+
+:::important
+From Spaces `v1.13` and beyond, telemetry only includes user-facing control
+plane workloads (Crossplane, providers, functions).
+
+Self-hosted users can include system workloads (`api-server`, `etcd`) by setting
+`observability.collectors.includeSystemTelemetry=true` in Helm.
+:::
+
+:::important
+Spaces validates `SharedTelemetryConfig` resources before applying them by
+sending telemetry to the configured exporters. For self-hosted Spaces, ensure that
+`spaces-controller` can reach the exporter endpoints.
+:::
+
+### `SharedTelemetryConfig`
+
+`SharedTelemetryConfig` is a group-scoped custom resource that defines telemetry
+configuration for control planes.
+
+#### New Relic example
+
+```yaml
+apiVersion: observability.spaces.upbound.io/v1alpha1
+kind: SharedTelemetryConfig
+metadata:
+  name: newrelic
+  namespace: default
+spec:
+  controlPlaneSelector:
+    labelSelectors:
+      - matchLabels:
+          org: foo
+  exporters:
+    otlphttp:
+      endpoint: https://otlp.nr-data.net
+      headers:
+        api-key: YOUR_API_KEY
+  exportPipeline:
+    metrics: [otlphttp]
+    traces: [otlphttp]
+    logs: [otlphttp]
+```
+
+#### Datadog example
+
+```yaml
+apiVersion: observability.spaces.upbound.io/v1alpha1
+kind: SharedTelemetryConfig
+metadata:
+  name: datadog
+  namespace: default
+spec:
+  controlPlaneSelector:
+    labelSelectors:
+      - matchLabels:
+          org: foo
+  exporters:
+    datadog:
+      api:
+        site: ${DATADOG_SITE}
+        key: ${DATADOG_API_KEY}
+  exportPipeline:
+    metrics: [datadog]
+    traces: [datadog]
+    logs: [datadog]
+```
+
+### Control plane selection
+
+Use `spec.controlPlaneSelector` to specify which control planes should use the
+telemetry configuration.
+
+#### Label-based selection
+
+```yaml
+spec:
+  controlPlaneSelector:
+    labelSelectors:
+      - matchLabels:
+          environment: production
+```
+
+#### Expression-based selection
+
+```yaml
+spec:
+  controlPlaneSelector:
+    labelSelectors:
+      - matchExpressions:
+          - { key: environment, operator: In, values: [production,staging] }
+```
+
+#### Name-based selection
+
+```yaml
+spec:
+  controlPlaneSelector:
+    names:
+      - controlplane-dev
+      - controlplane-staging
+      - controlplane-prod
+```
+
+### Manage sensitive data
+
+:::important
+Available from Spaces `v1.10`
+:::
+
+Store sensitive data in Kubernetes secrets and reference them in your
+`SharedTelemetryConfig`:
+
+1. **Create the secret:**
+
+   ```bash
+   kubectl create secret generic sensitive -n \
+     --from-literal=apiKey='YOUR_API_KEY'
+   ```
+
+2. **Reference in SharedTelemetryConfig:**
+   ```yaml
+   apiVersion: observability.spaces.upbound.io/v1alpha1
+   kind: SharedTelemetryConfig
+   metadata:
+     name: newrelic
+   spec:
+     configPatchSecretRefs:
+       - name: sensitive
+         key: apiKey
+         path: exporters.otlphttp.headers.api-key
+     controlPlaneSelector:
+       labelSelectors:
+         - matchLabels:
+             org: foo
+     exporters:
+       otlphttp:
+         endpoint: https://otlp.nr-data.net
+         headers:
+           api-key: dummy # Replaced by secret value
+     exportPipeline:
+       metrics: [otlphttp]
+       traces: [otlphttp]
+       logs: [otlphttp]
+   ```
+
+### Telemetry processing
+
+:::important
+Available from Spaces `v1.11`
+:::
+
+Configure processing pipelines to transform telemetry data using the [transform
+processor][transform-processor].
+
+#### Add labels to metrics
+
+```yaml
+spec:
+  processors:
+    transform:
+      error_mode: ignore
+      metric_statements:
+        - context: datapoint
+          statements:
+            - set(attributes["newLabel"], "someLabel")
+  processorPipeline:
+    metrics: [transform]
+```
+
+#### Remove labels
+
+From metrics:
+```yaml
+processors:
+  transform:
+    metric_statements:
+      - context: datapoint
+        statements:
+          - delete_key(attributes, "kubernetes_namespace")
+```
+
+From logs:
+```yaml
+processors:
+  transform:
+    log_statements:
+      - context: log
+        statements:
+          - delete_key(attributes, "log.file.name")
+```
+
+#### Modify log messages
+
+```yaml
+processors:
+  transform:
+    log_statements:
+      - context: log
+        statements:
+          - set(attributes["original"], body)
+          - set(body, Concat(["log message:", body], " "))
+```
+
+### Monitor status
+
+Check the status of your `SharedTelemetryConfig`:
+
+```bash
+kubectl get stc
+NAME      SELECTED   FAILED   PROVISIONED   AGE
+datadog   1          0        1             63s
+```
+
+- `SELECTED`: Number of control planes selected
+- `FAILED`: Number of control planes that failed provisioning
+- `PROVISIONED`: Number of successfully running collectors
+
+For detailed status information:
+
+```bash
+kubectl describe stc
+```
+
+## Supported exporters
+
+Both Space-level and control plane observability support:
+
+- `datadog` - Datadog integration
+- `otlphttp` - General-purpose exporter (used by New Relic, among others)
+- `debug` - For troubleshooting
+
+## Considerations
+
+- **Control plane conflicts**: Each control plane can only use one `SharedTelemetryConfig`. Multiple configs selecting the same control plane conflict.
+- **Custom collector image**: Both Space-level and control plane observability use the same custom OpenTelemetry Collector image with supported exporters.
+- **Resource scope**: `SharedTelemetryConfig` resources are group-scoped, allowing different telemetry configurations per group.
+
+For more advanced configuration options, review the [Helm chart
+reference][helm-chart-reference] and [OpenTelemetry Transformation Language
+documentation][opentelemetry-transformation-language].
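+
+If a pipeline doesn't behave as expected, a quick way to verify telemetry flows
+at all is the `debug` exporter, which logs telemetry inside the collector
+instead of shipping it to a backend. A minimal sketch, assuming the same
+`SharedTelemetryConfig` schema as the examples above and a hypothetical
+`env: dev` control plane label:
+
+```yaml
+apiVersion: observability.spaces.upbound.io/v1alpha1
+kind: SharedTelemetryConfig
+metadata:
+  name: debug
+  namespace: default
+spec:
+  controlPlaneSelector:
+    labelSelectors:
+      - matchLabels:
+          env: dev
+  exporters:
+    # Logs received telemetry in the collector's own output for troubleshooting
+    debug: {}
+  exportPipeline:
+    metrics: [debug]
+    logs: [debug]
+```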
+
+
+[opentelemetry]: https://opentelemetry.io/
+[opentelemetry-collectors]: https://opentelemetry.io/docs/collector/
+[opentelemetry-collector-configuration]: https://opentelemetry.io/docs/collector/configuration/#exporters
+[opentelemetry-operator]: https://opentelemetry.io/docs/kubernetes/operator/
+[transform-processor]: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/transformprocessor/README.md
+[opentelemetry-transformation-language]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl
+[space-level-o11y]: /spaces/howtos/self-hosted/space-observability
+[helm-chart-reference]: /reference/helm-reference
+[opentelemetry-transformation-language-functions]: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/ottl/ottlfuncs/README.md
+[opentelemetry-transformation-language-contexts]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl/contexts
+[guide-on-ottl]: https://betterstack.com/community/guides/observability/ottl/#a-brief-overview-of-the-ottl-grammar
diff --git a/spaces_versioned_docs/version-v1.12/howtos/query-api.md b/spaces_versioned_docs/version-v1.12/howtos/query-api.md
new file mode 100644
index 000000000..78163de2f
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.12/howtos/query-api.md
@@ -0,0 +1,320 @@
+---
+title: Query API
+sidebar_position: 40
+description: Use the `up` CLI to query objects and resources
+---
+
+
+
+Upbound's Query API allows users to inspect objects and resources within their control planes. The read-only `up alpha query` and `up alpha get` CLI commands let you gather information on your control planes quickly and efficiently. These commands follow the [`kubectl` conventions][kubectl-conventions] for filtering, sorting, and retrieving information from your Space.
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+). Query API is available in Cloud Spaces since v1.6 and Self-Hosted since v1.8.
+
+For detailed deployment procedures, see [Deploy Query API infrastructure - Self-Hosted](./self-hosted/query-api.md).
+:::
+
+
+
+## Using the Query API
+
+The Query API allows you to retrieve control plane information faster than traditional `kubectl` commands. This feature lets you debug your Crossplane resources with the CLI or within the Upbound Console's enhanced management views.
+
+### Query within a single control plane
+
+Use the `up alpha get` command to retrieve information about objects within the current control plane context. This command uses the **Query** endpoint and targets the current control plane.
+
+To switch between control plane groups, use the [`up ctx`][up-ctx] command and change to your desired context with the interactive prompt, or specify your control plane path:
+
+```shell
+up ctx ///
+```
+
+You can query within a single control plane with the [`up alpha get` command][up-alpha-get-command] to return more information about a given object within the current kubeconfig context.
+
+The `up alpha get` command can query resource types and aliases to return objects in your control plane.
+
+```shell
+up alpha get managed
+NAME                             READY   SYNCED   AGE
+custom-account1-5bv5j-sa         True    True     15m
+custom-cluster1-bq6dk-net        True    True     15m
+custom-account1-5bv5j-subnet     True    True     15m
+custom-cluster1-bq6dk-nodepool   True    True     15m
+custom-cluster1-bq6dk-cluster    True    True     15m
+custom-account1-5bv5j-net        True    True     15m
+custom-cluster1-bq6dk-subnet     True    True     15m
+custom-cluster1-bq6dk-sa         True    True     15m
+```
+
+The [`-A` flag][a-flag] queries for objects across all namespaces.
+
+```shell
+up alpha get configmaps -A
+NAMESPACE           NAME                                                   AGE
+crossplane-system   uxp-versions-config                                    18m
+crossplane-system   universal-crossplane-config                            18m
+crossplane-system   kube-root-ca.crt                                       18m
+upbound-system      kube-root-ca.crt                                       18m
+kube-system         kube-root-ca.crt                                       18m
+kube-system         coredns                                                18m
+default             kube-root-ca.crt                                       18m
+kube-node-lease     kube-root-ca.crt                                       18m
+kube-public         kube-root-ca.crt                                       18m
+kube-system         kube-apiserver-legacy-service-account-token-tracking   18m
+kube-system         extension-apiserver-authentication                     18m
+```
+
+To query for [multiple resource types][multiple-resource-types], add the name or alias for each resource as a comma-separated string.
+
+```shell
+up alpha get providers,providerrevisions
+
+NAME                                                                              HEALTHY   REVISION   IMAGE                                                    STATE    DEP-FOUND   DEP-INSTALLED   AGE
+providerrevision.pkg.crossplane.io/crossplane-contrib-provider-nop-ecc25c121431   True      1          xpkg.upbound.io/crossplane-contrib/provider-nop:v0.2.1   Active                               18m
+NAME                                                          INSTALLED   HEALTHY   PACKAGE                                                  AGE
+provider.pkg.crossplane.io/crossplane-contrib-provider-nop    True        True      xpkg.upbound.io/crossplane-contrib/provider-nop:v0.2.1   18m
+```
+
+### Query multiple control planes
+
+The [`up alpha query` command][up-alpha-query-command] returns a list of objects of any kind within all the control planes in your Space. This command uses either the **SpaceQuery** or **GroupQuery** endpoints depending on your query scope. The `-A` flag switches the query context from the group level to the entire Space.
+
+The `up alpha query` command accepts resources and aliases to return objects across your group or Space.
+
+```shell
+up alpha query crossplane
+
+NAME                                                                                         ESTABLISHED   OFFERED   AGE
+compositeresourcedefinition.apiextensions.crossplane.io/xnetworks.platform.acme.co           True          True      20m
+compositeresourcedefinition.apiextensions.crossplane.io/xaccountscaffolds.platform.acme.co   True          True      20m
+
+
+NAME                                                                  XR-KIND            XR-APIVERSION               AGE
+composition.apiextensions.crossplane.io/xaccountscaffolds.platform.acme.co   XAccountScaffold   platform.acme.co/v1alpha1   20m
+composition.apiextensions.crossplane.io/xnetworks.platform.acme.co           XNetwork           platform.acme.co/v1alpha1   20m
+
+
+NAME                                                                                  REVISION   XR-KIND            XR-APIVERSION               AGE
+compositionrevision.apiextensions.crossplane.io/xaccountscaffolds.platform.acme.co-5ae9da5   1   XAccountScaffold   platform.acme.co/v1alpha1   20m
+compositionrevision.apiextensions.crossplane.io/xnetworks.platform.acme.co-414ce80           1   XNetwork           platform.acme.co/v1alpha1   20m
+
+NAME                                                     READY   SYNCED   AGE
+nopresource.nop.crossplane.io/custom-cluster1-bq6dk-subnet   True    True     19m
+nopresource.nop.crossplane.io/custom-account1-5bv5j-net      True    True     19m
+
+## Output truncated...
+
+```
+
+
+The [`--sort-by` flag][sort-by-flag] lets you sort the returned results. You can construct your sort order as a JSONPath expression that resolves to a string or integer.
+
+
+```shell
+up alpha query crossplane -A --sort-by="{.metadata.name}"
+
+CONTROLPLANE   NAME                                                AGE
+default/test   deploymentruntimeconfig.pkg.crossplane.io/default   10m
+
+CONTROLPLANE   NAME                                        AGE   TYPE         DEFAULT-SCOPE
+default/test   storeconfig.secrets.crossplane.io/default   10m   Kubernetes   crossplane-system
+```
+
+To query for multiple resource types, add the name or alias for each resource as a comma-separated string.
+
+```shell
+up alpha query namespaces,configmaps -A
+
+CONTROLPLANE   NAME                          AGE
+default/test   namespace/upbound-system      15m
+default/test   namespace/crossplane-system   15m
+default/test   namespace/kube-system         16m
+default/test   namespace/default             16m
+
+CONTROLPLANE   NAMESPACE           NAME                                    AGE
+default/test   crossplane-system   configmap/uxp-versions-config           15m
+default/test   crossplane-system   configmap/universal-crossplane-config   15m
+default/test   crossplane-system   configmap/kube-root-ca.crt              15m
+default/test   upbound-system      configmap/kube-root-ca.crt              15m
+default/test   kube-system         configmap/coredns                       16m
+default/test   default             configmap/kube-root-ca.crt              16m
+
+## Output truncated...
+
+```
+
+The Query API also allows you to return resource types with specific [label columns][label-columns].
+
+```shell
+up alpha query composite -A --label-columns=crossplane.io/claim-namespace
+
+CONTROLPLANE          NAME                                         SYNCED   READY   COMPOSITION                     AGE   CLAIM-NAMESPACE
+query-api-test/test   xeks.argo.discover.upbound.io/test-k7xbk     False            xeks.argo.discover.upbound.io   51d   default
+
+CONTROLPLANE                                     NAME                                                                         EXTERNALDNS   SYNCED   READY   COMPOSITION                                    AGE   CLAIM-NAMESPACE
+spaces-clusters/controlplane-query-api-test-spaces-playground   xexternaldns.externaldns.platform.upbound.io/spaces-cluster-0-xd8v2-lhnl7   6.34.2        True     True    xexternaldns.externaldns.platform.upbound.io   19d   default
+default/query-api-test                                          xexternaldns.externaldns.platform.upbound.io/space-awg-kine-f7dxq-nkk2q    6.34.2        True     True    xexternaldns.externaldns.platform.upbound.io   55d   default
+
+## Output truncated...
+
+```
+
+### Query API request format
+
+The CLI can also show the underlying API request for your query with the [`--debug` flag][debug-flag]. This flag returns the API request spec for your query.
+
+```shell
+up alpha query composite -A -d
+
+apiVersion: query.spaces.upbound.io/v1alpha1
+kind: SpaceQuery
+metadata:
+  creationTimestamp: null
+spec:
+  cursor: true
+  filter:
+    categories:
+      - composite
+    controlPlane: {}
+  limit: 500
+  objects:
+    controlPlane: true
+    table: {}
+  page: {}
+```
+
+For more complex queries, you can interact with the Query API like a Kubernetes-style API by creating a query and applying it with `kubectl`.
+
+The example below queries for `claim` resources in every control plane, sorted from oldest to newest, and returns specific information about those claims.
+
+```yaml
+apiVersion: query.spaces.upbound.io/v1alpha1
+kind: SpaceQuery
+spec:
+  filter:
+    categories:
+      - claim
+  order:
+    - creationTimestamp: Asc
+  cursor: true
+  count: true
+  objects:
+    id: true
+    controlPlane: true
+    object:
+      kind: true
+      apiVersion: true
+      metadata:
+        name: true
+        uid: true
+      spec:
+        containers:
+          image: true
+```
+
+The Query API is served by the Spaces API endpoint. You can use `up ctx` to
+switch the kubectl context to the Spaces API ingress. After that, you can use
+`kubectl create` and receive the `response` for your query parameters.
+
+
+```shell
+kubectl create -f spaces-query.yaml -o yaml
+```
+
+Your `response` should look similar to this example:
+
+```yaml {copy-lines="none"}
+apiVersion: query.spaces.upbound.io/v1alpha1
+kind: SpaceQuery
+metadata:
+  creationTimestamp: "2024-08-08T14:41:46Z"
+  name: default
+response:
+  count: 3
+  cursor:
+    next: ""
+    page: 0
+    pageSize: 100
+    position: 0
+  objects:
+  - controlPlane:
+      name: query-api-test
+      namespace: default
+    id: default/query-api-test/823b2781-7e70-4d91-a6f0-ee8f455d67dc
+    object:
+      apiVersion: spaces.platform.upbound.io/v1alpha1
+      kind: Space
+      metadata:
+        name: space-awg-kine
+        resourceVersion: "803868"
+        uid: 823b2781-7e70-4d91-a6f0-ee8f455d67dc
+      spec: {}
+  - controlPlane:
+      name: test-1
+      namespace: test
+    id: test/test-1/08a573dd-851a-42cc-a600-b6f6ed37ee8d
+    object:
+      apiVersion: argo.discover.upbound.io/v1alpha1
+      kind: EKS
+      metadata:
+        name: test-1
+        resourceVersion: "4270320"
+        uid: 08a573dd-851a-42cc-a600-b6f6ed37ee8d
+      spec: {}
+  - controlPlane:
+      name: controlplane-query-api-test-spaces-playground
+      namespace: spaces-clusters
+    id: spaces-clusters/controlplane-query-api-test-spaces-playground/b5a6770f-1f85-4d09-8990-997c84bd4159
+    object:
+      apiVersion: spaces.platform.upbound.io/v1alpha1
+      kind: Space
+      metadata:
+        name: spaces-cluster-0
+        resourceVersion: "1408337"
+        uid: b5a6770f-1f85-4d09-8990-997c84bd4159
+      spec: {}
+```
+
+## Query API Explorer
+
+import CrdDocViewer from '@site/src/components/CrdViewer';
+
+### Query
+
+The Query resource allows you to query objects in a single control plane.
+
+### GroupQuery
+
+The GroupQuery resource allows you to query objects across a group of control planes.
+
+### SpaceQuery
+
+The SpaceQuery resource allows you to query objects across all control planes in a space.
+
+
+[documentation]: /spaces/howtos/self-hosted/query-api
+[up-ctx]: /reference/cli-reference
+[up-alpha-get-command]: /reference/cli-reference
+[a-flag]: /reference/cli-reference
+[multiple-resource-types]: /reference/cli-reference
+[up-alpha-query-command]: /reference/cli-reference
+[sort-by-flag]: /reference/cli-reference
+[label-columns]: /reference/cli-reference
+[debug-flag]: /reference/cli-reference
+[kubectl-conventions]: https://kubernetes.io/docs/reference/kubectl/generated/kubectl_get/
diff --git a/spaces_versioned_docs/version-v1.12/howtos/secrets-management.md b/spaces_versioned_docs/version-v1.12/howtos/secrets-management.md
new file mode 100644
index 000000000..88e730ae5
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.12/howtos/secrets-management.md
@@ -0,0 +1,719 @@
+---
+title: Secrets Management
+sidebar_position: 20
+description: A guide for how to configure synchronizing external secrets into control
+  planes in a Space.
+---
+
+Upbound's _Shared Secrets_ is a built-in secrets management feature that
+provides an integrated way to manage secrets across your platform. It allows you
+to store sensitive data like passwords and certificates for your managed control
+planes as secrets in an external secret store.
+
+This feature is a wrapper around the [External Secrets Operator (ESO)][external-secrets-operator-eso] that pulls secrets from external vaults and distributes them across your platform.
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+). The SharedSecretStore and SharedExternalSecret CRDs have been stable since v1.9.
+
+For API specifications and configuration details, see the [Spaces API Reference](../../../reference/apis/spaces-api/).
+:::
+
+## Benefits
+
+The Shared Secrets feature allows you to:
+
+* Access secrets from a variety of external secret stores without operational overhead
+* Configure synchronization for multiple control planes in a group
+* Store and manage all your secrets centrally
+* Use Shared Secrets across all Upbound environments (Cloud and Disconnected Spaces)
+* Synchronize secrets across groups of control planes while maintaining clear security boundaries
+* Manage secrets at scale programmatically while ensuring proper isolation and access control
+
+## Understanding the Architecture
+
+The Shared Secrets feature uses a hierarchical approach to centrally manage
+secrets and effectively control their distribution.
+
+![Shared Secrets workflow diagram](/img/shared-secrets-workflow.png)
+
+1. The flow begins at the group level, where you define your secret sources and distribution rules
+2. These rules automatically create corresponding resources in your control planes
+3. In each control plane, specific namespaces receive the secrets
+4. Changes at the group level automatically propagate through this chain
+
+## Component configuration
+
+Upbound Shared Secrets consists of two components:
+
+1. **SharedSecretStore**: Defines connections to external secret providers
+2. **SharedExternalSecret**: Specifies which secrets to synchronize and where
+
+### Connect to an External Vault
+
+The `SharedSecretStore` component is the connection point to your external
+secret vaults. It provisions ClusterSecretStore resources into control planes
+within the group.
+
+#### AWS Secrets Manager
+
+In this example, you'll create a `SharedSecretStore` to connect to AWS
+Secrets Manager in `us-west-2`. Then apply access to all control planes labeled with
+`environment: production`, and make these secrets available in the `default` and
+`crossplane-system` namespaces.
+
+You can configure access to AWS Secrets Manager using static credentials or
+workload identity.
+
+:::important
+While the underlying ESO API supports more auth methods, static credentials are currently the only supported auth method in Cloud Spaces.
+:::
+
+##### Static credentials
+
+1. Use the AWS CLI to create access credentials.
+
+2. Save the access credentials in a text file:
+```ini
+# Create a text file with AWS credentials
+cat > aws-credentials.txt << EOF
+[default]
+aws_access_key_id =
+aws_secret_access_key =
+EOF
+```
+
+3. Next, store the access credentials in a secret in the namespace you want to have access to the `SharedSecretStore`. The secret's key names must match the `secretRef` keys in the store configuration below:
+```shell
+kubectl create secret \
+  generic aws-credentials \
+  -n default \
+  --from-literal=access-key-id="<aws_access_key_id>" \
+  --from-literal=secret-access-key="<aws_secret_access_key>"
+```
+
+4. Create a `SharedSecretStore` custom resource file called `secretstore.yaml`.
+   Paste the following configuration:
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: aws-secrets
+spec:
+  # Define which control planes should receive this configuration
+  controlPlaneSelector:
+    labelSelectors:
+      - matchLabels:
+          environment: production
+
+  # Define which namespaces within those control planes can access secrets
+  namespaceSelector:
+    names:
+      - default
+      - crossplane-system
+
+  # Configure the connection to AWS Secrets Manager
+  provider:
+    aws:
+      service: SecretsManager
+      region: us-west-2
+      auth:
+        secretRef:
+          accessKeyIDSecretRef:
+            name: aws-credentials
+            key: access-key-id
+          secretAccessKeySecretRef:
+            name: aws-credentials
+            key: secret-access-key
+```
+
+##### Workload Identity with IRSA
+
+You can also use AWS IAM Roles for Service Accounts (IRSA) depending on your
+organization's needs:
+
+1. Ensure you have deployed the Spaces software into an IRSA-enabled EKS cluster.
+2. Follow the AWS instructions to create an IAM OIDC provider with your EKS OIDC
+   provider URL.
+3. Determine the Spaces-generated `controlPlaneID` of your control plane:
+```shell
+kubectl get controlplane -o jsonpath='{.status.controlPlaneID}'
+```
+
+4. Create an IAM trust policy in your AWS account to match the control plane.
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Principal": {
+        "Federated": "arn:aws:iam:::oidc-provider/"
+      },
+      "Action": "sts:AssumeRoleWithWebIdentity",
+      "Condition": {
+        "StringEquals": {
+          ":aud": "sts.amazonaws.com",
+          ":sub": [
+            "system:serviceaccount:mxp--system:external-secrets-controller"]
+        }
+      }
+    }
+  ]
+}
+```
+
+5. Update your Spaces deployment to annotate the SharedSecrets service account
+   with the role ARN.
+```shell
+up space upgrade ... \
+  --set controlPlanes.sharedSecrets.serviceAccount.customAnnotations."eks\.amazonaws\.com/role-arn"=""
+```
+
+6. Create a SharedSecretStore and reference the SharedSecrets service account:
+```yaml {copy-lines="all"}
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: aws-sm
+  namespace: default
+spec:
+  provider:
+    aws:
+      service: SecretsManager
+      region:
+      auth:
+        jwt:
+          serviceAccountRef:
+            name: external-secrets-controller
+  controlPlaneSelector:
+    names:
+      -
+  namespaceSelector:
+    names:
+      - default
+```
+
+When you create a `SharedSecretStore`, the underlying mechanism:
+
+1. Applies at the group level
+2. Determines which control planes should receive this configuration by the `controlPlaneSelector`
+3. Automatically creates a ClusterSecretStore inside each identified control plane
+4. Maintains a connection in each control plane with the ClusterSecretStore
+   credentials and configuration from the parent SharedSecretStore
+
+Upbound automatically generates a ClusterSecretStore in each matching control
+plane when you create a SharedSecretStore.
+
+```yaml {copy-lines="none"}
+# Automatically created in each matching control plane
+apiVersion: external-secrets.io/v1beta1
+kind: ClusterSecretStore
+metadata:
+  name: aws-secrets  # Name matches the parent SharedSecretStore
+spec:
+  provider:
+    upboundspaces:
+      storeRef:
+        name: aws-secrets
+```
+
+When you create the SharedSecretStore, the controller replaces the provider with
+a special provider called `upboundspaces`. This provider references the
+SharedSecretStore object in the Spaces API. This avoids copying the actual cloud
+credentials from Spaces to each control plane.
+
+This workflow lets you configure the store connection once at the group level
+and automatically propagates it to each control plane. Individual control
+planes can use the store without exposure to the group-level configuration, and
+updates to the parent propagate to all child ClusterSecretStores.
+
+#### Azure Key Vault
+
+:::important
+While the underlying ESO API supports more auth methods, static credentials are currently the only supported auth method in Cloud Spaces.
+:::
+
+##### Static credentials
+
+1. Use the Azure CLI to create a service principal.
+2. Save the service principal credentials in a file called `azure-credentials.json`:
+```json
+{
+  "appId": "myAppId",
+  "displayName": "myServicePrincipalName",
+  "password": "myServicePrincipalPassword",
+  "tenant": "myTenantId"
+}
+```
+
+3. Store the credentials as a Kubernetes secret. The key names must match the `authSecretRef` keys in the store configuration below:
+```shell
+kubectl create secret \
+  generic azure-secret-sp \
+  -n default \
+  --from-literal=ClientID="<myAppId>" \
+  --from-literal=ClientSecret="<myServicePrincipalPassword>"
+```
+
+4. Create a SharedSecretStore referencing these credentials:
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: azure-kv
+spec:
+  provider:
+    azurekv:
+      tenantId: ""
+      vaultUrl: ""
+      authSecretRef:
+        clientId:
+          name: azure-secret-sp
+          key: ClientID
+        clientSecret:
+          name: azure-secret-sp
+          key: ClientSecret
+  controlPlaneSelector:
+    names:
+      -
+  namespaceSelector:
+    names:
+      - default
+```
+
+##### Workload Identity
+
+You can also use Entra Workload Identity Federation to access Azure Key Vault
+without needing to manage secrets.
+
+To use Entra Workload ID with AKS:
+
+1. Deploy the Spaces software into a [workload identity-enabled AKS cluster][workload-identity-enabled-aks-cluster].
+2. Retrieve the OIDC issuer URL of the AKS cluster:
+```shell
+az aks show --name "" \
+  --resource-group "" \
+  --query "oidcIssuerProfile.issuerUrl" \
+  --output tsv
+```
+
+3. Use the Azure CLI to make a managed identity:
+```shell
+az identity create \
+  --name "" \
+  --resource-group "" \
+  --location "" \
+  --subscription ""
+```
+
+4. Look up the managed identity's client ID:
+```shell
+az identity show \
+  --resource-group "" \
+  --name "" \
+  --query 'clientId' \
+  --output tsv
+```
+
+5. Update your Spaces deployment to annotate the SharedSecrets service account with the associated Entra application client ID from the previous step:
+```shell
+up space upgrade ... \
+  --set controlPlanes.sharedSecrets.serviceAccount.customAnnotations."azure\.workload\.identity/client-id"="" \
+  --set-string controlPlanes.sharedSecrets.pod.customLabels."azure\.workload\.identity/use"="true"
+```
+
+6. Determine the Spaces-generated `controlPlaneID` of your control plane. When you deploy a `kind: controlplane` in a Space, the Spaces software deploys a set of pods in a new namespace following the format `mxp--system`.
+```shell
+kubectl get controlplane -o jsonpath='{.status.controlPlaneID}'
+```
+
+7. Create a federated identity credential.
+```shell
+FEDERATED_IDENTITY_CREDENTIAL_NAME=
+USER_ASSIGNED_IDENTITY_NAME=
+RESOURCE_GROUP=
+AKS_OIDC_ISSUER=
+CONTROLPLANE_ID=
+az identity federated-credential create --name ${FEDERATED_IDENTITY_CREDENTIAL_NAME} --identity-name "${USER_ASSIGNED_IDENTITY_NAME}" --resource-group "${RESOURCE_GROUP}" --issuer "${AKS_OIDC_ISSUER}" --subject system:serviceaccount:"mxp-${CONTROLPLANE_ID}-system:external-secrets-controller" --audience api://AzureADTokenExchange
+```
+
+8. Assign the `Key Vault Secrets User` role to the user-assigned managed identity that you created earlier. This step gives the managed identity permission to read secrets from the key vault:
+   This step gives the managed identity permission to read secrets from the key vault:
+```shell
+az role assignment create \
+  --assignee-object-id "${IDENTITY_PRINCIPAL_ID}" \
+  --role "Key Vault Secrets User" \
+  --scope "${KEYVAULT_RESOURCE_ID}" \
+  --assignee-principal-type ServicePrincipal
+```
+
+:::important
+You must manually restart a workload's pod when you add the annotation to the running pod's service account. The Entra workload identity mutating admission webhook requires a restart to inject the necessary environment.
+:::
+
+9. Create a `SharedSecretStore`. Replace `vaultUrl` with the URL of your Azure Key Vault instance:
+```yaml {copy-lines="all"}
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: azure-kv
+spec:
+  provider:
+    azurekv:
+      authType: WorkloadIdentity
+      vaultUrl: ""
+  controlPlaneSelector:
+    names:
+      -
+  namespaceSelector:
+    names:
+      - default
+```
+
+#### Google Cloud Secret Manager
+
+You can configure access to Google Cloud Secret Manager using static credentials or workload identity. Below are instructions for configuring either. See the [ESO provider API][eso-provider-api] for more information.
+
+:::important
+While the underlying ESO API supports more auth methods, static credentials are currently the only supported auth method in Cloud Spaces.
+:::
+
+##### Static credentials
+
+1. Use the [GCP CLI][gcp-cli] to create access credentials.
+2. Save the output in a file called `gcp-credentials.json`.
+3. Store the access credentials in a secret in the same namespace as the `SharedSecretStore`.
+   ```shell {label="kube-create-secret",copy-lines="all"}
+   kubectl create secret \
+     generic gcpsm-secret \
+     -n default \
+     --from-file=creds=./gcp-credentials.json
+   ```
+
+4. Create a `SharedSecretStore`, referencing the secret created earlier. Replace `projectID` with your GCP Project ID:
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: gcp-sm
+spec:
+  provider:
+    gcpsm:
+      auth:
+        secretRef:
+          secretAccessKeySecretRef:
+            name: gcpsm-secret
+            key: creds
+      projectID:
+  controlPlaneSelector:
+    names:
+      -
+  namespaceSelector:
+    names:
+      - default
+```
+
+:::tip
+The example above maps a Shared Secret Store into a single namespace of a single control plane. Read [control plane selection][control-plane-selection] and [namespace selection][namespace-selection] to learn how to map into one or more namespaces of one or more control planes.
+:::
+
+##### Workload identity with Service Accounts to IAM Roles
+
+To configure this, you grant the `roles/iam.workloadIdentityUser` role to the Kubernetes
+service account in the control plane namespace so it can impersonate the IAM service
+account.
+
+1. Ensure you've deployed Spaces on a [Workload Identity Federation-enabled][workload-identity-federation-enabled] GKE cluster.
+2. Determine the Spaces-generated `controlPlaneID` of your control plane. When you deploy a `kind: controlplane` in a Space, the Spaces software deploys a set of pods in a new namespace following the format `mxp--system`.
+```shell
+kubectl get controlplane -o jsonpath='{.status.controlPlaneID}'
+```
+
+3. Create a GCP IAM service account with the [GCP CLI][gcp-cli-1]:
+```shell
+gcloud iam service-accounts create \
+  --project=
+```
+4. Grant the IAM service account the role to access GCP Secret Manager:
+```shell
+SA_NAME=
+IAM_SA_PROJECT_ID=
+gcloud projects add-iam-policy-binding "${IAM_SA_PROJECT_ID}" \
+  --member "serviceAccount:${SA_NAME}@${IAM_SA_PROJECT_ID}.iam.gserviceaccount.com" \
+  --role roles/secretmanager.secretAccessor
+```
+
+5. When you enable the Shared Secrets feature, a service account gets created in each control plane for the External Secrets Operator. Apply a [GCP IAM policy binding][gcp-iam-policy-binding] to associate this service account with the desired GCP IAM role.
+```shell
+PROJECT_ID=
+PROJECT_NUMBER=
+CONTROLPLANE_ID=
+gcloud projects add-iam-policy-binding "projects/${PROJECT_ID}" \
+  --role "roles/iam.workloadIdentityUser" \
+  --member=principal://iam.googleapis.com/projects/${PROJECT_NUMBER}/locations/global/workloadIdentityPools/${PROJECT_ID}.svc.id.goog/subject/ns/mxp-${CONTROLPLANE_ID}-system/sa/external-secrets-controller
+```
+
+6. Update your Spaces deployment to annotate the SharedSecrets service account with the GCP IAM service account's identifier:
+```shell
+up space upgrade ... \
+  --set controlPlanes.sharedSecrets.serviceAccount.customAnnotations."iam\.gke\.io/gcp-service-account"=""
+```
+
+7. Create a `SharedSecretStore`. Replace `projectID` with your GCP Project ID:
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: gcp-sm
+spec:
+  provider:
+    gcpsm:
+      projectID:
+  controlPlaneSelector:
+    names:
+      -
+  namespaceSelector:
+    names:
+      - default
+```
+
+:::tip
+The example above maps a Shared Secret Store into a single namespace of a single control plane. Read [control plane selection][control-plane-selection-1] and [namespace selection][namespace-selection-2] to learn how to map into one or more namespaces of one or more control planes.
+:::
+
+### Manage your secret distribution
+
+After you create your SharedSecretStore, you can define which secrets to
+distribute using SharedExternalSecret:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedExternalSecret
+metadata:
+  name: database-credentials
+  namespace: default
+spec:
+  # Select the same control planes as your SharedSecretStore
+  controlPlaneSelector:
+    labelSelectors:
+      - matchLabels:
+          environment: production
+
+  externalSecretSpec:
+    refreshInterval: 1h
+    secretStoreRef:
+      name: aws-secrets # References the SharedSecretStore name
+      kind: ClusterSecretStore
+    target:
+      name: db-credentials
+    data:
+      - secretKey: username
+        remoteRef:
+          key: prod/database/credentials
+          property: username
+      - secretKey: password
+        remoteRef:
+          key: prod/database/credentials
+          property: password
+```
+
+This configuration:
+
+* Pulls database credentials from your external secret provider
+* Creates secrets in all production control planes
+* Refreshes the secrets every hour
+* Creates a secret called `db-credentials` in each control plane
+
+When you create a SharedExternalSecret at the group level, Upbound's system
+creates a template for the corresponding ClusterExternalSecrets in each selected
+control plane.
+
+The example below shows the ClusterExternalSecret that Upbound creates:
+
+```yaml
+# Inside each matching control plane:
+apiVersion: external-secrets.io/v1beta1
+kind: ClusterExternalSecret
+metadata:
+  name: database-credentials
+spec:
+  refreshInterval: 1h
+  secretStoreRef:
+    name: aws-secrets
+    kind: ClusterSecretStore
+  data:
+    - secretKey: username
+      remoteRef:
+        key: prod/database/credentials
+        property: username
+```
+
+The hierarchy in this configuration is:
+
+1. SharedExternalSecret (group level) defines what secrets to distribute
+2. ClusterExternalSecret (control plane level) manages the distribution within
+   each control plane
+3. Kubernetes Secrets (namespace level) are created in specified namespaces
+
+#### Control plane selection
+
+To configure which control planes in a group you want to project a SecretStore into, use the `spec.controlPlaneSelector` field. You can either use `labelSelectors` or the `names` of control planes directly. A control plane matches if any of the label selectors match.
+
+This example matches all control planes in the group that have `environment: production` as a label:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: my-secret-store
+spec:
+  controlPlaneSelector:
+    labelSelectors:
+      - matchLabels:
+          environment: production
+```
+
+You can use the more complex `matchExpressions` to match labels based on an expression. This example matches control planes that have label `environment: production` or `environment: staging`:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: my-secret-store
+spec:
+  controlPlaneSelector:
+    labelSelectors:
+      - matchExpressions:
+          - { key: environment, operator: In, values: [production,staging] }
+```
+
+You can also specify the names of control planes directly:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: my-secret-store
+spec:
+  controlPlaneSelector:
+    names:
+      - controlplane-dev
+      - controlplane-staging
+      - controlplane-prod
+```
+
+#### Namespace selection
+
+To configure which namespaces **within each matched control plane** to project the secret store into, use the `spec.namespaceSelector` field. The projected secret store only appears in the namespaces matching the provided selector. You can either use `labelSelectors` or the `names` of namespaces directly. A namespace matches if any of the label selectors match.
+
+For all control planes matched by `spec.controlPlaneSelector`, this example matches the namespaces that have `team: team1` as a label:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: my-secret-store
+spec:
+  namespaceSelector:
+    labelSelectors:
+      - matchLabels:
+          team: team1
+```
+
+You can use the more complex `matchExpressions` to match labels based on an expression. This example matches namespaces that have label `team: team1` or `team: team2`:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: my-secret-store
+spec:
+  namespaceSelector:
+    labelSelectors:
+      - matchExpressions:
+          - { key: team, operator: In, values: [team1,team2] }
+```
+
+You can also specify the names of namespaces directly:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: my-secret-store
+spec:
+  namespaceSelector:
+    names:
+      - team1-namespace
+      - team2-namespace
+```
+
+## Configure secrets directly in a control plane
+
+The sections above explain how to use group-scoped resources to project secrets into multiple control planes. You can also use ESO API types directly in a control plane, as you would in standalone Crossplane or Kubernetes.
+
+See the [ESO documentation][eso-documentation] for a full guide on using the API types.
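+As a minimal sketch (assuming the same AWS Secrets Manager setup as the earlier
+examples, with an `aws-credentials` secret already present in the control
+plane), applying ESO types directly inside one control plane looks like this:
+
+```yaml
+# Hypothetical in-control-plane equivalent of the group-scoped examples above:
+# a namespaced SecretStore plus an ExternalSecret that materializes a secret.
+apiVersion: external-secrets.io/v1beta1
+kind: SecretStore
+metadata:
+  name: aws-sm
+  namespace: default
+spec:
+  provider:
+    aws:
+      service: SecretsManager
+      region: us-west-2
+      auth:
+        secretRef:
+          accessKeyIDSecretRef:
+            name: aws-credentials
+            key: access-key-id
+          secretAccessKeySecretRef:
+            name: aws-credentials
+            key: secret-access-key
+---
+apiVersion: external-secrets.io/v1beta1
+kind: ExternalSecret
+metadata:
+  name: db-credentials
+  namespace: default
+spec:
+  refreshInterval: 1h
+  secretStoreRef:
+    name: aws-sm
+    kind: SecretStore
+  target:
+    name: db-credentials
+  data:
+    - secretKey: password
+      remoteRef:
+        key: prod/database/credentials
+        property: password
+```
+
+Because these are plain ESO resources, they apply only to the control plane you
+create them in; nothing propagates at the group level.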
+
+## Best practices
+
+When you configure secrets management in your Upbound environment, keep the
+following best practices in mind:
+
+**Use consistent labeling schemes** across your control planes for predictable
+and manageable secret distribution.
+
+**Organize your secrets** in your external provider using a hierarchical
+structure that mirrors your control plane organization.
+
+**Set appropriate refresh intervals** based on your security requirements and the
+nature of the secrets.
+
+**Use namespace selection sparingly** to limit secret distribution to only the
+namespaces that need them.
+
+**Use separate tokens for each environment.** Keep them in distinct
+SharedSecretStores. Users could bypass SharedExternalSecret selectors by
+creating ClusterExternalSecrets directly in control planes, which grants access
+to all secrets available to that token.
+
+**Document your secret management architecture**, including which control planes
+should receive which secrets.
+
+[control-plane-selection]: #control-plane-selection
+[namespace-selection]: #namespace-selection
+[control-plane-selection-1]: #control-plane-selection
+[namespace-selection-2]: #namespace-selection
+
+[external-secrets-operator-eso]: https://external-secrets.io
+[workload-identity-enabled-aks-cluster]: https://learn.microsoft.com/en-us/azure/aks/workload-identity-deploy-cluster
+[eso-provider-api]: https://external-secrets.io/latest/provider/google-secrets-manager/
+[gcp-cli]: https://cloud.google.com/iam/docs/creating-managing-service-account-keys
+[workload-identity-federation-enabled]: https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity#enable_on_clusters_and_node_pools
+[gcp-cli-1]: https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity#kubernetes-sa-to-iam
+[gcp-iam-policy-binding]: https://cloud.google.com/sdk/gcloud/reference/iam/service-accounts/add-iam-policy-binding
+[eso-documentation]: https://external-secrets.io/latest/introduction/getting-started/
diff --git a/spaces_versioned_docs/version-v1.12/howtos/self-hosted/_category_.json b/spaces_versioned_docs/version-v1.12/howtos/self-hosted/_category_.json
new file mode 100644
index 000000000..5bf23bb0a
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.12/howtos/self-hosted/_category_.json
@@ -0,0 +1,11 @@
+{
+  "label": "Self-Hosted Spaces",
+  "position": 2,
+  "collapsed": true,
+  "customProps": {
+    "plan": "business"
+  }
+}
diff --git a/spaces_versioned_docs/version-v1.12/howtos/self-hosted/administer-features.md b/spaces_versioned_docs/version-v1.12/howtos/self-hosted/administer-features.md
new file mode 100644
index 000000000..ce878014e
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.12/howtos/self-hosted/administer-features.md
@@ -0,0 +1,121 @@
+---
+title: Administer features
+sidebar_position: 12
+description: Enable and disable features in Spaces
+---
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Feature flags and their availability may vary by version.
+:::
+
+This guide shows how to enable or disable features in your self-hosted Space.
+
+## Shared secrets
+
+**Status:** Preview
+
+This feature is enabled by default in Cloud Spaces.
+
+To enable this feature in a self-hosted Space, set
+`features.alpha.sharedSecrets.enabled=true` when installing the Space:
+
+```bash
+up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
+  ...
+  --set "features.alpha.sharedSecrets.enabled=true" \
+```
+
+## Observability
+
+**Status:** GA
+**Available from:** Spaces v1.13+
+
+This feature is enabled by default in Cloud Spaces.
+
+To enable this feature in a self-hosted Space, set
+`observability.enabled=true` (`features.alpha.observability.enabled=true` before `v1.14.0`) when installing the Space:
+
+```bash
+up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
+  ...
+  --set "observability.enabled=true" \
+```
+
+The observability feature collects telemetry data from user-facing control
+plane workloads like:
+
+* Crossplane
+* Providers
+* Functions
+
+Self-hosted Spaces users can add control plane system workloads such as the
+`api-server` and `etcd` by setting the
+`observability.collectors.includeSystemTelemetry` Helm flag to true.
+
+### Sensitive data
+
+To avoid exposing sensitive data in the `SharedTelemetryConfig` resource, use
+Kubernetes secrets to store the sensitive data and reference the secret in the
+`SharedTelemetryConfig` resource.
+
+Create the secret in the same namespace/group as the `SharedTelemetryConfig`
+resource. The example below uses `kubectl create secret` to create a new secret:
+
+```bash
+kubectl create secret generic sensitive -n \
+  --from-literal=apiKey='YOUR_API_KEY'
+```
+
+Next, reference the secret in the `SharedTelemetryConfig` resource:
+
+```yaml
+apiVersion: observability.spaces.upbound.io/v1alpha1
+kind: SharedTelemetryConfig
+metadata:
+  name: newrelic
+spec:
+  configPatchSecretRefs:
+    - name: sensitive
+      key: apiKey
+      path: exporters.otlphttp.headers.api-key
+  controlPlaneSelector:
+    labelSelectors:
+      - matchLabels:
+          org: foo
+  exporters:
+    otlphttp:
+      endpoint: https://otlp.nr-data.net
+      headers:
+        api-key: dummy # The secret value replaces this value, which can be omitted
+  exportPipeline:
+    metrics: [otlphttp]
+    traces: [otlphttp]
+    logs: [otlphttp]
+```
+
+The `configPatchSecretRefs` field in the `spec` specifies the secret `name`,
+`key`, and `path` values to inject the secret value in the
+`SharedTelemetryConfig` resource.
+
+## Shared backups
+
+As of Spaces `v1.12.0`, this feature is enabled by default.
+
+To disable it in a self-hosted Space, pass `features.alpha.sharedBackup.enabled=false` as a Helm chart value:
+`--set "features.alpha.sharedBackup.enabled=false"`
+
+## Query API
+
+**Status:** Preview
+
+The Query API is available in the Cloud Space offering and enabled by default.
+
+The Query API is required for self-hosted deployments with connected Spaces. See the
+related [documentation][documentation]
+to enable this feature.
+
+[documentation]: /spaces/howtos/query-api/
diff --git a/spaces_versioned_docs/version-v1.12/howtos/self-hosted/attach-detach.md b/spaces_versioned_docs/version-v1.12/howtos/self-hosted/attach-detach.md
new file mode 100644
index 000000000..1465921cf
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.12/howtos/self-hosted/attach-detach.md
@@ -0,0 +1,198 @@
+---
+title: Connect or disconnect a Space
+sidebar_position: 12
+description: Enable and connect self-hosted Spaces to the Upbound console
+---
+:::info API Version Information
+This guide applies to **Spaces v1.8.0 and later** (Self-Hosted only). Connection to the Upbound console requires the Query API and RBAC to be enabled.
+
+For Query API setup details, see [Deploy Query API infrastructure](./query-api.md).
+:::
+
+:::important
+This feature is in preview.
Starting in Spaces `v1.8.0` and later, you must +deploy and [enable the Query API][enable-the-query-api] and [enable Upbound +RBAC][enable-upbound-rbac] to connect a Space to Upbound. +::: + +[Upbound][upbound] allows you to connect self-hosted Spaces and enables a streamlined operations and debugging experience in your Console. + +## Usage + +### Connect + +Before you begin, make sure you have: + +- An existing Upbound [organization][organization] in Upbound SaaS. +- The `up` CLI installed and logged into your organization +- `kubectl` installed with the kubecontext of your self-hosted Space cluster. +- A `token.json` license, provided by your Upbound account representative. +- You enabled the [Query API][query-api] in the self-hosted Space. + +Create a new `UPBOUND_SPACE_NAME`. If you don't create a name, `up` automatically generates one for you: + +```ini +export UPBOUND_SPACE_NAME=your-self-hosted-space +``` + +#### With up CLI + +:::tip +The command tries to connect the Space to the org account context pointed at by your `up` CLI profile. Make sure you've logged into Upbound SaaS with `up login -a ` before trying to connect the Space. +::: + +Connect the Space to the Console: + +```bash +up space connect "${UPBOUND_SPACE_NAME}" +``` + +This command installs a Connect agent, creates a service account, and configures permissions in your Upbound cloud organization in the `upbound-system` namespace of your Space. + +#### With Helm + +Export your Upbound org account name to an environment variable called `UPBOUND_ORG_NAME`. You can see this value by running `up org list` after logging on to Upbound. + +```ini +export UPBOUND_ORG_NAME=your-org-name +``` + +Create a new robot token and export it to an environment variable called `UPBOUND_TOKEN`: + +```bash +up robot create "${UPBOUND_SPACE_NAME}" --description="Robot used for authenticating Space '${UPBOUND_SPACE_NAME}' with Upbound Connect" +export UPBOUND_TOKEN=$(up robot token create "$UPBOUND_SPACE_NAME" "$UPBOUND_SPACE_NAME" --file - | jq -r '.token') +``` + +:::note +Follow the [`jq` installation guide][jq-install] if your machine doesn't include +it by default. +::: + +Create a secret containing the robot token: + +```bash +kubectl create secret -n upbound-system generic connect-token --from-literal=token=${UPBOUND_TOKEN} +``` + +Specify your username and password for the helm OCI registry: + +```bash +jq -r .token $SPACES_TOKEN_PATH | helm registry login xpkg.upbound.io -u $(jq -r .accessId $SPACES_TOKEN_PATH) --password-stdin +``` + +In the same cluster where you installed the Spaces software, install the Upbound connect agent with your token secret. + +```bash +helm -n upbound-system upgrade --install agent \ + oci://xpkg.upbound.io/spaces-artifacts/agent \ + --version "0.0.0-441.g68777b9" \ + --set "image.repository=xpkg.upbound.io/spaces-artifacts/agent" \ + --set "registration.image.repository=xpkg.upbound.io/spaces-artifacts/register-init" \ + --set "imagePullSecrets[0].name=upbound-pull-secret" \ + --set "registration.enabled=true" \ + --set "space=${UPBOUND_SPACE_NAME}" \ + --set "organization=${UPBOUND_ORG_NAME}" \ + --set "tokenSecret=connect-token" \ + --wait +``` + + +#### View your Space in the Console + + +Go to the [Upbound Console][upbound-console], log in, and choose the newly connected Space from the Space selector dropdown. + +![A screenshot of the Upbound Console space selector dropdown](/img/attached-space.png) + +:::note +You can only connect a self-hosted Space to a single organization at a time. 
+:::
+
+### Disconnect
+
+#### With up CLI
+
+To disconnect a self-hosted Space or a deleted self-hosted Space, run the following command:
+
+```bash
+up space disconnect "${UPBOUND_SPACE_NAME}"
+```
+
+If the Space still exists, this command uninstalls the Connect agent and deletes the associated service account and permissions.
+
+#### With Helm
+
+To disconnect a self-hosted Space or a deleted self-hosted Space, run the following command:
+
+```bash
+helm delete -n upbound-system agent
+```
+
+Clean up the robot token you created for this self-hosted Space:
+
+```bash
+up robot delete "${UPBOUND_SPACE_NAME}" --force
+```
+
+## Security model
+
+### Architecture
+
+![An architectural diagram of a self-hosted Space attached to Upbound](/img/console-attach-architecture.jpg)
+
+:::note
+This diagram illustrates a self-hosted Space running in AWS connected to the global Upbound Console. The same model applies to a Space running in AKS, GKE, or other Kubernetes environments.
+:::
+
+### Data path
+
+Upbound uses a Pub/Sub model over TLS to communicate between Upbound's global
+console and your self-hosted Space. A self-hosted Space establishes a secure
+connection with `connect.upbound.io`, `api.upbound.io`, and `auth.upbound.io` and subscribes to an
+endpoint.
+
+:::important
+Add `connect.upbound.io`, `api.upbound.io`, and `auth.upbound.io` to your organization's list of
+allowed endpoints.
+:::
+
+The Upbound Console communicates with the Space through that endpoint. The data flow
+is:
+
+1. Users sign in to the Upbound Console, redirecting to authenticate with an organization's configured Identity Provider via SSO.
+2. Once authenticated, actions in the Console, like listing control planes or specific resource types from a control plane, post as messages to the Upbound Connect service.
+3. A user's self-hosted Space polls the Upbound Connect service periodically for new messages, verifies the authenticity of each message, and fulfills the request it contains.
+4. A user's self-hosted Space returns the results of the request to the Upbound Connect service and the Console renders the results in the user's browser session.
+
+**Upbound never stores data originating from a self-hosted Space.** The data is transient and only exposed in the user's browser session. The Console needs this data to render your resources and control planes in the UI.
+
+### Data transmitted
+
+Users interact with the Upbound Console to generate request queries to the Upbound Connect Service while exploring, managing, or debugging a self-hosted Space. These requests send data back to the user's browser session in the Console, including:
+
+* Metadata for the Space
+* Metadata for control planes in the Space
+* Configuration manifests for various resource types within your Space: Crossplane managed resources, composite resources, composite resource claims, Upbound shared secrets, Upbound shared backups, Crossplane providers, ProviderConfigs, Configurations, and Crossplane composition functions.
+
+:::important
+This data only concerns resource configuration. The data _inside_ the managed
+resources in your Space isn't visible at any point.
+:::
+
+**Upbound can't see your data.** Upbound doesn't have access to session-based data rendered for your users in the Upbound Console. Upbound has no information about your self-hosted Space, other than that you've connected a self-hosted Space.
+
+### Threat vectors
+
+Only users with editor or administrative permissions can make changes using the Console, like creating or deleting control planes or groups.
+
+[enable-the-query-api]: /spaces/howtos/self-hosted/query-api
+[enable-upbound-rbac]: /manuals/platform/concepts/authorization/upbound-rbac
+[upbound]: /manuals/console/upbound-console
+[organization]: /manuals/platform/concepts/identity-management/organizations
+[query-api]: /spaces/howtos/self-hosted/query-api
+[jq-install]: https://jqlang.org/download/
+
+[upbound-console]: https://console.upbound.io
diff --git a/spaces_versioned_docs/version-v1.12/howtos/self-hosted/billing.md b/spaces_versioned_docs/version-v1.12/howtos/self-hosted/billing.md
new file mode 100644
index 000000000..145ff9f03
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.12/howtos/self-hosted/billing.md
@@ -0,0 +1,307 @@
+---
+title: Self-Hosted Space Billing
+sidebar_position: 50
+description: A guide for how billing works in an Upbound Space
+---
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Billing models may evolve between versions; see Capacity Licensing for alternative models.
+
+For version-specific features and capacity-based licensing reference specifications, see [Capacity Licensing](/spaces/howtos/self-hosted/capacity-licensing).
+:::
+
+Spaces are a self-hosting feature of Upbound's [flagship product][flagship-product] for platform teams to deploy control planes in their self-managed environments. You can install Spaces into any Kubernetes cluster in your own cloud account, on-premises data center, or on the edge. The pricing is usage-based and requires an Upbound account and subscription. The billing unit is a `Loop`.
+
+:::info
+This guide describes the traditional usage-based billing model using object storage. For disconnected or air-gapped environments, consider [Capacity Licensing](/spaces/howtos/self-hosted/capacity-licensing), which provides a simpler fixed-capacity model with local usage tracking.
+:::
+
+## Billing details
+
+Spaces **aren't connected** to Upbound's global service. To enable proper billing, the Spaces software ships a controller whose responsibility is to collect billing data from your Spaces deployment. The collection and storage of your billing data happens entirely within your environment; no data is automatically emitted back to Upbound's global service. This data gets written to object storage of your choice. AWS, Azure, and GCP are currently supported. The Spaces software exports billing usage data every ~15 seconds.
+
+Spaces customers must periodically provide the billing data to Upbound. Contact your Upbound sales representative to learn more.
+
+## AWS S3
+
+Configure billing to write to an S3 bucket by providing the following values at install-time. Create an S3 bucket if you don't already have one.
+
+### IAM policy
+
+You must create an IAM policy and attach it to the IAM user (for static credentials) or IAM role (for assumed
+roles).
The policy example below enables the necessary S3 permissions:
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Sid": "EnableS3Permissions",
+      "Effect": "Allow",
+      "Action": [
+        "s3:PutObject",
+        "s3:GetObject",
+        "s3:ListBucket",
+        "s3:DeleteObject"
+      ],
+      "Resource": [
+        "arn:aws:s3:::your-bucket-name/*",
+        "arn:aws:s3:::your-bucket-name"
+      ]
+    },
+    {
+      "Sid": "ListBuckets",
+      "Effect": "Allow",
+      "Action": "s3:ListAllMyBuckets",
+      "Resource": "*"
+    }
+  ]
+}
+```
+
+### Authentication with static credentials
+
+In your Spaces install cluster, create a secret in the `upbound-system`
+namespace. This secret must contain keys `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`.
+
+```bash
+kubectl create secret generic billing-credentials -n upbound-system \
+  --from-literal=AWS_ACCESS_KEY_ID= \
+  --from-literal=AWS_SECRET_ACCESS_KEY=
+```
+
+Install the Space software, providing the billing details in addition to the other required values.
+
+```bash {hl_lines="2-6"}
+helm -n upbound-system upgrade --install spaces ... \
+  --set "billing.enabled=true" \
+  --set "billing.storage.provider=aws" \
+  --set "billing.storage.aws.region=" \
+  --set "billing.storage.aws.bucket=" \
+  --set "billing.storage.secretRef.name=billing-credentials"
+  ...
+```
+
+```bash {hl_lines="2-6"}
+up space init ... \
+  --set "billing.enabled=true" \
+  --set "billing.storage.provider=aws" \
+  --set "billing.storage.aws.region=" \
+  --set "billing.storage.aws.bucket=" \
+  --set "billing.storage.secretRef.name=billing-credentials"
+  ...
+```
+
+### Authentication with an IAM role
+
+To use short-lived credentials with an assumed IAM role, create an IAM role with
+established trust to the `vector` service account in all `mxp-*-system`
+namespaces.
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Principal": {
+        "Federated": "arn:aws:iam::12345678912:oidc-provider/oidc.eks.eu-west-2.amazonaws.com/id/YOUROIDCPROVIDERID"
+      },
+      "Action": "sts:AssumeRoleWithWebIdentity",
+      "Condition": {
+        "StringLike": {
+          "oidc.eks.eu-west-2.amazonaws.com/id/YOUROIDCPROVIDERID:sub": "system:serviceaccount:mxp-*-system:vector"
+        }
+      }
+    }
+  ]
+}
+```
+
+For more information about workload identities, review the [Workload-identity
+Configuration documentation][workload-identity-configuration-documentation].
+
+```bash {hl_lines="2-7"}
+helm -n upbound-system upgrade --install spaces ... \
+  --set "billing.enabled=true" \
+  --set "billing.storage.provider=aws" \
+  --set "billing.storage.aws.region=" \
+  --set "billing.storage.aws.bucket=" \
+  --set "billing.storage.secretRef.name=" \
+  --set "controlPlanes.vector.serviceAccount.customAnnotations[eks.amazonaws.com/role-arn]="
+  ...
+```
+
+```bash {hl_lines="2-7"}
+up space init ... \
+  --set "billing.enabled=true" \
+  --set "billing.storage.provider=aws" \
+  --set "billing.storage.aws.region=" \
+  --set "billing.storage.aws.bucket=" \
+  --set "billing.storage.secretRef.name=" \
+  --set "controlPlanes.vector.serviceAccount.customAnnotations[eks.amazonaws.com/role-arn]="
+  ...
+```
+
+*Note*: You must set `billing.storage.secretRef.name` to an empty string when using an assumed role.
+
+## Azure blob storage
+
+Configure billing to write to a blob in Azure by providing the following values at install-time. Create a storage account and container if you don't already have one.
+
+Then, on the cluster where you installed the Spaces software, create a secret in `upbound-system`.
This secret must contain keys `AZURE_TENANT_ID`, `AZURE_CLIENT_ID`, and `AZURE_CLIENT_SECRET`. Make sure to replace the values with details generated from your Azure account. + +```bash +kubectl create secret generic billing-credentials -n upbound-system \ + --from-literal=AZURE_TENANT_ID= \ + --from-literal=AZURE_CLIENT_ID= \ + --from-literal=AZURE_CLIENT_SECRET= +``` + +Install the Space software, providing the billing details to the other required values. + + + + + + +```bash {hl_lines="2-6"} +helm -n upbound-system upgrade --install spaces ... \ + --set "billing.enabled=true" \ + --set "billing.storage.provider=azure" \ + --set "billing.storage.azure.storageAccount=" \ + --set "billing.storage.azure.container=" \ + --set "billing.storage.secretRef.name=billing-credentials" + ... +``` + + + + + +```bash {hl_lines="2-6"} +up space init ... \ + --set "billing.enabled=true" \ + --set "billing.storage.provider=azure" \ + --set "billing.storage.azure.storageAccount=" \ + --set "billing.storage.azure.container=" \ + --set "billing.storage.secretRef.name=billing-credentials" + ... +``` + + + + + + + +## GCP Cloud Storage Buckets + + +Configure billing to write to a Cloud Storage bucket in GCP by providing the following values at install-time. Create a bucket if you don't already have one. + +Then, on the cluster where you installed the Spaces software, create a secret in `upbound-system`. This secret must contain the key `google_application_credentials`. Make sure to replace the value with a GCP service account key JSON generated from your GCP account. + +```bash +kubectl create secret generic billing-credentials -n upbound-system \ + --from-literal=google_application_credentials= +``` + +Install the Space software, providing the billing details to the other required values. + + + + + + +```bash {hl_lines="2-5"} +helm -n upbound-system upgrade --install spaces ... \ + --set "billing.enabled=true" \ + --set "billing.storage.provider=gcp" \ + --set "billing.storage.gcp.bucket=" \ + --set "billing.storage.secretRef.name=billing-credentials" + ... +``` + + + + + +```bash {hl_lines="2-5"} +up space init ... \ + --set "billing.enabled=true" \ + --set "billing.storage.provider=gcp" \ + --set "billing.storage.gcp.bucket=" \ + --set "billing.storage.secretRef.name=billing-credentials" + ... +``` + + + + + +## Export billing data to send to Upbound + +To prepare the billing data to send to Upbound, do the following: + +Ensure the current context of your kubeconfig points at the Spaces cluster. Then, run the [export][export] command. + + +:::important +Your current CLI must have read access to the bucket to run this command. +::: + + +The example below exports billing data stored in AWS: + +```bash +up space billing export --provider=aws \ + --bucket=spaces-billing-bucket \ + --account=your-upbound-org \ + --billing-month=2024-07 \ + --force-incomplete +``` + +The command creates a billing report that's zipped up in your current working directory. Send the output to your Upbound sales representative. + + +You can find full instructions and command options in the up [CLI reference][cli-reference] docs. 
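+To verify the Space is writing billing data before you export it, you can list
+the bucket contents. This is a hedged sketch using the AWS CLI and the
+`spaces-billing-bucket` name from the export example above:
+
+```bash
+# Billing objects should appear and update as the Space exports usage data
+aws s3 ls s3://spaces-billing-bucket --recursive --human-readable | tail
+```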
+ + +[export]: /reference/cli-reference +[cli-reference]: /reference/cli-reference +[flagship-product]: https://www.upbound.io/platform +[workload-identity-configuration-documentation]: https://docs.upbound.io/operate/accounts/authentication/oidc-configuration diff --git a/spaces_versioned_docs/version-v1.12/howtos/self-hosted/capacity-licensing.md b/spaces_versioned_docs/version-v1.12/howtos/self-hosted/capacity-licensing.md new file mode 100644 index 000000000..a1dc6c101 --- /dev/null +++ b/spaces_versioned_docs/version-v1.12/howtos/self-hosted/capacity-licensing.md @@ -0,0 +1,591 @@ +--- +title: Capacity Licensing +sidebar_position: 60 +description: A guide for capacity-based licensing in self-hosted Spaces +plan: "enterprise" +--- + + + + + +This guide explains how to configure and monitor capacity-based licensing in +self-hosted Upbound Spaces. Capacity licensing provides a simplified billing +model for disconnected or air-gapped environments where automated usage +reporting isn't possible. + +:::info +Spaces `v1.15` and later support Capacity Licensing as an +alternative to the traditional usage-based billing model described in the +[Self-Hosted Space Billing][space-billing] guide. +::: + +## Overview + +Capacity licensing allows organizations to purchase a fixed capacity of +resources upfront. The Spaces software tracks usage locally and provides +visibility into consumption against your purchased capacity, all without +requiring external connectivity to Upbound's services. + +### Key concepts + +- **Resource Hours**: The primary billing unit representing all resources + managed by Crossplane over time. This includes managed resources, + composites (XRs), claims (XRCs), and all composed resources - essentially + everything Crossplane manages. The system aggregates resource counts over each + hour using trapezoidal integration to accurately account for changes in + resource count throughout the hour. +- **Operations**: The number of Operations invoked by Crossplane. +- **License Capacity**: The total amount of resource hours and operations included in your license. +- **Usage Tracking**: Continuous monitoring of consumption with real-time utilization percentages. + +### How it works + +1. Upbound provides you with a license file containing your purchased capacity +2. You configure a `SpaceLicense` in your Spaces cluster +3. The metering system automatically: + - Collects measurements from all control planes every minute + - Aggregates usage data into hourly intervals + - Stores usage data in a local PostgreSQL database + - Updates the `SpaceLicense` status with current consumption + +## Prerequisites + +### PostgreSQL database + +Capacity licensing requires a PostgreSQL database to store usage measurements. You can use: + +- An existing PostgreSQL instance +- A managed PostgreSQL service (AWS RDS, Azure Database, Google Cloud SQL) +- A PostgreSQL instance deployed in your cluster + +The database must be: + +- Accessible from the Spaces cluster +- Configured with a dedicated database and credentials + +#### Example: Deploy PostgreSQL with CloudNativePG + +If you don't have an existing PostgreSQL instance, you can deploy one in your +cluster using [CloudNativePG] (CNPG). CNPG is a Kubernetes operator that +manages PostgreSQL clusters. + +1. Install the CloudNativePG operator: + +```bash +kubectl apply -f https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/release-1.24/releases/cnpg-1.24.1.yaml +``` + +2. 
Create a PostgreSQL cluster for metering: + +```yaml +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: metering-postgres + namespace: upbound-system +spec: + instances: 1 + imageName: ghcr.io/cloudnative-pg/postgresql:16 + bootstrap: + initdb: + database: metering + owner: metering + postInitApplicationSQL: + - ALTER ROLE "metering" CREATEROLE; + storage: + size: 5Gi + # Optional: Configure resources for production use + # resources: + # requests: + # memory: "512Mi" + # cpu: "500m" + # limits: + # memory: "1Gi" + # cpu: "1000m" +--- +apiVersion: v1 +kind: Secret +metadata: + name: metering-postgres-app + namespace: upbound-system + labels: + cnpg.io/reload: "true" +stringData: + username: metering + password: "your-secure-password-here" +type: kubernetes.io/basic-auth +``` + +```bash +kubectl apply -f metering-postgres.yaml +``` + +3. Wait for the cluster to be ready: + +```bash +kubectl wait --for=condition=ready cluster/metering-postgres -n upbound-system --timeout=5m +``` + +4. You can access the PostgreSQL cluster at `metering-postgres-rw.upbound-system.svc.cluster.local:5432`. + +:::tip +For production deployments, consider: +- Increasing `instances` to 3 for high availability +- Configuring [backups] to object storage +- Setting appropriate resource requests and limits +- Using a dedicated storage class with good I/O performance +::: + +### License file + +Contact your Upbound sales representative to obtain a license file for your organization. The license file contains: +- Your unique license ID +- Purchased capacity (resource hours and operations) +- License validity period +- Any usage restrictions (such as cluster UUID pinning) + +## Configuration + +### Step 1: Create database credentials secret + +Create a Kubernetes secret containing your PostgreSQL password using the pgpass format: + +```bash +# Create a pgpass file with format: hostname:port:database:username:password +# Note: The database name and username must be 'metering' +# For CNPG clusters, use the read-write service endpoint: -rw..svc.cluster.local +echo "metering-postgres-rw.upbound-system.svc.cluster.local:5432:metering:metering:your-secure-password-here" > pgpass + +# Create the secret +kubectl create secret generic metering-postgres-credentials \ + -n upbound-system \ + --from-file=pgpass=pgpass + +# Clean up the pgpass file +rm pgpass +``` + +The secret must contain a single key: +- **`pgpass`**: PostgreSQL password file in the format `hostname:port:metering:metering:password` + +:::note +The database name and username are fixed as `metering`. Ensure your PostgreSQL instance has a database named `metering` with a user `metering` that has appropriate permissions. + +If you deployed PostgreSQL using CNPG as shown in the example above, the password should match what you set in the `metering-postgres-app` secret. +::: + +:::tip +For production environments, consider using external secret management solutions: +- [External Secrets Operator][eso] +- Cloud-specific secret managers (AWS Secrets Manager, Azure Key Vault, GCP Secret Manager) +::: + +### Step 2: Enable metering in Spaces + +Enable the metering feature when installing or upgrading Spaces: + + + + + +```bash {hl_lines="2-7"} +helm -n upbound-system upgrade --install spaces ... 
\ + --set "metering.enabled=true" \ + --set "metering.storage.postgres.connection.url=metering-postgres-rw.upbound-system.svc.cluster.local:5432" \ + --set "metering.storage.postgres.connection.credentials.secret.name=metering-postgres-credentials" \ + --set "metering.interval=1m" \ + --set "metering.workerCount=10" \ + --set "metering.aggregationInterval=1h" \ + --set "metering.measurementRetentionDays=30" + ... +``` + + + + + +```bash {hl_lines="2-7"} +up space init ... \ + --set "metering.enabled=true" \ + --set "metering.storage.postgres.connection.url=metering-postgres-rw.upbound-system.svc.cluster.local:5432" \ + --set "metering.storage.postgres.connection.credentials.secret.name=metering-postgres-credentials" \ + --set "metering.interval=1m" \ + --set "metering.workerCount=10" \ + --set "metering.aggregationInterval=1h" \ + --set "metering.measurementRetentionDays=30" + ... +``` + + + + + +#### Configuration options + +| Option | Default | Description | +|--------|---------|-------------| +| `metering.enabled` | `false` | Enable the metering feature | +| `metering.storage.postgres.connection.url` | - | PostgreSQL host and port (format: `host:port`, required) | +| `metering.storage.postgres.connection.credentials.secret.name` | - | Name of the secret containing PostgreSQL credentials (required) | +| `metering.storage.postgres.connection.sslmode` | `require` | SSL mode for PostgreSQL connection (`disable`, `allow`, `prefer`, `require`, `verify-ca`, `verify-full`) | +| `metering.storage.postgres.connection.ca.name` | - | Name of the secret containing CA certificate for TLS connections (optional) | +| `metering.interval` | `1m` | How often to collect measurements from control planes | +| `metering.workerCount` | `10` | Number of parallel workers for measurement collection | +| `metering.aggregationInterval` | `1h` | How often to aggregate measurements into hourly usage data | +| `metering.measurementRetentionDays` | `30` | Days to retain raw measurements (0 = indefinite) | + + +#### Database sizing and retention + +The metering system uses two PostgreSQL tables to track usage: + +**Raw measurements table** (`measurements`): +- Stores point-in-time snapshots collected every measurement interval (default: 1 minute) +- One row per control plane per interval +- Affected by the `measurementRetentionDays` setting +- Used for detailed auditing and troubleshooting + +**Aggregated usage table** (`hourly_usage`): +- Stores hourly aggregated resource hours and operations per license +- One row per hour per license +- Never deleted (required for accurate license tracking) +- Grows much slower than raw measurements + +##### Storage sizing guidelines + +Estimate your PostgreSQL storage needs based on these factors: + + +| Deployment Size | Control Planes | Measurement Interval | Retention Days | Raw Measurements | Indexes & Overhead | Total Storage | +|----------------|----------------|---------------------|----------------|------------------|-------------------|---------------| +| Small | 10 | 1m | 30 | ~85 MB | ~40 MB | **~125 MB** | +| Medium | 50 | 1m | 30 | ~430 MB | ~215 MB | **~645 MB** | +| Large | 200 | 1m | 30 | ~1.7 GB | ~850 MB | **~2.5 GB** | +| Large (90-day retention) | 200 | 1m | 90 | ~5.2 GB | ~2.6 GB | **~7.8 GB** | + +The aggregated hourly usage table adds minimal overhead (~50 KB per year per license). 
+ +**Formula for custom calculations**: +``` +Daily measurements per control plane = (24 * 60) / interval_minutes +Total rows = control_planes × daily_measurements × retention_days +Storage (MB) ≈ (total_rows × 200 bytes) / 1,048,576 × 1.5 (with indexes) +``` + +##### Retention behavior + +The `measurementRetentionDays` setting controls retention of raw measurement data: + +- **Default: 30 days** - Balances audit capabilities with storage efficiency +- **Set to 0**: Disables cleanup, retains all raw measurements indefinitely +- **Cleanup runs**: Every aggregation interval (default: hourly) +- **What's kept forever**: Aggregated hourly usage data (needed for license tracking) +- **What's cleaned up**: Raw point-in-time measurements older than retention period + +**Recommendations**: +- **30 days**: For most troubleshooting and short-term auditing +- **60 to 90 days**: For environments requiring extended audit trails +- **Unlimited (0)**: Only for environments with ample storage or specific compliance requirements + +:::note +Increasing retention period linearly increases storage requirements for raw measurements. The aggregated hourly data is always retained regardless of this setting. +::: + +### Step 3: Apply your license + +Use the `up` CLI to apply your license file: + +```bash +up space license apply /path/to/license.json +``` + +This command automatically: +- Creates a secret containing your license file in the `upbound-system` namespace +- Creates the `SpaceLicense` resource configured to use that secret + +:::tip +You can specify a different namespace for the license secret using the `--namespace` flag: +```bash +up space license apply /path/to/license.json --namespace my-namespace +``` +::: + +
+Alternative: Manual kubectl approach + +If you prefer not to use the `up` CLI, you can manually create the resources: + +1. Create the license secret: + +```bash +kubectl create secret generic space-license \ + -n upbound-system \ + --from-file=license.json=/path/to/license.json +``` + +2. Create the SpaceLicense resource: + +```yaml +apiVersion: admin.spaces.upbound.io/v1alpha1 +kind: SpaceLicense +metadata: + name: space +spec: + secretRef: + name: space-license + namespace: upbound-system + key: license.json +``` + +```bash +kubectl apply -f spacelicense.yaml +``` + +:::important +You **must** name the `SpaceLicense` resource `space`. This resource is a singleton and only one can exist in the cluster. +::: + +
+ +## Monitoring usage + +### Check license status + +Use the `up` CLI to view your license details and current usage: + +```bash +up space license show +``` + +Example output: + +``` +Spaces License Status: Valid (License is valid) + +Created: 2024-01-01T00:00:00Z +Expires: 2025-01-01T00:00:00Z + +Plan: enterprise + +Resource Hour Limit: 1000000 +Operation Limit: 500000 + +Enabled Features: +- spaces +- query-api +- backup-restore +``` + +The output shows: +- License validity status and any validation messages +- Creation and expiration dates +- Your commercial plan tier +- Capacity limits for resource hours and operations +- Enabled features in your license +- Any restrictions (such as cluster UUID pinning) + +
+Alternative: View detailed status with kubectl + +For detailed information including usage statistics, use kubectl: + +```bash +kubectl get spacelicense space -o yaml +``` + +Example output showing usage data: + +```yaml +apiVersion: admin.spaces.upbound.io/v1alpha1 +kind: SpaceLicense +metadata: + name: space +spec: + secretRef: + name: space-license + namespace: upbound-system +status: + conditions: + - type: LicenseValid + status: "True" + reason: Valid + message: "License is valid" + id: "lic_abc123xyz" + plan: "enterprise" + capacity: + resourceHours: 1000000 + operations: 500000 + usage: + resourceHours: 245680 + operations: 12543 + resourceHoursUtilization: "24.57%" + operationsUtilization: "2.51%" + firstMeasurement: "2024-01-15T10:00:00Z" + lastMeasurement: "2024-02-10T14:30:00Z" + createdAt: "2024-01-01T00:00:00Z" + expiresAt: "2025-01-01T00:00:00Z" + enabledFeatures: + - "spaces" + - "query-api" + - "backup-restore" +``` + +
+ +### Understanding the status fields + +| Field | Description | +|-------|-------------| +| `status.id` | Unique license identifier | +| `status.plan` | Your commercial plan (community, standard, enterprise) | +| `status.capacity` | Total capacity included in your license | +| `status.usage.resourceHours` | Total resource hours consumed | +| `status.usage.operations` | Total operations performed | +| `status.usage.resourceHoursUtilization` | Percentage of resource hours capacity used | +| `status.usage.operationsUtilization` | Percentage of operations capacity used | +| `status.usage.firstMeasurement` | When usage tracking began | +| `status.usage.lastMeasurement` | Most recent usage update | +| `status.expiresAt` | License expiration date | + +### Monitor with kubectl + +Watch your license utilization in real-time: + +```bash +kubectl get spacelicense space -w +``` + +Short output format: + +``` +NAME PLAN VALID REASON AGE +space enterprise True Valid 45d +``` + +## Managing licenses + +### Updating your license + +To update your license with a new license file (for example, when renewing or upgrading capacity), apply the new license: + +```bash +up space license apply /path/to/new-license.json +``` + +This command replaces the existing license secret and updates the SpaceLicense resource. + +### Removing a license + +To remove a license: + +```bash +up space license remove +``` + +This command: +- Prompts for confirmation before proceeding +- Removes the license secret + +To skip the confirmation prompt, use the `--force` flag: + +```bash +up space license remove --force +``` + +## Troubleshooting + +### License not updating + +If the license status doesn't update with usage data: + +1. **Check metering controller logs**: + ```bash + kubectl logs -n upbound-system deployment/spaces-controller -c metering + ``` + +2**Check if the system captures your measurements**: + + ```bash + # Connect to PostgreSQL and query the measurements table + kubectl exec -it -- psql -U -d \ + -c "SELECT COUNT(*) FROM measurements WHERE timestamp > NOW() - INTERVAL '1 hour';" + ``` + +### High utilization warnings + +If you're approaching your capacity limits: + +1. **Review resource usage** by control plane to identify high consumers +2. **Contact your Upbound sales representative** to discuss capacity expansion +3. **Optimize managed resources** by cleaning up unused resources + +### License validation failures + +If your license shows as invalid: + +1. **Check expiration date**: `kubectl get spacelicense space -o jsonpath='{.status.expiresAt}'` +2. **Verify license file integrity**: Ensure the secret contains valid JSON +3. **Check for cluster UUID restrictions**: Upbound pins some licenses to + specific clusters +4. **Review controller logs** for detailed error messages + +## Differences from traditional billing + +### Capacity licensing + +- ✅ Works in disconnected environments +- ✅ Provides real-time usage visibility +- ✅ No manual data export required +- ✅ Requires PostgreSQL database +- ✅ Fixed capacity model + +### Traditional billing (object storage) + + +- ❌ Requires periodic manual export +- ❌ Delayed visibility into usage +- ✅ Works with S3/Azure Blob/GCS +- ❌ Requires cloud storage access +- ✅ Pay-as-you-go model + +## Best practices + +### Database management + +1. **Regular backups**: Back up your metering database regularly to preserve usage history +2. **Monitor database size**: Set appropriate retention periods to manage storage growth +3. 
**Use managed databases**: Consider managed PostgreSQL services for production +4. **Connection pooling**: Use connection pooling for better performance at scale + +### License management + +1. **Monitor utilization**: Set up alerts before reaching 80% capacity +2. **Plan renewals early**: Start renewal discussions 60 days before expiration +3. **Track grace periods**: Note the `gracePeriodEndsAt` date for planning +4. **Secure license files**: Treat license files as sensitive credentials + +### Operational monitoring + +1. **Set up dashboards**: Create Grafana dashboards for usage trends +2. **Enable alerting**: Configure alerts for high utilization and expiration +3. **Regular audits**: Periodically review usage patterns across control planes +4. **Capacity planning**: Use historical data to predict future capacity needs + +## Next steps + +- Learn about [Observability] to monitor your Spaces deployment +- Explore [Backup and Restore][backup-restore] to protect your control plane data +- Review [Self-Hosted Space Billing][space-billing] for the traditional billing model +- Contact [Upbound Sales][sales] to discuss capacity licensing options + + +[space-billing]: /spaces/howtos/self-hosted/billing +[CloudNativePG]: https://cloudnative-pg.io/ +[backups]: https://cloudnative-pg.io/documentation/current/backup_recovery/ +[backup-restore]: /spaces/howtos/backup-and-restore +[sales]: https://www.upbound.io/contact +[eso]: https://external-secrets.io/ +[Observability]: /spaces/howtos/observability + + diff --git a/spaces_versioned_docs/version-v1.12/howtos/self-hosted/certs.md b/spaces_versioned_docs/version-v1.12/howtos/self-hosted/certs.md new file mode 100644 index 000000000..e517c250e --- /dev/null +++ b/spaces_versioned_docs/version-v1.12/howtos/self-hosted/certs.md @@ -0,0 +1,274 @@ +--- +title: Istio Ingress Gateway With Custom Certificates +sidebar_position: 20 +description: Install self hosted spaces using istio ingress gateway in a Kind cluster +--- + +:::important +Prerequisites + +- Spaces Token available in a file +- `docker login xpkg.upbound.io -u -p ` +- [`istioctl`][istioctl] installation +- `jq` installation +::: + +This document describes the installation of a self hosted space on an example `kind` +cluster along with Istio Ingress Gateway and certificates. The service mesh and certificates +installation is transferable to self hosted spaces in arbitrary clouds. + +## Create a kind cluster + +```shell +cat < +## Install Istio + + + +:::important +This is an example and not recommended for use in production. +::: + + +1. Create the `istio-values.yaml` file + +```shell +cat > istio-values.yaml << 'EOF' +apiVersion: install.istio.io/v1alpha1 +kind: IstioOperator +spec: + hub: gcr.io/istio-release + components: + ingressGateways: + - enabled: true + name: istio-ingressgateway + k8s: + nodeSelector: + ingress-ready: "true" + overlays: + - apiVersion: apps/v1 + kind: Deployment + name: istio-ingressgateway + patches: + - path: spec.template.spec.containers.[name:istio-proxy].ports + value: + - containerPort: 8080 + hostPort: 80 + - containerPort: 8443 + hostPort: 443 +EOF +``` + +2. Install istio via `istioctl` + +```shell +istioctl install -f istio-values.yaml +``` + +## Create a self-signed Certificate via cert-manager + +:::important +This Certificate manifest creates a self-signed certificate for a proof of concept +environment and isn't recommended for production use cases. +::: + +1. 
Create the upbound-system namespace + +```shell +kubectl create namespace upbound-system +``` + +2. Create a self-signed certificate + +```shell +cat < +## Create an Istio Gateway and VirtualService + + + + +Configure an Istio Gateway and VirtualService to use TLS passthrough. + + +```shell +cat < spaces-values.yaml << 'EOF' +# Configure spaces-router to use the TLS secret created by cert-manager. +externalTLS: + tlsSecret: + name: example-tls-secret + caBundleSecret: + name: example-tls-secret + key: ca.crt +ingress: + provision: false + # Allow Istio Ingress Gateway to communicate to the spaces-router + namespaceLabels: + kubernetes.io/metadata.name: istio-system + podLabels: + app: istio-ingressgateway + istio: ingressgateway +EOF +``` + +2. Set the required environment variables + +```shell +# Update these according to your account/token file +export SPACES_TOKEN_PATH= +export UPBOUND_ACCOUNT= +# Replace SPACES_ROUTER_HOST with your Spaces ingress hostname +export SPACES_ROUTER_HOST="proxy.upbound-127.0.0.1.nip.io" +export SPACES_VERSION="1.14.1" +``` + +3. Create an image pull secret for Spaces + +```shell +kubectl -n upbound-system create secret docker-registry upbound-pull-secret \ + --docker-server=https://xpkg.upbound.io \ + --docker-username="$(jq -r .accessId $SPACES_TOKEN_PATH)" \ + --docker-password="$(jq -r .token $SPACES_TOKEN_PATH)" +``` + +4. Install the Spaces helm chart + +```shell +# Login to xpkg.upbound.io +jq -r .token $SPACES_TOKEN_PATH | helm registry login xpkg.upbound.io -u $(jq -r .accessId $SPACES_TOKEN_PATH) --password-stdin + +# Install spaces helm chart +helm -n upbound-system upgrade --install spaces \ + oci://xpkg.upbound.io/spaces-artifacts/spaces \ + --version "${SPACES_VERSION}" \ + --set "ingress.host=${SPACES_ROUTER_HOST}" \ + --set "account=${UPBOUND_ACCOUNT}" \ + --set "authentication.hubIdentities=true" \ + --set "authorization.hubRBAC=true" \ + --wait -f spaces-values.yaml +``` + +## Validate the installation + +Successful access of the `up` command to interact with your self hosted space validates the +certificate installation. + +- `up ctx .` + +You can also issue control plane creation, list and deletion commands. + +- `up ctp create cert-test` +- `up ctp list` +- `up ctx disconnected/kind-kind/default/cert-test && kubectl get namespace` +- `up ctp delete cert-test` + +:::note +If `up` can't connect to your control plane, follow [this guide to create a new profile][up-profile]. +::: + +## Troubleshooting + +Examine your certificate with `openssl`: + +```shell +openssl s_client -connect proxy.upbound-127.0.0.1.nip.io:443 -showcerts +``` + +[istioctl]: https://istio.io/latest/docs/ops/diagnostic-tools/istioctl/ +[up-profile]: /manuals/cli/howtos/profile-config/ diff --git a/spaces_versioned_docs/version-v1.12/howtos/self-hosted/configure-ha.md b/spaces_versioned_docs/version-v1.12/howtos/self-hosted/configure-ha.md new file mode 100644 index 000000000..ddf36c55e --- /dev/null +++ b/spaces_versioned_docs/version-v1.12/howtos/self-hosted/configure-ha.md @@ -0,0 +1,450 @@ +--- +title: Production Scaling and High Availability +description: Configure your Self-Hosted Space for production +sidebar_position: 5 +--- + + + +This guide explains how to configure an existing Upbound Space deployment for +production operation at scale. + +Use this guide when you're ready to deploy production scaling, high availability, +and monitoring in your Space. 
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments.
+
+For API specifications on ControlPlane resources and configurations, and for version compatibility details, see the [Spaces API Reference](../../../../../reference/apis/spaces-api/).
+:::
+
+## Prerequisites
+
+Before you begin scaling your Spaces deployment, make sure you have:
+
+* A working Space deployment
+* Cluster administrator access
+* An understanding of load patterns and growth in your organization
+* Familiarity with node affinity, tainting, and Horizontal Pod Autoscaling
+  (HPA)
+
+## Production scaling strategy
+
+In this guide, you will:
+
+* Create dedicated node pools for different component types
+* Configure high availability to ensure there are no single points of failure
+* Set dynamic scaling for variable workloads
+* Optimize your storage and component operations
+* Monitor your deployment health and performance
+
+## Spaces architecture
+
+The basic Spaces workflow follows the pattern below:
+
+![Spaces workflow][spaces-workflow]
+
+## Node architecture
+
+You can mitigate resource contention and improve reliability by separating system
+components into dedicated node pools.
+
+### `etcd` dedicated nodes
+
+`etcd` performance directly impacts your entire Space, so isolate it for
+consistent performance.
+
+1. Create a dedicated `etcd` node pool
+
+   **Requirements:**
+   - **Minimum**: 3 nodes for HA
+   - **Instance type**: General purpose with high network throughput/low latency
+   - **Storage**: High performance storage (`etcd` is I/O sensitive)
+
+2. Taint `etcd` nodes to reserve them
+
+   ```bash
+   kubectl taint nodes <node-name> target=etcd:NoSchedule
+   ```
+
+3. Configure `etcd` storage
+
+   `etcd` is sensitive to storage I/O performance. Review the [`etcd` scaling
+   documentation][scaling] for specific storage guidance.
+
+### API server dedicated nodes
+
+API servers handle all control plane requests and should run on dedicated
+infrastructure.
+
+1. Create dedicated API server nodes
+
+   **Requirements:**
+   - **Minimum**: 2 nodes for HA
+   - **Instance type**: Compute-optimized, memory-optimized, or general-purpose
+   - **Scaling**: Scale vertically based on API server load patterns
+
+2. Taint API server nodes
+
+   ```bash
+   kubectl taint nodes <node-name> target=apiserver:NoSchedule
+   ```
+
+### Configure cluster autoscaling
+
+Enable cluster autoscaling for all node pools.
+
+For AWS EKS clusters, Upbound recommends using [`Karpenter`][karpenter] for
+improved bin-packing and instance type selection.
+
+For GCP GKE clusters, follow the [GKE autoscaling][gke-autoscaling] guide.
+
+For Azure AKS clusters, follow the [AKS autoscaling][aks-autoscaling] guide.
+
+## Configure high availability
+
+Ensure control plane components can survive node and zone failures.
+
+### Enable high availability mode
+
+1. Configure control planes for high availability
+
+   ```yaml
+   controlPlanes:
+     ha:
+       enabled: true
+   ```
+
+   This configures control plane pods to run with multiple replicas and
+   associated pod disruption budgets.
+
+### Configure component distribution
+
+1. Set up API server pod distribution
+
+   ```yaml
+   controlPlanes:
+     vcluster:
+       affinity:
+         nodeAffinity:
+           requiredDuringSchedulingIgnoredDuringExecution:
+             nodeSelectorTerms:
+             - matchExpressions:
+               - key: target
+                 operator: In
+                 values:
+                 - apiserver
+         podAntiAffinity:
+           requiredDuringSchedulingIgnoredDuringExecution:
+           - labelSelector:
+               matchExpressions:
+               - key: app
+                 operator: In
+                 values:
+                 - vcluster
+             topologyKey: "kubernetes.io/hostname"
+           preferredDuringSchedulingIgnoredDuringExecution:
+           - podAffinityTerm:
+               labelSelector:
+                 matchExpressions:
+                 - key: app
+                   operator: In
+                   values:
+                   - vcluster
+               topologyKey: topology.kubernetes.io/zone
+             weight: 100
+   ```
+
+2. Configure `etcd` pod distribution
+
+   ```yaml
+   controlPlanes:
+     etcd:
+       affinity:
+         nodeAffinity:
+           requiredDuringSchedulingIgnoredDuringExecution:
+             nodeSelectorTerms:
+             - matchExpressions:
+               - key: target
+                 operator: In
+                 values:
+                 - etcd
+         podAntiAffinity:
+           requiredDuringSchedulingIgnoredDuringExecution:
+           - labelSelector:
+               matchExpressions:
+               - key: app
+                 operator: In
+                 values:
+                 - vcluster-etcd
+             topologyKey: "kubernetes.io/hostname"
+           preferredDuringSchedulingIgnoredDuringExecution:
+           - podAffinityTerm:
+               labelSelector:
+                 matchExpressions:
+                 - key: app
+                   operator: In
+                   values:
+                   - vcluster-etcd
+               topologyKey: topology.kubernetes.io/zone
+             weight: 100
+   ```
+
+### Configure tolerations
+
+Allow control plane pods to schedule on the tainted dedicated nodes (available
+in Spaces v1.14+).
+
+1. Add tolerations for `etcd` pods
+
+   ```yaml
+   controlPlanes:
+     etcd:
+       tolerations:
+       - key: "target"
+         operator: "Equal"
+         value: "etcd"
+         effect: "NoSchedule"
+   ```
+
+2. Add tolerations for API server pods
+
+   ```yaml
+   controlPlanes:
+     vcluster:
+       tolerations:
+       - key: "target"
+         operator: "Equal"
+         value: "apiserver"
+         effect: "NoSchedule"
+   ```
+
+## Configure autoscaling for Spaces components
+
+Set up the Spaces system components to handle variable load automatically.
+
+### Scale API and `apollo` services
+
+1. Configure minimum replicas for availability
+
+   ```yaml
+   api:
+     replicaCount: 2
+
+   features:
+     alpha:
+       apollo:
+         enabled: true
+         replicaCount: 2
+   ```
+
+   Both services support horizontal and vertical scaling based on load patterns.
+
+### Configure router autoscaling
+
+The `spaces-router` is the entry point for all traffic and needs intelligent
+scaling.
+
+1. Enable Horizontal Pod Autoscaler
+
+   ```yaml
+   router:
+     hpa:
+       enabled: true
+       minReplicas: 2
+       maxReplicas: 8
+       targetCPUUtilizationPercentage: 80
+       targetMemoryUtilizationPercentage: 80
+   ```
+
+2. Monitor scaling factors
+
+   **Router scaling behavior:**
+   - **Vertical scaling**: Scales based on number of control planes
+   - **Horizontal scaling**: Scales based on request volume
+   - **Resource monitoring**: Monitor CPU and memory usage
+
+### Configure controller scaling
+
+The `spaces-controller` manages Space-level resources and requires vertical
+scaling.
+
+1. Configure adequate resources with headroom
+
+   ```yaml
+   controller:
+     resources:
+       requests:
+         cpu: "500m"
+         memory: "1Gi"
+       limits:
+         cpu: "2000m"
+         memory: "4Gi"
+   ```
+
+   **Important**: The controller can spike when reconciling large numbers of
+   control planes, so provide adequate headroom for resource spikes.
+
+## Set up production storage
+
+### Configure Query API database
+
+1. Use a managed PostgreSQL database
+
+   **Recommended services:**
+   - [AWS RDS][rds]
+   - [Google Cloud SQL][gke-sql]
+   - [Azure Database for PostgreSQL][aks-sql]
+
+   **Requirements:**
+   - Minimum 400 IOPS performance
+
+## Monitoring
+
+Monitor key metrics to ensure healthy scaling and identify issues quickly.
+
+### Control plane health
+
+Track these `spaces-controller` metrics:
+
+1. **Total control planes**
+
+   ```
+   spaces_control_plane_exists
+   ```
+
+   Tracks the total number of control planes in the system.
+
+2. **Degraded control planes**
+
+   ```
+   spaces_control_plane_degraded
+   ```
+
+   Returns control planes that don't report a `Synced`, `Ready`, and
+   `Healthy` state.
+
+3. **Stuck control planes**
+
+   ```
+   spaces_control_plane_stuck
+   ```
+
+   Control planes stuck in a provisioning state.
+
+4. **Deletion issues**
+
+   ```
+   spaces_control_plane_deletion_stuck
+   ```
+
+   Control planes stuck during deletion.
+
+### Alerting
+
+Configure alerts for critical scaling and health metrics:
+
+- **High error rates**: Alert when 4xx/5xx response rates exceed thresholds
+- **Control plane health**: Alert when degraded or stuck control planes exceed acceptable counts
+
+## Architecture overview
+
+**Spaces System Components:**
+
+- **`spaces-router`**: Entry point for all endpoints, dynamically builds routes to control plane API servers
+- **`spaces-controller`**: Reconciles Space-level resources, serves webhooks, works with `mxp-controller` for provisioning
+- **`spaces-api`**: API for managing groups, control planes, shared secrets, and telemetry objects (accessed only through spaces-router)
+- **`spaces-apollo`**: Hosts the Query API, connects to PostgreSQL database populated by `apollo-syncer` pods
+
+**Control Plane Components (per control plane):**
+- **`mxp-controller`**: Handles provisioning tasks, serves webhooks, installs UXP and `XGQL`
+- **`XGQL`**: GraphQL API powering console views
+- **`kube-state-metrics`**: Collects usage metrics for billing (updated by `mxp-controller` when CRDs change)
+- **`vector`**: Works with `kube-state-metrics` to send usage data to external storage for billing
+- **`apollo syncer`**: Syncs `etcd` data into PostgreSQL for the Query API
+
+### `up ctx` workflow
+
+![up ctx workflow diagram][up-ctx-workflow]
+
+### Access a control plane API server via kubectl
+
+![kubectl workflow diagram][kubectl]
+
+### Query API/Apollo
+
+![query API workflow diagram][query-api]
+
+## See also
+
+* [Upbound Spaces deployment requirements][deployment]
+* [Upbound `etcd` scaling resources][scaling]
+
+[up-ctx-workflow]: /img/up-ctx-workflow.png
+[kubectl]: /img/kubectl-workflow.png
+[query-api]: /img/query-api-workflow.png
+[spaces-workflow]: /img/up-basic-flow.png
+[rds]: https://aws.amazon.com/rds/postgresql/
+[gke-sql]: https://cloud.google.com/kubernetes-engine/docs/tutorials/stateful-workloads/postgresql
+[aks-sql]: https://learn.microsoft.com/en-us/azure/aks/deploy-postgresql-ha?tabs=azuredisk
+[deployment]: https://docs.upbound.io/spaces/howtos/self-hosted/deployment-reqs/
+[karpenter]: https://docs.aws.amazon.com/eks/latest/best-practices/karpenter.html
+[gke-autoscaling]: https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-autoscaler
+[aks-autoscaling]: https://learn.microsoft.com/en-us/azure/aks/cluster-autoscaler-overview
+[scaling]: https://docs.upbound.io/deploy/self-hosted-spaces/scaling-resources#scaling-etcd-storage
diff --git a/spaces_versioned_docs/version-v1.12/howtos/self-hosted/controllers.md b/spaces_versioned_docs/version-v1.12/howtos/self-hosted/controllers.md
new file mode 100644
index 000000000..692740638
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.12/howtos/self-hosted/controllers.md
@@ -0,0 +1,389 @@
+---
+title: Controllers
+weight: 250
+description: A guide to how to wrap and deploy an Upbound controller into control planes on Upbound.
+---
+
+:::important
+This feature is in private preview for select customers in Upbound Spaces. If you're interested in this feature, please [contact us](https://www.upbound.io/contact-us).
+:::
+
+Upbound's _Controllers_ feature lets you build and deploy control plane software from the Kubernetes ecosystem. With the _Controllers_ feature, you're not limited to just managing resource types defined by Crossplane. Now you can create resources from _CustomResourceDefinitions_ defined by other Kubernetes ecosystem tooling.
+
+This guide explains how to bundle and deploy control plane software from the Kubernetes ecosystem on a control plane in Upbound.
+
+## Benefits
+
+The Controllers feature provides the following benefits:
+
+* Deploy control plane software from the Kubernetes ecosystem.
+* Use your control plane's package manager to handle the lifecycle of the control plane software and define dependencies between packages.
+* Build powerful compositions that combine both Crossplane and Kubernetes _CustomResources_.
+
+## How it works
+
+A _Controller_ is a package type that bundles control plane software from the Kubernetes ecosystem. Examples of such software include:
+
+- Kubernetes policy engines
+- CI/CD tooling
+- Your own private custom controllers defined by your organization
+
+You build a _Controller_ package by wrapping a Helm chart along with its requisite _CustomResourceDefinitions_. Your _Controller_ package gets pushed to an OCI registry, and from there you can apply it to a control plane like you would any other Crossplane package. Your control plane's package manager is responsible for managing the lifecycle of the software once applied.
+
+## Prerequisites
+
+Enable the Controllers feature in the Space you plan to run your control plane in:
+
+- Cloud Spaces: Not available yet
+- Connected Spaces: Space administrator must enable this feature
+- Disconnected Spaces: Space administrator must enable this feature
+
+Packaging a _Controller_ requires [up CLI][cli] `v0.39.0` or later.
+
+## Build a _Controller_ package
+
+_Controllers_ are a package type that gets administered by your control plane's package manager.
+
+### Prepare the package
+
+To define a _Controller_, you need a Helm chart. This guide assumes the control plane software you want to build into a _Controller_ already has a Helm chart available.
+
+Start by making a working directory to assemble the necessary parts:
+
+```ini
+mkdir controller-package
+cd controller-package
+```
+
+Inside the working directory, pull the Helm chart:
+
+```shell
+export CHART_REPOSITORY=<chart-repository-url>
+export CHART_NAME=<chart-name>
+export CHART_VERSION=<chart-version>
+
+helm pull $CHART_NAME --repo $CHART_REPOSITORY --version $CHART_VERSION
+```
+
+Be sure to update the Helm chart repository, name, and version with your own.
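+
+For example, pulling the Kyverno chart (a Kubernetes policy engine) might look like the following. The repository URL and version are illustrative only; substitute the details of your own chart:
+
+```shell
+# Illustrative values for pulling a policy-engine chart; replace with your own.
+export CHART_REPOSITORY=https://kyverno.github.io/kyverno/
+export CHART_NAME=kyverno
+export CHART_VERSION=3.2.6
+
+helm pull $CHART_NAME --repo $CHART_REPOSITORY --version $CHART_VERSION
+```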
+
+Move the Helm chart into its own folder:
+
+```ini
+mkdir helm
+mv $CHART_NAME-$CHART_VERSION.tgz helm/chart.tgz
+```
+
+Unpack the CRDs from the Helm chart into their own directory:
+
+```shell
+export RELEASE_NAME=<release-name>
+export RELEASE_NAMESPACE=<release-namespace>
+
+mkdir crds
+helm template $RELEASE_NAME helm/chart.tgz -n $RELEASE_NAMESPACE --include-crds | \
+  yq e 'select(.kind == "CustomResourceDefinition")' - | \
+  yq -s '("crds/" + .metadata.name + ".yaml")' -
+```
+
+Be sure to update the Helm release name and namespace with your own.
+
+:::info
+The instructions above assume your CRDs get deployed as part of your Helm chart. If they're deployed another way, you need to manually copy your CRDs instead.
+:::
+
+Create a `crossplane.yaml` with your controller metadata:
+
+```yaml
+cat <<EOF > crossplane.yaml
+apiVersion: meta.pkg.upbound.io/v1alpha1
+kind: Controller
+metadata:
+  annotations:
+    friendly-name.meta.crossplane.io: Controller
+    meta.crossplane.io/description: |
+      A brief description of what the controller does.
+    meta.crossplane.io/license: Apache-2.0
+    meta.crossplane.io/maintainer: <maintainer>
+    meta.crossplane.io/readme: |
+      An explanation of your controller.
+    meta.crossplane.io/source: <source-repository-url>
+  name: <controller-name>
+spec:
+  packagingType: Helm
+  helm:
+    releaseName: <release-name>
+    releaseNamespace: <release-namespace>
+    # Value overrides for the helm release can be provided below.
+    # values:
+    #   foo: bar
+EOF
+```
+
+Your controller's file structure should look like this:
+
+```ini
+.
+├── crds
+│   ├── your-crd.yaml
+│   ├── second-crd.yaml
+│   └── another-crd.yaml
+├── crossplane.yaml
+└── helm
+    └── chart.tgz
+```
+
+### Package and push the _Controller_
+
+At the root of your controller's working directory, build the contents into an xpkg:
+
+```ini
+up xpkg build
+```
+
+This causes an xpkg to get saved to your current directory with a name like `controller-f7091386b4c0.xpkg`.
+
+Push the package to your desired OCI registry:
+
+```shell
+export UPBOUND_ACCOUNT=<upbound-account>
+export CONTROLLER_NAME=<controller-name>
+export CONTROLLER_VERSION=<controller-version>
+export XPKG_FILENAME=<xpkg-filename>
+
+up xpkg push xpkg.upbound.io/$UPBOUND_ACCOUNT/$CONTROLLER_NAME:$CONTROLLER_VERSION -f $XPKG_FILENAME
+```
+
+## Deploy a _Controller_ package
+
+:::important
+_Controllers_ are only installable on control planes running Crossplane `v1.19.0` or later.
+:::
+
+Set your kubecontext to the desired control plane in Upbound. Change the package path to the OCI registry you pushed it to. Then, deploy the _Controller_ directly:
+
+```shell
+export CONTROLLER_NAME=<controller-name>
+export CONTROLLER_VERSION=<controller-version>
+
+cat <<EOF | kubectl apply -f -
+# Example runtime manifest for a Controller package. The API group shown here
+# is an assumption; confirm it against your Spaces version.
+apiVersion: pkg.upbound.io/v1alpha1
+kind: Controller
+metadata:
+  name: $CONTROLLER_NAME
+spec:
+  package: xpkg.upbound.io/$UPBOUND_ACCOUNT/$CONTROLLER_NAME:$CONTROLLER_VERSION
+EOF
+```
+
+## Example: controller-argocd
+
+The following example packages Argo CD as a _Controller_ called `controller-argocd`. Pull the Argo CD Helm chart and unpack its CRDs following the steps above, then create a `crossplane.yaml` with the controller metadata:
+
+```yaml
+cat <<EOF > crossplane.yaml
+apiVersion: meta.pkg.upbound.io/v1alpha1
+kind: Controller
+metadata:
+  annotations:
+    friendly-name.meta.crossplane.io: Controller ArgoCD
+    meta.crossplane.io/description: |
+      The ArgoCD Controller enables continuous delivery and declarative configuration
+      management for Kubernetes applications using GitOps principles.
+    meta.crossplane.io/license: Apache-2.0
+    meta.crossplane.io/maintainer: Upbound Maintainers
+    meta.crossplane.io/readme: |
+      ArgoCD is a declarative GitOps continuous delivery tool for Kubernetes that
+      follows the GitOps methodology to manage infrastructure and application
+      configurations.
+    meta.crossplane.io/source: https://github.com/argoproj/argo-cd
+  name: argocd
+spec:
+  packagingType: Helm
+  helm:
+    releaseName: argo-cd
+    releaseNamespace: argo-system
+    # values:
+    #   foo: bar
+EOF
+```
+
+Your controller's file structure should look like this:
+
+```ini
+.
+├── crds
+│   ├── applications.argoproj.io.yaml
+│   ├── applicationsets.argoproj.io.yaml
+│   └── appprojects.argoproj.io.yaml
+├── crossplane.yaml
+└── helm
+    └── chart.tgz
+```
+
+### Package and push controller-argocd
+
+At the root of your controller's working directory, build the contents into an xpkg:
+
+```ini
+up xpkg build
+```
+
+This causes an xpkg to get saved to your current directory with a name like `argocd-f7091386b4c0.xpkg`.
+
+Push the package to your desired OCI registry:
+
+```shell
+export UPBOUND_ACCOUNT=<upbound-account>
+export CONTROLLER_NAME=controller-argocd
+export CONTROLLER_VERSION=v7.8.8
+export XPKG_FILENAME=<xpkg-filename>
+
+up xpkg push --create xpkg.upbound.io/$UPBOUND_ACCOUNT/$CONTROLLER_NAME:$CONTROLLER_VERSION -f $XPKG_FILENAME
+```
+
+### Deploy controller-argocd to a control plane
+
+Set your kubecontext to the desired control plane in Upbound. Change the package path to the OCI registry you pushed it to. Then, deploy the _Controller_ directly:
+
+```ini
+cat <<EOF | kubectl apply -f -
+# Example runtime manifest for the controller-argocd package. The API group
+# shown here is an assumption; confirm it against your Spaces version.
+apiVersion: pkg.upbound.io/v1alpha1
+kind: Controller
+metadata:
+  name: argocd
+spec:
+  package: xpkg.upbound.io/$UPBOUND_ACCOUNT/controller-argocd:v7.8.8
+EOF
+```
+
+## Frequently asked questions
+
+<details>
+<summary>Can I package any software or are there any prerequisites to be a Controller?</summary>
+
+We define a *Controller* as software that has at least one Custom Resource Definition (CRD) and a Kubernetes controller for that CRD. This is the minimum requirement to be a *Controller*. We have checks to enforce this at packaging time.
+
+</details>
+
+<details>
+<summary>How can I package my software as a Controller?</summary>
+
+Currently, we support Helm charts as the underlying package format for *Controllers*. As long as you have a Helm chart, you can package it as a *Controller*.
+
+If you don't have a Helm chart, you can't package the software as a *Controller*. We may extend this to support other packaging formats, like Kustomize, in the future.
+
+</details>
+
+<details>
+<summary>Can I package Crossplane XRDs/Compositions as a Helm chart to deploy as a Controller?</summary>
+
+This isn't recommended. For packaging Crossplane XRDs and Compositions, we recommend using the `Configuration` package format. A Helm chart containing only Crossplane XRDs and Compositions doesn't qualify as a *Controller*.
+
+</details>
+
+<details>
+<summary>How can I override the Helm values when deploying a Controller?</summary>
+
+Overriding the Helm values is possible at two levels:
+- During packaging time, in the package manifest file (see the sketch below).
+- At runtime, using a `ControllerRuntimeConfig` resource (similar to Crossplane's `DeploymentRuntimeConfig`).
+
+</details>
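+
+A minimal sketch of a packaging-time override, using the commented `values` block from the package manifest above. The keys are illustrative and depend entirely on the wrapped chart's values schema:
+
+```yaml
+spec:
+  packagingType: Helm
+  helm:
+    releaseName: argo-cd
+    releaseNamespace: argo-system
+    # Helm value overrides baked into the package at build time.
+    # These keys follow the wrapped chart's values schema (illustrative).
+    values:
+      server:
+        replicas: 2
+```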
+
+<details>
+<summary>How can I configure the Helm release name and namespace for the controller?</summary>
+
+It's not currently possible to configure these at runtime. The package author configures the release name and namespace during packaging, so they're hardcoded inside the package. Unlike a regular application deployed by a Helm chart, a *Controller* can only be deployed once in a given control plane, so relying on predefined release names and namespaces should be acceptable. We may consider exposing these in `ControllerRuntimeConfig` later, but we'd like to keep this opinionated unless there are strong reasons not to.
+
+</details>
+
+<details>
+<summary>Can I deploy more than one instance of a Controller package?</summary>
+
+No, this isn't possible. Remember, a *Controller* package introduces CRDs, which are cluster-scoped objects. Just like you can't deploy more than one instance of the same Crossplane Provider package today, it's not possible to deploy more than one instance of a *Controller*.
+
+</details>
+
+<details>
+<summary>Do I need a specific Crossplane version to run Controllers?</summary>
+
+Yes, you need to use Crossplane v1.19.0 or later to use *Controllers*. This is because of the changes in the Crossplane codebase to support third-party package formats in dependencies.
+
+Spaces `v1.12.0` supports Crossplane `v1.19` in the *Rapid* release channel.
+
+</details>
+
+<details>
+<summary>Can I deploy Controllers outside of an Upbound control plane? With UXP?</summary>
+
+No, *Controllers* are a proprietary package format and are only available for control planes running in Spaces hosting environments in Upbound.
+
+</details>
+
+[cli]: /manuals/uxp/overview
+
diff --git a/spaces_versioned_docs/version-v1.12/howtos/self-hosted/ctp-audit-logs.md b/spaces_versioned_docs/version-v1.12/howtos/self-hosted/ctp-audit-logs.md
new file mode 100644
index 000000000..52f52c776
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.12/howtos/self-hosted/ctp-audit-logs.md
@@ -0,0 +1,549 @@
+---
+title: Control plane audit logging
+---
+
+This guide explains how to enable and configure audit logging for control planes
+in Self-Hosted Upbound Spaces.
+
+Starting in Spaces `v1.14.0`, each control plane contains an API server that
+supports audit log collection. You can use audit logging to track creation,
+updates, and deletions of Crossplane resources. Control plane audit logs
+use observability features to collect audit logs with `SharedTelemetryConfig` and
+send logs to an OpenTelemetry (`OTEL`) collector.
+
+:::info API Version Information
+This guide applies to **Spaces v1.14.0 and later** (Self-Hosted only). Audit logging is not available in earlier versions.
+
+For API specifications on observability resources, and for details on observability evolution across versions, see the [Spaces API Reference - v1.14](../../../../../1.14/reference/apis/spaces-api/).
+:::
+
+## Prerequisites
+
+Before you begin, make sure you have:
+
+* Spaces `v1.14.0` or greater
+* Admin access to your Spaces host cluster
+* `kubectl` configured to access the host cluster
+* `helm` installed
+* `yq` installed
+* `up` CLI installed and logged in to your organization
+
+## Enable observability
+
+Observability graduated to Generally Available in `v1.14.0` but is disabled by
+default.
+
+### Before `v1.14`
+
+To enable the GA Observability feature, upgrade your Spaces installation to `v1.14.0`
+or later and update your installation setting to the new flag:
+
+```diff
+helm upgrade spaces upbound/spaces -n upbound-system \
+-  --set "features.alpha.observability.enabled=true"
++  --set "observability.enabled=true"
+```
+
+### After `v1.14`
+
+To enable the GA Observability feature for `v1.14.0` and later, pass the feature
+flag:
+
+```sh
+helm upgrade spaces upbound/spaces -n upbound-system \
+  --set "observability.enabled=true"
+```
+
+To confirm Observability is enabled, run the `helm get values` command:
+
+```shell
+helm get values --namespace upbound-system spaces | yq .observability
+```
+
+Your output should return:
+
+```shell-noCopy
+enabled: true
+```
+
+## Install an observability backend
+
+:::note
+If you already have an observability backend in your environment, skip to the
+next section.
+:::
+
+For this guide, you'll use Grafana's `docker-otel-lgtm` bundle to validate audit log
+generation. For production environments, configure a dedicated observability
+backend like Datadog, Splunk, or an enterprise-grade Grafana stack.
+
+First, make sure your `kubectl` context points to your Spaces host cluster:
+
+```shell
+kubectl config current-context
+```
+
+The output should return your cluster name.
+
+Next, install `docker-otel-lgtm` as a deployment using port-forwarding to
+connect to Grafana. Create a manifest file and paste the
+following configuration:
+
+```yaml title="otel-lgtm.yaml"
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: observability
+---
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    app: otel-lgtm
+  name: otel-lgtm
+  namespace: observability
+spec:
+  ports:
+  - name: grpc
+    port: 4317
+    protocol: TCP
+    targetPort: 4317
+  - name: http
+    port: 4318
+    protocol: TCP
+    targetPort: 4318
+  - name: grafana
+    port: 3000
+    protocol: TCP
+    targetPort: 3000
+  selector:
+    app: otel-lgtm
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: otel-lgtm
+  labels:
+    app: otel-lgtm
+  namespace: observability
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: otel-lgtm
+  template:
+    metadata:
+      labels:
+        app: otel-lgtm
+    spec:
+      containers:
+      - name: otel-lgtm
+        image: grafana/otel-lgtm
+        ports:
+        - containerPort: 4317
+        - containerPort: 4318
+        - containerPort: 3000
+```
+
+Next, apply the manifest:
+
+```shell
+kubectl apply --filename otel-lgtm.yaml
+```
+
+Your output should return the resources:
+
+```shell
+namespace/observability created
+service/otel-lgtm created
+deployment.apps/otel-lgtm created
+```
+
+To verify your resources deployed, use `kubectl get` to display resources with
+an `ACTIVE` or `READY` status.
+
+Next, forward the Grafana port:
+
+```shell
+kubectl port-forward svc/otel-lgtm --namespace observability 3000:3000
+```
+
+Now you can access the Grafana UI at http://localhost:3000.
+
+## Create an audit-enabled control plane
+
+To enable audit logging for a control plane, you need to label it so the
+`SharedTelemetryConfig` can identify it and apply audit settings. This section
+creates a new control plane with the `audit-enabled: "true"` label. The
+`SharedTelemetryConfig` (created in the next section) finds control planes with
+this label and enables audit logging on them.
+
+Create a new manifest file and paste the configuration below:
+
+```yaml title="ctp-audit.yaml"
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: audit-test
+---
+apiVersion: spaces.upbound.io/v1beta1
+kind: ControlPlane
+metadata:
+  labels:
+    audit-enabled: "true"
+  name: ctp1
+  namespace: audit-test
+spec:
+  writeConnectionSecretToRef:
+    name: kubeconfig-ctp1
+    namespace: audit-test
+```
+
+The `metadata.labels` section contains the `audit-enabled` setting.
+
+Apply the manifest:
+
+```shell
+kubectl apply --filename ctp-audit.yaml
+```
+
+Confirm your control plane reaches the `READY` status:
+
+```shell
+kubectl get --filename ctp-audit.yaml
+```
+
+## Create a `SharedTelemetryConfig`
+
+The `SharedTelemetryConfig` applies to control planes in its namespace,
+enabling audit logging and routing logs to your `OTEL` endpoint.
+
+Create a `SharedTelemetryConfig` manifest file and paste the configuration
+below:
+
+```yaml title="sharedtelemetryconfig.yaml"
+apiVersion: observability.spaces.upbound.io/v1alpha1
+kind: SharedTelemetryConfig
+metadata:
+  name: apiserver-audit
+  namespace: audit-test
+spec:
+  apiServer:
+    audit:
+      enabled: true
+  exporters:
+    otlphttp:
+      endpoint: http://otel-lgtm.observability:4318
+  exportPipeline:
+    logs: [otlphttp]
+  controlPlaneSelector:
+    labelSelectors:
+    - matchLabels:
+        audit-enabled: "true"
+```
+
+This configuration:
+
+* Sets `apiServer.audit.enabled` to `true`
+* Configures the `otlphttp` exporter to point to the `docker-otel-lgtm` service
+* Uses `controlPlaneSelector` to match any control plane in the namespace with the `audit-enabled` label set to `true`
+
+:::note
+You can configure the `SharedTelemetryConfig` to select control planes in
+several ways. For more information on control plane selection, see the [control
+plane selection][ctp-selection] documentation.
+:::
+
+Apply the `SharedTelemetryConfig`:
+
+```shell
+kubectl apply --filename sharedtelemetryconfig.yaml
+```
+
+Confirm the configuration selected the control plane:
+
+```shell
+kubectl get --filename sharedtelemetryconfig.yaml
+```
+
+The output should return `SELECTED` as `1` and `VALIDATED` as `TRUE`.
+
+For more detailed status information, use `kubectl get`:
+
+```shell
+kubectl get --filename sharedtelemetryconfig.yaml --output yaml | yq .status
+```
+
+## Generate and monitor audit events
+
+You enabled telemetry on your new control plane and can now generate events to
+test the audit logging. This guide uses the `nop-provider` to simulate resource
+operations.
+
+Switch your `up` context to the new control plane:
+
+```shell
+up ctx <organization>/<space>/<group>/<control-plane>
+```
+
+Create a new Provider manifest:
+
+```yaml title="provider-nop.yaml"
+apiVersion: pkg.crossplane.io/v1
+kind: Provider
+metadata:
+  name: crossplane-contrib-provider-nop
+spec:
+  package: xpkg.upbound.io/crossplane-contrib/provider-nop:v0.4.0
+```
+
+Apply the provider manifest:
+
+```shell
+kubectl apply --filename provider-nop.yaml
+```
+
+Verify the provider installed and returns `HEALTHY` status as `TRUE`.
+
+Apply an example resource to kick off event generation:
+
+```shell
+kubectl apply --filename https://raw.githubusercontent.com/crossplane-contrib/provider-nop/refs/heads/main/examples/nopresource.yaml
+```
+
+In your Grafana dashboard, navigate to **Drilldown** > **Logs** under the
+Grafana menu.
+
+Filter for `controlplane-audit` log messages.
+
+Create a query to find `create` events on `nopresources` by filtering:
+
+* The `verb` field for `create` events
+* The `objectRef_resource` field to match the Kind `nopresources`
+
+Review the audit log results. The log stream displays:
+
+* The client applying the create operation
+* The resource kind
+* Client details
+* The response code
+
+Expand the example below for an audit log entry:
+
+<details>
+<summary>Audit log entry</summary>
+
+```json
+{
+  "level": "Metadata",
+  "auditID": "51bbe609-14ad-4874-be78-1289c10d506a",
+  "stage": "ResponseComplete",
+  "requestURI": "/apis/nop.crossplane.io/v1alpha1/nopresources?fieldManager=kubectl-client-side-apply&fieldValidation=Strict",
+  "verb": "create",
+  "user": {
+    "username": "kubernetes-admin",
+    "groups": ["system:masters", "system:authenticated"]
+  },
+  "impersonatedUser": {
+    "username": "upbound:spaces:host:masterclient",
+    "groups": [
+      "system:authenticated",
+      "upbound:controlplane:admin",
+      "upbound:spaces:host:system:masters"
+    ]
+  },
+  "sourceIPs": ["10.244.0.135", "127.0.0.1"],
+  "userAgent": "kubectl/v1.32.2 (darwin/arm64) kubernetes/67a30c0",
+  "objectRef": {
+    "resource": "nopresources",
+    "name": "example",
+    "apiGroup": "nop.crossplane.io",
+    "apiVersion": "v1alpha1"
+  },
+  "responseStatus": { "metadata": {}, "code": 201 },
+  "requestReceivedTimestamp": "2025-09-19T23:03:24.540067Z",
+  "stageTimestamp": "2025-09-19T23:03:24.557583Z",
+  "annotations": {
+    "authorization.k8s.io/decision": "allow",
+    "authorization.k8s.io/reason": "RBAC: allowed by ClusterRoleBinding \"controlplane-admin\" of ClusterRole \"controlplane-admin\" to Group \"upbound:controlplane:admin\""
+  }
+}
+```
+
+</details>
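+
+You can also run the same filter from the command line with Loki's `logcli`. The port, label, and field names below are assumptions based on the defaults in this guide; adjust them to match what your collector emits:
+
+```shell
+# Forward the bundled Loki port from the otel-lgtm pod. Assumes the
+# grafana/otel-lgtm image exposes Loki on its default port 3100.
+kubectl port-forward deploy/otel-lgtm --namespace observability 3100:3100 &
+
+# Query create events on nopresources. The service_name label is an assumption.
+logcli --addr=http://localhost:3100 query \
+  '{service_name="controlplane-audit"} | json | verb="create" | objectRef_resource="nopresources"'
+```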
+
+## Customize the audit policy
+
+Spaces `v1.14.0` includes a default audit policy. You can customize this policy
+by creating a configuration file and passing the values under
+`observability.controlPlanes.apiServer.auditPolicy` in the Helm values file, as
+the example below shows.
+
+An example custom audit policy:
+
+```yaml
+observability:
+  controlPlanes:
+    apiServer:
+      auditPolicy: |
+        apiVersion: audit.k8s.io/v1
+        kind: Policy
+        rules:
+          # ============================================================================
+          # RULE 1: Exclude health check and version endpoints
+          # ============================================================================
+          - level: None
+            nonResourceURLs:
+              - '/healthz*'
+              - '/readyz*'
+              - /version
+          # ============================================================================
+          # RULE 2: ConfigMaps - Write operations only
+          # ============================================================================
+          - level: Metadata
+            resources:
+              - group: ""
+                resources:
+                  - configmaps
+            verbs:
+              - create
+              - update
+              - patch
+              - delete
+            omitStages:
+              - RequestReceived
+              - ResponseStarted
+          # ============================================================================
+          # RULE 3: Secrets - ALL operations
+          # ============================================================================
+          - level: Metadata
+            resources:
+              - group: ""
+                resources:
+                  - secrets
+            verbs:
+              - get
+              - list
+              - watch
+              - create
+              - update
+              - patch
+              - delete
+            omitStages:
+              - RequestReceived
+              - ResponseStarted
+          # ============================================================================
+          # RULE 4: Global exclusion of read-only operations
+          # ============================================================================
+          - level: None
+            verbs:
+              - get
+              - list
+              - watch
+          # ==========================================================================
+          # RULE 5: Exclude standard Kubernetes resources from write operation logging
+          # ==========================================================================
+          - level: None
+            resources:
+              - group: ""
+              - group: "apps"
+              - group: "networking.k8s.io"
+              - group: "policy"
+              - group: "rbac.authorization.k8s.io"
+              - group: "storage.k8s.io"
+              - group: "batch"
+              - group: "autoscaling"
+              - group: "metrics.k8s.io"
+              - group: "node.k8s.io"
+              - group: "scheduling.k8s.io"
+              - group: "coordination.k8s.io"
+              - group: "discovery.k8s.io"
+              - group: "events.k8s.io"
+              - group: "flowcontrol.apiserver.k8s.io"
+              - group: "internal.apiserver.k8s.io"
+              - group: "authentication.k8s.io"
+              - group: "authorization.k8s.io"
+              - group: "admissionregistration.k8s.io"
+            verbs:
+              - create
+              - update
+              - patch
+              - delete
+          # ============================================================================
+          # RULE 6: Catch-all for ALL custom resources and any missed resources
+          # ============================================================================
+          - level: Metadata
+            verbs:
+              - create
+              - update
+              - patch
+              - delete
+            omitStages:
+              - RequestReceived
+              - ResponseStarted
+          # ============================================================================
+          # RULE 7: Final catch-all - exclude everything else
+          # ============================================================================
+          - level: None
+            omitStages:
+              - RequestReceived
+              - ResponseStarted
+```
+
+You can apply this policy during Spaces installation or upgrade using the Helm values file.
+
+Audit policies use rules evaluated in order from top to bottom, where the first
+matching rule applies. Control plane audit policies follow Kubernetes
+conventions and use the following logging levels:
+
+* **None** - Don't log events matching this rule
+* **Metadata** - Log request metadata (user, timestamp, resource, verb) but not request or response bodies
+* **Request** - Log metadata and request body but not response body
+* **RequestResponse** - Log metadata, request body, and response body
+
+For more information, review the Kubernetes [Auditing] documentation.
+
+## Disable audit logging
+
+You can disable audit logging on a control plane by removing it from the
+`SharedTelemetryConfig` selector or by deleting the `SharedTelemetryConfig`.
+
+### Disable for specific control planes
+
+Remove the `audit-enabled` label from control planes that should stop sending audit logs:
+
+```bash
+kubectl label controlplane <control-plane-name> --namespace <namespace> audit-enabled-
+```
+
+The `SharedTelemetryConfig` no longer selects this control plane, and audit log collection stops.
+
+### Disable for all control planes
+
+Delete the `SharedTelemetryConfig` to stop audit logging for all control planes it manages:
+
+```bash
+kubectl delete sharedtelemetryconfig <name> --namespace <namespace>
+```
+
+[ctp-selection]: /spaces/howtos/observability/#control-plane-selection
+[Auditing]: https://kubernetes.io/docs/tasks/debug/debug-cluster/audit/
diff --git a/spaces_versioned_docs/version-v1.12/howtos/self-hosted/declarative-ctps.md b/spaces_versioned_docs/version-v1.12/howtos/self-hosted/declarative-ctps.md
new file mode 100644
index 000000000..2c3e5331b
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.12/howtos/self-hosted/declarative-ctps.md
@@ -0,0 +1,110 @@
+---
+title: Declaratively create control planes
+sidebar_position: 99
+description: A tutorial to configure a Space with Argo to declaratively create and
+  manage control planes
+---
+
+In this tutorial, you learn how to configure [Argo CD][argo-cd] to communicate with a self-hosted Space. This flow allows you to declaratively create and manage control planes from Git. Argo CD is a continuous delivery tool for Kubernetes that you can use to drive GitOps flows for your control plane infrastructure.
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments.
+
+For API specifications on ControlPlane resources and their declarative creation, and for version compatibility details, see the [Spaces API Reference](../../../../../reference/apis/spaces-api/).
+:::
+
+## Prerequisites
+
+To complete this tutorial, you need the following:
+
+- Have already deployed an Upbound Space.
+- Have already deployed an instance of Argo CD on a Kubernetes cluster.
+
+## Connect your Space to Argo CD
+
+Fetch the kubeconfig for the Space cluster, the Kubernetes cluster where you installed the Upbound Spaces software. You must add the Space cluster as a context to Argo.
+
+```ini
+export SPACES_CLUSTER_SERVER="https://url"
+export SPACES_CLUSTER_NAME="cluster"
+```
+
+Switch contexts to the Kubernetes cluster where you've installed Argo. Create a secret on the Argo cluster whose data contains the connection details of the Space cluster.
+
+:::important
+Make sure the following commands are executed against your **Argo** cluster, not your Space cluster.
+:::
+
+Run the following command in a terminal:
+
+```yaml
+cat <<EOF | kubectl apply -f -
+# Example secret using the standard Argo CD cluster-secret format; supply the
+# credentials for your Space cluster in the config block.
+apiVersion: v1
+kind: Secret
+metadata:
+  name: spaces-cluster
+  namespace: argocd
+  labels:
+    argocd.argoproj.io/secret-type: cluster
+type: Opaque
+stringData:
+  name: ${SPACES_CLUSTER_NAME}
+  server: ${SPACES_CLUSTER_SERVER}
+  config: |
+    {
+      "tlsClientConfig": {
+        "insecure": false,
+        "caData": "<base64-encoded-ca-certificate>",
+        "certData": "<base64-encoded-client-certificate>",
+        "keyData": "<base64-encoded-client-key>"
+      }
+    }
+EOF
+```
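+
+With the Space cluster registered, you can point an Argo CD `Application` at a Git repository that contains `ControlPlane` manifests. The following sketch uses a placeholder repository URL and path for illustration:
+
+```yaml
+# Illustrative Application: syncs ControlPlane manifests from Git into the
+# registered Space cluster. The destination name matches SPACES_CLUSTER_NAME.
+apiVersion: argoproj.io/v1alpha1
+kind: Application
+metadata:
+  name: control-planes
+  namespace: argocd
+spec:
+  project: default
+  source:
+    repoURL: https://github.com/example-org/control-planes.git
+    targetRevision: main
+    path: manifests
+  destination:
+    name: cluster
+  syncPolicy:
+    automated:
+      prune: true
+      selfHeal: true
+```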
Upbound [Official Provider families][official-provider-families] provide higher fidelity control +to platform teams to install providers for only the resources they need, +reducing the bloat of needlessly installing unused custom resources. Still, you +must factor provider memory usage into your calculations to ensure you've +rightsized the memory available in your Spaces cluster. + + +:::important +Be careful not to conflate `managed resource` with `custom resource definition`. +The former is an "instance" of an external resource in Crossplane, while the +latter defines the API schema of that resource. +::: + +It's estimated that each custom resource definition consumes ~3 MB of memory. +The calculation is: + +```bash +number_of_managed_resources_defined_in_provider x 3 MB = memory_required +``` + +For example, if you plan to use [provider-aws-ec2][provider-aws-ec2], [provider-aws-s3][provider-aws-s3], and [provider-aws-iam][provider-aws-iam], the resulting calculation is: + +```bash +provider-aws-ec2: 98 x 3 MB = 294 MB +provider-aws-s3: 23 x 3 MB = 69 MB +provider-aws-iam 22 x 3 MB = 66 MB +--- +total memory: 429 MB +``` + +In this scenario, you should budget ~430 MB of memory for provider usage on this control plane. + +:::tip +Do this calculation for each provider you plan to install on your control plane. +Then do this calculation for each control plane you plan to run in your Space. +::: + + +#### Total memory usage + +Add the memory usage from the previous sections. Given the preceding examples, +they result in a recommendation to budget ~1 GB memory for each control plane +you plan to run in the Space. + +:::important + +The 1 GB recommendation is an example. +You should input your own provider requirements to arrive at a final number for +your own deployment. + +::: + +### CPU considerations + +#### Managed resource CPU usage + +The number of managed resources under management by a control plane is the largest contributing factor for CPU usage in a Space. CPU usage scales linearly according to the number of managed resources under management by your control plane. In Upbound's testing, CPU usage requirements _does_ vary from provider to provider. Using the Upbound Official Provider families as a baseline: + + +| Provider | MR create operation (CPU core seconds) | MR update or reconciliation operation (CPU core seconds) | +| ---- | ---- | ---- | +| provider-family-aws | 10 | 2 to 3 | +| provider-family-gcp | 7 | 1.5 | +| provider-family-azure | 7 to 10 | 1.5 to 3 | + + +When resources are in a non-ready state, Crossplane providers reconcile often (as fast as every 15 seconds). Once a resource reaches `READY`, each Crossplane provider defaults to a 10 minute poll interval. Given this, a 16-core machine has `16x10x60 = 9600` CPU core seconds available. Interpreting this table: + +- A single control plane that needs to create 100 AWS MRs concurrently would consume 1000 CPU core seconds, or about 1.5 cores. +- A single control plane that continuously reconciles 100 AWS MRs once they've reached a `READY` state would consume 300 CPU core seconds, or a little under half a core. + +Since `provider-family-aws` has the highest recorded numbers for CPU time required, you can use that as an upper limit in your calculations. + +Using these calculations and extrapolating values, given a 16 core machine, it's recommended you don't exceed a single control plane managing 1000 MRs. Suppose you plan to run 10 control planes, each managing 1000 MRs. 
You want to make sure your node pool has capacity for 160 cores. If you are using a machine type that has 16 cores per machine, that would mean having a node pool of size 10. If you are using a machine type that has 32 cores per machine, that would mean having a node pool of size 5. + +#### Cloud API latency + +Oftentimes, you are using Crossplane providers to talk to external cloud APIs. Those external cloud APIs often have global API rate limits (examples: [Azure limits][azure-limits], [AWS EC2 limits][aws-ec2-limits]). + +For Crossplane providers built on [Upjet][upjet] (such as Upbound Official Provider families), these providers use Terraform under the covers. They expose some knobs (such as `--max-reconcile-rate`) you can use to tweak reconciliation rates. + +### Resource buffers + +The guidance in the preceding sections explains how to calculate CPU and memory usage requirements for: + +- a set of control planes in a Space +- tuned to the number of providers you plan to use +- according to the number of managed resource instances you plan to have managed by your control planes + +Upbound recommends budgeting an extra buffer of 20% to your resource capacity calculations. The numbers shared in the preceding sections don't account for peaks or surges since they're based off average measurements. Upbound recommends budgeting this buffer to account for these things. + +## Deploying more than one Space + +You are welcome to deploy more than one Space. You just need to make sure you have a 1:1 mapping of Space to Kubernetes clusters. Spaces are by their nature constrained to a single Kubernetes Cluster, which are regional entities. If you want to offer control planes in multiple cloud environments or multiple public clouds entirely, these are justifications for deploying >1 Spaces. + +## Cert-manager + +A Spaces deployment uses the [Certificate Custom Resource] from cert-manager to +provision certificates within the Space. This establishes a nice API boundary +between what your platform may need and the Certificate requirements of a +Space. + + +In the event you would like more control over the issuing Certificate Authority +for your deployment or the deployment of cert-manager itself, this guide is for +you. + + +### Deploying + +An Upbound Space deployment doesn't have any special requirements for the +cert-manager deployment itself. The only expectation is that cert-manager and +the corresponding Custom Resources exist in the cluster. + +You should be free to install cert-manager in the cluster in any way that makes +sense for your organization. You can find some [installation ideas] in the +cert-manager docs. + +### Issuers + +A default Upbound Space install includes a [ClusterIssuer]. This `ClusterIssuer` +is a `selfSigned` issuer that other certificates are minted from. You have a +couple of options available to you for changing the default deployment of the +Issuer: +1. Changing the issuer name. +2. Providing your own ClusterIssuer. + + +#### Changing the issuer name + +The `ClusterIssuer` name is controlled by the `certificates.space.clusterIssuer` +Helm property. You can adjust this during installation by providing the +following parameter (assuming your new name is 'SpaceClusterIssuer'): +```shell +--set "certificates.space.clusterIssuer=SpaceClusterIssuer" +``` + + + +#### Providing your own ClusterIssuer + +To provide your own `ClusterIssuer`, you need to first setup your own +`ClusterIssuer` in the cluster. 
The cert-manager docs have a variety of options +for providing your own. See the [Issuer Configuration] docs for more details. + +Once you have your own `ClusterIssuer` set up in the cluster, you need to turn +off the deployment of the `ClusterIssuer` included in the Spaces deployment. +To do that, provide the following parameter during installation: +```shell +--set "certificates.provision=false" +``` + +###### Considerations +If your `ClusterIssuer` has a name that's different from the default name that +the Spaces installation expects ('spaces-selfsigned'), you need to also specify +your `ClusterIssuer` name during install using: +```shell +--set "certificates.space.clusterIssuer=" +``` + +## Ingress + +To route requests from an external client (kubectl, ArgoCD, etc) to a +control plane, a Spaces deployment includes a default [Ingress] manifest. In +order to ease getting started scenarios, the current `Ingress` includes +configurations (properties and annotations) that assume that you installed the +commonly used [ingress-nginx ingress controller] in the cluster. This section +walks you through using a different `Ingress`, if that's something that your +organization needs. + +### Default manifest + +An example of what the current `Ingress` manifest included in a Spaces install +is below: + +```yaml +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: mxe-router-ingress + namespace: upbound-system + annotations: + nginx.ingress.kubernetes.io/use-regex: "true" + nginx.ingress.kubernetes.io/ssl-redirect: "false" + nginx.ingress.kubernetes.io/proxy-send-timeout: "3600" + nginx.ingress.kubernetes.io/proxy-read-timeout: "3600" + nginx.ingress.kubernetes.io/proxy-request-buffering: "off" + nginx.ingress.kubernetes.io/proxy-body-size: "0" + nginx.ingress.kubernetes.io/proxy-http-version: "1.1" + nginx.ingress.kubernetes.io/backend-protocol: "HTTPS" + nginx.ingress.kubernetes.io/proxy-ssl-verify: "on" + nginx.ingress.kubernetes.io/proxy-ssl-secret: "upbound-system/mxp-hostcluster-certs" + nginx.ingress.kubernetes.io/proxy-ssl-name: spaces-router + nginx.ingress.kubernetes.io/configuration-snippet: | + more_set_headers "X-Request-Id: $req_id"; + more_set_headers "Request-Id: $req_id"; + more_set_headers "Audit-Id: $req_id"; +spec: + ingressClassName: nginx + tls: + - hosts: + - {{ .Values.ingress.host }} + secretName: mxe-router-tls + rules: + - host: {{ .Values.ingress.host }} + http: + paths: + - path: "/v1/controlPlanes" + pathType: Prefix + backend: + service: + name: spaces-router + port: + name: http +``` + +The notable pieces are: +1. Namespace + + + +This property represents the namespace that the spaces-router is deployed to. +In most cases this is `upbound-system`. + + + +2. proxy-ssl-* annotations + +The spaces-router pod terminates TLS using certificates located in the +mxp-hostcluster-certs `Secret` located in the `upbound-system` `Namespace`. + +3. proxy-* annotations + +Requests coming into the ingress-controller can be variable depending on what +the client is requesting. For example, `kubectl get crds` has different +requirements for the connection compared to a 'watch', for example +`kubectl get pods -w`. The ingress-controller is configured to be able to +account for either scenario. + + +4. configuration-snippets + +These commands add headers to the incoming requests that help with telemetry +and diagnosing problems within the system. + +5. 
Rules + +Requests coming into the control planes use a `/v1/controlPlanes` prefix and +need to be routed to the spaces-router. + + +### Using a different ingress manifest + +Operators can choose to use an `Ingress` manifest and ingress controller that +makes the most sense for their organization. If they want to turn off deploying +the default `Ingress` manifest, they can do so during installation by providing +the following parameter during installation: +```shell +--set ".Values.ingress.provision=false" +``` + +#### Considerations + + + + + +Operators will need to take into account the following considerations when +disabling the default `Ingress` deployment. + +1. Ensure the custom `Ingress` manifest is placed in the same namespace as the +`spaces-router` pod. +2. Ensure that the ingress is configured to use a `spaces-router` as a secure +backend and that the secret used is the mxp-hostcluster-certs secret. +3. Ensure that the ingress is configured to handle long-lived connections. +4. Ensure that the routing rule sends requests prefixed with +`/v1/controlPlanes` to the `spaces-router` using the `http` port. + + + + + + +[cert-manager]: https://cert-manager.io/ +[Certificate Custom Resource]: https://cert-manager.io/docs/usage/certificate/ +[ClusterIssuer]: https://cert-manager.io/docs/concepts/issuer/ +[ingress-nginx ingress controller]: https://kubernetes.github.io/ingress-nginx/deploy/ +[installation ideas]: https://cert-manager.io/docs/installation/ +[Ingress]: https://kubernetes.io/docs/concepts/services-networking/ingress/ +[Issuer Configuration]: https://cert-manager.io/docs/configuration/ +[official-provider-families]: /manuals/packages/providers/provider-families +[aws-eks]: https://aws.amazon.com/eks/ +[google-cloud-gke]: https://cloud.google.com/kubernetes-engine +[microsoft-aks]: https://azure.microsoft.com/en-us/products/kubernetes-service +[upbound-account]: https://www.upbound.io/register/?utm_source=docs&utm_medium=cta&utm_campaign=docs_spaces +[provider-aws-ec2]: https://marketplace.upbound.io/providers/upbound/provider-aws-ec2 +[provider-aws-s3]: https://marketplace.upbound.io/providers/upbound/provider-aws-s3 +[provider-aws-iam]: https://marketplace.upbound.io/providers/upbound/provider-aws-iam +[azure-limits]: https://learn.microsoft.com/en-us/azure/azure-resource-manager/management/request-limits-and-throttling +[aws-ec2-limits]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/throttling.html#throttling-limits-rate-based +[upjet]: https://github.com/upbound/upjet diff --git a/spaces_versioned_docs/version-v1.12/howtos/self-hosted/dr.md b/spaces_versioned_docs/version-v1.12/howtos/self-hosted/dr.md new file mode 100644 index 000000000..67ecbfecf --- /dev/null +++ b/spaces_versioned_docs/version-v1.12/howtos/self-hosted/dr.md @@ -0,0 +1,412 @@ +--- +title: Disaster Recovery +sidebar_position: 13 +description: Configure Space-wide backups for disaster recovery. +--- + +:::info API Version Information +This guide applies to **Spaces v1.9.0 and later** (Self-Hosted only). Space Backups is a alpha feature enabled by default starting in v1.14.0. + +- **v1.9.0-v1.13.x**: Alpha feature (requires manual enablement) +- **v1.14.0+**: GA (enabled by default) + +For version-specific features and backup resources, see the . control-plane backups, see [Backup and Restore](../backup-and-restore.md). +::: + +:::important +For Connected and Disconnected Spaces, this feature requires Spaces `v1.9.0` and, starting with `v1.14.0`, Spaces enables it by default. 
+ +To enable it on versions earlier than `v1.14.0`, set features.alpha.spaceBackup.enabled=true when you install Spaces. + +```bash +up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \ + ... + --set "features.alpha.spaceBackup.enabled=true" +``` +::: + +Upbound's _Space Backups_ is a built-in Space-wide backup and restore feature. This guide explains how to configure Space Backups and how to restore from one of them in case of disaster recovery. + +This feature is meant for Space administrators. Group or Control Plane users can leverage [Shared Backups][shared-backups] to backup and restore their ControlPlanes. + +## Benefits +The Space Backups feature provides the following benefits: + +* Automatic backups for all resources in a Space and all resources in control planes, without any operational overhead. +* Backup schedules. +* Selectors to specify resources to backup. + +## Prerequisites + +Enabled the Space Backups feature in the Space: + +- Cloud Spaces: Not accessible to users. +- Connected Spaces: Space administrator must enable this feature. +- Disconnected Spaces: Space administrator must enable this feature. + +## Configure a Space Backup Config + +[SpaceBackupConfig][spacebackupconfig] is a cluster-scoped resource. This resource configures the storage details and provider. Whenever a backup executes (either by schedule or manually initiated), it references a SpaceBackupConfig to tell it where store the snapshot. + + +### Backup config provider + + +The `spec.objectStorage.provider` and `spec.objectStorage.config` fields configures: + +* The object storage provider +* The path to the provider +* The credentials needed to communicate with the provider + +You can only set one provider. Upbound currently supports AWS, Azure, and GCP as providers. + + +`spec.objectStorage.config` is a freeform map of configuration options for the object storage provider. See [Thanos object storage][thanos-object-storage] for more information on the formats for each supported cloud provider. `spec.bucket` and `spec.provider` overrides the required values in the config. + + +#### AWS as a storage provider + +This example demonstrates how to use AWS as a storage provider for your backups: + +```yaml +apiVersion: admin.spaces.upbound.io/v1alpha1 +kind: SpaceBackupConfig +metadata: + name: default +spec: + objectStorage: + provider: AWS + bucket: spaces-backup-bucket + config: + endpoint: s3.eu-west-2.amazonaws.com + region: eu-west-2 + credentials: + source: Secret + secretRef: + name: bucket-creds + namespace: upbound-system + key: creds +``` + +This example assumes you've already created an S3 bucket called +`spaces-backup-bucket` in the `eu-west-2` AWS region. To access the bucket, +define the account credentials as a Secret in the specified Namespace +(`upbound-system` in this example). + +#### Azure as a storage provider + +This example demonstrates how to use Azure as a storage provider for your backups: + +```yaml +apiVersion: admin.spaces.upbound.io/v1alpha1 +kind: SpaceBackupConfig +metadata: + name: default + namespace: default +spec: + objectStorage: + provider: Azure + bucket: upbound-backups + config: + storage_account: upbackupstore + container: upbound-backups + endpoint: blob.core.windows.net + credentials: + source: Secret + secretRef: + name: bucket-creds + namespace: upbound-system + key: creds +``` + + +This example assumes you've already created an Azure storage account called +`upbackupstore` and blob `upbound-backups`. 
To access the blob, +define the account credentials as a Secret in the specified Namespace +(`upbound-system` in this example). + + +#### GCP as a storage provider + +This example demonstrates how to use Google Cloud Storage as a storage provider for your backups: + +```yaml +apiVersion: admin.spaces.upbound.io/v1alpha1 +kind: SpaceBackupConfig +metadata: + name: default + namespace: default +spec: + objectStorage: + provider: GCP + bucket: spaces-backup-bucket + credentials: + source: Secret + secretRef: + name: bucket-creds + namespace: upbound-system + key: creds +``` + + +This example assumes you've already created a Cloud bucket called +"spaces-backup-bucket" and a service account with access to this bucket. Define the key file as a Secret in the specified Namespace +(`upbound-system` in this example). + + +## Configure a Space Backup Schedule + + +[SpaceBackupSchedule][spacebackupschedule] is a cluster-scoped resource. This resource defines a backup schedule for the whole Space. + +Below is an example of a Space Backup Schedule running every day. It backs up all groups having `environment: production` labels and all control planes in those groups having `backup: please` labels. + +```yaml +apiVersion: admin.spaces.upbound.io/v1alpha1 +kind: SpaceBackupSchedule +metadata: + name: daily-schedule +spec: + schedule: "@daily" + configRef: + kind: SpaceBackupConfig + name: default + match: + groups: + labelSelectors: + - matchLabels: + environment: production + controlPlanes: + labelSelectors: + - matchLabels: + backup: please +``` + +### Define a schedule + +The `spec.schedule` field is a [Cron-formatted][cron-formatted] string. Some common examples are below: + +| Entry | Description | +| ----------------- | ------------------------------------------------------------------------------------------------- | +| `@hourly` | Run once an hour. | +| `@daily` | Run once a day. | +| `@weekly` | Run once a week. | +| `0 0/4 * * *` | Run every 4 hours. | +| `0/15 * * * 1-5` | Run every fifteenth minute on Monday through Friday. | +| `@every 1h30m10s` | Run every 1 hour, 30 minutes, and 10 seconds. Hour is the largest measurement of time for @every. | + +### Suspend a schedule + +Use `spec.suspend` field to suspend the schedule. It creates no new backups, but allows running backups to complete. + +```yaml +apiVersion: admin.spaces.upbound.io/v1alpha1 +kind: SpaceBackupSchedule +metadata: + name: daily-schedule +spec: + suspend: true +... +``` + +### Garbage collect backups when the schedule gets deleted + +Set the `spec.useOwnerReferencesInBackup` to garbage collect associated `SpaceBackup` when a `SpaceBackupSchedule` gets deleted. If set to true, backups are garbage collected when the schedule gets deleted. + +### Set the time to live + +Set the `spec.ttl` field to define the time to live for the backup. After this time, the backup is eligible for garbage collection. If this field isn't set, the backup isn't garbage collected. + +The time to live is a duration, for example, `168h` for 7 days. + +```yaml +apiVersion: admin.spaces.upbound.io/v1alpha1 +kind: SpaceBackupSchedule +metadata: + name: daily-schedule +spec: + ttl: 168h # Backup is garbage collected after 7 days +... +``` + +## Selecting space resources to backup + +By default, a SpaceBackup selects all groups and, for each of them, all control planes, secrets, and any other group-scoped resources. + +By setting `spec.match`, you can include only specific groups, control planes, secrets, or other Space resources in the backup. 
+ +By setting `spec.exclude`, you can filter out some matched Space API resources from the backup. + +### Including space resources in a backup + +Different fields are available to include resources based on labels or names: +- `spec.match.groups` to include only some groups in the backup. +- `spec.match.controlPlanes` to include only some control planes in the backup. +- `spec.match.secrets` to include only some secrets in the backup. +- `spec.match.extras` to include only some extra resources in the backup. + +```yaml +apiVersion: admin.spaces.upbound.io/v1alpha1 +kind: SpaceBackup +metadata: + name: my-backup +spec: + configRef: + kind: SpaceBackupConfig + name: default + match: + groups: + labelSelectors: + - matchLabels: + environment: production + controlPlanes: + labelSelectors: + - matchLabels: + backup: please + secrets: + names: + - my-secret + extras: + - apiGroup: "spaces.upbound.io" + kind: "SharedBackupConfig" + names: + - my-shared-backup +``` + +### Excluding Space resources from the backup + +Use the `spec.exclude` field to exclude matched Space API resources from the backup. + +Different fields are available to exclude resources based on labels or names: +- `spec.exclude.groups` to exclude some groups from the backup. +- `spec.exclude.controlPlanes` to exclude some control planes from the backup. +- `spec.exclude.secrets` to exclude some secrets from the backup. +- `spec.exclude.extras` to exclude some extra resources from the backup. + +```yaml +apiVersion: admin.spaces.upbound.io/v1alpha1 +kind: SpaceBackup +metadata: + name: my-backup +spec: + ttl: 168h # Backup is garbage collected after 7 days + configRef: + kind: SpaceBackupConfig + name: default + match: + groups: + labelSelectors: + - matchLabels: + environment: production + exclude: + groups: + names: + - not-this-one-please +``` + +### Exclude resources in control planes' backups + +By default, it backs up all resources in a selected control plane. + +Use the `spec.controlPlaneBackups.excludedResources` field to exclude resources from control planes' backups. + +```yaml +apiVersion: admin.spaces.upbound.io/v1alpha1 +kind: SpaceBackup +metadata: + name: my-backup +spec: + ttl: 168h # Backup is garbage collected after 7 days + configRef: + kind: SpaceBackupConfig + name: default + controlPlaneBackups: + excludedResources: + - secrets + - buckets.s3.aws.upbound.io +``` + +## Create a manual backup + +[SpaceBackup][spacebackup] is a cluster-scoped resource that causes a single backup to occur for the whole Space. + +Below is an example of a manual SpaceBackup: + +```yaml +apiVersion: admin.spaces.upbound.io/v1alpha1 +kind: SpaceBackup +metadata: + name: my-backup +spec: + configRef: + kind: SpaceBackupConfig + name: default + deletionPolicy: Delete +``` + + +The backup specification `DeletionPolicy` defines backup deletion actions, +including the deletion of the backup file from the bucket. The `Deletion Policy` +value defaults to `Orphan`. Set it to `Delete` to remove uploaded files +in the bucket. +For more information on the backup and restore process, review the [Spaces API documentation][spaces-api-documentation]. + +### Set the time to live + +Set the `spec.ttl` field to define the time to live for the backup. After this time, the backup is eligible for garbage collection. If this field isn't set, the backup isn't garbage collected. The time to live is a duration, for example, `168h` for 7 days. 
+
+```yaml
+apiVersion: admin.spaces.upbound.io/v1alpha1
+kind: SpaceBackup
+metadata:
+  name: my-backup
+spec:
+  ttl: 168h # Backup is garbage collected after 7 days
+...
+```
+
+## Restore from a Space Backup
+
+Space Backup and Restore focuses only on disaster recovery. The restore procedure assumes a new Space installation with no existing resources. The restore procedure is idempotent, so you can run it multiple times without any side effects in case of failures.
+
+To restore a Space from an existing Space Backup, follow these steps:
+
+1. Install Spaces from scratch as needed.
+2. Create a `SpaceBackupConfig` as needed to access the SpaceBackup from the object storage, for example named `my-backup-config`.
+3. Select the backup you want to restore from, for example `my-backup`.
+4. Run the following command to restore the Space:
+
+```shell
+export SPACE_BACKUP_CONFIG=my-backup-config
+export SPACE_BACKUP=my-backup
+kubectl exec -ti -n upbound-system deployments/spaces-controller -c spaces -- hyperspace restore $SPACE_BACKUP $SPACE_BACKUP_CONFIG
+```
+
+### Restore specific control planes
+
+:::important
+This feature is available from Spaces v1.11.
+:::
+
+Instead of restoring the whole Space, you can choose to restore specific control planes
+from a backup using the `--controlplanes` flag. You can also use
+the `--skip-space-restore` flag to skip restoring Space objects.
+This allows Spaces admins to restore individual control planes without
+needing to restore the entire Space.
+
+```shell
+export SPACE_BACKUP_CONFIG=my-backup-config
+export SPACE_BACKUP=my-backup
+kubectl exec -ti -n upbound-system deployments/spaces-controller -c spaces \
+  -- hyperspace restore $SPACE_BACKUP $SPACE_BACKUP_CONFIG \
+  --controlplanes default/ctp1,default/ctp2 --skip-space-restore
+```
+
+
+[shared-backups]: /spaces/howtos/self-hosted/workload-id/backup-restore-config/
+[spacebackupconfig]: /reference/apis/spaces-api/v1_9
+[thanos-object-storage]: https://thanos.io/tip/thanos/storage.md/
+[spacebackupschedule]: /reference/apis/spaces-api/v1_9
+[cron-formatted]: https://en.wikipedia.org/wiki/Cron
+[spacebackup]: /reference/apis/spaces-api/v1_9
+[spaces-api-documentation]: /reference/apis/spaces-api/v1_9
+
diff --git a/spaces_versioned_docs/version-v1.12/howtos/self-hosted/gitops-with-argocd.md b/spaces_versioned_docs/version-v1.12/howtos/self-hosted/gitops-with-argocd.md
new file mode 100644
index 000000000..004247a10
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.12/howtos/self-hosted/gitops-with-argocd.md
@@ -0,0 +1,142 @@
+---
+title: GitOps with ArgoCD in Self-Hosted Spaces
+sidebar_position: 80
+description: Set up GitOps workflows with Argo CD in self-hosted Spaces
+plan: "business"
+---
+
+:::info Deployment Model
+This guide applies to **self-hosted Spaces** deployments. For Upbound Cloud Spaces, see [GitOps with Upbound Control Planes](/spaces/howtos/cloud-spaces/gitops-on-upbound/).
+:::
+
+GitOps is an approach for managing a system by declaratively describing desired resources' configurations in Git and using controllers to realize the desired state. Upbound's control planes are compatible with this pattern, and Upbound strongly recommends integrating GitOps into the platforms you build on Upbound.
+
+
+## Integrate with Argo CD
+
+
+[Argo CD][argo-cd] is a project in the Kubernetes ecosystem commonly used for
+GitOps. You can use it in tandem with Upbound control planes to achieve GitOps
+flows. The sections below explain how to integrate these tools with Upbound.
+
+### Configure connection secrets for control planes
+
+You can configure control planes to write their connection details to a secret.
+Do this by setting the
+[`spec.writeConnectionSecretToRef`][spec-writeconnectionsecrettoref] field in a
+control plane manifest. For example:
+
+```yaml
+apiVersion: spaces.upbound.io/v1beta1
+kind: ControlPlane
+metadata:
+  name: ctp1
+  namespace: default
+spec:
+  writeConnectionSecretToRef:
+    name: kubeconfig-ctp1
+    namespace: default
+```
+
+
+### Configure Argo CD
+
+
+To configure Argo CD for annotation resource tracking, edit the Argo CD
+ConfigMap in the Argo CD namespace. Add `application.resourceTrackingMethod:
+annotation` to the data section as below.
+
+Next, configure the [auto respect RBAC for the Argo CD
+controller][auto-respect-rbac-for-the-argo-cd-controller-1] setting. By default, Argo CD
+attempts to discover some Kubernetes resource types that don't exist in a
+control plane. You must configure Argo CD to respect the cluster's RBAC rules so
+that Argo CD can sync. Add `resource.respectRBAC: normal` to the data section as
+below.
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: argocd-cm
+data:
+  ...
+  application.resourceTrackingMethod: annotation
+  resource.respectRBAC: normal
+```
+
+:::tip
+The `resource.respectRBAC` configuration above tells Argo to respect RBAC for
+_all_ cluster contexts. If you're using an Argo CD instance to manage more than
+only control planes, you should consider changing the `clusters` string match
+for the configuration to apply only to control planes. For example, if every
+control plane context name followed the convention of being named
+`controlplane-`, you could set the string match to be `controlplane-*`.
+:::
+
+
+### Create a cluster context definition
+
+
+Once the control plane is ready, extract the following values from the secret
+containing the kubeconfig:
+
+```bash
+kubeconfig_content=$(kubectl get secrets kubeconfig-ctp1 -n default -o jsonpath='{.data.kubeconfig}' | base64 -d)
+server=$(echo "$kubeconfig_content" | grep 'server:' | awk '{print $2}')
+bearer_token=$(echo "$kubeconfig_content" | grep 'token:' | awk '{print $2}')
+ca_data=$(echo "$kubeconfig_content" | grep 'certificate-authority-data:' | awk '{print $2}')
+```
+
+Generate a new secret in the cluster where you installed Argo, using the prior
+values extracted:
+
+```yaml
+cat <<EOF | kubectl apply -f -
+# Declarative Argo CD cluster secret for the control plane. The name and
+# namespace below are examples; adjust them to your Argo CD installation.
+apiVersion: v1
+kind: Secret
+metadata:
+  name: ctp1-cluster
+  namespace: argocd
+  labels:
+    argocd.argoproj.io/secret-type: cluster
+stringData:
+  name: ctp1
+  server: $server
+  config: |
+    {
+      "bearerToken": "$bearer_token",
+      "tlsClientConfig": {
+        "caData": "$ca_data"
+      }
+    }
+EOF
+```
+
+import GlobalLanguageSelector, { CodeBlock } from '@site/src/components/GlobalLanguageSelector';
+
+
+
+
+:::important
+This feature is only available for select Business Critical customers. You can't
+set up your own Managed Space without the assistance of Upbound. If you're
+interested in this deployment mode, please [contact us][contact].
+:::
+
+
+
+A Managed Space deployed on AWS is a single-tenant deployment of a control plane
+space in your AWS organization in an isolated sub-account. With Managed Spaces,
+you can use the same API, CLI, and Console that Upbound offers, with the benefit
+of running entirely in a cloud account that you own and Upbound manages for you.
+
+The following guide walks you through setting up a Managed Space in your AWS
+organization. If you have any questions while working through this guide,
+contact your Upbound Account Representative for help.
+
+
+
+
+
+A Managed Space deployed on GCP is a single-tenant deployment of a control plane
+space in your GCP organization in an isolated project.
With Managed Spaces, you
+can use the same API, CLI, and Console that Upbound offers, with the benefit of
+running entirely in a cloud account that you own and Upbound manages for you.
+
+The following guide walks you through setting up a Managed Space in your GCP
+organization. If you have any questions while working through this guide,
+contact your Upbound Account Representative for help.
+
+
+
+
+## Managed Space on your cloud architecture
+
+
+
+A Managed Space is a deployment of the Upbound Spaces software inside an
+Upbound-controlled sub-account in your AWS cloud environment. The Spaces
+software runs in this sub-account, orchestrated by Kubernetes. Backups and
+billing data get stored inside bucket or blob storage in the same sub-account.
+The control planes deployed and controlled by the Spaces software run on the
+Kubernetes cluster that gets deployed into the sub-account.
+
+The diagram below illustrates the high-level architecture of Upbound Managed Spaces:
+
+![Upbound Managed Spaces arch](/img/managed-arch-aws.png)
+
+The Spaces software gets deployed on an EKS Cluster in the region of your
+choice. This EKS cluster is where your control planes are ultimately run.
+Upbound also deploys two buckets: one for the collection of the billing data and one for
+control plane backups.
+
+Upbound doesn't have access to other sub-accounts or your organization-level
+settings in your cloud environment. Outside of your cloud organization, Upbound
+runs the Upbound Console, which includes the Upbound API and web application,
+including the dashboard you see at `console.upbound.io`. By default, all
+connections are encrypted, but public. Optionally, you can
+use private network connectivity through [AWS PrivateLink][aws-privatelink].
+
+
+
+
+
+
+A Managed Space is a deployment of the Upbound Spaces software inside an
+Upbound-controlled project in your GCP cloud environment. The Spaces software
+runs in this project, orchestrated by Kubernetes. Backups and billing data get
+stored inside bucket or blob storage in the same project. The control planes
+deployed and controlled by the Spaces software run on the Kubernetes cluster
+that gets deployed into the project.
+
+The diagram below illustrates the high-level architecture of Upbound Managed Spaces:
+
+![Upbound Managed Spaces arch](/img/managed-arch-gcp.png)
+
+The Spaces software gets deployed on a GKE Cluster in the region of your choice.
+This GKE cluster is where your control planes are ultimately run. Upbound also
+deploys two cloud buckets: one for the collection of the billing data and one for
+control plane backups.
+
+Upbound doesn't have access to other projects or your organization-level
+settings in your cloud environment. Outside of your cloud organization, Upbound
+runs the Upbound Console, which includes the Upbound API and web application,
+including the dashboard you see at `console.upbound.io`. By default, all
+connections are encrypted, but public. Optionally, you can
+use private network connectivity through [GCP Private Service
+Connect][gcp-private-service-connect].
+
+
+
+## Prerequisites
+
+- An organization created on Upbound
+
+
+
+- You should have a preexisting AWS organization to complete this guide.
+- You must create a new AWS sub-account. Read the [AWS documentation][aws-documentation] to learn how to create a new sub-account in an existing organization on AWS, or use the AWS CLI as sketched below.
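+
+As a minimal sketch of the CLI route (the email address, account name, and request ID below are placeholders, not values Upbound requires), you can create and then track the sub-account with the AWS Organizations commands:
+
+```bash
+# Create the sub-account (email and account name are placeholder values).
+aws organizations create-account \
+  --email spaces-admin@example.com \
+  --account-name "upbound-managed-space"
+
+# Account creation is asynchronous. Poll its status using the
+# CreateAccountRequestId returned by the previous command.
+aws organizations describe-create-account-status \
+  --create-account-request-id car-examplerequestid111
+```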
+ +After the sub-account information gets provided to Upbound, **don't change it +any further.** Any changes made to the sub-account or the resources created by +Upbound for the purposes of the Managed Space deployments voids the SLA you have +with Upbound. If you want to make configuration changes, contact your Upbound +Solutions Architect. + + + + + +- You should have a preexisting GCP organization with an active Cloud Billing account to complete this guide. +- You must create a new GCP project. Read the [GCP documentation][gcp-documentation] to learn how to create a new project in an existing organization on GCP. + +After the project information gets provided to Upbound, **don't change it any +further.** Any changes made to the project or the resources created by Upbound +for the purposes of the Managed Space deployments voids the SLA you have with +Upbound. If you want to make configuration changes, contact your Upbound +Solutions Architect. + + + + + +## Set up cross-account management + +Upbound supports using AWS Key Management Service with cross-account IAM +permissions. This enables the isolation of keys so the infrastructure operated +by Upbound has limited access to symmetric keys. + +In the KMS key's account, apply the baseline key policy: + +```json +{ + "Sid": "Allow Upbound to use this key", + "Effect": "Allow", + "Principal": { + "AWS": ["[Managed Space sub-account ID]"] + }, + "Action": ["kms:Encrypt", "kms:Decrypt", "kms:ReEncrypt*", "kms:GenerateDataKey*", "kms:DescribeKey"], + "Resource": "*" +} +``` + +You need another key policy to let the sub-account create persistent resources +with the KMS key: + +```json +{ + "Sid": "Allow attachment of persistent resources for an Upbound Managed Space", + "Effect": "Allow", + "Principal": { + "AWS": "[Managed Space sub-account ID]" + }, + "Action": ["kms:CreateGrant", "kms:ListGrants", "kms:RevokeGrant"], + "Resource": "*", + "Condition": { + "Bool": { + "kms:GrantIsForAWSResource": "true" + } + } +} +``` + +### Configure PrivateLink + +By default, all connections to the Upbound Console are encrypted, but public. +AWS PrivateLink is a feature that allows VPC peering whereby your traffic +doesn't traverse the public internet. To have this configured, contact your +Upbound Account Representative. + + + + + +## Enable APIs + +Enable the following APIs in the new project: + +- Kubernetes Engine API +- Cloud Resource Manager API +- Compute Engine API +- Cloud DNS API + +:::tip +Read how to enable APIs in a GCP project [here][here]. +::: + +## Create a service account + +Create a service account in the new project. Name the service account, +upbound-sa. Give the service account the following roles: + +- Compute Admin +- Project IAM Admin +- Service Account Admin +- DNS Administrator +- Editor + +Select the service account you just created. Select keys. Add a new key and +select JSON. The key gets downloaded to your machine. Save this for later. + +## Create a DNS Zone + +Create a DNS Zone, set the **Zone type** to `Public`. + +### Configure Private Service Connect + +By default, all connections to the Upbound Console are encrypted, but public. +GCP Private Service Connect is a feature that allows VPC peering whereby your +traffic doesn't traverse the public internet. To have this configured, contact +your Upbound Account Representative. + + + +## Provide information to Upbound + +Once these policies get attached to the key, tell your Upbound Account +Representative, providing them the following: + + + +- the full ARN of the KMS key. 
+- the name of the organization that you created in Upbound. Use the up CLI command, `up org list`, to see this information.
+- Confirmation of which region in AWS you want the deployment to target.
+
+
+
+
+
+- The service account JSON key
+- The NS records associated with the DNS name created in the last step.
+- the name of the organization that you created in Upbound. Use the up CLI command, `up org list`, to see this information.
+- Confirmation of which region in GCP you want the deployment to target.
+
+
+
+Once Upbound has this information, the request gets processed within a business day.
+
+## Use your Managed Space
+
+Once the Managed Space gets deployed, you can see it in the Space selector when browsing your environment on [`console.upbound.io`][console-upbound-io].
+
+
+
+
+[contact]: https://www.upbound.io/contact-us
+[aws-privatelink]: #configure-privatelink
+[aws-documentation]: https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_create.html#orgs_manage_accounts_create-new
+[gcp-private-service-connect]: #configure-private-service-connect
+[gcp-documentation]: https://cloud.google.com/resource-manager/docs/creating-managing-organization
+[here]: https://cloud.google.com/apis/docs/getting-started#enabling_apis
+[console-upbound-io]: https://console.upbound.io/
diff --git a/spaces_versioned_docs/version-v1.12/howtos/self-hosted/oidc-configuration.md b/spaces_versioned_docs/version-v1.12/howtos/self-hosted/oidc-configuration.md
new file mode 100644
index 000000000..cbef4dc42
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.12/howtos/self-hosted/oidc-configuration.md
@@ -0,0 +1,289 @@
+---
+title: Configure OIDC
+sidebar_position: 20
+description: Configure OIDC in your Space
+---
+:::important
+This guide is only applicable for administrators who've deployed self-hosted Spaces. For general RBAC in Upbound, read [Upbound RBAC][upbound-rbac].
+:::
+
+Upbound uses the Kubernetes [Structured Authentication Configuration][structured-auth-config] to validate OIDC tokens sent to the API. Upbound stores this
+configuration as a `ConfigMap` and supplies it to the Upbound router
+component during installation with Helm.
+
+This guide walks you through how to create and apply an authentication
+configuration to validate Upbound with an external identity provider. Each
+section focuses on a specific part of the configuration file.
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments.
+
+For related platform authentication features, see the [Platform manual](../../../../platform/).
+:::
+
+## Creating the `AuthenticationConfiguration` file
+
+First, create a file called `config.yaml` with an `AuthenticationConfiguration`
+kind. The `AuthenticationConfiguration` is the initial authentication structure
+necessary for Upbound to communicate with your chosen identity provider.
+
+```yaml
+apiVersion: apiserver.config.k8s.io/v1beta1
+kind: AuthenticationConfiguration
+jwt:
+- issuer:
+    url: oidc-issuer-url
+    audiences:
+    - oidc-client-id
+  claimMappings: # optional
+    username:
+      claim: oidc-username-claim
+      prefix: oidc-username-prefix
+    groups:
+      claim: oidc-groups-claim
+      prefix: oidc-groups-prefix
+```
+
+
+For detailed configuration options, including the CEL-based token validation,
+review the feature [documentation][structured-auth-config].
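+
+As a brief sketch of that CEL-based validation (the rule below is illustrative, not required by Upbound), you can attach `claimValidationRules` to an issuer to reject tokens that fail an expression:
+
+```yaml
+jwt:
+- issuer:
+    url: https://example.com
+    audiences:
+    - oidc-client-id
+  claimValidationRules:
+  # Illustrative rule: reject tokens whose email address isn't verified.
+  - expression: "claims.email_verified == true"
+    message: "email must be verified"
+```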
+ + +The `AuthenticationConfiguration` allows you to configure multiple JWT +authenticators as separate issuers. + +### Configure an issuer + +The `jwt` array requires an `issuer` specification and typically contains: + +- A `username` claim mapping +- A `groups` claim mapping +Optionally, the configuration may also include: +- A set of claim validation rules +- A set of user validation rules + +The `issuer` URL must be unique across all configured authenticators. + +```yaml +issuer: + url: https://example.com + discoveryUrl: https://discovery.example.com/.well-known/openid-configuration + certificateAuthority: |- + + audiences: + - client-id-a + - client-id-b + audienceMatchPolicy: MatchAny +``` + +By default, the authenticator assumes the OIDC Discovery URL is +`{issuer.url}/.well-known/openid-configuration`. Most identity providers follow +this structure, and you can omit the `discoveryUrl` field. To use a separate +discovery service, specify the full path to the discovery endpoint in this +field. + +If the CA for the Issuer isn't public, provide the PEM encoded CA for the Discovery URL. + +At least one of the `audiences` entries must match the `aud` claim in the JWT. +For OIDC tokens, this is the Client ID of the application attempting to access +the Upbound API. Having multiple values set allows the same configuration to +apply to multiple client applications, for example the `kubectl` CLI and an +Internal Developer Portal. + +If you specify multiple `audiences` , `audienceMatchPolicy` must equal `MatchAny`. + +### Configure `claimMappings` + +#### Username claim mapping + +By default, the authenticator uses the `sub` claim as the user name. To override this, either: + +- specify *both* `claim` and `prefix`. `prefix` may be explicitly set to the empty string. +or + +- specify a CEL `expression` to calculate the user name. + +```yaml +claimMappings: + username: + claim: "sub" + prefix: "keycloak" + # + expression: 'claims.username + ":external-user"' +``` + + +#### Groups claim mapping + +By default, this configuration doesn't map groups, unless you either: + +- specify both `claim` and `prefix`. `prefix` may be explicitly set to the empty string. +or + +- specify a CEL `expression` that returns a string or list of strings. + + +```yaml +claimMappings: + groups: + claim: "groups" + prefix: "" + # + expression: 'claims.roles.split(",")' +``` + + +### Validation rules + + +Validation rules are outside the scope of this document. Review the +[documentation][structured-auth-config] for more information. Examples include +using CEL expressions to validate authentication such as: + + +- Validating that a token claim has a specific value +- Validating that a token has a limited lifetime +- Ensuring usernames and groups don't contain reserved prefixes + +## Required claims + +To interact with Space and ControlPlane APIs, users must have the `upbound.io/aud` claim set to one of the following: + +| Upbound.io Audience | Notes | +| -------------------------------------------------------- | -------------------------------------------------------------------- | +| `[]` | No Access to Space-level or ControlPlane APIs | +| `['upbound:spaces:api']` | This Identity is only for Space-level APIs | +| `['upbound:spaces:controlplanes']` | This Identity is only for ControlPlane APIs | +| `['upbound:spaces:api', 'upbound:spaces:controlplanes']` | This Identity is for both Space-level and ControlPlane APIs | + + +You can set this claim in two ways: + +- In the identity provider mapped in the ID token. 
+- Inject it in the authenticator with the `jwt.claimMappings.extra` array.
+
+For example:
+```yaml
+apiVersion: apiserver.config.k8s.io/v1beta1
+kind: AuthenticationConfiguration
+jwt:
+- issuer:
+    url: https://keycloak:8443/realms/master
+    certificateAuthority: |-
+      
+    audiences:
+    - master-realm
+    audienceMatchPolicy: MatchAny
+  claimMappings:
+    username:
+      claim: "preferred_username"
+      prefix: "keycloak:"
+    groups:
+      claim: "groups"
+      prefix: ""
+    extra:
+    - key: 'upbound.io/aud'
+      valueExpression: "['upbound:spaces:controlplanes', 'upbound:spaces:api']"
+```
+
+## Install the `AuthenticationConfiguration`
+
+Once you create an `AuthenticationConfiguration` file, store this file as a
+`ConfigMap` in the host cluster for the Upbound Space. This example uses
+`oidc-auth-config` as the `ConfigMap` name:
+
+```sh
+kubectl create configmap oidc-auth-config -n upbound-system --from-file=config.yaml=./path/to/config.yaml
+```
+
+
+To enable OIDC authentication and disable Upbound IAM when installing the Space,
+reference the configuration by its `ConfigMap` name and pass an empty value to
+the Upbound IAM issuer parameter:
+
+
+```sh
+up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
+  ...
+  --set "authentication.structuredConfig=oidc-auth-config" \
+  --set "router.controlPlane.extraArgs[0]=--upbound-iam-issuer-url="
+```
+
+## Configure RBAC
+
+
+In this scenario, the external identity provider handles authentication, but
+permissions for Spaces and ControlPlane APIs use standard RBAC objects.
+
+### Spaces APIs
+
+The Spaces APIs include:
+```yaml
+- apiGroups:
+  - spaces.upbound.io
+  resources:
+  - controlplanes
+  - sharedexternalsecrets
+  - sharedsecretstores
+  - backups
+  - backupschedules
+  - sharedbackups
+  - sharedbackupconfigs
+  - sharedbackupschedules
+- apiGroups:
+  - observability.spaces.upbound.io
+  resources:
+  - sharedtelemetryconfigs
+```
+
+### ControlPlane APIs
+
+
+
+Crossplane specifies three [roles][crossplane-managed-clusterroles] for a
+ControlPlane: admin, editor, and viewer. These map to the verbs `admin`, `edit`,
+and `view` on the `controlplanes/k8s` resource in the `spaces.upbound.io` API
+group.
+
+
+### Control access
+
+The `groups` claim in the `AuthenticationConfiguration` allows you to control
+resource access when you create a `ClusterRoleBinding`. A `ClusterRole` defines
+the role's permissions, and a `ClusterRoleBinding` binds them to a subject.
+
+The example below allows `admin` permissions for all ControlPlanes to members of
+the `ctp-admins` group:
+
+```yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: allow-ctp-admin
+rules:
+- apiGroups:
+  - spaces.upbound.io
+  resources:
+  - controlplanes/k8s
+  verbs:
+  - admin
+```
+
+The following `ClusterRoleBinding` grants this role to the `ctp-admins` group:
+```yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: allow-ctp-admin
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: allow-ctp-admin
+subjects:
+- apiGroup: rbac.authorization.k8s.io
+  kind: Group
+  name: ctp-admins
+```
+
+[structured-auth-config]: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#using-authentication-configuration
+[crossplane-managed-clusterroles]: https://github.com/crossplane/crossplane/blob/master/design/design-doc-rbac-manager.md#managed-rbac-clusterroles
+[upbound-rbac]: /manuals/platform/concepts/authorization/upbound-rbac
diff --git a/spaces_versioned_docs/version-v1.12/howtos/self-hosted/proxies-config.md b/spaces_versioned_docs/version-v1.12/howtos/self-hosted/proxies-config.md
new file mode 100644
index 000000000..3802e4cb0
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.12/howtos/self-hosted/proxies-config.md
@@ -0,0 +1,31 @@
+---
+title: Proxied configuration
+sidebar_position: 20
+description: Configure Upbound within a proxied environment
+---
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments in proxied environments. Proxy configuration procedures are consistent across versions.
+:::
+
+
+
+When you install Upbound with Helm in a proxied environment, update the registry values in the following command to point to your internal registry.
+
+
+
+```bash
+helm -n upbound-system upgrade --install spaces \
+  oci://xpkg.upbound.io/spaces-artifacts/spaces \
+  --version "${SPACES_VERSION}" \
+  --set "ingress.host=${SPACES_ROUTER_HOST}" \
+  --set "account=${UPBOUND_ACCOUNT}" \
+  --set "authentication.hubIdentities=true" \
+  --set "authorization.hubRBAC=true" \
+  --set "registry=registry.company.corp/spaces" \
+  --set "controlPlanes.uxp.registryOverride=registry.company.corp/xpkg.upbound.io" \
+  --set "controlPlanes.uxp.repository=registry.company.corp/spaces" \
+  --wait
+```
diff --git a/spaces_versioned_docs/version-v1.12/howtos/self-hosted/query-api.md b/spaces_versioned_docs/version-v1.12/howtos/self-hosted/query-api.md
new file mode 100644
index 000000000..c112e9001
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.12/howtos/self-hosted/query-api.md
@@ -0,0 +1,396 @@
+---
+title: Deploy Query API infrastructure
+weight: 130
+description: Query API
+aliases:
+    - /all-spaces/self-hosted-spaces/query-api
+    - /self-hosted-spaces/query-api
+    - all-spaces/self-hosted-spaces/query-api
+---
+
+
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+). Query API has evolved across versions:
+
+- **Cloud Spaces**: Available since v1.6 (enabled by default)
+- **Self-Hosted**: Available since v1.8 (requires manual enablement)
+:::
+
+:::important
+
+This feature is in preview. The Query API has been available in the Cloud Space offering since `v1.6`, where it's enabled by default.
+
+Since `v1.8.0`, the Query API is a requirement for connecting a Space. It's off by default in self-hosted Spaces; see below to enable it.
+
+:::
+
+Upbound's Query API allows users to inspect objects and resources within their control planes. The read-only `up alpha query` and `up alpha get` CLI commands allow you to gather information about your control planes quickly and efficiently. These commands follow the [`kubectl` conventions][kubectl-conventions] for filtering, sorting, and retrieving information from your Space.
+
+Query API requires a PostgreSQL database to store the data. You can use the default PostgreSQL instance provided by Upbound or bring your own PostgreSQL instance.
+
+## Managed setup
+
+:::tip
+If you don't have specific requirements for your setup, Upbound recommends following this approach.
+:::
+
+To enable this feature, set `features.alpha.apollo.enabled=true` and `apollo.apollo.storage.postgres.create=true` when installing Spaces.
+
+You also need to install CloudNativePG (`CNPG`) to provide the PostgreSQL instance. You can let the `up` CLI do this for you, or install it manually.
+
+For more customization, see the [Helm chart reference][helm-chart-reference]. You can modify the number
+of PostgreSQL instances, pooling instances, storage size, and more.
+
+If you have specific requirements not addressed in the Helm chart, see below for more information on how to bring your own [PostgreSQL setup][postgresql-setup].
+
+### Using the up CLI
+
+Before you begin, make sure you have the most recent version of the [`up` CLI installed][up-cli-installed].
+
+To enable this feature, set `features.alpha.apollo.enabled=true` and `apollo.apollo.storage.postgres.create=true` when installing Spaces:
+
+```bash
+up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
+  ...
+  --set "features.alpha.apollo.enabled=true" \
+  --set "apollo.apollo.storage.postgres.create=true"
+```
+
+`up space init` and `up space upgrade` install CloudNativePG automatically, if needed.
+
+### Helm chart
+
+If you are installing the Helm chart in some other way, you can manually install CloudNativePG in one of the [supported ways][supported-ways], for example:
+
+```shell
+kubectl apply --server-side -f \
+    https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.24.1/cnpg-1.24.1.yaml
+kubectl rollout status -n cnpg-system deployment cnpg-controller-manager -w --timeout 120s
+```
+
+Next, install the Spaces Helm chart with the necessary values, for example:
+
+```shell
+helm -n upbound-system upgrade --install spaces \
+  oci://xpkg.upbound.io/spaces-artifacts/spaces \
+  --version "${SPACES_VERSION}" \
+  ...
+  --set "features.alpha.apollo.enabled=true" \
+  --set "apollo.apollo.storage.postgres.create=true" \
+  --wait
+```
+
+## Self-hosted PostgreSQL configuration
+
+
+If your workflow requires more customization, you can provide your own
+PostgreSQL instance and configure credentials manually.
+
+Using your own PostgreSQL instance requires careful architecture consideration.
+Review the architecture and requirements guidelines.
+
+### Architecture
+
+The Query API architecture uses two components in addition to a PostgreSQL database:
+* **Apollo Syncers**: Watch `etcd` for changes and sync them to PostgreSQL. One or more per control plane.
+* **Apollo Server**: Serves the Query API from the data in PostgreSQL. One or more per Space.
+
+The default setup also uses the `PgBouncer` connection pooler to manage connections from the syncers.
+```mermaid
+graph LR
+    User[User]
+
+    subgraph Cluster["Cluster (Spaces)"]
+        direction TB
+        Apollo[apollo]
+
+        subgraph ControlPlanes["Control Planes"]
+            APIServer[API Server]
+            Syncer[apollo-syncer]
+        end
+    end
+
+    PostgreSQL[(PostgreSQL)]
+
+    User -->|requests| Apollo
+
+    Apollo -->|connects| PostgreSQL
+    Apollo -->|creates schemas & users| PostgreSQL
+
+    Syncer -->|watches| APIServer
+    Syncer -->|writes| PostgreSQL
+
+    PostgreSQL -->|data| Apollo
+
+    style PostgreSQL fill:#e1f5ff,stroke:#333,stroke-width:2px,color:#000
+    style Apollo fill:#ffe1e1,stroke:#333,stroke-width:2px,color:#000
+    style Cluster fill:#f0f0f0,stroke:#333,stroke-width:2px,color:#000
+    style ControlPlanes fill:#fff,stroke:#666,stroke-width:1px,stroke-dasharray: 5 5,color:#000
+```
+
+
+Each component needs to connect to the PostgreSQL database.
+
+In the event of database issues, you can provide a new database and the syncers
+automatically repopulate the data.
+
+### Requirements
+
+* A PostgreSQL 16 instance or cluster.
+* A database, for example named `upbound`.
+* **Optional**: A dedicated user for the Apollo Syncers, for example named `syncer`; otherwise the Spaces Controller generates a dedicated set of credentials per syncer with the necessary permissions.
+* A dedicated **superuser or admin account** for the Apollo Server.
+* **Optional**: A connection pooler, like PgBouncer, to manage connections from the Apollo Syncers. If you didn't provide the optional users, you might have to configure the pooler to allow users to connect using the same credentials as PostgreSQL.
+* **Optional**: A read replica for the Apollo Syncers to connect to, to reduce load on the primary database; this might cause a slight delay in the data being available through the Query API.
+
+Below you can find examples of setups to get you started; you can mix and match the examples to suit your needs.
+
+### In-cluster setup
+
+:::tip
+
+If you don't have strong opinions on your setup, but still want full control on
+the resources created for some unsupported customizations, Upbound recommends
+the in-cluster setup.
+
+:::
+
+For more customization than the managed setup, you can use CloudNativePG for
+PostgreSQL in the same cluster.
+
+For in-cluster setup, manually deploy the operator in one of the [supported ways][supported-ways-1], for example:
+
+```shell
+kubectl apply --server-side -f \
+    https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.24.1/cnpg-1.24.1.yaml
+kubectl rollout status -n cnpg-system deployment cnpg-controller-manager -w --timeout 120s
+```
+
+Then create a `Cluster` and `Pooler` in the `upbound-system` namespace, for example:
+
+```shell
+kubectl create ns upbound-system
+
+kubectl apply -f - <<EOF
+# A minimal example Cluster and Pooler. The names, storage size, and
+# instance counts here are illustrative; adjust them to your environment.
+apiVersion: postgresql.cnpg.io/v1
+kind: Cluster
+metadata:
+  name: spaces-pg
+  namespace: upbound-system
+spec:
+  instances: 3
+  storage:
+    size: 10Gi
+---
+apiVersion: postgresql.cnpg.io/v1
+kind: Pooler
+metadata:
+  name: spaces-pg-pooler
+  namespace: upbound-system
+spec:
+  cluster:
+    name: spaces-pg
+  instances: 2
+  type: rw
+  pgbouncer:
+    poolMode: transaction
+EOF
+```
+
+### External setup
+
+
+:::tip
+
+If you want to run your PostgreSQL instance outside the cluster, but are fine with credentials being managed by the `apollo` user, this is the suggested way to proceed.
+
+:::
+
+When using this setup, you must manually create the required Secrets in the
+`upbound-system` namespace. The `apollo` user must have permissions to create
+schemas and users.
+
+```shell
+
+kubectl create ns upbound-system
+
+# A Secret containing the necessary credentials to connect to the PostgreSQL instance
+kubectl create secret generic spaces-apollo-pg-app -n upbound-system \
+  --from-literal=password=supersecret
+
+# A Secret containing the necessary CA certificate to verify the connection to the PostgreSQL instance
+kubectl create secret generic spaces-apollo-pg-ca -n upbound-system \
+  --from-file=ca.crt=/path/to/ca.crt
+```
+
+Next, install Spaces with the necessary settings:
+
+```shell
+export PG_URL=your-postgres-host:5432
+export PG_POOLED_URL=your-pgbouncer-host:5432 # this could be the same as above
+
+helm upgrade --install ... \
+  --set "features.alpha.apollo.enabled=true" \
+  --set "apollo.apollo.storage.postgres.create=false" \
+  --set "apollo.apollo.storage.postgres.connection.url=$PG_URL" \
+  --set "apollo.apollo.storage.postgres.connection.credentials.secret.name=spaces-apollo-pg-app" \
+  --set "apollo.apollo.storage.postgres.connection.credentials.format=basicauth" \
+  --set "apollo.apollo.storage.postgres.connection.ca.name=spaces-apollo-pg-ca" \
+  --set "apollo.apollo.storage.postgres.connection.syncer.url=$PG_POOLED_URL"
+```
+
+### External setup with all custom credentials
+
+For custom credentials with Apollo Syncers or Server, create a new secret in the
+`upbound-system` namespace:
+
+```shell
+export APOLLO_SYNCER_USER=syncer
+export APOLLO_SERVER_USER=apollo
+
+kubectl create ns upbound-system
+
+# A Secret containing the necessary credentials to connect to the PostgreSQL instance
+kubectl create secret generic spaces-apollo-pg-app -n upbound-system \
+  --from-literal=password=supersecret
+
+# A Secret containing the necessary CA certificate to verify the connection to the PostgreSQL instance
+kubectl create secret generic spaces-apollo-pg-ca -n upbound-system \
+  --from-file=ca.crt=/path/to/ca.crt
+
+# A Secret containing the necessary credentials for the Apollo Syncers to connect to the PostgreSQL instance.
+# These will be used by all Syncers in the Space.
+kubectl create secret generic spaces-apollo-pg-syncer -n upbound-system \
+  --from-literal=username=$APOLLO_SYNCER_USER \
+  --from-literal=password=supersecret
+
+# A Secret containing the necessary credentials for the Apollo Server to connect to the PostgreSQL instance.
+kubectl create secret generic spaces-apollo-pg-apollo -n upbound-system \
+  --from-literal=username=$APOLLO_SERVER_USER \
+  --from-literal=password=supersecret
+```
+
+Next, install Spaces with the necessary settings. The `connection.syncer.*`
+values configure the syncers and the `connection.apollo.*` values configure the
+server:
+
+```shell
+export PG_URL=your-postgres-host:5432
+export PG_POOLED_URL=your-pgbouncer-host:5432 # this could be the same as above
+
+helm ... \
+  --set "features.alpha.apollo.enabled=true" \
+  --set "apollo.apollo.storage.postgres.create=false" \
+  --set "apollo.apollo.storage.postgres.connection.url=$PG_URL" \
+  --set "apollo.apollo.storage.postgres.connection.credentials.secret.name=spaces-apollo-pg-app" \
+  --set "apollo.apollo.storage.postgres.connection.credentials.format=basicauth" \
+  --set "apollo.apollo.storage.postgres.connection.ca.name=spaces-apollo-pg-ca" \
+  --set "apollo.apollo.storage.postgres.connection.syncer.url=$PG_POOLED_URL" \
+  --set "apollo.apollo.storage.postgres.connection.syncer.credentials.format=basicauth" \
+  --set "apollo.apollo.storage.postgres.connection.syncer.credentials.user=$APOLLO_SYNCER_USER" \
+  --set "apollo.apollo.storage.postgres.connection.syncer.credentials.secret.name=spaces-apollo-pg-syncer" \
+  --set "apollo.apollo.storage.postgres.connection.apollo.credentials.format=basicauth" \
+  --set "apollo.apollo.storage.postgres.connection.apollo.credentials.user=$APOLLO_SERVER_USER" \
+  --set "apollo.apollo.storage.postgres.connection.apollo.credentials.secret.name=spaces-apollo-pg-apollo" \
+  --set "apollo.apollo.storage.postgres.connection.apollo.url=$PG_POOLED_URL"
+```
+
+
+## Using the Query API
+
+
+See the [Query API documentation][query-api-documentation] for more information on how to use the Query API.
+
+
+
+
+[postgresql-setup]: #self-hosted-postgresql-configuration
+[up-cli-installed]: /manuals/cli/overview
+[query-api-documentation]: /spaces/howtos/query-api
+
+[helm-chart-reference]: /reference/helm-reference
+[kubectl-conventions]: https://kubernetes.io/docs/reference/kubectl/generated/kubectl_get/
+[supported-ways]: https://cloudnative-pg.io/documentation/current/installation_upgrade/
+[supported-ways-1]: https://cloudnative-pg.io/documentation/current/installation_upgrade/
+[cloudnativepg-documentation]: https://cloudnative-pg.io/documentation/1.24/storage/#configuration-via-a-pvc-template
+[postgresql-cluster]: https://cloudnative-pg.io/documentation/1.24/resource_management/
+[pooler]: https://cloudnative-pg.io/documentation/1.24/connection_pooling/#pod-templates
+[postgresql-cluster-2]: https://cloudnative-pg.io/documentation/1.24/replication/
+[pooler-3]: https://cloudnative-pg.io/documentation/1.24/connection_pooling/#high-availability-ha
+[postgresql-cluster-4]: https://cloudnative-pg.io/documentation/1.24/operator_capability_levels/#override-of-operand-images-through-the-crd
+[pooler-5]: https://cloudnative-pg.io/documentation/1.24/connection_pooling/#pod-templates
+[cloudnativepg-documentation-6]: https://cloudnative-pg.io/documentation/1.24/postgresql_conf/
+diff --git a/spaces_versioned_docs/version-v1.12/howtos/self-hosted/scaling-resources.md b/spaces_versioned_docs/version-v1.12/howtos/self-hosted/scaling-resources.md
new file mode 100644
index 000000000..7bb04d2c2
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.12/howtos/self-hosted/scaling-resources.md
@@ -0,0 +1,184 @@
+---
+title: Scaling vCluster and etcd Resources
+weight: 950
+description: A guide for scaling vCluster and etcd resources in self-hosted Spaces
+aliases:
+    - /all-spaces/self-hosted-spaces/scaling-resources
+    - /spaces/scaling-resources
+---
+
+With large workloads or during control plane migration, you may encounter
+performance-impacting resource constraints. This guide explains how to scale
+vCluster and `etcd` resources for optimal performance in your self-hosted Space.
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Scaling procedures are consistent across versions.
+:::
+
+## Signs of resource constraints
+
+You may need to scale your vCluster or `etcd` resources if you observe:
+
+- API server timeout errors such as `http: Handler timeout`
+- Error messages about `too many requests` and requests to `try again later`
+- Operations like provider installation failing with errors like `cannot apply provider package secret`
+- vCluster pods experiencing continuous restarts
+- API performance degrading under high resource volume
+
+
+## Scaling vCluster resources
+
+
+The vCluster component handles Kubernetes API requests for your control planes.
+Deployments with multiple control planes or providers may exceed default resource allocations.
+
+```yaml
+# Default settings
+controlPlanes.vcluster.resources.limits.cpu: "3000m"
+controlPlanes.vcluster.resources.limits.memory: "3960Mi"
+controlPlanes.vcluster.resources.requests.cpu: "170m"
+controlPlanes.vcluster.resources.requests.memory: "1320Mi"
+```
+
+For larger workloads, like migrating from an existing control plane with several
+providers, increase these resource limits in your Spaces `values.yaml` file.
+
+```yaml
+controlPlanes:
+  vcluster:
+    resources:
+      limits:
+        cpu: "4000m"      # Increase to 4 cores
+        memory: "6Gi"     # Increase to 6GB memory
+      requests:
+        cpu: "500m"       # Increase baseline CPU request
+        memory: "2Gi"     # Increase baseline memory request
+```
+
+## Scaling `etcd` storage
+
+Kubernetes relies on `etcd`, whose performance can bottleneck on IOPS
+(input/output operations per second). Upbound allocates `50Gi` volumes for `etcd`
+in cloud environments to ensure adequate IOPS performance.
+
+```yaml
+# Default setting
+controlPlanes.etcd.persistence.size: "5Gi"
+```
+
+For production environments or when migrating large control planes, increase
+`etcd` volume size and specify an appropriate storage class:
+
+```yaml
+controlPlanes:
+  etcd:
+    persistence:
+      size: "50Gi"                  # Recommended for production
+      storageClassName: "fast-ssd"  # Use a high-performance storage class
+```
+
+### Storage class considerations
+
+For AWS:
+- Use GP3 volumes with adequate IOPS
+- For AWS GP3 volumes, IOPS scale with volume size (3000 IOPS baseline)
+- For optimal performance, provision at least 32Gi to support up to 16,000 IOPS
+
+For GCP and Azure:
+- Use SSD-based persistent disk types for optimal performance
+- Consider premium storage options for high-throughput workloads
+
+## Scaling Crossplane resources
+
+Crossplane manages provider resources in your control planes. You may need to increase provider resources for larger deployments:
+
+```yaml
+# Default settings
+controlPlanes.uxp.resourcesCrossplane.requests.cpu: "370m"
+controlPlanes.uxp.resourcesCrossplane.requests.memory: "400Mi"
+```
+
+
+For environments with many providers or managed resources:
+
+
+```yaml
+controlPlanes:
+  uxp:
+    resourcesCrossplane:
+      limits:
+        cpu: "1000m"      # Add CPU limit
+        memory: "1Gi"     # Add memory limit
+      requests:
+        cpu: "500m"       # Increase CPU request
+        memory: "512Mi"   # Increase memory request
+```
+
+## High availability configuration
+
+For production environments, enable High Availability mode to ensure resilience:
+
+```yaml
+controlPlanes:
+  ha:
+    enabled: true
+```
+
+## Best practices for migration scenarios
+
+When migrating from existing control planes into a self-hosted Space:
+
+1. **Pre-scale resources**: Scale up resources before performing the migration
+2. **Monitor resource usage**: Watch resource consumption during and after migration with `kubectl top pods`
+3. **Scale incrementally**: If issues persist, increase resources incrementally until performance stabilizes
+4. **Consider storage performance**: `etcd` is sensitive to storage I/O performance
+
+## Helm values configuration
+
+Apply these settings through your Spaces Helm values file:
+
+```yaml
+controlPlanes:
+  vcluster:
+    resources:
+      limits:
+        cpu: "4000m"
+        memory: "6Gi"
+      requests:
+        cpu: "500m"
+        memory: "2Gi"
+  etcd:
+    persistence:
+      size: "50Gi"
+      storageClassName: "gp3"  # Use your cloud provider's fast storage class
+  uxp:
+    resourcesCrossplane:
+      limits:
+        cpu: "1000m"
+        memory: "1Gi"
+      requests:
+        cpu: "500m"
+        memory: "512Mi"
+  ha:
+    enabled: true  # For production environments
+```
+
+Apply the configuration using Helm:
+
+```bash
+helm upgrade --install spaces oci://xpkg.upbound.io/spaces-artifacts/spaces \
+  -f values.yaml \
+  -n upbound-system
+```
+
+## Considerations
+
+- **Provider count**: Each provider adds resource overhead - consider using provider families to optimize resource usage
+- **Managed resources**: The number of managed resources impacts CPU usage more than memory
+- **Vertical pod autoscaling**: Consider using vertical pod autoscaling in Kubernetes to automatically adjust resources based on usage
+- **Storage performance**: Storage performance is as important as capacity for etcd
+- **Network latency**: Low-latency connections between components improve performance
+
+
diff --git a/spaces_versioned_docs/version-v1.12/howtos/self-hosted/self-hosted-spaces-deployment.md b/spaces_versioned_docs/version-v1.12/howtos/self-hosted/self-hosted-spaces-deployment.md
new file mode 100644
index 000000000..e549e3939
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.12/howtos/self-hosted/self-hosted-spaces-deployment.md
@@ -0,0 +1,461 @@
+---
+title: Deployment Workflow
+sidebar_position: 3
+description: A quickstart guide for Upbound Spaces
+tier: "business"
+---
+import GlobalLanguageSelector, { CodeBlock } from '@site/src/components/GlobalLanguageSelector';
+
+
+
+
+
+This guide deploys a self-hosted Upbound cluster in AWS.
+
+
+
+
+
+This guide deploys a self-hosted Upbound cluster in Azure.
+
+
+
+
+
+This guide deploys a self-hosted Upbound cluster in GCP.
+
+
+
+Disconnected Spaces allows you to host control planes in your preferred environment.
+
+## Prerequisites
+
+To get started deploying your own Disconnected Space, you need:
+
+- An Upbound organization account string, provided by your Upbound account representative
+- A `token.json` license, provided by your Upbound account representative
+
+
+
+- An AWS account and the AWS CLI
+
+
+
+
+
+- An Azure account and the Azure CLI
+
+
+
+
+
+- A GCP account and the gcloud CLI
+
+
+
+:::important
+Disconnected Spaces are a business critical feature of Upbound and require a license token to successfully complete the installation. [Contact Upbound][contact-upbound] if you want to try out Upbound with Disconnected Spaces.
+:::
+
+## Provision the hosting environment
+
+### Create a cluster
+
+
+
+Configure the name and target region you want the EKS cluster deployed to.
+
+```ini
+export SPACES_CLUSTER_NAME=upbound-space-quickstart
+export SPACES_REGION=us-east-1
+```
+
+Provision a 3-node cluster using eksctl.
+
+```bash
+cat <<EOF | eksctl create cluster -f -
+# A representative 3-node cluster config; adjust the instance type and
+# other settings to suit your environment.
+apiVersion: eksctl.io/v1alpha5
+kind: ClusterConfig
+metadata:
+  name: ${SPACES_CLUSTER_NAME}
+  region: ${SPACES_REGION}
+managedNodeGroups:
+  - name: ng-1
+    instanceType: m5.xlarge
+    desiredCapacity: 3
+EOF
+```
+
+
+
+
+Configure the name and target region you want the AKS cluster deployed to.
+
+```ini
+export SPACES_RESOURCE_GROUP_NAME=upbound-space-quickstart
+export SPACES_CLUSTER_NAME=upbound-space-quickstart
+export SPACES_LOCATION=westus
+```
+
+Provision a new Azure resource group.
+ +```bash +az group create --name ${SPACES_RESOURCE_GROUP_NAME} --location ${SPACES_LOCATION} +``` + +Provision a 3-node cluster. + +```bash +az aks create -g ${SPACES_RESOURCE_GROUP_NAME} -n ${SPACES_CLUSTER_NAME} \ + --enable-managed-identity \ + --node-count 3 \ + --node-vm-size Standard_D4s_v4 \ + --enable-addons monitoring \ + --enable-msi-auth-for-monitoring \ + --generate-ssh-keys \ + --network-plugin kubenet \ + --network-policy calico +``` + +Get the kubeconfig of your AKS cluster. + +```bash +az aks get-credentials --resource-group ${SPACES_RESOURCE_GROUP_NAME} --name ${SPACES_CLUSTER_NAME} +``` + + + + + +Configure the name and target region you want the GKE cluster deployed to. + +```ini +export SPACES_PROJECT_NAME=upbound-spaces-project +export SPACES_CLUSTER_NAME=upbound-spaces-quickstart +export SPACES_LOCATION=us-west1-a +``` + +Create a new project and set it as the current project. + +```bash +gcloud projects create ${SPACES_PROJECT_NAME} +gcloud config set project ${SPACES_PROJECT_NAME} +``` + +Provision a 3-node cluster. + +```bash +gcloud container clusters create ${SPACES_CLUSTER_NAME} \ + --enable-network-policy \ + --num-nodes=3 \ + --zone=${SPACES_LOCATION} \ + --machine-type=e2-standard-4 +``` + +Get the kubeconfig of your GKE cluster. + +```bash +gcloud container clusters get-credentials ${SPACES_CLUSTER_NAME} --zone=${SPACES_LOCATION} +``` + + + +## Configure the pre-install + +### Set your Upbound organization account details + +Set your Upbound organization account string as an environment variable for use in future steps + +```ini +export UPBOUND_ACCOUNT= +``` + +### Set up pre-install configurations + +Export the path of the license token JSON file provided by your Upbound account representative. + +```ini {copy-lines="2"} +# Change the path to where you saved the token. +export SPACES_TOKEN_PATH="/path/to/token.json" +``` + +Set the version of Spaces software you want to install. + +```ini +export SPACES_VERSION= +``` + +Set the router host and cluster type. The `SPACES_ROUTER_HOST` is the domain name that's used to access the control plane instances. It's used by the ingress controller to route requests. + +```ini +export SPACES_ROUTER_HOST="proxy.upbound-127.0.0.1.nip.io" +``` + +:::important +Make sure to replace the placeholder text in `SPACES_ROUTER_HOST` and provide a real domain that you own. +::: + + +## Install the Spaces software + + +### Install cert-manager + +Install cert-manager. + +```bash +kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.11.0/cert-manager.yaml +kubectl wait deployment -n cert-manager cert-manager-webhook --for condition=Available=True --timeout=360s +``` + + + +### Install ALB Load Balancer + +```bash +helm install aws-load-balancer-controller aws-load-balancer-controller --namespace kube-system \ + --repo https://aws.github.io/eks-charts \ + --set clusterName=${SPACES_CLUSTER_NAME} \ + --set serviceAccount.create=false \ + --set serviceAccount.name=aws-load-balancer-controller \ + --wait +``` + + + +### Install ingress-nginx + +Starting with Spaces v1.10.0, you need to configure the ingress-nginx +controller to allow SSL-passthrough mode. You can do so by passing the +`--enable-ssl-passthrough=true` command-line option to the controller. 
+The following Helm install command enables this with the `controller.extraArgs` +parameter: + + + +```bash +helm upgrade --install ingress-nginx ingress-nginx \ + --create-namespace --namespace ingress-nginx \ + --repo https://kubernetes.github.io/ingress-nginx \ + --version 4.12.1 \ + --set 'controller.service.type=LoadBalancer' \ + --set 'controller.extraArgs.enable-ssl-passthrough=true' \ + --set 'controller.service.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-type=external' \ + --set 'controller.service.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-scheme=internet-facing' \ + --set 'controller.service.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-nlb-target-type=ip' \ + --set 'controller.service.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-healthcheck-protocol=http' \ + --set 'controller.service.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-healthcheck-path=/healthz' \ + --set 'controller.service.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-healthcheck-port=10254' \ + --wait +``` + + + + + +```bash +helm upgrade --install ingress-nginx ingress-nginx \ + --create-namespace --namespace ingress-nginx \ + --repo https://kubernetes.github.io/ingress-nginx \ + --version 4.12.1 \ + --set 'controller.service.type=LoadBalancer' \ + --set 'controller.extraArgs.enable-ssl-passthrough=true' \ + --set 'controller.service.annotations.service\.beta\.kubernetes\.io/azure-load-balancer-health-probe-request-path=/healthz' \ + --wait +``` + + + + + +```bash +helm upgrade --install ingress-nginx ingress-nginx \ + --create-namespace --namespace ingress-nginx \ + --repo https://kubernetes.github.io/ingress-nginx \ + --version 4.12.1 \ + --set 'controller.service.type=LoadBalancer' \ + --set 'controller.extraArgs.enable-ssl-passthrough=true' \ + --wait +``` + + + +### Install Upbound Spaces software + +Create an image pull secret so that the cluster can pull Upbound Spaces images. + +```bash +kubectl create ns upbound-system +kubectl -n upbound-system create secret docker-registry upbound-pull-secret \ + --docker-server=https://xpkg.upbound.io \ + --docker-username="$(jq -r .accessId $SPACES_TOKEN_PATH)" \ + --docker-password="$(jq -r .token $SPACES_TOKEN_PATH)" +``` + +Log in with Helm to be able to pull chart images for the installation commands. + +```bash +jq -r .token $SPACES_TOKEN_PATH | helm registry login xpkg.upbound.io -u $(jq -r .accessId $SPACES_TOKEN_PATH) --password-stdin +``` + +Install the Spaces software. + +```bash +helm -n upbound-system upgrade --install spaces \ + oci://xpkg.upbound.io/spaces-artifacts/spaces \ + --version "${SPACES_VERSION}" \ + --set "ingress.host=${SPACES_ROUTER_HOST}" \ + --set "account=${UPBOUND_ACCOUNT}" \ + --set "authentication.hubIdentities=true" \ + --set "authorization.hubRBAC=true" \ + --wait +``` + +### Create a DNS record + +:::important +If you chose to create a public ingress, you also need to create a DNS record for the load balancer of the public facing ingress. Do this before you create your first control plane. +::: + +Create a DNS record for the load balancer of the public facing ingress. 
To get the address for the Ingress, run the following:
+
+
+
+```bash
+kubectl get ingress \
+  -n upbound-system mxe-router-ingress \
+  -o jsonpath='{.status.loadBalancer.ingress[0].hostname}'
+```
+
+
+
+
+
+```bash
+kubectl get ingress \
+  -n upbound-system mxe-router-ingress \
+  -o jsonpath='{.status.loadBalancer.ingress[0].ip}'
+```
+
+
+
+
+
+```bash
+kubectl get ingress \
+  -n upbound-system mxe-router-ingress \
+  -o jsonpath='{.status.loadBalancer.ingress[0].ip}'
+```
+
+
+
+If the preceding command doesn't return a load balancer address, then your provider may not have allocated it yet. Once it's available, add a DNS record for the `SPACES_ROUTER_HOST` to point to the given load balancer address. If it's an IPv4 address, add an A record. If it's a domain name, add a CNAME record.
+
+## Configure the up CLI
+
+With your kubeconfig pointed at the Kubernetes cluster where you installed
+Upbound Spaces, create a new profile in the `up` CLI. This profile interacts
+with your Space:
+
+```bash
+up profile create --use ${SPACES_CLUSTER_NAME} --type=disconnected --organization ${UPBOUND_ACCOUNT}
+```
+
+Optionally, log in to your Upbound account using the new profile so you can use the Upbound Marketplace with this profile as well:
+
+```bash
+up login
+```
+
+
+## Connect to your Space
+
+
+Use `up ctx` to create a kubeconfig context pointed at your new Space:
+
+```bash
+up ctx disconnected/$(kubectl config current-context)
+```
+
+## Create your first control plane
+
+You can now create a control plane with the `up` CLI:
+
+```bash
+up ctp create ctp1
+```
+
+You can also create a control plane with kubectl:
+
+```yaml
+cat <<EOF | kubectl apply -f -
+# A minimal ControlPlane manifest; adjust the name and namespace as needed.
+apiVersion: spaces.upbound.io/v1beta1
+kind: ControlPlane
+metadata:
+  name: ctp1
+  namespace: default
+EOF
+```
+
+```yaml
+observability:
+  spacesCollector:
+    env:
+      - name: API_KEY
+        valueFrom:
+          secretKeyRef:
+            name: my-secret
+            key: api-key
+    config:
+      exporters:
+        otlphttp:
+          endpoint: ""
+          headers:
+            api-key: ${env:API_KEY}
+      exportPipeline:
+        logs:
+          - otlphttp
+        metrics:
+          - otlphttp
+        traces:
+          - otlphttp
+```
+
+
+You can export metrics, logs, and traces from your Crossplane installation, Spaces
+infrastructure (controller, API, router, etc.), provider-helm, and
+provider-kubernetes.
+
+### Router metrics
+
+The Spaces router component uses Envoy as a reverse proxy and exposes detailed
+metrics about request handling, circuit breakers, and connection pooling.
+Upbound collects these metrics in your Space after you enable Space-level
+observability.
+
+Envoy metrics in Upbound include:
+
+- **Upstream cluster metrics** - Request status codes, timeouts, retries, and latency for traffic to control planes and services
+- **Circuit breaker metrics** - Connection and request circuit breaker state for both `DEFAULT` and `HIGH` priority levels
+- **Downstream listener metrics** - Client connections and requests received
+- **HTTP connection manager metrics** - End-to-end HTTP request processing and latency
+
+For a complete list of available router metrics and example PromQL queries, see the [Router metrics reference][router-ref].
+
+### Router tracing
+
+The Spaces router generates distributed traces through OpenTelemetry integration,
+providing end-to-end visibility into request flow across the system. Use these
+traces to debug latency issues, understand request paths, and correlate errors
+across services.
+
+The router uses:
+
+- **Protocol**: OTLP (OpenTelemetry Protocol) over gRPC
+- **Service name**: `spaces-router`
+- **Transport**: TLS-encrypted connection to telemetry collector
+
+#### Trace configuration
+
+Enable tracing and configure the sampling rate with the following Helm values:
+
+```yaml
+observability:
+  enabled: true
+  tracing:
+    enabled: true
+    sampling:
+      rate: 0.1 # Sample 10% of new traces (0.0-1.0)
+```
+
+The sampling behavior depends on whether a parent trace context exists:
+
+- **With parent context**: If a `traceparent` header is present, the parent's
+  sampling decision is respected, enabling proper distributed tracing across services.
+- **Root spans**: For new traces without a parent, Envoy samples based on
+  `x-request-id` hashing. The default sampling rate is 10%.
+
+#### TLS configuration for external collectors
+
+To send traces to an external OTLP collector, configure the endpoint and TLS settings:
+
+```yaml
+observability:
+  enabled: true
+  tracing:
+    enabled: true
+    endpoint: "otlp-gateway.example.com"
+    port: 443
+    tls:
+      caBundleSecretRef: "custom-ca-secret"
+```
+
+If `caBundleSecretRef` is set, the router uses the CA bundle from the referenced
+Kubernetes secret. The secret must contain a key named `ca.crt` with the
+PEM-encoded CA bundle. If not set, the router uses the Spaces CA for the
+in-cluster collector.
+
+#### Custom trace tags
+
+The router adds custom tags to every span to enable filtering and grouping by
+control plane:
+
+| Tag | Source | Description |
+|-----|--------|-------------|
+| `controlplane.id` | `x-upbound-mxp-id` header | Control plane UUID |
+| `controlplane.name` | `x-upbound-mxp-host` header | Internal vcluster hostname |
+| `hostcluster.id` | `x-upbound-hostcluster-id` header | Host cluster identifier |
+
+These tags enable queries like "show all slow requests to control plane X" or
+"find errors for control planes in host cluster Y."
+
+#### Example trace
+
+The following example shows the attributes from a successful GET request:
+
+```text
+Span: ingress
+├─ Service: spaces-router
+├─ Duration: 8.025ms
+├─ Attributes:
+│ ├─ http.method: GET
+│ ├─ http.status_code: 200
+│ ├─ upstream_cluster: ctp-b2b37aaa-ee55-492c-ba0c-4d561a6325fa-api-cluster
+│ ├─ controlplane.id: b2b37aaa-ee55-492c-ba0c-4d561a6325fa
+│ ├─ controlplane.name: vcluster.mxp-b2b37aaa-ee55-492c-ba0c-4d561a6325fa-system
+│ └─ response_size: 1827
+```
+
+## Available metrics
+
+Space-level observability collects metrics from multiple infrastructure components:
+
+### Infrastructure component metrics
+
+- Crossplane controller metrics
+- Spaces controller, API, and router metrics
+- Provider metrics (provider-helm, provider-kubernetes)
+
+### Router metrics
+
+The router component exposes Envoy proxy metrics for monitoring traffic flow and
+service health. Key metric categories include:
+
+- `envoy_cluster_upstream_rq_*` - Upstream request metrics (status codes, timeouts, retries, latency)
+- `envoy_cluster_circuit_breakers_*` - Circuit breaker state and capacity
+- `envoy_listener_downstream_*` - Client connection and request metrics
+- `envoy_http_downstream_*` - HTTP request processing metrics
+
+Example query to monitor total request rate:
+
+```promql
+sum(rate(envoy_cluster_upstream_rq_total{job="spaces-router-envoy"}[5m]))
+```
+
+Example query for P95 latency:
+
+```promql
+histogram_quantile(
+  0.95,
+  sum by (le) (
+    rate(envoy_cluster_upstream_rq_time_bucket{job="spaces-router-envoy"}[5m])
+  )
+)
+```
+
+For detailed router metrics documentation and more query examples, see the [Router metrics reference][router-ref].
+
+
+## OpenTelemetryCollector image
+
+
+Control plane (`SharedTelemetry`) and Space observability deploy the same custom
+OpenTelemetry Collector image, which supports the `otlphttp`, `datadog`, and
+`debug` exporters.
+
+For more information on observability configuration, review the [Helm chart reference][helm-chart-reference].
+
+## Observability in control planes
+
+Read the [observability documentation][observability-documentation] to learn
+about the features Upbound offers for collecting telemetry from control planes.
+
+
+## Router metrics reference {#router-ref}
+
+To avoid overwhelming observability tools with hundreds of Envoy metrics, an
+allow-list filters metrics to only the following metric families.
+
+### Upstream cluster metrics
+
+Metrics tracking requests sent from Envoy to configured upstream clusters.
+Individual control planes, spaces-api, and other services are each considered
+an upstream cluster. Use these metrics to monitor service health, identify
+upstream errors, and measure backend latency.
+
+| Metric | Description |
+|--------|-------------|
+| `envoy_cluster_upstream_rq_xx_total` | HTTP status codes (2xx, 3xx, 4xx, 5xx) with label `envoy_response_code_class` |
+| `envoy_cluster_upstream_rq_timeout_total` | Requests that timed out waiting for upstream |
+| `envoy_cluster_upstream_rq_retry_limit_exceeded_total` | Requests that exhausted retry attempts |
+| `envoy_cluster_upstream_rq_total` | Total upstream requests |
+| `envoy_cluster_upstream_rq_time_bucket` | Latency histogram (for P50/P95/P99 calculations) |
+| `envoy_cluster_upstream_rq_time_sum` | Sum of request durations |
+| `envoy_cluster_upstream_rq_time_count` | Count of requests |
+
+### Circuit breaker metrics
+
+
+
+Metrics tracking circuit breaker state and remaining capacity. Circuit breakers
+prevent cascading failures by limiting connections and concurrent requests to
+unhealthy upstreams. Two priority levels exist: `DEFAULT` for watch requests and
+`HIGH` for API requests.
+
+
+| Name | Description |
+|--------|-------------|
+| `envoy_cluster_circuit_breakers_default_cx_open` | `DEFAULT` priority connection circuit breaker open (gauge) |
+| `envoy_cluster_circuit_breakers_default_rq_open` | `DEFAULT` priority request circuit breaker open (gauge) |
+| `envoy_cluster_circuit_breakers_default_remaining_cx` | Available `DEFAULT` priority connections (gauge) |
+| `envoy_cluster_circuit_breakers_default_remaining_rq` | Available `DEFAULT` priority request slots (gauge) |
+| `envoy_cluster_circuit_breakers_high_cx_open` | `HIGH` priority connection circuit breaker open (gauge) |
+| `envoy_cluster_circuit_breakers_high_rq_open` | `HIGH` priority request circuit breaker open (gauge) |
+| `envoy_cluster_circuit_breakers_high_remaining_cx` | Available `HIGH` priority connections (gauge) |
+| `envoy_cluster_circuit_breakers_high_remaining_rq` | Available `HIGH` priority request slots (gauge) |
+
+### Downstream listener metrics
+
+Metrics tracking requests received from clients such as kubectl and API consumers.
+Use these metrics to monitor client connection patterns, overall request volume,
+and responses sent to external users.
+
+| Name | Description |
+|--------|-------------|
+| `envoy_listener_downstream_rq_xx_total` | HTTP status codes for responses sent to clients |
+| `envoy_listener_downstream_rq_total` | Total requests received from clients |
+| `envoy_listener_downstream_cx_total` | Total connections from clients |
+| `envoy_listener_downstream_cx_active` | Currently active client connections (gauge) |
+
+
+
+### HTTP connection manager metrics
+
+
+Metrics from Envoy's HTTP connection manager tracking end-to-end request
+processing. These metrics provide a comprehensive view of the HTTP request
+lifecycle including status codes and client-perceived latency.
+
+| Name | Description |
+|--------|-------------|
+| `envoy_http_downstream_rq_xx` | HTTP status codes (note: no `_total` suffix for this metric family) |
+| `envoy_http_downstream_rq_total` | Total HTTP requests received |
+| `envoy_http_downstream_rq_time_bucket` | Downstream request latency histogram |
+| `envoy_http_downstream_rq_time_sum` | Sum of downstream request durations |
+| `envoy_http_downstream_rq_time_count` | Count of downstream requests |
+
+[router-ref]: #router-ref
+[observability-documentation]: /spaces/howtos/observability
+[opentelemetry-collector]: https://opentelemetry.io/docs/collector/
+[opentelemetry-operator]: https://opentelemetry.io/docs/kubernetes/operator/
+[helm-chart-reference]: /reference/helm-reference
diff --git a/spaces_versioned_docs/version-v1.12/howtos/self-hosted/spaces-management.md b/spaces_versioned_docs/version-v1.12/howtos/self-hosted/spaces-management.md
new file mode 100644
index 000000000..3df61c306
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.12/howtos/self-hosted/spaces-management.md
@@ -0,0 +1,219 @@
+---
+title: Interacting with Disconnected Spaces
+sidebar_position: 10
+description: Common operations in Spaces
+---
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Spaces management procedures are consistent across versions.
+
+For version compatibility details, see the [Spaces Release Notes][spaces-release-notes].
+:::
+
+## Spaces management
+
+### Create a Space
+
+To install an Upbound Space, it's recommended that you dedicate an entire Kubernetes cluster to the Space. You can use [up space init][up-space-init] to install an Upbound Space. Below is an example:
+
+```bash
+up space init "v1.9.0"
+```
+:::tip
+For a full guide to get started with Spaces, read the [quickstart][quickstart] guide.
+:::
+
+You can also install the Helm chart for Spaces directly. For a Spaces install to succeed, you must first install and configure some prerequisites. These include:
+
+- UXP
+- provider-helm and provider-kubernetes
+- cert-manager
+
+Furthermore, the Spaces chart requires a pull secret, which Upbound must provide to you.
+
+```bash
+helm -n upbound-system upgrade --install spaces \
+  oci://xpkg.upbound.io/spaces-artifacts/spaces \
+  --version "v1.9.0" \
+  --set "ingress.host=your-host.com" \
+  --set "clusterType=eks" \
+  --set "account=your-upbound-account" \
+  --wait
+```
+For a complete tutorial of the Helm install, read one of the deployment guides for [AWS][aws], [Azure][azure], or [GCP][gcp], which cover the step-by-step process.
+
+### Upgrade a Space
+
+To upgrade a Space from one version to the next, use [up space upgrade][up-space-upgrade]. Spaces supports upgrading from version `x.N.*` to version `x.N+1.*`.
+
+```bash
+up space upgrade "v1.9.0"
+```
+
+You can also upgrade a Space by manually bumping the Helm chart version. Before
+upgrading, review the release notes for any breaking changes or
+special requirements:
+
+1. Review the release notes for the target version in the [Spaces Release Notes][spaces-release-notes]
+2. Upgrade the Space by updating the Helm chart version:
+
+```bash
+helm -n upbound-system upgrade spaces \
+  oci://xpkg.upbound.io/spaces-artifacts/spaces \
+  --version "v1.9.0" \
+  --reuse-values \
+  --wait
+```
+
+For major version upgrades or configuration changes, extract your current values
+and adjust:
+
+```bash
+# Extract current values to a file
+helm -n upbound-system get values spaces > spaces-values.yaml
+
+# Upgrade with modified values
+helm -n upbound-system upgrade spaces \
+  oci://xpkg.upbound.io/spaces-artifacts/spaces \
+  --version "v1.9.0" \
+  -f spaces-values.yaml \
+  --wait
+```
+
+### Downgrade a Space
+
+To roll back a Space from one version to the previous one, use [up space upgrade][up-space-upgrade-1]. Spaces supports downgrading from version `x.N.*` to version `x.N-1.*`.
+
+```bash
+up space upgrade --rollback
+```
+
+You can also downgrade a Space manually using Helm by specifying an earlier version:
+
+```bash
+helm -n upbound-system upgrade spaces \
+  oci://xpkg.upbound.io/spaces-artifacts/spaces \
+  --version "v1.8.0" \
+  --reuse-values \
+  --wait
+```
+
+When downgrading, make sure to:
+1. Check the [release notes][release-notes] for specific downgrade instructions
+2. Verify compatibility between the downgraded Space and any control planes
+3. Back up any critical data before proceeding
+
+### Uninstall a Space
+
+To uninstall a Space from a Kubernetes cluster, use [up space destroy][up-space-destroy]. A destroy operation uninstalls core components and orphans control planes and their associated resources.
+
+```bash
+up space destroy
+```
+
+## Control plane management
+
+You can manage control planes in a Space via the [up CLI][up-cli] or the Spaces-local Kubernetes API. When you install a Space, it defines a new API type, `kind: ControlPlane`, that you can use to create and manage control planes in the Space.
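+
+As a quick sanity check, you can confirm the API type is registered in your Space cluster before creating control planes. A minimal sketch (the exact output columns depend on your Spaces version):
+
+```bash
+# Confirm the Spaces install registered the ControlPlane API type
+kubectl api-resources --api-group=spaces.upbound.io | grep -i controlplane
+```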
+
+### Create a control plane
+
+To create a control plane in a Space using `up`, run the following:
+
+```bash
+up ctp create ctp1
+```
+
+You can also declare a new control plane like the example below and apply it to your Spaces cluster:
+
+```yaml
+apiVersion: spaces.upbound.io/v1beta1
+kind: ControlPlane
+metadata:
+  name: ctp1
+  namespace: default
+spec:
+  writeConnectionSecretToRef:
+    name: kubeconfig-ctp1
+    namespace: default
+```
+
+This manifest:
+
+- Creates a new control plane in the Space called `ctp1`.
+- Publishes the kubeconfig for connecting to the control plane to a secret in the Spaces cluster called `kubeconfig-ctp1`.
+
+### Connect to a control plane
+
+To connect to a control plane in a Space using `up`, run the following:
+
+```bash
+up ctp connect new-control-plane
+```
+
+The command changes your kubeconfig's current context to the control plane you specify. If you want to change your kubeconfig back to a previous context, run:
+
+```bash
+up ctp disconnect
+```
+
+If you configured your control plane to publish connection details, you can also access it this way. Once the control plane is ready, use the secret (containing connection details) to connect to the API server of your control plane. For the `ctp1` example above:
+
+```bash
+kubectl get secret kubeconfig-ctp1 -n default -o jsonpath='{.data.kubeconfig}' | base64 -d > /tmp/ctp1.yaml
+```
+
+Reference the kubeconfig whenever you want to interact directly with the API server of the control plane (versus the Space's API server):
+
+```bash
+kubectl get providers --kubeconfig=/tmp/ctp1.yaml
+```
+
+### Configure a control plane
+
+Spaces offers a built-in feature that allows you to connect a control plane to a Git source. This experience matches how a control plane runs in [Upbound's SaaS environment][upbound-s-saas-environment]. Upbound recommends using the built-in Git integration to drive configuration of your control planes in a Space.
+
+Learn more in the [Spaces Git integration][spaces-git-integration] documentation.
+ +### List control planes + +To list all control planes in a Space using `up`, run the following: + +```bash +up ctp list +``` + +Or you can use Kubernetes-style semantics to list the control plane: + +```bash +kubectl get controlplanes +``` + + +### Delete a control plane + +To delete a control plane in a Space using `up`, run the following: + +```bash +up ctp delete ctp1 +``` + +Or you can use Kubernetes-style semantics to delete the control plane: + +```bash +kubectl delete controlplane ctp1 +``` + + +[up-space-init]: /reference/cli-reference +[quickstart]: / +[aws]: /spaces/howtos/self-hosted/self-hosted-spaces-deployment +[azure]:/spaces/howtos/self-hosted/self-hosted-spaces-deployment +[gcp]:/spaces/howtos/self-hosted/self-hosted-spaces-deployment +[up-space-upgrade]: /reference/cli-reference +[spaces-release-notes]: /reference/release-notes/spaces +[up-space-upgrade-1]: /reference/cli-reference +[release-notes]: /reference/release-notes/spaces +[up-space-destroy]: /reference/cli-reference +[up-cli]: /reference/cli-reference +[upbound-s-saas-environment]: /spaces/howtos/self-hosted/spaces-management +[spaces-git-integration]: /spaces/howtos/self-hosted/gitops diff --git a/spaces_versioned_docs/version-v1.12/howtos/self-hosted/troubleshooting.md b/spaces_versioned_docs/version-v1.12/howtos/self-hosted/troubleshooting.md new file mode 100644 index 000000000..8d1ca6517 --- /dev/null +++ b/spaces_versioned_docs/version-v1.12/howtos/self-hosted/troubleshooting.md @@ -0,0 +1,132 @@ +--- +title: Troubleshooting +sidebar_position: 100 +description: A guide for troubleshooting an issue that occurs in a Space +--- + +Find guidance below on how to find solutions for issues you encounter when deploying and using an Upbound Space. Use the tips below as a supplement to the observability metrics discussed in the [Observability][observability] page. + +## General tips + +Most issues fall into two general categories: + +1. issues with the Spaces management plane +2. issues on a control plane + +If your control plane doesn't reach a `Ready` state, it's indicative of the former. If your control plane is in a created and running state, but resources aren't reconciling, it's indicative of the latter. + +### Spaces component layout + +Run `kubectl get pods -A` against the cluster hosting a Space. You should see a variety of pods across several namespaces. 
It should look something like this: + +```bash +NAMESPACE NAME READY STATUS RESTARTS AGE +cert-manager cert-manager-6d6769565c-mc5df 1/1 Running 0 25m +cert-manager cert-manager-cainjector-744bb89575-nw4fg 1/1 Running 0 25m +cert-manager cert-manager-webhook-759d6dcbf7-ps4mq 1/1 Running 0 25m +ingress-nginx ingress-nginx-controller-7f8ccfccc6-6szlp 1/1 Running 0 25m +kube-system coredns-5d78c9869d-4p477 1/1 Running 0 26m +kube-system coredns-5d78c9869d-pdxt6 1/1 Running 0 26m +kube-system etcd-kind-control-plane 1/1 Running 0 26m +kube-system kindnet-8s7pq 1/1 Running 0 26m +kube-system kube-apiserver-kind-control-plane 1/1 Running 0 26m +kube-system kube-controller-manager-kind-control-plane 1/1 Running 0 26m +kube-system kube-proxy-l68r8 1/1 Running 0 26m +kube-system kube-scheduler-kind-control-plane 1/1 Running 0 26m +local-path-storage local-path-provisioner-6bc4bddd6b-qsdjt 1/1 Running 0 26m +mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system coredns-5dc69d6447-f56rh-x-kube-system-x-vcluster 1/1 Running 0 21m +mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system crossplane-6b6d67bc66-6b8nx-x-upbound-system-x-vcluster 1/1 Running 0 20m +mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system crossplane-rbac-manager-78f6fc7cb4-pjkhc-x-upbound-s-12253c3c4e 1/1 Running 0 20m +mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system kube-state-metrics-7f8f4dcc5b-8p8c4 1/1 Running 0 22m +mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system mxp-gateway-68f546b9c8-xnz5j-x-upbound-system-x-vcluster 1/1 Running 0 20m +mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system mxp-ksm-config-54655667bb-hv9br 1/1 Running 0 22m +mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system mxp-readyz-5f7f97d967-b98bw 1/1 Running 0 22m +mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system otlp-collector-56d7d46c8d-g5sh5-x-upbound-system-x-vcluster 1/1 Running 0 20m +mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system vcluster-67c9fb8959-ppb2m 1/1 Running 0 22m +mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system vcluster-api-6bfbccc49d-ffgpj 1/1 Running 0 22m +mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system vcluster-controller-7cc6855656-8c46b 1/1 Running 0 22m +mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system vcluster-etcd-0 1/1 Running 0 22m +mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system vector-754b494b84-wljw4 1/1 Running 0 22m +mxp-system mxp-charts-chartmuseum-7587f77558-8tltb 1/1 Running 0 23m +upbound-system crossplane-b4dc7b4c9-6hjh5 1/1 Running 0 25m +upbound-system crossplane-contrib-provider-helm-ce18dd03e6e4-7945d8985-4gcwr 1/1 Running 0 24m +upbound-system crossplane-contrib-provider-kubernetes-1f1e32c1957d-577756gs2x4 1/1 Running 0 24m +upbound-system crossplane-rbac-manager-d8cb49cbc-gbvvf 1/1 Running 0 25m +upbound-system spaces-controller-6647677cf9-5zl5q 1/1 Running 0 24m +upbound-system spaces-router-bc78c96d7-kzts2 2/2 Running 0 24m +``` + +What you are seeing is: + +- Pods in the `upbound-system` namespace are components required to run the management plane of the Space. This includes the `spaces-controller`, `spaces-router`, and install of UXP. +- Pods in the `mxp-{GUID}-system` namespace are components that collectively power a control plane. Notable call outs include pod names that look like `vcluster-api-{GUID}` and `vcluster-controller-{GUID}`, which are integral components of a control plane. +- Pods in other notable namespaces, including `cert-manager` and `ingress-nginx`, are prerequisite components that support a Space's successful operation. 
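+
+Before digging into individual components, it can help to surface any unhealthy pods across all these namespaces at once. A minimal first-pass sketch (pods can still be unhealthy while reporting `Running`, so treat this as a starting point):
+
+```bash
+# List pods in any namespace that aren't Running or Succeeded
+kubectl get pods -A --field-selector=status.phase!=Running,status.phase!=Succeeded
+```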
+
+
+
+### Troubleshooting tips for the Spaces management plane
+
+Start by getting the status of all the pods in a Space:
+
+1. Make sure the current context of your kubeconfig points at the Kubernetes cluster hosting your Space.
+2. Get the status of all the pods in the Space:
+```bash
+kubectl get pods -A
+```
+3. Scan the `Status` column to see if any of the pods report a status besides `Running`.
+4. Scan the `Restarts` column to see if any of the pods have restarted.
+5. If you notice a status other than `Running` or see pods that restarted, investigate their events by running:
+```bash
+kubectl describe pod <pod-name> -n <namespace>
+```
+
+Next, inspect the status of objects and releases:
+
+1. Make sure the current context of your kubeconfig points at the Kubernetes cluster hosting your Space.
+2. Inspect the objects in your Space. If any are unhealthy, describe those objects to get the events:
+```bash
+kubectl get objects
+```
+3. Inspect the releases in your Space. If any are unhealthy, describe those releases to get the events:
+```bash
+kubectl get releases
+```
+
+### Troubleshooting tips for control planes in a Space
+
+General troubleshooting in a control plane starts by fetching the events of the control plane:
+
+1. Make sure the current context of your kubeconfig points at the Kubernetes cluster hosting your Space.
+2. Run the following to fetch your control planes:
+```bash
+kubectl get ctp
+```
+3. Describe the control plane by providing its name, found in the preceding instruction:
+```bash
+kubectl describe controlplanes.spaces.upbound.io <control-plane-name>
+```
+
+## Issues
+
+
+### Your control plane is stuck in a 'creating' state
+
+#### Error: unknown field "ports" in io.k8s.api.networking.v1.NetworkPolicySpec
+
+This error comes from a Helm release named `control-plane-host-policies` that the Spaces software attempts to install. The full error is:
+
+_CannotCreateExternalResource failed to install release: unable to build kubernetes objects from release manifest: error validating "": error validating data: ValidationError(NetworkPolicy.spec): unknown field "ports" in io.k8s.api.networking.v1.NetworkPolicySpec_
+
+This error may be caused by running a Space on an earlier version of Kubernetes than is supported (`v1.26 or later`). To resolve this issue, upgrade the host Kubernetes cluster to v1.26 or later.
+
+### Your Spaces install fails
+
+#### Error: You tried to install a Space on a previous Crossplane installation
+
+If you try to install a Space on an existing cluster that previously had Crossplane or UXP on it, you may encounter errors. Due to how the Spaces installer tests for the presence of UXP, it may detect orphaned CRDs that weren't cleaned up by the previous uninstall of Crossplane. You may need to manually [remove old Crossplane CRDs][remove-old-crossplane-crds] for the installer to properly detect the UXP prerequisite.
+
+
+
+
+[observability]: /spaces/howtos/observability
+[remove-old-crossplane-crds]: https://docs.crossplane.io/latest/guides/uninstall-crossplane/
diff --git a/spaces_versioned_docs/version-v1.12/howtos/self-hosted/use-argo.md b/spaces_versioned_docs/version-v1.12/howtos/self-hosted/use-argo.md
new file mode 100644
index 000000000..d58f7db44
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.12/howtos/self-hosted/use-argo.md
@@ -0,0 +1,228 @@
+---
+title: Use ArgoCD Plugin
+sidebar_position: 15
+description: A guide for integrating Argo with control planes in a Space.
+aliases:
+  - /all-spaces/self-hosted-spaces/use-argo
+  - /deploy/disconnected-spaces/use-argo-flux
+  - /all-spaces/self-hosted-spaces/use-argo-flux
+  - /connect/use-argo
+---
+
+
+:::info API Version Information
+This guide covers the ArgoCD Plugin, a preview feature available in all supported versions (v1.9-v1.14+) for self-hosted deployments.
+
+For details on GitOps patterns and related features across versions, see [Automation and GitOps Overview](/spaces/howtos/automation-and-gitops/overview/).
+:::
+
+:::important
+This feature is in preview and is off by default. To enable it, set `features.alpha.argocdPlugin.enabled=true` when installing Spaces:
+
+```bash
+up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
+  ...
+  --set "features.alpha.argocdPlugin.enabled=true"
+```
+:::
+
+Spaces provides an optional plugin to assist with integrating a control plane in a Space with Argo CD. You must enable the plugin for the entire Space at Spaces install or upgrade time. The plugin's job is to propagate the connection details of each control plane in a Space to Argo CD. By default, Upbound stores these connection details in a Kubernetes secret named after the control plane. To run Argo CD across multiple namespaces, Upbound recommends enabling the `features.alpha.argocdPlugin.useUIDFormatForCTPSecrets` flag to use a UID-based format for secret names to avoid conflicts.
+
+:::tip
+For general guidance on integrating Upbound with GitOps flows, see [GitOps with Control Planes][gitops-with-control-planes].
+:::
+
+## On cluster Argo CD
+
+If you are running Argo CD on the same cluster as the Space, run the following to enable the plugin:
+
+
+
+
+
+
+```bash {hl_lines="3-4"}
+up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
+  --set "account=${UPBOUND_ACCOUNT}" \
+  --set "features.alpha.argocdPlugin.enabled=true" \
+  --set "features.alpha.argocdPlugin.useUIDFormatForCTPSecrets=true" \
+  --set "features.alpha.argocdPlugin.target.secretNamespace=argocd"
+```
+
+
+
+
+
+```bash {hl_lines="7-8"}
+helm -n upbound-system upgrade --install spaces \
+  oci://xpkg.upbound.io/spaces-artifacts/spaces \
+  --version "${SPACES_VERSION}" \
+  --set "ingress.host=${SPACES_ROUTER_HOST}" \
+  --set "account=${UPBOUND_ACCOUNT}" \
+  --set "features.alpha.argocdPlugin.enabled=true" \
+  --set "features.alpha.argocdPlugin.useUIDFormatForCTPSecrets=true" \
+  --set "features.alpha.argocdPlugin.target.secretNamespace=argocd" \
+  --wait
+```
+
+
+
+
+
+
+The important flags are:
+
+- `features.alpha.argocdPlugin.enabled=true`
+- `features.alpha.argocdPlugin.useUIDFormatForCTPSecrets=true`
+- `features.alpha.argocdPlugin.target.secretNamespace=argocd`
+
+The first flag enables the feature, the second uses UID-based names for the propagated connection secrets to avoid conflicts, and the third indicates the namespace on the cluster where you installed Argo CD.
+
+Be sure to [configure Argo][configure-argo] after it's installed.
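+
+Once the plugin is running, you can verify that connection details are reaching Argo CD by listing cluster secrets in the `argocd` namespace. This sketch assumes the propagated secrets carry Argo CD's standard cluster-secret label:
+
+```bash
+# Argo CD discovers clusters through secrets with this label
+kubectl get secrets -n argocd -l argocd.argoproj.io/secret-type=cluster
+```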
+
+## External cluster Argo CD
+
+If you are running Argo CD on an external cluster from where you installed your Space, you need to provide some extra flags:
+
+
+
+
+
+
+```bash {hl_lines="3-7"}
+up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
+  --set "account=${UPBOUND_ACCOUNT}" \
+  --set "features.alpha.argocdPlugin.enabled=true" \
+  --set "features.alpha.argocdPlugin.useUIDFormatForCTPSecrets=true" \
+  --set "features.alpha.argocdPlugin.target.secretNamespace=argocd" \
+  --set "features.alpha.argocdPlugin.target.externalCluster.enabled=true" \
+  --set "features.alpha.argocdPlugin.target.externalCluster.secret.name=my-argo-cluster" \
+  --set "features.alpha.argocdPlugin.target.externalCluster.secret.key=kubeconfig"
+```
+
+
+
+
+
+```bash {hl_lines="7-11"}
+helm -n upbound-system upgrade --install spaces \
+  oci://xpkg.upbound.io/spaces-artifacts/spaces \
+  --version "${SPACES_VERSION}" \
+  --set "ingress.host=${SPACES_ROUTER_HOST}" \
+  --set "account=${UPBOUND_ACCOUNT}" \
+  --set "features.alpha.argocdPlugin.enabled=true" \
+  --set "features.alpha.argocdPlugin.useUIDFormatForCTPSecrets=true" \
+  --set "features.alpha.argocdPlugin.target.secretNamespace=argocd" \
+  --set "features.alpha.argocdPlugin.target.externalCluster.enabled=true" \
+  --set "features.alpha.argocdPlugin.target.externalCluster.secret.name=my-argo-cluster" \
+  --set "features.alpha.argocdPlugin.target.externalCluster.secret.key=kubeconfig" \
+  --wait
+```
+
+The extra flags are:
+
+- `features.alpha.argocdPlugin.target.externalCluster.enabled=true`
+- `features.alpha.argocdPlugin.target.externalCluster.secret.name=my-argo-cluster`
+- `features.alpha.argocdPlugin.target.externalCluster.secret.key=kubeconfig`
+
+These flags tell the plugin (running in Spaces) where your Argo CD instance is. After you've done this at install time, you also need to create a `Secret` on the Spaces cluster. This secret must contain a kubeconfig pointing to your Argo CD instance. The secret needs to be in the same namespace as the `spaces-controller`, which is `upbound-system`.
+
+Once you enable the plugin and configure it, the plugin automatically propagates connection details for your control planes to your Argo CD instance. You can then target the control plane and use Argo to sync Crossplane-related objects to it.
+
+Be sure to [configure Argo][configure-argo-1] after it's installed.
+
+## Configure Argo
+
+Argo's default configuration causes it to try to query for resource kinds that don't exist in control planes. You should configure Argo's [general configmap][general-configmap] to include the resource group/kinds that make sense in the context of control planes. For example, the concept of `nodes` isn't exposed in control planes.
+ +To configure Argo CD, connect to the cluster where you've installed it and edit the configmap: + +```bash +kubectl edit configmap argocd-cm -n argocd +``` + +Adjust the resource inclusions and exclusions under the `data` field of the configmap: + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: argocd-cm + namespace: argocd +data: + resource.exclusions: | + - apiGroups: + - "*" + kinds: + - "*" + clusters: + - "*" + resource.inclusions: | + - apiGroups: + - "*" + kinds: + - Provider + - Configuration + clusters: + - "*" +``` + +The preceding configuration causes Argo to exclude syncing **all** resource group/kinds--except Crossplane `providers` and `configurations`--for **all** control planes. You're encouraged to adjust the `resource.inclusions` to include the types that make sense for your control plane, such as an `XRD` you've built with Crossplane. You're also encouraged to customize the `clusters` pattern to selectively apply these exclusions/inclusions to control planes (for example, `control-plane-prod-*`). + +## Control plane connection secrets + +To deploy control planes through Argo CD, you need to configure the `writeConnectionSecretToRef` field in your control plane spec. This field specifies where to store the control plane's `kubeconfig` and makes connection details available to Argo CD. + +### Basic Configuration + +In your control plane manifest, include the `writeConnectionSecretToRef` field: + +```yaml +apiVersion: spaces.upbound.io/v1beta1 +kind: ControlPlane +metadata: + name: my-control-plane + namespace: my-control-plane-group +spec: + writeConnectionSecretToRef: + name: kubeconfig-my-control-plane + namespace: my-control-plane-group + # ... other control plane configuration +``` + +### Parameters + +The `writeConnectionSecretToRef` field requires two parameters: + +- `name`: A unique name for the secret containing the kubeconfig (`kubeconfig-my-control-plane`) +- `namespace`: The Kubernetes namespace where you store the secret, which must match the metadata namespace. The system copies it into the `argocd` namespace when you set the `features.alpha.argocdPlugin.target.secretNamespace=argocd` configuration parameter. + +Control plane labels automatically propagate to the connection secret, which allows you to use label selectors in Argo CD for automated discovery and management. + +This configuration enables Argo CD to automatically discover and manage resources on your control planes. 
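+
+To illustrate how the pieces fit together, the following is a hypothetical Argo CD `Application` that syncs Crossplane configuration to a control plane. The repository URL and path are placeholders, and the destination assumes the plugin has registered the control plane as a cluster named `my-control-plane`:
+
+```yaml
+apiVersion: argoproj.io/v1alpha1
+kind: Application
+metadata:
+  name: my-control-plane-config
+  namespace: argocd
+spec:
+  project: default
+  source:
+    # Placeholder repository holding Crossplane configurations
+    repoURL: https://github.com/example/control-plane-config.git
+    targetRevision: main
+    path: configurations
+  destination:
+    # Cluster name propagated by the Spaces Argo CD plugin
+    name: my-control-plane
+  syncPolicy:
+    automated:
+      prune: true
+      selfHeal: true
+```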
+ + +[gitops-with-control-planes]: /spaces/howtos/cloud-spaces/gitops +[configure-argo]: #configure-argo +[configure-argo-1]: #configure-argo +[general-configmap]: https://argo-cd.readthedocs.io/en/stable/operator-manual/argocd-cm-yaml/ diff --git a/spaces_versioned_docs/version-v1.12/howtos/self-hosted/workload-id/_category_.json b/spaces_versioned_docs/version-v1.12/howtos/self-hosted/workload-id/_category_.json new file mode 100644 index 000000000..c5ecc93f6 --- /dev/null +++ b/spaces_versioned_docs/version-v1.12/howtos/self-hosted/workload-id/_category_.json @@ -0,0 +1,11 @@ +{ + "label": "Workload Identity Configuration", + "position": 2, + "collapsed": true, + "customProps": { + "plan": "business" + } + +} + + diff --git a/spaces_versioned_docs/version-v1.12/howtos/self-hosted/workload-id/backup-restore-config.md b/spaces_versioned_docs/version-v1.12/howtos/self-hosted/workload-id/backup-restore-config.md new file mode 100644 index 000000000..935ca69ec --- /dev/null +++ b/spaces_versioned_docs/version-v1.12/howtos/self-hosted/workload-id/backup-restore-config.md @@ -0,0 +1,384 @@ +--- +title: Backup and Restore Workload ID +weight: 1 +description: Configure workload identity for Spaces Backup and Restore +--- +import GlobalLanguageSelector, { CodeBlock } from '@site/src/components/GlobalLanguageSelector'; + + + + + + + +Workload-identity authentication lets you use access policies to grant temporary +AWS credentials to your Kubernetes pod with a service account. Assigning IAM roles and service accounts allows the pod to assume the IAM role dynamically and much more securely than static credentials. + +This guide walks you through creating an IAM trust role policy and applying it +to your EKS cluster to handle backup and restore storage. + + + + + +Workload-identity authentication lets you use access policies to grant your +self-hosted Space cluster access to your cloud providers. Workload identity +authentication grants temporary Azure credentials to your Kubernetes pod based on +a service account. Assigning managed identities and service accounts allows the pod to +authenticate with Azure resources dynamically and much more securely than static credentials. + +This guide walks you through creating a managed identity and federated credential for your AKS +cluster to handle backup and restore storage. + + + + + +Workload-identity authentication lets you use access policies to grant your +self-hosted Space cluster access to your cloud providers. Workload identity +authentication grants temporary GCP credentials to your Kubernetes pod based on +a service account. Assigning IAM roles and service accounts allows the pod to +access cloud resources dynamically and much more securely than static credentials. + +This guide walks you through configuring workload identity for your GKE +cluster to handle backup and restore storage. + + + +## Prerequisites + + +To set up a workload-identity, you'll need: + + +- A self-hosted Space cluster +- Administrator access in your cloud provider +- Helm and `kubectl` + +## About the backup and restore component + +The `mxp-controller` component handles backup and restore workloads. It needs to +access your cloud storage to store and retrieve backups. By default, this +component runs in each control plane's host namespace. + +## Configuration + + + +Upbound supports workload-identity configurations in AWS with IAM Roles for +Service Accounts and EKS pod identity association. 
+
+#### IAM Roles for Service Accounts (IRSA)
+
+With IRSA, you can associate a Kubernetes service account in an EKS cluster with
+an AWS IAM role. Upbound authenticates workloads with that service account as
+the IAM role using temporary credentials instead of static role credentials.
+IRSA relies on AWS `AssumeRoleWithWebIdentity` `STS` to exchange OIDC ID tokens with
+the IAM role's temporary credentials. IRSA uses the `eks.amazonaws.com/role-arn`
+annotation to link the service account and the IAM role.
+
+First, create an IAM role with appropriate permissions to access your S3 bucket:
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Action": [
+        "s3:GetObject",
+        "s3:PutObject",
+        "s3:ListBucket",
+        "s3:DeleteObject"
+      ],
+      "Resource": [
+        "arn:aws:s3:::${YOUR_BACKUP_BUCKET}",
+        "arn:aws:s3:::${YOUR_BACKUP_BUCKET}/*"
+      ]
+    }
+  ]
+}
+```
+
+Next, ensure your EKS cluster has an OIDC identity provider:
+
+```shell
+eksctl utils associate-iam-oidc-provider --cluster ${YOUR_CLUSTER_NAME} --approve
+```
+
+Configure the IAM role trust policy with the namespace for each
+provisioned control plane.
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Principal": {
+        "Federated": "arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:oidc-provider/${YOUR_OIDC_PROVIDER}"
+      },
+      "Action": "sts:AssumeRoleWithWebIdentity",
+      "Condition": {
+        "StringEquals": {
+          "${YOUR_OIDC_PROVIDER}:aud": "sts.amazonaws.com",
+          "${YOUR_OIDC_PROVIDER}:sub": "system:serviceaccount:${YOUR_NAMESPACE}:mxp-controller"
+        }
+      }
+    }
+  ]
+}
+```
+
+In your control plane, pass the `--set` flag with the Spaces Helm chart
+parameters for the Backup and Restore component:
+
+```shell
+--set controlPlanes.mxpController.serviceAccount.annotations."eks\.amazonaws\.com/role-arn"="${SPACES_BR_IAM_ROLE_ARN}"
+```
+
+This command allows the backup and restore component to authenticate with your
+dedicated IAM role in your EKS cluster environment.
+
+#### EKS pod identities
+
+Upbound also supports EKS Pod Identity configuration. EKS Pod Identities allow
+you to create a pod identity association with your Kubernetes namespace, a
+service account, and an IAM role, letting the EKS control plane
+automatically handle the credential exchange.
+
+First, create an IAM role with appropriate permissions to access your S3 bucket:
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Action": [
+        "s3:GetObject",
+        "s3:PutObject",
+        "s3:ListBucket",
+        "s3:DeleteObject"
+      ],
+      "Resource": [
+        "arn:aws:s3:::${YOUR_BACKUP_BUCKET}",
+        "arn:aws:s3:::${YOUR_BACKUP_BUCKET}/*"
+      ]
+    }
+  ]
+}
+```
+
+When you install or upgrade your Space with Helm, add the backup/restore values:
+
+```shell
+helm upgrade spaces spaces-helm-chart \
+  --set "billing.enabled=true" \
+  --set "backup.enabled=true" \
+  --set "backup.storage.provider=aws" \
+  --set "backup.storage.aws.region=${YOUR_AWS_REGION}" \
+  --set "backup.storage.aws.bucket=${YOUR_BACKUP_BUCKET}"
+```
+
+After Upbound provisions your control plane, create a Pod Identity Association
+with the `aws` CLI:
+
+```shell
+aws eks create-pod-identity-association \
+  --cluster-name ${YOUR_CLUSTER_NAME} \
+  --namespace ${YOUR_CONTROL_PLANE_NAMESPACE} \
+  --service-account mxp-controller \
+  --role-arn arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:role/backup-restore-role
+```
+
+
+
+
+
+Upbound supports workload-identity configurations in Azure with Azure's built-in
+workload identity feature.
+
+#### Prepare your cluster
+
+First, enable the OIDC issuer and workload identity in your AKS cluster:
+
+```shell
+az aks update --resource-group ${YOUR_RESOURCE_GROUP} --name ${YOUR_AKS_CLUSTER_NAME} --enable-oidc-issuer --enable-workload-identity
+```
+
+Next, find and store the OIDC issuer URL as an environment variable:
+
+```shell
+export AKS_OIDC_ISSUER="$(az aks show --name ${YOUR_AKS_CLUSTER_NAME} --resource-group ${YOUR_RESOURCE_GROUP} --query "oidcIssuerProfile.issuerUrl" --output tsv)"
+```
+
+#### Create a User-Assigned Managed Identity
+
+Create a new managed identity to associate with the backup and restore component:
+
+```shell
+az identity create --name backup-restore-identity --resource-group ${YOUR_RESOURCE_GROUP} --location ${YOUR_LOCATION}
+```
+
+Retrieve the client ID and store it as an environment variable:
+
+```shell
+export USER_ASSIGNED_CLIENT_ID="$(az identity show --name backup-restore-identity --resource-group ${YOUR_RESOURCE_GROUP} --query clientId -otsv)"
+```
+
+Grant the managed identity you created access to your Azure Storage account:
+
+```shell
+az role assignment create \
+  --role "Storage Blob Data Contributor" \
+  --assignee ${USER_ASSIGNED_CLIENT_ID} \
+  --scope /subscriptions/${YOUR_SUBSCRIPTION_ID}/resourceGroups/${YOUR_RESOURCE_GROUP}/providers/Microsoft.Storage/storageAccounts/${YOUR_STORAGE_ACCOUNT}
+```
+
+#### Apply the managed identity role
+
+In your control plane, pass the `--set` flag with the Spaces Helm chart
+parameters for the backup and restore component:
+
+```shell
+--set controlPlanes.mxpController.serviceAccount.annotations."azure\.workload\.identity/client-id"="${YOUR_USER_ASSIGNED_CLIENT_ID}"
+--set controlPlanes.mxpController.pod.customLabels."azure\.workload\.identity/use"="true"
+```
+
+#### Create a Federated Identity credential
+
+```shell
+az identity federated-credential create \
+  --name backup-restore-federated-identity \
+  --identity-name backup-restore-identity \
+  --resource-group ${YOUR_RESOURCE_GROUP} \
+  --issuer ${AKS_OIDC_ISSUER} \
+  --subject system:serviceaccount:${YOUR_CONTROL_PLANE_NAMESPACE}:mxp-controller
+```
+
+
+
+
+
+Upbound supports workload-identity configurations in GCP with IAM principal
+identifiers and service account impersonation.
+ +#### Prepare your cluster + +First, enable Workload Identity Federation on your GKE cluster: + +```shell +gcloud container clusters update ${YOUR_CLUSTER_NAME} \ + --workload-pool=${YOUR_PROJECT_ID}.svc.id.goog \ + --region=${YOUR_REGION} +``` + +#### Create a Google Service Account + +Create a service account for the backup and restore component: + +```shell +gcloud iam service-accounts create backup-restore-sa \ + --display-name "Backup Restore Service Account" \ + --project ${YOUR_PROJECT_ID} +``` + +Grant the service account access to your Google Cloud Storage bucket: + +```shell +gcloud projects add-iam-policy-binding ${YOUR_PROJECT_ID} \ + --member "serviceAccount:backup-restore-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com" \ + --role "roles/storage.objectAdmin" +``` + +#### Configure Workload Identity + +Create an IAM binding to grant the Kubernetes service account access to the Google service account: + +```shell +gcloud iam service-accounts add-iam-policy-binding \ + backup-restore-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com \ + --role roles/iam.workloadIdentityUser \ + --member "serviceAccount:${YOUR_PROJECT_ID}.svc.id.goog[${YOUR_CONTROL_PLANE_NAMESPACE}/mxp-controller]" +``` + +#### Apply the service account configuration + +In your control plane, pass the `--set` flag with the Spaces Helm chart +parameters for the backup and restore component: + +```shell +--set controlPlanes.mxpController.serviceAccount.annotations."iam\.gke\.io/gcp-service-account"="backup-restore-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com" +``` + + + +## Verify your configuration + +After you apply the configuration use `kubectl` to verify the service account +has the correct annotation: + +```shell +kubectl get serviceaccount mxp-controller -n ${YOUR_CONTROL_PLANE_NAMESPACE} -o yaml +``` + +Verify the `mxp-controller` pod is running: + +```shell +kubectl get pods -n ${YOUR_CONTROL_PLANE_NAMESPACE} | grep mxp-controller +``` + +## Restart workload + +You must manually restart a workload's pod when you add the workload identity annotations to the running pod's service account. + + + +This restart enables the EKS pod identity webhook to inject the necessary +environment for using IRSA. + + + + + +This restart enables the workload identity webhook to inject the necessary +environment for using Azure workload identity. + + + + + +This restart enables the workload identity webhook to inject the necessary +environment for using GCP workload identity. + + + +```shell +kubectl rollout restart deployment mxp-controller -n ${YOUR_CONTROL_PLANE_NAMESPACE} +``` + +## Use cases + + +Configuring backup and restore with workload identity eliminates the need for +static credentials in your cluster and the overhead of credential rotation. +These benefits are helpful in: + +* Disaster recovery scenarios +* Control plane migration +* Compliance requirements +* Rollbacks after unsuccessful upgrades + +## Next steps + +Now that you have a workload identity configured for the backup and restore +component, visit the [Backup Configuration][backup-restore-guide] documentation. 
+
+Other workload identity guides are:
+* [Billing][billing]
+* [Shared Secrets][secrets]
+
+[backup-restore-guide]: /spaces/howtos/backup-and-restore
+[billing]: /spaces/howtos/self-hosted/workload-id/billing-config
+[secrets]: /spaces/howtos/self-hosted/workload-id/eso-config
diff --git a/spaces_versioned_docs/version-v1.12/howtos/self-hosted/workload-id/billing-config.md b/spaces_versioned_docs/version-v1.12/howtos/self-hosted/workload-id/billing-config.md
new file mode 100644
index 000000000..323a6122f
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.12/howtos/self-hosted/workload-id/billing-config.md
@@ -0,0 +1,454 @@
+---
+title: Billing Workload ID
+weight: 1
+description: Configure workload identity for Spaces Billing
+---
+import GlobalLanguageSelector, { CodeBlock } from '@site/src/components/GlobalLanguageSelector';
+
+
+
+
+
+
+Workload-identity authentication lets you use access policies to grant your
+self-hosted Space cluster access to your cloud providers. Workload identity
+authentication grants temporary AWS credentials to your Kubernetes pod based on
+a service account. Assigning IAM roles and service accounts allows the pod to
+assume the IAM role dynamically and much more securely than static credentials.
+
+This guide walks you through creating an IAM trust role policy and applying it to your EKS
+cluster for billing in your Space cluster.
+
+
+
+
+
+Workload-identity authentication lets you use access policies to grant your
+self-hosted Space cluster access to your cloud providers. Workload identity
+authentication grants temporary Azure credentials to your Kubernetes pod based on
+a service account. Assigning managed identities and service accounts allows the pod to
+authenticate with Azure resources dynamically and much more securely than static credentials.
+
+This guide walks you through creating a managed identity and federated credential for your AKS
+cluster for billing in your Space cluster.
+
+
+
+
+
+Workload-identity authentication lets you use access policies to grant your
+self-hosted Space cluster access to your cloud providers. Workload identity
+authentication grants temporary GCP credentials to your Kubernetes pod based on
+a service account. Assigning IAM roles and service accounts allows the pod to
+access cloud resources dynamically and much more securely than static
+credentials.
+
+This guide walks you through configuring workload identity for your GKE
+cluster's billing component.
+
+
+
+## Prerequisites
+
+
+To set up workload identity, you'll need:
+
+
+- A self-hosted Space cluster
+- Administrator access in your cloud provider
+- Helm and `kubectl`
+
+## About the billing component
+
+The `vector.dev` component handles billing metrics collection in Spaces. It
+stores account data in your cloud storage. By default, this component runs in
+each control plane's host namespace.
+
+## Configuration
+
+
+
+Upbound supports workload-identity configurations in AWS with IAM Roles for
+Service Accounts and EKS pod identity association.
+
+#### IAM Roles for Service Accounts (IRSA)
+
+With IRSA, you can associate a Kubernetes service account in an EKS cluster with
+an AWS IAM role. Upbound authenticates workloads with that service account as
+the IAM role using temporary credentials instead of static role credentials.
+IRSA relies on AWS `AssumeRoleWithWebIdentity` `STS` to exchange OIDC ID tokens with
+the IAM role's temporary credentials. IRSA uses the `eks.amazonaws.com/role-arn`
+annotation to link the service account and the IAM role.
+
+**Create an IAM role and trust policy**
+
+First, create an IAM role with appropriate permissions to access your S3 bucket:
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Action": [
+        "s3:GetObject",
+        "s3:PutObject",
+        "s3:ListBucket",
+        "s3:DeleteObject"
+      ],
+      "Resource": [
+        "arn:aws:s3:::${YOUR_BILLING_BUCKET}",
+        "arn:aws:s3:::${YOUR_BILLING_BUCKET}/*"
+      ]
+    }
+  ]
+}
+```
+
+You must configure the IAM role trust policy with an exact match for each
+provisioned control plane. An example of a trust policy for a single control
+plane is below:
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Principal": {
+        "Federated": "arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:oidc-provider/${YOUR_OIDC_PROVIDER}"
+      },
+      "Action": "sts:AssumeRoleWithWebIdentity",
+      "Condition": {
+        "StringEquals": {
+          "${YOUR_OIDC_PROVIDER}:aud": "sts.amazonaws.com",
+          "${YOUR_OIDC_PROVIDER}:sub": "system:serviceaccount:${YOUR_NAMESPACE}:vector"
+        }
+      }
+    }
+  ]
+}
+```
+
+**Configure the EKS OIDC provider**
+
+Next, ensure your EKS cluster has an OIDC identity provider:
+
+```shell
+eksctl utils associate-iam-oidc-provider --cluster ${YOUR_CLUSTER_NAME} --approve
+```
+
+**Apply the IAM role**
+
+In your control plane, pass the `--set` flag with the Spaces Helm chart
+parameters for the Billing component:
+
+```shell
+--set "billing.enabled=true"
+--set "billing.storage.provider=aws"
+--set "billing.storage.aws.region=${YOUR_AWS_REGION}"
+--set "billing.storage.aws.bucket=${YOUR_BILLING_BUCKET}"
+--set "billing.storage.secretRef.name="
+--set controlPlanes.vector.serviceAccount.customAnnotations."eks\.amazonaws\.com/role-arn"="arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:role/${YOUR_BILLING_ROLE_NAME}"
+```
+
+:::important
+You **must** set the `billing.storage.secretRef.name` to an empty string to
+enable workload identity for the billing component.
+:::
+
+#### EKS pod identities
+
+Upbound also supports EKS Pod Identity configuration. EKS Pod Identities allow
+you to create a pod identity association with your Kubernetes namespace, a
+service account, and an IAM role, letting the EKS control plane
+automatically handle the credential exchange.
+
+**Create an IAM role**
+
+First, create an IAM role with appropriate permissions to access your S3 bucket:
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Action": [
+        "s3:GetObject",
+        "s3:PutObject",
+        "s3:ListBucket"
+      ],
+      "Resource": [
+        "arn:aws:s3:::${YOUR_BILLING_BUCKET}",
+        "arn:aws:s3:::${YOUR_BILLING_BUCKET}/*"
+      ]
+    }
+  ]
+}
+```
+
+**Configure your Space with Helm**
+
+When you install or upgrade your Space with Helm, add the billing values:
+
+```shell
+helm upgrade spaces spaces-helm-chart \
+  --set "billing.enabled=true" \
+  --set "billing.storage.provider=aws" \
+  --set "billing.storage.aws.region=${YOUR_AWS_REGION}" \
+  --set "billing.storage.aws.bucket=${YOUR_BILLING_BUCKET}" \
+  --set "billing.storage.secretRef.name="
+```
+
+**Create a Pod Identity Association**
+
+After Upbound provisions your control plane, create a Pod Identity Association
+with the `aws` CLI:
+
+```shell
+aws eks create-pod-identity-association \
+  --cluster-name ${YOUR_CLUSTER_NAME} \
+  --namespace ${YOUR_CONTROL_PLANE_NAMESPACE} \
+  --service-account vector \
+  --role-arn arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:role/${YOUR_BILLING_ROLE_NAME}
+```
+
+
+
+
+
+Upbound supports workload-identity configurations in Azure with Azure's built-in
+workload identity feature.
+
+First, enable the OIDC issuer and workload identity in your AKS cluster:
+
+```shell
+az aks update --resource-group ${YOUR_RESOURCE_GROUP} --name ${YOUR_AKS_CLUSTER_NAME} --enable-oidc-issuer --enable-workload-identity
+```
+
+Next, find and store the OIDC issuer URL as an environment variable:
+
+```shell
+export AKS_OIDC_ISSUER="$(az aks show --name ${YOUR_AKS_CLUSTER_NAME} --resource-group ${YOUR_RESOURCE_GROUP} --query "oidcIssuerProfile.issuerUrl" --output tsv)"
+```
+
+Create a new managed identity to associate with the billing component:
+
+```shell
+az identity create --name billing-identity --resource-group ${YOUR_RESOURCE_GROUP} --location ${YOUR_LOCATION}
+```
+
+Retrieve the client ID and store it as an environment variable:
+
+```shell
+export USER_ASSIGNED_CLIENT_ID="$(az identity show --name billing-identity --resource-group ${YOUR_RESOURCE_GROUP} --query clientId -otsv)"
+```
+
+Grant the managed identity you created access to your Azure Storage account:
+
+```shell
+az role assignment create --role "Storage Blob Data Contributor" --assignee $USER_ASSIGNED_CLIENT_ID --scope /subscriptions/${YOUR_SUBSCRIPTION_ID}/resourceGroups/${YOUR_RESOURCE_GROUP}/providers/Microsoft.Storage/storageAccounts/${YOUR_STORAGE_ACCOUNT}
+```
+
+In your control plane, pass the `--set` flag with the Spaces Helm chart
+parameters for the billing component:
+
+```shell
+--set "billing.enabled=true"
+--set "billing.storage.provider=azure"
+--set "billing.storage.azure.storageAccount=${SPACES_BILLING_STORAGE_ACCOUNT}"
+--set "billing.storage.azure.container=${SPACES_BILLING_STORAGE_CONTAINER}"
+--set "billing.storage.secretRef.name="
+--set controlPlanes.vector.serviceAccount.customAnnotations."azure\.workload\.identity/client-id"="${SPACES_BILLING_APP_ID}"
+--set controlPlanes.vector.pod.customLabels."azure\.workload\.identity/use"="true"
+```
+
+Create a federated credential to establish trust between the managed identity
+and your AKS OIDC provider:
+
+```shell
+az identity federated-credential create \
+  --name billing-federated-identity \
+  --identity-name billing-identity \
+  --resource-group ${YOUR_RESOURCE_GROUP} \
+  --issuer ${AKS_OIDC_ISSUER} \
+  --subject system:serviceaccount:${YOUR_CONTROL_PLANE_NAMESPACE}:vector
+```
+
+
+
+
+
+Upbound supports workload-identity configurations in GCP with IAM principal
+identifiers or service account impersonation.
+
+#### IAM principal identifiers
+
+IAM principal identifiers allow you to grant permissions directly to
+Kubernetes service accounts without additional annotation. Upbound recommends
+this approach for ease-of-use and flexibility.
+
+First, enable Workload Identity Federation on your GKE cluster:
+
+```shell
+gcloud container clusters update ${YOUR_CLUSTER_NAME} \
+  --workload-pool=${YOUR_PROJECT_ID}.svc.id.goog \
+  --region=${YOUR_REGION}
+```
+
+Next, configure your Spaces installation with the Spaces Helm chart parameters:
+
+```shell
+--set "billing.enabled=true"
+--set "billing.storage.provider=gcp"
+--set "billing.storage.gcp.bucket=${YOUR_BILLING_BUCKET}"
+--set "billing.storage.secretRef.name="
+```
+
+:::important
+You **must** set the `billing.storage.secretRef.name` to an empty string to
+enable workload identity for the billing component.
+::: + +Grant the necessary permissions to your Kubernetes service account: + +```shell +gcloud projects add-iam-policy-binding ${YOUR_PROJECT_ID} \ + --member="principalSet://iam.googleapis.com/projects/${YOUR_PROJECT_NUMBER}/locations/global/workloadIdentityPools/${YOUR_PROJECT_ID}.svc.id.goog/attribute.kubernetes_namespace/${YOUR_CONTROL_PLANE_NAMESPACE}/attribute.kubernetes_service_account/vector" \ + --role="roles/storage.objectAdmin" +``` + +Enable uniform bucket-level access on your storage bucket: + +```shell +gcloud storage buckets update gs://${YOUR_BILLING_BUCKET} --uniform-bucket-level-access +``` + +#### Service account impersonation + +Service account impersonation allows you to link a Kubernetes service account to +a GCP service account. The Kubernetes service account assumes the permissions of +the GCP service account you specify. + +Enable workload id federation on your GKE cluster: + +```shell +gcloud container clusters update ${YOUR_CLUSTER_NAME} \ + --workload-pool=${YOUR_PROJECT_ID}.svc.id.goog \ + --region=${YOUR_REGION} +``` + +Next, create a dedicated service account for your billing operations: + +```shell +gcloud iam service-accounts create billing-sa \ + --project=${YOUR_PROJECT_ID} +``` + +Grant storage permissions to the service account you created: + +```shell +gcloud projects add-iam-policy-binding ${YOUR_PROJECT_ID} \ + --member="serviceAccount:billing-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com" \ + --role="roles/storage.objectAdmin" +``` + +Link the Kubernetes service account to the GCP service account: + +```shell +gcloud iam service-accounts add-iam-policy-binding \ + billing-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com \ + --role="roles/iam.workloadIdentityUser" \ + --member="serviceAccount:${YOUR_PROJECT_ID}.svc.id.goog[${YOUR_CONTROL_PLANE_NAMESPACE}/vector]" +``` + +In your control plane, pass the `--set` flag with the Spaces Helm chart +parameters for the billing component: + +```shell +--set "billing.enabled=true" +--set "billing.storage.provider=gcp" +--set "billing.storage.gcp.bucket=${YOUR_BILLING_BUCKET}" +--set "billing.storage.secretRef.name=" +--set controlPlanes.vector.serviceAccount.customAnnotations."iam\.gke\.io/gcp-service-account"="billing-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com" +``` + + + +## Verify your configuration + +After you apply the configuration use `kubectl` to verify the service account +has the correct annotation: + +```shell +kubectl get serviceaccount vector -n ${YOUR_CONTROL_PLANE_NAMESPACE} -o yaml +``` + +Verify the `vector` pod is running: + +```shell +kubectl get pods -n ${YOUR_CONTROL_PLANE_NAMESPACE} | grep vector +``` + +## Restart workload + + + +You must manually restart a workload's pod when you add the +`eks.amazonaws.com/role-arn key` annotation to the running pod's service +account. + +This restart enables the EKS pod identity webhook to inject the necessary +environment for using IRSA. + + + + + +You must manually restart a workload's pod when you add the workload identity annotations to the running pod's service account. + +This restart enables the workload identity webhook to inject the necessary +environment for using Azure workload identity. + + + + + +GCP workload identity doesn't require pod restarts after configuration changes. 
+If you do need to restart the workload, use the `kubectl` command to force the
+component restart:
+
+
+
+```shell
+kubectl rollout restart deployment vector
+```
+
+
+## Use cases
+
+
+Using workload identity authentication for billing eliminates the need for static
+credentials in your cluster as well as the overhead of credential rotation.
+These benefits are helpful in:
+
+* Resource usage tracking across teams/projects
+* Cost allocation for multi-tenant environments
+* Financial auditing requirements
+* Capacity billing and resource optimization
+* Automated billing workflows
+
+## Next steps
+
+Now that you have workload identity configured for the billing component, visit
+the [Billing guide][billing-guide] for more information.
+
+Other workload identity guides are:
+* [Backup and restore][backuprestore]
+* [Shared Secrets][secrets]
+
+[billing-guide]: /spaces/howtos/self-hosted/billing
+[backuprestore]: /spaces/howtos/self-hosted/workload-id/backup-restore-config
+[secrets]: /spaces/howtos/self-hosted/workload-id/eso-config
diff --git a/spaces_versioned_docs/version-v1.12/howtos/self-hosted/workload-id/eso-config.md b/spaces_versioned_docs/version-v1.12/howtos/self-hosted/workload-id/eso-config.md
new file mode 100644
index 000000000..c1418c171
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.12/howtos/self-hosted/workload-id/eso-config.md
@@ -0,0 +1,503 @@
+---
+title: Shared Secrets Workload ID
+weight: 1
+description: Configure workload identity for Spaces Shared Secrets
+---
+import GlobalLanguageSelector, { CodeBlock } from '@site/src/components/GlobalLanguageSelector';
+
+
+
+
+
+
+
+Workload-identity authentication lets you use access policies to grant your
+self-hosted Space cluster access to your cloud providers. Workload identity
+authentication grants temporary AWS credentials to your Kubernetes pod based on
+a service account. Assigning IAM roles and service accounts allows the pod to
+assume the IAM role dynamically, far more securely than with static credentials.
+
+This guide walks you through creating an IAM role trust policy and applying it to your EKS
+cluster for secret sharing with Kubernetes.
+
+
+
+
+
+Workload-identity authentication lets you use access policies to grant your
+self-hosted Space cluster access to your cloud providers. Workload identity
+authentication grants temporary Azure credentials to your Kubernetes pod based on
+a service account. Assigning managed identities and service accounts allows the pod to
+authenticate with Azure resources dynamically, far more securely than with static credentials.
+
+This guide walks you through creating a managed identity and federated credential for your AKS
+cluster for shared secrets in your Space cluster.
+
+
+
+
+
+Workload-identity authentication lets you use access policies to grant your
+self-hosted Space cluster access to your cloud providers. Workload identity
+authentication grants temporary GCP credentials to your Kubernetes pod based on
+a service account. Assigning IAM roles and service accounts allows the pod to
+access cloud resources dynamically, far more securely than with static
+credentials.
+
+This guide walks you through configuring workload identity for your GKE
+cluster's Shared Secrets component.
+
+
+
+## Prerequisites
+
+
+To set up workload identity, you'll need:
+
+
+- A self-hosted Space cluster
+- Administrator access in your cloud provider
+- Helm and `kubectl`
+
+
+## About the Shared Secrets component
+
+
+
+
+The External Secrets Operator (ESO) runs in each control plane's host namespace as `external-secrets-controller`. It needs access to
+your external secrets management service, such as AWS Secrets Manager.
+
+To configure your shared secrets workflow controller, you must:
+
+* Annotate the Kubernetes service account to associate it with a cloud-side
+  principal (such as an IAM role, service account, or enterprise application). The workload must then
+  use this service account.
+* Label the workload (pod) to allow the injection of a temporary credential set,
+  enabling authentication.
+
+
+
+
+
+The External Secrets Operator (ESO) component runs in each control plane's host
+namespace as `external-secrets-controller`. It synchronizes secrets from
+external APIs into Kubernetes secrets. Shared secrets allow you to manage
+credentials outside your Kubernetes cluster while making them available to your
+application.
+
+
+
+
+
+The External Secrets Operator (ESO) component runs in each control plane's host
+namespace as `external-secrets-controller`. It synchronizes secrets from
+external APIs into Kubernetes secrets. Shared secrets allow you to manage
+credentials outside your Kubernetes cluster while making them available to your
+application.
+
+
+
+## Configuration
+
+
+
+Upbound supports workload-identity configurations in AWS with IAM Roles for
+Service Accounts or EKS pod identity association.
+
+#### IAM Roles for Service Accounts (IRSA)
+
+With IRSA, you can associate a Kubernetes service account in an EKS cluster with
+an AWS IAM role. Upbound authenticates workloads with that service account as
+the IAM role using temporary credentials instead of static role credentials.
+IRSA relies on the AWS STS `AssumeRoleWithWebIdentity` API to exchange OIDC ID tokens for
+the IAM role's temporary credentials. IRSA uses the `eks.amazonaws.com/role-arn`
+annotation to link the service account and the IAM role.
+
+**Create an IAM role and trust policy**
+
+First, create an IAM role with appropriate permissions to access AWS Secrets Manager:
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Action": [
+        "secretsmanager:GetSecretValue",
+        "secretsmanager:DescribeSecret",
+        "ssm:GetParameter"
+      ],
+      "Resource": [
+        "arn:aws:secretsmanager:${YOUR_REGION}:${YOUR_AWS_ACCOUNT_ID}:secret:${YOUR_SECRET_PREFIX}*",
+        "arn:aws:ssm:${YOUR_REGION}:${YOUR_AWS_ACCOUNT_ID}:parameter/${YOUR_PARAMETER_PREFIX}*"
+      ]
+    }
+  ]
+}
+```
+
+You must configure the IAM role trust policy to exactly match each
+provisioned control plane.
An example of a trust policy for a single control
+plane is below:
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Principal": {
+        "Federated": "arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:oidc-provider/${YOUR_OIDC_PROVIDER}"
+      },
+      "Action": "sts:AssumeRoleWithWebIdentity",
+      "Condition": {
+        "StringEquals": {
+          "${YOUR_OIDC_PROVIDER}:aud": "sts.amazonaws.com"
+        },
+        "StringLike": {
+          "${YOUR_OIDC_PROVIDER}:sub": "system:serviceaccount:*:external-secrets-controller"
+        }
+      }
+    }
+  ]
+}
+```
+
+**Configure the EKS OIDC provider**
+
+Next, ensure your EKS cluster has an OIDC identity provider:
+
+```shell
+eksctl utils associate-iam-oidc-provider --cluster ${YOUR_CLUSTER_NAME} --approve
+```
+
+**Apply the IAM role**
+
+In your control plane, pass the `--set` flag with the Spaces Helm chart
+parameters for the shared secrets component:
+
+```shell
+--set controlPlanes.sharedSecrets.serviceAccount.customAnnotations."eks\.amazonaws\.com/role-arn"="arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:role/${YOUR_ESO_ROLE_NAME}"
+```
+
+This command allows the shared secrets component to authenticate with your
+dedicated IAM role in your EKS cluster environment.
+
+#### EKS pod identities
+
+Upbound also supports EKS Pod Identity configuration. EKS Pod Identities allow
+you to create a pod identity association between your Kubernetes namespace, a
+service account, and an IAM role, letting the EKS control plane
+handle the credential exchange automatically.
+
+**Create an IAM role**
+
+First, create an IAM role with appropriate permissions to access AWS Secrets Manager:
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Action": [
+        "secretsmanager:GetSecretValue",
+        "secretsmanager:DescribeSecret",
+        "ssm:GetParameter"
+      ],
+      "Resource": [
+        "arn:aws:secretsmanager:${YOUR_AWS_REGION}:${YOUR_AWS_ACCOUNT_ID}:secret:${YOUR_SECRET_PREFIX}*",
+        "arn:aws:ssm:${YOUR_AWS_REGION}:${YOUR_AWS_ACCOUNT_ID}:parameter/${YOUR_PARAMETER_PREFIX}*"
+      ]
+    }
+  ]
+}
+```
+
+**Configure your Space with Helm**
+
+When you install or upgrade your Space with Helm, add the shared secrets value:
+
+```shell
+helm upgrade spaces spaces-helm-chart \
+  --set "sharedSecrets.enabled=true"
+```
+
+**Create a Pod Identity Association**
+
+After Upbound provisions your control plane, create a Pod Identity Association
+with the `aws` CLI:
+
+```shell
+aws eks create-pod-identity-association \
+  --cluster-name ${YOUR_CLUSTER_NAME} \
+  --namespace ${YOUR_CONTROL_PLANE_NAMESPACE} \
+  --service-account external-secrets-controller \
+  --role-arn arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:role/${YOUR_ROLE_NAME}
+```
+
+
+
+
+
+Upbound supports workload-identity configurations in Azure with Azure's built-in
+workload identity feature.
+
+First, enable the OIDC issuer and workload identity in your AKS cluster:
+
+```shell
+az aks update --resource-group ${YOUR_RESOURCE_GROUP} --name ${YOUR_AKS_CLUSTER_NAME} --enable-oidc-issuer --enable-workload-identity
+```
+
+Next, find and store the OIDC issuer URL as an environment variable:
+
+```shell
+export AKS_OIDC_ISSUER="$(az aks show --name ${YOUR_AKS_CLUSTER_NAME} --resource-group ${YOUR_RESOURCE_GROUP} --query "oidcIssuerProfile.issuerUrl" --output tsv)"
+```
+
+Create a new managed identity to associate with the shared secrets component:
+
+```shell
+az identity create --name secrets-identity --resource-group ${YOUR_RESOURCE_GROUP} --location ${YOUR_LOCATION}
+```
+
+Retrieve the client ID and store it as an environment variable:
+
+```shell
+export USER_ASSIGNED_CLIENT_ID="$(az identity show --name secrets-identity --resource-group ${YOUR_RESOURCE_GROUP} --query clientId -otsv)"
+```
+
+Grant the managed identity you created access to your Azure Key Vault:
+
+```shell
+az keyvault set-policy --name ${YOUR_KEY_VAULT_NAME} \
+    --resource-group ${YOUR_RESOURCE_GROUP} \
+    --object-id $(az identity show --name secrets-identity --resource-group ${YOUR_RESOURCE_GROUP} --query principalId -otsv) \
+    --secret-permissions get list
+```
+
+In your control plane, pass the `--set` flag with the Spaces Helm chart
+parameters for the shared secrets component:
+
+```shell
+--set controlPlanes.sharedSecrets.serviceAccount.customAnnotations."azure\.workload\.identity/client-id"="${USER_ASSIGNED_CLIENT_ID}"
+--set controlPlanes.sharedSecrets.pod.customLabels."azure\.workload\.identity/use"="true"
+```
+
+Next, create a federated credential to establish trust between the managed identity
+and your AKS OIDC provider:
+
+```shell
+az identity federated-credential create \
+    --name secrets-federated-identity \
+    --identity-name secrets-identity \
+    --resource-group ${YOUR_RESOURCE_GROUP} \
+    --issuer ${AKS_OIDC_ISSUER} \
+    --subject system:serviceaccount:${YOUR_CONTROL_PLANE_NAMESPACE}:external-secrets-controller
+```
+
+
+
+
+
+Upbound supports workload-identity configurations in GCP with IAM principal
+identifiers or service account impersonation.
+
+#### IAM principal identifiers
+
+IAM principal identifiers allow you to grant permissions directly to
+Kubernetes service accounts without additional annotation. Upbound recommends
+this approach for ease-of-use and flexibility.
+
+First, enable Workload Identity Federation on your GKE cluster:
+
+```shell
+gcloud container clusters update ${YOUR_CLUSTER_NAME} \
+    --workload-pool=${YOUR_PROJECT_ID}.svc.id.goog \
+    --region=${YOUR_REGION}
+```
+
+Next, grant the necessary permissions to your Kubernetes service account:
+
+```shell
+gcloud projects add-iam-policy-binding ${YOUR_PROJECT_ID} \
+    --member="principalSet://iam.googleapis.com/projects/${YOUR_PROJECT_NUMBER}/locations/global/workloadIdentityPools/${YOUR_PROJECT_ID}.svc.id.goog/attribute.kubernetes_namespace/${YOUR_CONTROL_PLANE_NAMESPACE}/attribute.kubernetes_service_account/external-secrets-controller" \
+    --role="roles/secretmanager.secretAccessor"
+```
+
+#### Service account impersonation
+
+Service account impersonation allows you to link a Kubernetes service account to
+a GCP service account. The Kubernetes service account assumes the permissions of
+the GCP service account you specify.
+
+Enable Workload Identity Federation on your GKE cluster:
+
+```shell
+gcloud container clusters update ${YOUR_CLUSTER_NAME} \
+    --workload-pool=${YOUR_PROJECT_ID}.svc.id.goog \
+    --region=${YOUR_REGION}
+```
+
+Next, create a dedicated service account for your secrets operations:
+
+```shell
+gcloud iam service-accounts create secrets-sa \
+    --project=${YOUR_PROJECT_ID}
+```
+
+Grant secret access permissions to the service account you created:
+
+```shell
+gcloud projects add-iam-policy-binding ${YOUR_PROJECT_ID} \
+    --member="serviceAccount:secrets-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com" \
+    --role="roles/secretmanager.secretAccessor"
+```
+
+Link the Kubernetes service account to the GCP service account:
+
+```shell
+gcloud iam service-accounts add-iam-policy-binding \
+    secrets-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com \
+    --role="roles/iam.workloadIdentityUser" \
+    --member="serviceAccount:${YOUR_PROJECT_ID}.svc.id.goog[${YOUR_CONTROL_PLANE_NAMESPACE}/external-secrets-controller]"
+```
+
+In your control plane, pass the `--set` flag with the Spaces Helm chart
+parameters for the shared secrets component:
+
+```shell
+--set controlPlanes.sharedSecrets.serviceAccount.customAnnotations."iam\.gke\.io/gcp-service-account"="secrets-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com"
+```
+
+
+
+## Verify your configuration
+
+After you apply the configuration, use `kubectl` to verify the service account
+has the correct annotation:
+
+```shell
+kubectl get serviceaccount external-secrets-controller -n ${YOUR_CONTROL_PLANE_NAMESPACE} -o yaml
+```
+
+
+
+Verify the `external-secrets` pod is running correctly:
+
+```shell
+kubectl get pods -n ${YOUR_CONTROL_PLANE_NAMESPACE} | grep external-secrets
+```
+
+
+
+
+
+Verify the External Secrets Operator pod is running correctly:
+
+```shell
+kubectl get pods -n ${YOUR_CONTROL_PLANE_NAMESPACE} | grep external-secrets
+```
+
+
+
+
+
+Verify the `external-secrets` pod is running correctly:
+
+```shell
+kubectl get pods -n ${YOUR_CONTROL_PLANE_NAMESPACE} | grep external-secrets
+```
+
+
+
+## Restart workload
+
+
+
+You must manually restart a workload's pod when you add the
+`eks.amazonaws.com/role-arn` annotation to the running pod's service
+account.
+
+This restart enables the EKS pod identity webhook to inject the necessary
+environment for using IRSA.
+
+
+
+
+
+You must manually restart a workload's pod when you add the workload identity annotations to the running pod's service account.
+
+This restart enables the workload identity webhook to inject the necessary
+environment for using Azure workload identity.
+
+
+
+
+
+GCP workload identity doesn't require pod restarts after configuration changes.
+If you do need to restart the workload, use the `kubectl` command to force the
+component restart:
+
+
+
+```shell
+kubectl rollout restart deployment external-secrets
+```
+
+## Use cases
+
+
+
+
+Using shared secrets with workload identity eliminates the need for static credentials
+in your cluster. These benefits are particularly helpful in:
+
+* Secure application credentials management
+* Database connection string storage
+* API token management
+* Compliance with secret rotation security standards
+* Multi-environment configuration with centralized secret management
+
+
+
+
+
+Using workload identity authentication for shared secrets eliminates the need for static
+credentials in your cluster as well as the overhead of credential rotation.
+These benefits are particularly helpful in:
+
+* Secure application credentials management
+* Database connection string storage
+* API token management
+* Compliance with secret rotation security standards
+
+
+
+
+
+Configuring the external secrets operator with workload identity eliminates the need for
+static credentials in your cluster and the overhead of credential rotation.
+These benefits are particularly helpful in:
+
+* Secure application credentials management
+* Database connection string storage
+* API token management
+* Compliance with secret rotation security standards
+
+
+
+## Next steps
+
+Now that you have workload identity configured for the shared secrets component, visit
+the [Shared Secrets][eso-guide] guide for more information.
+
+Other workload identity guides are:
+* [Backup and restore][backuprestore]
+* [Billing][billing]
+
+[eso-guide]: /spaces/howtos/secrets-management
+[backuprestore]: /spaces/howtos/self-hosted/workload-id/backup-restore-config
+[billing]: /spaces/howtos/self-hosted/workload-id/billing-config
diff --git a/spaces_versioned_docs/version-v1.12/howtos/simulations.md b/spaces_versioned_docs/version-v1.12/howtos/simulations.md
new file mode 100644
index 000000000..26cb0e657
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.12/howtos/simulations.md
@@ -0,0 +1,110 @@
+---
+title: Simulate changes to your Control Plane Projects
+sidebar_position: 100
+description: Use the Up CLI to mock operations before deploying to your environments.
+---
+
+:::info API Version Information
+This guide covers Simulations, available in v1.10+ (GA since v1.13).
+
+For API specifications on the Simulations CRD, see the [Spaces API Reference](../../../reference/apis/spaces-api/).
+:::
+
+:::important
+The Simulations feature is in private preview. For more information, [reach out to Upbound][reach-out-to-upbound].
+:::
+
+Control plane simulations allow you to preview changes to your resources before
+applying them to your control planes. Like a plan or dry-run operation,
+simulations expose the impact of updates to compositions or claims without
+changing your actual resources.
+
+A control plane simulation creates a temporary copy of your control plane and
+returns a preview of the desired changes. The simulation change plan helps you
+reduce the risk of unexpected behavior based on your changes.
+
+## Simulation benefits
+
+Control planes are dynamic systems that automatically reconcile resources to
+match your desired state. Simulations provide visibility into this
+reconciliation process by showing:
+
+
+* New resources to create
+* Existing resources to change
+* Existing resources to delete
+* How configuration changes propagate through the system
+
+These insights are crucial when planning complex changes or upgrading Crossplane
+packages.
+
+## Requirements
+
+Simulations are available to select customers on Upbound Cloud with Team
+Tier or higher. For more information, [reach out to Upbound][reach-out-to-upbound-1].
+
+## How to simulate your control planes
+
+Before you start a simulation, build your project and use the `up
+project run` command to run your control plane.
+
+Use the `up project simulate` command with your control plane name to start the
+simulation:
+
+```shell {copy-lines="all"}
+up project simulate --complete-after=60s --terminate-on-finish
+```
+
+The `complete-after` flag determines how long to run the simulation before it completes and calculates the results.
Depending on the change, a simulation may not complete within your defined interval, leaving unaffected resources marked as `unchanged`.
+
+The `terminate-on-finish` flag terminates the simulation after the time
+you set, deleting the control plane that ran the simulation.
+
+At the end of your simulation, your CLI returns:
+* A summary of the resources created, modified, or deleted
+* Diffs for each resource affected
+
+## View your simulation in the Upbound Console
+You can also view your simulation results in the Upbound Console:
+
+1. Navigate to your base control plane in the Upbound Console
+2. Select the "Simulations" tab in the menu
+3. Select a simulation object to see a change list of all
+   affected resources.
+
+The Console provides visual indications of changes:
+
+- Created Resources: Marked with green
+- Modified Resources: Marked with yellow
+- Deleted Resources: Marked with red
+- Unchanged Resources: Displayed in gray
+
+![Upbound Console Simulation](/img/simulations.png)
+
+## Considerations
+
+Simulations is a **private preview** feature.
+
+Be aware of the following limitations:
+
+- Simulations can't predict the exact behavior of external systems due to the
+  complexity and non-deterministic reconciliation pattern in Crossplane.
+
+- The only completion criterion for a simulation is time. Your simulation may not
+  receive a conclusive result within that interval. Upbound recommends the
+  default `60s` value.
+
+- Providers don't run in simulations. Simulations can't compose resources that
+  rely on the status of Managed Resources.
+
+
+The Upbound team is working to improve these limitations. Your feedback is always appreciated.
+
+## Next steps
+
+For more information, follow the [tutorial][tutorial] on Simulations.
+
+
+[tutorial]: /manuals/cli/howtos/simulations
+[reach-out-to-upbound]: https://www.upbound.io/contact-us
+[reach-out-to-upbound-1]: https://www.upbound.io/contact-us
diff --git a/spaces_versioned_docs/version-v1.12/overview/_category_.json b/spaces_versioned_docs/version-v1.12/overview/_category_.json
new file mode 100644
index 000000000..54bb16430
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.12/overview/_category_.json
@@ -0,0 +1,4 @@
+{
+  "label": "Overview",
+  "position": 0
+}
diff --git a/spaces_versioned_docs/version-v1.12/overview/index.md b/spaces_versioned_docs/version-v1.12/overview/index.md
new file mode 100644
index 000000000..7b79f6e44
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.12/overview/index.md
@@ -0,0 +1,14 @@
+---
+title: Spaces Overview
+sidebar_position: 0
+---
+
+# Upbound Spaces
+
+Welcome to the Upbound Spaces documentation. This section contains comprehensive documentation for the Spaces API and Spaces operations across all supported versions (v1.9 through v1.15).
+
+## Get Started
+
+- **[Concepts](/spaces/concepts/control-planes)** - Core concepts for Spaces
+- **[How-To Guides](/spaces/howtos/auto-upgrade)** - Step-by-step guides for operating Spaces
+- **[API Reference](/spaces/reference/)** - API specifications and resources
diff --git a/spaces_versioned_docs/version-v1.12/reference/_category_.json b/spaces_versioned_docs/version-v1.12/reference/_category_.json
new file mode 100644
index 000000000..4a6a139c4
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.12/reference/_category_.json
@@ -0,0 +1,5 @@
+{
+  "label": "Spaces API",
+  "position": 1,
+  "collapsed": true
+}
diff --git a/spaces_versioned_docs/version-v1.12/reference/index.md b/spaces_versioned_docs/version-v1.12/reference/index.md
new file mode 100644
index 000000000..5e68b0768
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.12/reference/index.md
@@ -0,0 +1,72 @@
+---
+title: Spaces API Reference
+description: Documentation for the Spaces API resources (v1.12)
+sidebar_position: 1
+---
+import CrdDocViewer from '@site/src/components/CrdViewer';
+
+
+This page documents the Custom Resource Definitions (CRDs) for the Spaces API.
+
+
+## Control Planes
+### Control Planes
+
+
+## Observability
+### Shared Telemetry Configs
+
+
+## `pkg`
+### Controller Revisions
+
+
+### Controller Runtime Configs
+
+
+### Controllers
+
+
+### Remote Configuration Revisions
+
+
+### Remote Configurations
+
+
+## Policy
+### Shared Upbound Policies
+
+
+## References
+### Referenced Objects
+
+
+## Scheduling
+### Environments
+
+
+## Secrets
+### Shared External Secrets
+
+
+### Shared Secret Stores
+
+
+## Simulations
+
+
+## Spaces Backups
+### Backups
+
+
+### Backup Schedules
+
+
+### Shared Backup Configs
+
+
+### Shared Backups
+
+
+### Shared Backup Schedules
+
diff --git a/spaces_versioned_docs/version-v1.13/concepts/_category_.json b/spaces_versioned_docs/version-v1.13/concepts/_category_.json
new file mode 100644
index 000000000..4b8667e29
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.13/concepts/_category_.json
@@ -0,0 +1,7 @@
+{
+  "label": "Concepts",
+  "position": 2,
+  "collapsed": true
+}
+
+
diff --git a/spaces_versioned_docs/version-v1.13/concepts/control-planes.md b/spaces_versioned_docs/version-v1.13/concepts/control-planes.md
new file mode 100644
index 000000000..7066343de
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.13/concepts/control-planes.md
@@ -0,0 +1,227 @@
+---
+title: Control Planes
+weight: 1
+description: An overview of control planes in Upbound
+---
+
+
+Control planes in Upbound are fully isolated Crossplane control plane instances that Upbound manages for you. This means Upbound manages:
+
+- the underlying lifecycle of infrastructure (compute, memory, and storage) required to power your instance.
+- the scaling of the infrastructure.
+- the maintenance of the core Crossplane components that make up a control plane.
+
+This lets users focus on building their APIs and operating their control planes, while Upbound handles the rest. Each control plane has its own dedicated API server connecting users to their control plane.
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+).
+
+For detailed information about Control Plane API specifications and CRD fields, see the [Spaces API Reference](../../../reference/apis/spaces-api/).
+:::
+
+## Control plane architecture
+
+![Managed Control Plane Architecture](/img/mcp.png)
+
+Along with underlying infrastructure, Upbound manages the Crossplane system components. You don't need to manage the Crossplane API server or core resource controllers because Upbound manages your control plane lifecycle from creation to deletion.
+
+### Crossplane API
+
+Each control plane offers a unified endpoint. You interact with your control plane through Kubernetes and Crossplane API calls. Each control plane runs a Kubernetes API server to handle API requests. You can make API calls in the following ways:
+
+- Direct calls: HTTP/gRPC
+- Indirect calls: the up CLI, Kubernetes clients such as kubectl, or the Upbound Console.
+
+Like in Kubernetes, the API server is the hub for all communication for the control plane. All internal components such as system processes and provider controllers act as clients of the API server.
+
+Your API requests tell Crossplane your desired state for the resources your control plane manages. Crossplane attempts to constantly maintain that state. Crossplane lets you configure objects in the API either imperatively or declaratively.
+
+### Crossplane versions and features
+
+Upbound automatically upgrades Crossplane system components on control planes to new Crossplane versions for updated features and improvements in the open source project. With [automatic upgrades][automatic-upgrades], you choose the cadence at which Upbound automatically upgrades the system components in your control plane. You can also choose to manually upgrade your control plane to a different Crossplane version.
+
+For detailed information on versions and upgrades, refer to the [release notes][release-notes] and the automatic upgrade documentation. If you don't enroll a control plane in a release channel, Upbound doesn't apply automatic upgrades.
+
+Features considered "alpha" in Crossplane are by default not supported in a control plane unless otherwise specified.
+
+### Hosting environments
+
+Every control plane in Upbound belongs to a [control plane group][control-plane-group]. Control plane groups are a logical grouping of one or more control planes with shared objects (such as secrets or backup configuration). Every group resides in a [Space][space] in Upbound, which are hosting environments for control planes.
+
+Think of a Space as being conceptually the same as an AWS, Azure, or GCP region. Regardless of the Space type you run a control plane in, the core experience is identical.
+
+## Management
+
+### Create a control plane
+
+You can create a new control plane from the Upbound Console, the [up CLI][up-cli], or with Kubernetes clients such as `kubectl`.
+
+
+
+
+
+To use the CLI, run the following:
+
+```shell
+up ctp create <control-plane-name>
+```
+
+To learn more about control plane-related commands in `up`, go to the [CLI reference][cli-reference] documentation.
+
+
+
+You can create and manage control planes declaratively in Upbound. Before you
+begin, ensure you're logged into Upbound and set the correct context:
+
+```bash
+up login
+# Example: acmeco/upbound-gcp-us-west-1/default
+up ctx ${yourOrganization}/${yourSpace}/${yourGroup}
+```
+
+```yaml
+#controlplane-a.yaml
+apiVersion: spaces.upbound.io/v1beta1
+kind: ControlPlane
+metadata:
+  name: controlplane-a
+spec:
+  crossplane:
+    autoUpgrade:
+      channel: Rapid
+```
+
+```bash
+kubectl apply -f controlplane-a.yaml
+```
+
+
+
+
+
+### Connect directly to your control plane
+
+Each control plane offers a unified endpoint.
You interact with your control plane through Kubernetes and Crossplane API calls. Each control plane runs a Kubernetes API server to handle API requests.
+
+You can connect to a control plane's API server directly via the up CLI. Use the [`up ctx`][up-ctx] command to set your kubeconfig's current context to a control plane:
+
+```shell
+# Example: acmeco/upbound-gcp-us-west-1/default/ctp1
+up ctx ${yourOrganization}/${yourSpace}/${yourGroup}/${yourControlPlane}
+```
+
+To disconnect from your control plane and revert your kubeconfig's current context to the previous entry, run the following:
+
+```shell
+up ctx ..
+```
+
+You can also generate a `kubeconfig` file for a control plane with [`up ctx -f`][up-ctx-f].
+
+```shell
+up ctx ${yourOrganization}/${yourSpace}/${yourGroup}/${yourControlPlane} -f - > ctp-kubeconfig.yaml
+```
+
+:::tip
+To learn more about how to use `up ctx` to navigate different contexts in Upbound, read the [CLI documentation][cli-documentation].
+:::
+
+## Configuration
+
+When you create a new control plane, Upbound provides you with a fully isolated instance of Crossplane. Configure your control plane by installing packages that extend its capabilities, such as the ability to create and manage the lifecycle of new types of infrastructure resources.
+
+You're encouraged to install any available Crossplane package type (Providers, Configurations, Functions) from the [Upbound Marketplace][upbound-marketplace] on your control planes.
+
+### Install packages
+
+Below are a couple of ways to install Crossplane packages on your control plane.
+
+
+
+
+
+
+Use the `up` CLI to install Crossplane packages from the [Upbound Marketplace][upbound-marketplace-1] on your control planes. Connect directly to your control plane via `up ctx`. Then, to install a provider:
+
+```shell
+up ctp provider install xpkg.upbound.io/upbound/provider-family-aws
+```
+
+To install a Configuration:
+
+```shell
+up ctp configuration install xpkg.upbound.io/upbound/platform-ref-aws
+```
+
+To install a Function:
+
+```shell
+up ctp function install xpkg.upbound.io/crossplane-contrib/function-kcl
+```
+
+
+You can use kubectl to directly apply any Crossplane manifest. Below is an example for installing a Crossplane provider:
+
+```yaml
+cat <<EOF | kubectl apply -f -
+apiVersion: pkg.crossplane.io/v1
+kind: Provider
+metadata:
+  name: provider-family-aws
+spec:
+  package: xpkg.upbound.io/upbound/provider-family-aws
+EOF
+```
+
+
+
+For production-grade scenarios, it's recommended you configure your control plane declaratively via Git plus a Continuous Delivery (CD) engine such as Argo. For guidance on this topic, read [GitOps with control planes][gitops-with-control-planes].
+
+
+
+
+
+
+### Configure Crossplane ProviderConfigs
+
+#### ProviderConfigs with OpenID Connect
+
+Use OpenID Connect (`OIDC`) to authenticate to Upbound control planes without storing static credentials. OIDC lets your control plane exchange short-lived tokens directly with your cloud provider. Read how to [connect control planes to external services][connect-control-planes-to-external-services] to learn more.
+
+#### Generic ProviderConfigs
+
+The Upbound Console doesn't allow direct editing of ProviderConfigs that don't support `Upbound` authentication. To edit these ProviderConfigs on your control plane, connect to the control plane directly by following the instructions in the previous section and using `kubectl`.
+
+### Configure secrets
+
+Upbound gives users the ability to configure the synchronization of secrets from external stores into control planes. Configure this capability at the group level, explained in the [Spaces documentation][spaces-documentation].
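+
+A minimal sketch of what a group-scoped shared secret store can look like, assuming a Vault backend. The API version, selector labels, and Vault connection values below are illustrative assumptions; confirm the exact schema in the Spaces API reference:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: vault-store
+  namespace: default        # the control plane group
+spec:
+  # Select which control planes in the group receive the store (illustrative label)
+  controlPlaneSelector:
+    labelSelectors:
+      - matchLabels:
+          env: dev
+  # The provider block follows the External Secrets Operator SecretStore schema
+  provider:
+    vault:
+      server: https://vault.example.com
+      path: secret
+      version: v2
+      auth:
+        tokenSecretRef:
+          name: vault-token
+          key: token
+```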
+
+### Configure backups
+
+Upbound gives users the ability to configure backup schedules, take impromptu backups, and conduct self-service restore operations. Configure this capability at the group level, explained in the [Spaces documentation][spaces-documentation-1].
+
+### Configure telemetry
+
+
+Upbound gives users the ability to configure the collection of telemetry (logs, metrics, and traces) in their control planes. Using Upbound's built-in [OTEL][otel] support, you can stream this data out to your preferred observability solution. Configure this capability at the group level, explained in the [Spaces documentation][spaces-documentation-2].
+
+
+
+[automatic-upgrades]: /spaces/howtos/auto-upgrade
+[release-notes]: https://github.com/upbound/universal-crossplane/releases
+[control-plane-group]: /spaces/concepts/groups
+[space]: /spaces/overview
+[up-cli]: /reference/cli-reference
+[cli-reference]: /reference/cli-reference
+[up-ctx]: /reference/cli-reference
+[up-ctx-f]: /reference/cli-reference
+[cli-documentation]: /manuals/cli/concepts/contexts
+[upbound-marketplace]: https://marketplace.upbound.io
+[upbound-marketplace-1]: https://marketplace.upbound.io
+[gitops-with-control-planes]: /spaces/howtos/cloud-spaces/gitops
+[connect-control-planes-to-external-services]: /manuals/platform/howtos/oidc
+[spaces-documentation]: /spaces/howtos/secrets-management
+[spaces-documentation-1]: /spaces/howtos/backup-and-restore
+[otel]: https://opentelemetry.io
+[spaces-documentation-2]: /spaces/howtos/observability
diff --git a/spaces_versioned_docs/version-v1.13/concepts/deployment-modes.md b/spaces_versioned_docs/version-v1.13/concepts/deployment-modes.md
new file mode 100644
index 000000000..f5e718f88
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.13/concepts/deployment-modes.md
@@ -0,0 +1,53 @@
+---
+title: Deployment Modes
+sidebar_position: 10
+description: An overview of deployment modes for Spaces
+---
+
+Upbound Spaces can be deployed and used in a variety of modes:
+
+- **Cloud Spaces:** Multi-tenant Upbound-hosted, Upbound-managed Space environment. Cloud Spaces provide a typical SaaS experience.
+- **[Dedicated Spaces][dedicated-spaces]:** Single-tenant Upbound-hosted, Upbound-managed Space environment. Dedicated Spaces provide a SaaS experience, with additional isolation guarantees that your workloads run in a fully isolated context.
+- **[Managed Spaces][managed-spaces]:** Single-tenant customer-hosted, Upbound-managed Space environment. Managed Spaces provide a SaaS-like experience, with additional guarantees of all hosting infrastructure being served from your own cloud account.
+- **[Self-Hosted Spaces][self-hosted-spaces]:** Single-tenant customer-hosted, customer-managed Space environment. This is a fully self-hosted, self-managed software experience for using Spaces. Upbound delivers the Spaces software and you run it yourself.
+
+The Upbound platform uses a federated model to connect each Space back to a
+central service called the [Upbound Console][console], which is deployed and
+managed by Upbound.
+
+By default, customers have access to a set of Cloud Spaces.
+
+## Supported clouds
+
+You can host Upbound Spaces on Amazon Web Services (AWS), Microsoft Azure,
+and Google Cloud Platform (GCP). Regardless of the hosting platform, you can use
+Spaces to deploy control planes that manage the lifecycle of your resources.
+
+## Supported regions
+
+This table lists the cloud service provider regions supported by Upbound.
+
+### GCP
+
+| Region | Location |
+| --- | --- |
+| `us-west-1` | Western US (Oregon) |
+| `us-central-1` | Central US (Iowa) |
+| `eu-west-3` | Western Europe (Frankfurt) |
+
+### AWS
+
+| Region | Location |
+| --- | --- |
+| `us-east-1` | Eastern US (Northern Virginia) |
+
+### Azure
+
+| Region | Location |
+| --- | --- |
+| `us-east-1` | Eastern US (Virginia) |
+
+[dedicated-spaces]: /spaces/howtos/cloud-spaces/dedicated-spaces-deployment
+[managed-spaces]: /spaces/howtos/self-hosted/managed-spaces-deployment
+[self-hosted-spaces]: /spaces/howtos/self-hosted/self-hosted-spaces-deployment
+[console]: /manuals/console/upbound-console/
diff --git a/spaces_versioned_docs/version-v1.13/concepts/groups.md b/spaces_versioned_docs/version-v1.13/concepts/groups.md
new file mode 100644
index 000000000..d2ccacdb3
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.13/concepts/groups.md
@@ -0,0 +1,115 @@
+---
+title: Control Plane Groups
+sidebar_position: 2
+description: An introduction to the Control Plane Groups in Upbound
+plan: "enterprise"
+---
+
+
+
+In Upbound, Control Plane Groups (or just 'groups') are a logical grouping of one or more control planes with shared resources like [secrets][secrets] or [backups][backups]. It's a mechanism for isolating these groups of resources within a single [Space][space]. All role-based access control in Upbound happens at the control plane group level.
+
+## When to use multiple groups
+
+You should use groups in environments where Crossplane needs to manage infrastructure across multiple cloud accounts or projects. If you only need to deploy and manage resources in a couple of cloud accounts, you shouldn't need to think about groups at all.
+
+Groups are a way to divide access in Upbound between multiple teams. Think of a group as being analogous to a Kubernetes _namespace_.
+
+## The 'default' group
+
+Every Cloud Space in Upbound has a group named _default_ available.
+
+## Working with groups
+
+### View groups
+
+You can list groups in a Space using:
+
+```shell
+up group list
+```
+
+If you're operating in a single-tenant Space and have access to the underlying cluster, you can list namespaces that have the group label:
+
+```shell
+kubectl get namespaces -l spaces.upbound.io/group=true
+```
+
+### Set the group for a request
+
+Several commands in _up_ have a group context. To set the group for a request, use the `--group` flag:
+
+```shell
+up ctp list --group=team1
+```
+```shell
+up ctp create new-ctp --group=team2
+```
+
+### Set the group preference
+
+The _up_ CLI operates upon a single [Upbound context][upbound-context]. Whatever context gets set is then used as the preference for other commands. An Upbound context is capable of pointing at a variety of altitudes:
+
+1. A Space in Upbound
+2. A group within a Space
+3. A control plane within a group
+
+To set the group preference, use `up ctx` to choose a group as your preferred Upbound context.
For example:
+
+```shell
+# This sets the context for the up CLI to the default group in an Upbound-managed Cloud Space (gcp-us-west-1) for an organization called 'acmeco'
+up ctx acmeco/upbound-gcp-us-west-1/default/
+```
+
+### Create a group
+
+To create a group, login to Upbound and set your context to your desired Space:
+
+```shell
+up login
+up ctx '<organization>/<space>'
+# Example: up ctx acmeco/upbound-gcp-us-west-1
+```
+
+
+Create a group:
+
+```shell
+up group create my-new-group
+```
+
+### Delete a group
+
+To delete a group, login to Upbound and set your context to your desired Space:
+
+```shell
+up login
+up ctx '<organization>/<space>'
+# Example: up ctx acmeco/upbound-gcp-us-west-1
+```
+
+Delete a group:
+
+```shell
+up group delete my-new-group
+```
+
+### Protected groups
+
+Once a control plane gets created in a group, Upbound enforces a protection policy on the group to prevent accidental deletion. To delete a group that has control planes in it, you should first delete all control planes in the group.
+
+## Groups in the context of single-tenant Spaces
+
+Upbound offers a variety of deployment models to use the product. If you deploy your own single-tenant Upbound Space (whether connected or disconnected), you're self-hosting Upbound software in a Kubernetes cluster. In these environments, a control plane group maps to a corresponding namespace in the cluster which hosts the Space.
+
+Most Kubernetes clusters come with some set of predefined namespaces. Because a group maps to a corresponding Kubernetes namespace, whenever a group gets created, a corresponding Kubernetes namespace must exist as well. When the Spaces software is newly installed, no groups exist. You _can_ elevate a Kubernetes namespace to become a group by doing the following:
+
+1. Creating a group with the same name as a preexisting Kubernetes namespace
+2. Creating a control plane in a preexisting Kubernetes namespace
+3. Labeling a Kubernetes namespace with the label `spaces.upbound.io/group=true`
+
+
+[secrets]: /spaces/howtos/secrets-management
+[backups]: /spaces/howtos/self-hosted/workload-id/backup-restore-config/
+[space]: /spaces/overview
+[upbound-context]: /manuals/cli/concepts/contexts
diff --git a/spaces_versioned_docs/version-v1.13/howtos/_category_.json b/spaces_versioned_docs/version-v1.13/howtos/_category_.json
new file mode 100644
index 000000000..d3a8547aa
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.13/howtos/_category_.json
@@ -0,0 +1,7 @@
+{
+  "label": "How-tos",
+  "position": 3,
+  "collapsed": true
+}
+
+
diff --git a/spaces_versioned_docs/version-v1.13/howtos/api-connector.md b/spaces_versioned_docs/version-v1.13/howtos/api-connector.md
new file mode 100644
index 000000000..a14468f52
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.13/howtos/api-connector.md
@@ -0,0 +1,413 @@
+---
+title: API Connector
+weight: 90
+description: Connect Kubernetes clusters to remote Crossplane control planes for resource synchronization
+aliases:
+  - /api-connector
+  - /concepts/api-connector
+---
+:::info API Version Information
+This guide covers API Connector, a preview feature available in all supported versions (v1.9-v1.14+).
+
+For related API specifications and available resources, see the [Spaces API Reference](../../../reference/apis/spaces-api/).
+:::
+
+:::warning
+API Connector is currently in **Preview**. The feature is under active
+development and subject to breaking changes. Use for testing and evaluation
+purposes only.
+:::
+
+API Connector enables seamless integration between Kubernetes application
+clusters consuming APIs and remote Crossplane control planes providing and
+reconciling APIs.
+
+You can use the API Connector to decouple where Crossplane is running (for
+example in an Upbound control plane), and where APIs are consumed
+(for example in an existing Kubernetes cluster). This gives you flexibility and
+consistency in your control plane operations.
+
+
+
+Unlike the [Control Plane Connector](ctp-connector.md), which offers only
+coarse-grained connectivity between app clusters and a control plane, API
+Connector offers fine-grained configuration of which APIs get offered along with
+multi-cluster connectivity.
+
+## Architecture overview
+
+![API Connector Architecture](/img/api-connector.png)
+
+API Connector uses a **provider-consumer** model:
+
+- **Provider control plane**: The Upbound control plane that provides APIs and manages infrastructure.
+- **Consumer cluster**: Any Kubernetes cluster whose users want to use APIs provided by the provider control plane, without having to run Crossplane. API Connector gets installed in the consumer cluster and bidirectionally syncs API objects to the provider.
+
+### Key components
+
+**Custom Resource Definitions (CRDs)**:
+
+
+- `ClusterConnection`: Establishes a connection from the consumer to the provider cluster. Pulls bindable CRD APIs from the provider into the consumer cluster for use.
+
+- `ClusterAPIBinding`: Instructs API Connector to sync all API objects cluster-wide with a given API group to a given provider cluster.
+- `APIBinding`: Namespaced version of `ClusterAPIBinding`. Instructs API Connector to sync API objects within a given namespace and with a given API group to a given provider cluster.
+
+
+## Prerequisites
+
+Before using API Connector, ensure:
+
+1. The **consumer cluster** has network access to the provider control plane
+1. You have a license to use API Connector. If you are unsure, [contact Upbound][contact] or your sales representative.
+
+This guide walks through how to automate connecting your cluster to an Upbound
+control plane. You can also manually configure the API Connector.
+
+## Publishing APIs in the provider cluster
+
+
+
+
+First, log in to your provider control plane and choose which CRD APIs you want
+to make accessible to the consumer cluster. API Connector only syncs
+these "bindable" CRDs.
+
+
+
+
+
+
+Use the `up` CLI to login:
+
+```bash
+up login
+```
+
+Connect to your control plane:
+
+```bash
+up ctx <organization>/<space>/<group>/<control-plane>
+```
+
+Check what CRDs are available:
+
+```bash
+kubectl get crds
+```
+
+
+Label all CRDs you want to publish with the bindable label:
+
+
+```bash
+kubectl label crd <crd-name> 'connect.upbound.io/bindable'='true' --overwrite
+```
+
+
+
+
+Change context to the provider cluster:
+```bash
+kubectl config set-context <provider-context>
+```
+
+Check what CRDs are available:
+```bash
+kubectl get crds
+```
+
+
+Label all CRDs you want to publish with the bindable label:
+
+```bash
+kubectl label crd <crd-name> 'connect.upbound.io/bindable'='true' --overwrite
+```
+
+
+
+## Installation
+
+
+
+
+The up CLI provides the simplest installation method with automatic
+configuration:
+
+Make sure the current kubeconfig context is set to the **provider control plane**:
+```bash
+up ctx <organization>/<space>/<group>/<control-plane>
+
+up controlplane api-connector install --consumer-kubeconfig <path> [OPTIONS]
+```
+
+The command:
+1. Creates a Robot account (named `<name>`) in the Upbound Cloud organization `<organization>`.
+1. Gives the created robot account `admin` permissions to the provider control plane `<control-plane>`.
+1. Generates a JWT token for the robot account, and stores it in a Kubernetes Secret in the consumer cluster.
+1. Installs the API Connector Helm chart in the consumer cluster.
+1. Creates a `ClusterConnection` object in the consumer cluster, referring to the newly generated Secret, so that API Connector can authenticate successfully to the provider control plane.
+1. API Connector pulls all published CRDs from the previous step into the consumer cluster.
+
+**Example**:
+```bash
+up controlplane api-connector install \
+  --consumer-kubeconfig ~/.kube/config \
+  --consumer-context my-cluster \
+  --upbound-token <token>
+```
+
+This command uses the provided token to authenticate with the **provider control plane**
+and creates a `ClusterConnection` resource in the **consumer cluster** to connect to the
+**provider control plane**.
+
+**Key Options**:
+- `--consumer-kubeconfig`: Path to consumer cluster kubeconfig (required)
+- `--consumer-context`: Context name for consumer cluster (required)
+- `--name`: Custom name for connection resources (optional)
+- `--upbound-token`: API token for authentication (optional)
+- `--upgrade`: Upgrade existing installation (optional)
+- `--version`: Specific version to install (optional)
+
+
+
+
+For manual installation or custom configurations:
+
+```bash
+helm upgrade --install api-connector oci://xpkg.upbound.io/spaces-artifacts/api-connector \
+  --namespace upbound-system \
+  --create-namespace \
+  --version <version> \
+  --set consumerClusterDisplayName=<display-name>
+```
+
+### Authentication methods
+
+API Connector supports two authentication methods:
+
+
+
+
+For Upbound Spaces integration:
+
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  name: spaces-secret
+  namespace: upbound-system
+type: Opaque
+stringData:
+  token: <token>
+  organization: <organization>
+  spacesBaseURL: <spaces-base-url>
+  controlPlaneGroupName: <group>
+  controlPlaneName: <control-plane>
+```
+
+
+
+For direct cluster access:
+
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  name: provider-kubeconfig
+  namespace: upbound-system
+type: Opaque
+data:
+  kubeconfig: <base64-encoded-kubeconfig>
+```
+
+
+
+
+### Connection setup
+
+Create a `ClusterConnection` to establish connectivity:
+
+
+
+
+```yaml
+apiVersion: connect.upbound.io/v1alpha1
+kind: ClusterConnection
+metadata:
+  name: spaces-connection
+  namespace: upbound-system
+spec:
+  secretRef:
+    kind: UpboundRobotToken
+    name: spaces-secret
+    namespace: upbound-system
+  crdManagement:
+    pullBehavior: Pull
+```
+
+
+
+
+```yaml
+apiVersion: connect.upbound.io/v1alpha1
+kind: ClusterConnection
+metadata:
+  name: provider-connection
+  namespace: upbound-system
+spec:
+  secretRef:
+    kind: KubeConfig
+    name: provider-kubeconfig
+    namespace: upbound-system
+  crdManagement:
+    pullBehavior: Pull
+```
+
+
+
+
+
+
+
+### Configuration
+
+Bind APIs to make them available in your consumer cluster:
+
+```yaml
+apiVersion: connect.upbound.io/v1alpha1
+kind: ClusterAPIBinding
+metadata:
+  name: <resource.group>
+spec:
+  connectionRef:
+    kind: ClusterConnection
+    name: <connection-name> # Or --name value
+```
+
+
+
+
+The `ClusterAPIBinding` name must match the **Resource.Group** (name of the CustomResourceDefinition) of the CRD you want to bind.
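+
+For example, to bind the `NopResource` API used in the usage example below, whose CRD name is `nopresources.nop.example.org`, the binding looks like this (the connection name assumes the `spaces-connection` created earlier):
+
+```yaml
+apiVersion: connect.upbound.io/v1alpha1
+kind: ClusterAPIBinding
+metadata:
+  # Must equal the CRD name: <plural>.<group>
+  name: nopresources.nop.example.org
+spec:
+  connectionRef:
+    kind: ClusterConnection
+    name: spaces-connection
+```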
+
+
+
+
+## Usage example
+
+After configuration, you can create API objects (in the consumer cluster) that
+will be synchronized to the provider cluster:
+
+```yaml
+apiVersion: nop.example.org/v1alpha1
+kind: NopResource
+metadata:
+  name: my-resource
+  namespace: default
+spec:
+  coolField: "Synchronized resource"
+  compositeDeletePolicy: Foreground
+```
+
+Verify the resource status:
+
+```bash
+kubectl get nopresource my-resource -o yaml
+```
+
+When the `APIBound=True` condition is present, it means that the API object has
+been synced to the provider cluster, and is being reconciled there. Whenever the
+API object in the provider cluster gets status updates (for example
+`Ready=True`), that status is synced back to the consumer cluster.
+
+Switch contexts to the provider cluster to see the API object being created:
+
+```bash
+up ctx <organization>/<space>/<group>/<control-plane>
+# or kubectl config set-context <provider-context>
+```
+
+```bash
+kubectl get nopresource my-resource -o yaml
+```
+
+Note that in the provider cluster, the API object is labeled with information about
+where it originated and with `connect.upbound.io/managed=true`.
+
+## Monitoring and troubleshooting
+
+### Check connection status
+
+```bash
+kubectl get clusterconnection
+```
+
+Expected output:
+```
+NAME                STATUS   MESSAGE
+spaces-connection   Ready    Provider controlplane is available
+```
+
+### View available APIs
+
+```bash
+kubectl get clusterconnection spaces-connection -o jsonpath='{.status.offeredAPIs[*].name}'
+```
+
+### Check API binding status
+
+```bash
+kubectl get clusterapibinding
+```
+
+### Debug resource synchronization
+
+```bash
+kubectl describe <resource-kind> <resource-name>
+```
+
+## Removal
+
+### Using the up CLI
+
+```bash
+up controlplane api-connector uninstall \
+  --consumer-kubeconfig ~/.kube/config \
+  --all
+```
+
+The `--all` flag removes all resources including connections and secrets.
+Without the flag, only the runtime-related resources are removed.
+
+:::note
+Uninstall doesn't remove any API objects in the provider control plane. If you
+want to clean up all API objects there, delete all API objects from the consumer
+cluster before API connector uninstallation, and wait for the objects to get
+deleted.
+:::
+
+
+### Using Helm
+
+```bash
+helm uninstall api-connector -n upbound-system
+```
+
+## Limitations
+
+- **Preview feature**: Subject to breaking changes. Not yet production grade.
+- **CRD updates**: CRDs are pulled once but not automatically updated. If multiple Crossplane clusters offer the same CRD API, API changes must be synchronized out of band, for example using a [Crossplane Configuration](https://docs.crossplane.io/latest/packages/).
+- **Network requirements**: Consumer cluster must have direct network access to provider cluster.
+- **Wide permissions needed in consumer cluster**: Because the API connector doesn't know up front the names of the APIs it needs to reconcile, it currently runs with full "root" privileges in the consumer cluster.
+
+- **Connector polling**: API Connector checks for drift between the consumer and provider cluster
+  periodically through polling. The poll interval can be changed with the `pollInterval` Helm value.
+
+
+## Advanced configuration
+
+### Multiple connections
+
+You can connect to multiple provider clusters simultaneously by creating multiple `ClusterConnection` resources with different names and configurations.
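+
+A sketch of what this looks like, reusing the secret kinds shown earlier (the connection and secret names below are illustrative):
+
+```yaml
+# Connection to a production provider control plane via a robot token
+apiVersion: connect.upbound.io/v1alpha1
+kind: ClusterConnection
+metadata:
+  name: prod-connection
+  namespace: upbound-system
+spec:
+  secretRef:
+    kind: UpboundRobotToken
+    name: prod-spaces-secret
+    namespace: upbound-system
+  crdManagement:
+    pullBehavior: Pull
+---
+# Connection to a staging provider cluster via a kubeconfig
+apiVersion: connect.upbound.io/v1alpha1
+kind: ClusterConnection
+metadata:
+  name: staging-connection
+  namespace: upbound-system
+spec:
+  secretRef:
+    kind: KubeConfig
+    name: staging-kubeconfig
+    namespace: upbound-system
+  crdManagement:
+    pullBehavior: Pull
+```
+
+Each `APIBinding` or `ClusterAPIBinding` then selects which connection to sync through via its `connectionRef`.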
+
+[contact]: https://www.upbound.io/contact-us
diff --git a/spaces_versioned_docs/version-v1.13/howtos/auto-upgrade.md b/spaces_versioned_docs/version-v1.13/howtos/auto-upgrade.md
new file mode 100644
index 000000000..249056fb4
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.13/howtos/auto-upgrade.md
@@ -0,0 +1,131 @@
+---
+title: Automatically upgrade control planes
+sidebar_position: 50
+description: How to configure automatic upgrades of Crossplane in a control plane
+plan: "standard"
+---
+
+
+
+Upbound Spaces can automatically upgrade the version of Upbound Crossplane in managed control plane instances. You can edit the `spec.crossplane.autoUpgrade` field in your `ControlPlane` specification with the available release channels below.
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+). The `spec.crossplane.autoUpgrade` field has been available since v1.9.
+
+For ControlPlane API specifications, see the [Spaces API Reference](../../../reference/apis/spaces-api/).
+:::
+
+| Channel | Description | Example |
+|---|---|---|
+| **None** | Disables auto upgrades. | _Uses version specified in `spec.crossplane.version`._ |
+| **Patch** | Upgrades to the latest supported patch release. | _Control plane version 1.12.2-up.2 auto upgrades to 1.12.3-up.1 upon release._ |
+| **Stable** | Default setting. Upgrades to the latest supported patch release on minor version _N-1_ where N is the latest supported minor version. | _If the latest supported minor version is 1.14, auto upgrades to the latest patch of 1.13, such as 1.13.2-up.3._ |
+| **Rapid** | Upgrades to the latest supported patch release on the latest supported minor version. | _If the latest supported minor version is 1.14, auto upgrades to the latest patch of 1.14, such as 1.14.5-up.1._ |
+
+
+:::warning
+
+The `Rapid` channel is only recommended for users willing to accept the risk of new features and potentially breaking changes.
+
+:::
+
+## Examples
+
+The specs below are examples of how to edit the `autoUpgrade` channel in your `ControlPlane` specification.
+
+To run a control plane with the `Rapid` auto upgrade channel, your spec should look like this:
+
+```yaml
+apiVersion: spaces.upbound.io/v1beta1
+kind: ControlPlane
+metadata:
+  name: example-ctp
+spec:
+  crossplane:
+    autoUpgrade:
+      channel: Rapid
+  writeConnectionSecretToRef:
+    name: kubeconfig-example-ctp
+```
+
+To run a control plane with a pinned version of Crossplane, specify it in the `version` field:
+
+```yaml
+apiVersion: spaces.upbound.io/v1beta1
+kind: ControlPlane
+metadata:
+  name: example-ctp
+spec:
+  crossplane:
+    version: 1.14.3-up.1
+    autoUpgrade:
+      channel: None
+  writeConnectionSecretToRef:
+    name: kubeconfig-example-ctp
+```
+
+## Supported Crossplane versions
+
+Spaces supports the last supported minor version and its two [preceding minor versions][preceding-minor-versions]. For example, if the last supported minor version is `1.14`, minor versions `1.13` and `1.12` are also supported. Versions older than the three most recent minor versions aren't supported. Only supported Crossplane versions are valid specifications for new control planes.
+
+Current Crossplane version support by Spaces version:
+
+| Spaces Version | Crossplane Version Min | Crossplane Version Max |
+|:--------------:|:----------------------:|:----------------------:|
+| 1.2 | 1.13 | 1.15 |
+| 1.3 | 1.13 | 1.15 |
+| 1.4 | 1.14 | 1.16 |
+| 1.5 | 1.14 | 1.16 |
+| 1.6 | 1.14 | 1.16 |
+| 1.7 | 1.14 | 1.16 |
+| 1.8 | 1.15 | 1.17 |
+| 1.9 | 1.16 | 1.18 |
+| 1.10 | 1.16 | 1.18 |
+| 1.11 | 1.16 | 1.18 |
+| 1.12 | 1.17 | 1.19 |
+
+
+Upbound offers extended support for all installed Crossplane versions released within a 12-month window since the last Spaces release. Contact your Upbound sales representative for more information on version support.
+
+
+:::warning
+
+If the auto upgrade channel is `Stable` or `Rapid`, the Crossplane version will always stay within the support window after auto upgrade. If set to `Patch` or `None`, the minor version may be outside the support window. You are responsible for upgrading to a supported version.
+
+:::
+
+To view the support status of a control plane instance, use `kubectl get ctp`.
+
+```bash
+kubectl get ctp
+NAME          CROSSPLANE VERSION   SUPPORTED   READY   MESSAGE   AGE
+example-ctp   1.13.2-up.3          True        True              31m
+```
+
+Unsupported versions return `SUPPORTED: False`.
+
+```bash
+kubectl get ctp
+NAME          CROSSPLANE VERSION   SUPPORTED   READY   MESSAGE   AGE
+example-ctp   1.11.5-up.1          False       True              31m
+```
+
+For more detail, use the `-o yaml` flag:
+
+```bash
+kubectl get controlplanes.spaces.upbound.io example-ctp -o yaml
+```
+```yaml
+status:
+  conditions:
+  ...
+  - lastTransitionTime: "2024-01-23T06:36:10Z"
+    message: Crossplane version 1.11.5-up.1 is outside of the support window.
+      Oldest supported minor version is 1.12.
+    reason: UnsupportedCrossplaneVersion
+    status: "False"
+    type: Supported
+```
+
+
+[preceding-minor-versions]: /reference/usage/lifecycle/#maintenance-and-updates
diff --git a/spaces_versioned_docs/version-v1.13/howtos/automation-and-gitops/_category_.json b/spaces_versioned_docs/version-v1.13/howtos/automation-and-gitops/_category_.json
new file mode 100644
index 000000000..b65481af6
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.13/howtos/automation-and-gitops/_category_.json
@@ -0,0 +1,8 @@
+{
+  "label": "Automation & GitOps",
+  "position": 11,
+  "collapsed": true,
+  "customProps": {
+    "plan": "business"
+  }
+}
diff --git a/spaces_versioned_docs/version-v1.13/howtos/automation-and-gitops/overview.md b/spaces_versioned_docs/version-v1.13/howtos/automation-and-gitops/overview.md
new file mode 100644
index 000000000..57eeb15fc
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.13/howtos/automation-and-gitops/overview.md
@@ -0,0 +1,138 @@
+---
+title: Automation and GitOps Overview
+sidebar_label: Overview
+sidebar_position: 1
+description: Guide to automating control plane deployments with GitOps and Argo CD
+plan: "business"
+---
+
+Automating control plane deployments with GitOps enables declarative, version-controlled infrastructure management. This section covers integrating GitOps workflows with Upbound control planes using Argo CD and related tools.
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+). GitOps patterns are compatible across all versions of Upbound Spaces.
+
+For details on deployment models and their differences, see the [Deployment Modes](../../concepts/deployment-modes.md) concept guide.
+:::
+
+## What is GitOps?
+
+GitOps is an approach for managing infrastructure by:
+- **Declaratively describing** desired system state in Git
+- **Using controllers** to continuously reconcile actual state with desired state
+- **Treating Git as the source of truth** for all configuration and deployments
+
+Upbound control planes are fully compatible with GitOps patterns, and we strongly recommend integrating GitOps in the platforms you build on Upbound.
+
+## Key Concepts
+
+### Argo CD
+[Argo CD](https://argo-cd.readthedocs.io/) is a popular Kubernetes-native GitOps controller. It continuously monitors Git repositories and automatically applies changes to your infrastructure when commits are detected.
+
+### Deployment Models
+
+The way you configure GitOps depends on your deployment model:
+
+| Aspect | Cloud Spaces | Self-Hosted Spaces |
+|--------|--------------|-------------------|
+| **Access Method** | Upbound API with tokens | Kubernetes native (secrets/kubeconfig) |
+| **Configuration** | Kubeconfig via `up` CLI | Control plane connection secrets |
+| **Setup Complexity** | More involved (API integration) | Simpler (native Kubernetes) |
+| **Typical Use Case** | Managing Upbound resources | Managing workloads on control planes |
+
+## Getting Started
+
+**Choose your path based on your deployment model:**
+
+### Cloud Spaces
+If you're using Upbound Cloud Spaces (Dedicated or Managed):
+1. Start with [GitOps with Upbound Control Planes](../cloud-spaces/gitops-on-upbound.md)
+2. Learn how to integrate Argo CD with Cloud Spaces
+3. Manage both control plane infrastructure and Upbound resources declaratively
+
+### Self-Hosted Spaces
+If you're running self-hosted Spaces:
+1. Start with [GitOps with ArgoCD in Self-Hosted Spaces](../self-hosted/gitops-with-argocd.md)
+2. Learn how to configure control plane connection secrets
+3. Manage workloads deployed to your control planes
+
+## Common Workflows
+
+### Workflow 1: Managing Control Planes with GitOps
+Create and manage control planes themselves declaratively using provider-kubernetes:
+
+```yaml
+apiVersion: kubernetes.crossplane.io/v1alpha2
+kind: Object
+metadata:
+  name: my-controlplane
+spec:
+  forProvider:
+    manifest:
+      apiVersion: spaces.upbound.io/v1beta1
+      kind: ControlPlane
+      # ... control plane configuration
+```
+
+### Workflow 2: Managing Workloads on Control Planes
+Deploy applications and resources to control planes using standard Kubernetes GitOps patterns:
+
+```yaml
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: my-app
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: my-app
+  namespace: my-app
+# ... deployment configuration
+```
+
+### Workflow 3: Managing Upbound Resources
+Use provider-upbound to manage Upbound IAM and repository resources:
+
+- Teams
+- Robots and their team memberships
+- Repositories and permissions
+
+## Advanced Topics
+
+### Argo CD Plugin for Upbound
+Learn more in the [ArgoCD Plugin guide](../self-hosted/use-argo.md) for enhanced integration with self-hosted Spaces.
+
+### Declarative Control Plane Creation
+See [Declaratively create control planes](../self-hosted/declarative-ctps.md) for advanced automation patterns.
+
+### Consuming Control Plane APIs
+Understand how to [consume control plane APIs in your app cluster](../mcp-connector-guide.md) with Argo CD.
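+
+To make the setup concrete, the sketch below shows what an Argo CD `Application` that syncs control plane configuration from Git might look like. This is a minimal, hypothetical example: the repository URL, path, and cluster context name are placeholders you'd replace with your own, and the sync policy is only a starting point.
+
+```yaml
+apiVersion: argoproj.io/v1alpha1
+kind: Application
+metadata:
+  name: control-plane-configs
+  namespace: argocd
+spec:
+  project: default
+  source:
+    repoURL: https://github.com/example-org/platform-configs.git # hypothetical repo
+    targetRevision: main
+    path: control-planes
+  destination:
+    # An Argo cluster context registered for your control plane
+    name: my-control-plane-context
+  syncPolicy:
+    automated:
+      prune: false # avoid pruning Crossplane-managed resources
+```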
+
+## Prerequisites
+
+Before implementing GitOps with control planes, ensure you have:
+
+**For Cloud Spaces:**
+- Access to Upbound Cloud Spaces
+- `up` CLI installed and configured
+- API token with appropriate permissions
+- Argo CD or similar GitOps controller running
+- Familiarity with Kubernetes RBAC
+
+**For Self-Hosted Spaces:**
+- Self-hosted Spaces deployed and running
+- Argo CD deployed in your infrastructure
+- Kubectl access to the cluster hosting Spaces
+- Understanding of control plane architecture
+
+## Next Steps
+
+1. **Choose your deployment model** above
+2. **Review the relevant getting started guide**
+3. **Set up your GitOps controller** (Argo CD)
+4. **Deploy your first automated control plane**
+5. **Explore advanced topics** as needed
+
+:::tip
+Start with simple deployments to test your GitOps workflow before moving to production. Use [simulations](../simulations.md) to preview changes before applying them.
+:::
diff --git a/spaces_versioned_docs/version-v1.13/howtos/backup-and-restore.md b/spaces_versioned_docs/version-v1.13/howtos/backup-and-restore.md
new file mode 100644
index 000000000..3b8d026cb
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.13/howtos/backup-and-restore.md
@@ -0,0 +1,530 @@
+---
+title: Backup and restore
+sidebar_position: 13
+description: Configure and manage backups in your Upbound Space.
+plan: "enterprise"
+---
+
+
+
+Upbound's _Shared Backups_ is a built-in backup and restore feature. Shared Backups lets you configure automatic schedules for taking snapshots of your control planes. You can restore data from these backups by creating new control planes. This guide explains how to use Shared Backups for disaster recovery or upgrade scenarios.
+
+:::info API Version Information & Available Versions
+This guide applies to **all supported versions** (v1.9-v1.15+).
+
+**Select your API version**:
+- [Spaces API v1.15 (Latest)](../../../next/reference/apis/spaces-api/)
+- [Spaces API v1.14](../../../1.14/reference/apis/spaces-api/)
+- [Spaces API v1.13](../../../1.13/reference/apis/spaces-api/)
+- [Spaces API v1.12](../../../1.12/reference/apis/spaces-api/)
+- [Spaces API v1.11](../../../1.11/reference/apis/spaces-api/)
+- [Spaces API v1.10](../../../1.10/reference/apis/spaces-api/)
+- [Spaces API v1.9](../../../1.9/reference/apis/spaces-api/)
+
+For version support policy and compatibility details, see the version-specific API references above.
+:::
+
+## Benefits
+
+The Shared Backups feature provides the following benefits:
+
+* Automatic backups for control planes without any operational overhead
+* Backup schedules for multiple control planes in a group
+* Availability across all hosting environments of Upbound (Disconnected, Connected, or Cloud Spaces)
+
+
+## Configure a Shared Backup Config
+
+
+[SharedBackupConfig][sharedbackupconfig] is a [group-scoped][group-scoped] resource. You should create it in a group containing one or more control planes. This resource configures the storage details and provider. Whenever a backup executes (either by schedule or manually initiated), it references a SharedBackupConfig to tell it where to store the snapshot.
+
+
+### Backup config provider
+
+
+The `spec.objectStorage.provider` and `spec.objectStorage.config` fields configure:
+
+* The object storage provider
+* The path to the provider
+* The credentials needed to communicate with the provider
+
+You can only set one provider. Upbound currently supports AWS, Azure, and GCP as providers.
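+
+The provider-specific examples in the sections below all reference a Kubernetes secret named `bucket-creds` with a `creds` key. As a minimal sketch (assuming your provider credentials are saved in a local file named `creds` and your group namespace is `default`), you could create that secret with:
+
+```shell
+# Create the credentials secret in the same namespace as the SharedBackupConfig
+kubectl create secret generic bucket-creds \
+  --namespace default \
+  --from-file=creds=./creds
+```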
+
+
+`spec.objectStorage.config` is a freeform map of configuration options for the object storage provider. See [Thanos object storage][thanos-object-storage] for more information on the formats for each supported cloud provider. `spec.bucket` and `spec.provider` override the required values in the config.
+
+
+
+#### AWS as a storage provider
+
+:::important
+For Cloud Spaces, static credentials are currently the only supported auth method.
+:::
+
+This example demonstrates how to use AWS as a storage provider for your backups:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackupConfig
+metadata:
+  name: default
+  namespace: default
+spec:
+  objectStorage:
+    provider: AWS
+    bucket: spaces-backup-bucket
+    config:
+      endpoint: s3.eu-west-2.amazonaws.com
+      region: eu-west-2
+    credentials:
+      source: Secret
+      secretRef:
+        name: bucket-creds
+        key: creds
+```
+
+
+This example assumes you've already created an S3 bucket called "spaces-backup-bucket" in the AWS `eu-west-2` region. The account credentials to access the bucket should exist in a secret in the same namespace as the Shared Backup Config.
+
+#### Azure as a storage provider
+
+:::important
+For Cloud Spaces, static credentials are currently the only supported auth method.
+:::
+
+This example demonstrates how to use Azure as a storage provider for your backups:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackupConfig
+metadata:
+  name: default
+  namespace: default
+spec:
+  objectStorage:
+    provider: Azure
+    bucket: upbound-backups
+    config:
+      storage_account: upbackupstore
+      container: upbound-backups
+      endpoint: blob.core.windows.net
+    credentials:
+      source: Secret
+      secretRef:
+        name: bucket-creds
+        key: creds
+```
+
+
+This example assumes you've already created an Azure storage account called `upbackupstore` and a blob container `upbound-backups`. The storage account key to access the blob should exist in a secret in the same namespace as the Shared Backup Config.
+
+
+#### GCP as a storage provider
+
+:::important
+For Cloud Spaces, static credentials are currently the only supported auth method.
+:::
+
+This example demonstrates how to use Google Cloud Storage as a storage provider for your backups:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackupConfig
+metadata:
+  name: default
+  namespace: default
+spec:
+  objectStorage:
+    provider: GCP
+    bucket: spaces-backup-bucket
+    credentials:
+      source: Secret
+      secretRef:
+        name: bucket-creds
+        key: creds
+```
+
+
+This example assumes you've already created a Cloud Storage bucket called "spaces-backup-bucket" and a service account with access to this bucket. The key file should exist in a secret in the same namespace as the Shared Backup Config.
+
+
+## Configure a Shared Backup Schedule
+
+
+[SharedBackupSchedule][sharedbackupschedule] is a [group-scoped][group-scoped-1] resource. You should create it in a group containing one or more control planes. This resource defines a backup schedule for control planes within its corresponding group.
+
+Below is an example of a Shared Backup Schedule that takes a daily backup of all control planes with the `environment: production` label:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackupSchedule
+metadata:
+  name: daily-schedule
+  namespace: default
+spec:
+  schedule: "@daily"
+  configRef:
+    kind: SharedBackupConfig
+    name: default
+  controlPlaneSelector:
+    labelSelectors:
+      - matchLabels:
+          environment: production
+```
+
+### Define a schedule
+
+The `spec.schedule` field is a [Cron-formatted][cron-formatted] string. Some common examples are below:
+
+
+| Entry | Description |
+| ----------------- | ------------------------------------------------------------------------------------------------- |
+| `@hourly` | Run once an hour. |
+| `@daily` | Run once a day. |
+| `@weekly` | Run once a week. |
+| `0 0/4 * * *` | Run every 4 hours. |
+| `0/15 * * * 1-5` | Run every fifteen minutes on Monday through Friday. |
+| `@every 1h30m10s` | Run every 1 hour, 30 minutes, and 10 seconds. Hour is the largest measurement of time for `@every`. |
+
+
+### Exclude resources from the backup
+
+The `spec.excludedResources` field is an array of resource names to exclude from each backup.
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackupSchedule
+metadata:
+  name: daily-schedule
+spec:
+  excludedResources:
+    - "xclusters.aws.platformref.upbound.io"
+    - "xdatabase.aws.platformref.upbound.io"
+    - "xrolepolicyattachment.iam.aws.crossplane.io"
```

+
+:::warning
+You must specify resource names in lowercase "resource.group" format (for example, `xclusters.aws.platformref.upbound.io`). Using only the resource kind (for example, `XCluster`) isn't supported.
+:::
+
+### Suspend a schedule
+
+Use the `spec.suspend` field to suspend the schedule. A suspended schedule creates no new backups, but allows running backups to complete.
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackupSchedule
+metadata:
+  name: daily-schedule
+spec:
+  suspend: true
+```
+
+### Set the time to live
+
+Set the `spec.ttl` field to define the time to live for the backup. After this time, the backup is eligible for garbage collection. If this field isn't set, the backup isn't garbage collected. The time to live is a duration, for example, `168h` for 7 days.
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackupSchedule
+metadata:
+  name: daily-schedule
+spec:
+  ttl: 168h # Backup is garbage collected after 7 days
+```
+:::tip
+By default, this setting doesn't delete uploaded files. Review the next section to define
+the deletion policy.
+:::
+
+### Define the deletion policy
+
+Set `spec.deletionPolicy` to define backup deletion actions, including the
+deletion of the backup file from the bucket. The deletion policy value defaults
+to `Orphan`. Set it to `Delete` to remove uploaded files from the bucket. For more
+information on the backup and restore process, review the [Spaces API
+documentation][spaces-api-documentation].
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackupSchedule
+metadata:
+  name: daily-schedule
+spec:
+  ttl: 168h # Backup is garbage collected after 7 days
+  deletionPolicy: Delete # Defaults to Orphan
+```
+
+### Garbage collect backups when the schedule gets deleted
+
+Set the `spec.useOwnerReferencesInBackup` field to control garbage collection of associated backups. If set to `true`, backups are garbage collected when the schedule gets deleted.
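+
+Following the pattern of the other examples in this section, a minimal sketch of a schedule whose backups get garbage collected along with it:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackupSchedule
+metadata:
+  name: daily-schedule
+spec:
+  useOwnerReferencesInBackup: true # Backups are deleted with the schedule
+```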
+
+### Control plane selection
+
+To configure which control planes in a group you want to create a backup schedule for, use the `spec.controlPlaneSelector` field. You can either use `labelSelectors` or the `names` of control planes directly. A control plane matches if any of the label selectors match.
+
+This example matches all control planes in the group that have `environment: production` as a label:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackupSchedule
+metadata:
+  name: my-backup-schedule
+spec:
+  controlPlaneSelector:
+    labelSelectors:
+      - matchLabels:
+          environment: production
+```
+
+You can use the more complex `matchExpressions` to match labels based on an expression. This example matches control planes that have the label `environment: production` or `environment: staging`:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackupSchedule
+metadata:
+  name: my-backup-schedule
+spec:
+  controlPlaneSelector:
+    labelSelectors:
+      - matchExpressions:
+          - { key: environment, operator: In, values: [production,staging] }
+```
+
+You can also specify the names of control planes directly:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackupSchedule
+metadata:
+  name: my-backup-schedule
+spec:
+  controlPlaneSelector:
+    names:
+      - controlplane-dev
+      - controlplane-staging
+      - controlplane-prod
+```
+
+
+## Configure a Shared Backup
+
+
+
+[SharedBackup][sharedbackup] is a [group-scoped][group-scoped-2] resource. You should create it in a group containing one or more control planes. This resource causes backups to occur for control planes within its corresponding group.
+
+Below is an example of a Shared Backup that takes a backup of all control planes with the `environment: production` label:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackup
+metadata:
+  name: my-backup
+  namespace: default
+spec:
+  configRef:
+    kind: SharedBackupConfig
+    name: default
+  controlPlaneSelector:
+    labelSelectors:
+      - matchLabels:
+          environment: production
+```
+
+### Exclude resources from the backup
+
+The `spec.excludedResources` field is an array of resource names to exclude from each backup.
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackup
+metadata:
+  name: my-backup
+spec:
+  excludedResources:
+    - "xclusters.aws.platformref.upbound.io"
+    - "xdatabase.aws.platformref.upbound.io"
+    - "xrolepolicyattachment.iam.aws.crossplane.io"
+```
+
+:::warning
+You must specify resource names in lowercase "resource.group" format (for example, `xclusters.aws.platformref.upbound.io`). Using only the resource kind (for example, `XCluster`) isn't supported.
+:::
+
+### Set the time to live
+
+Set the `spec.ttl` field to define the time to live for the backup. After this time, the backup is eligible for garbage collection. If this field isn't set, the backup isn't garbage collected. The time to live is a duration, for example, `168h` for 7 days.
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackup
+metadata:
+  name: my-backup
+spec:
+  ttl: 168h # Backup is garbage collected after 7 days
+```
+
+
+### Garbage collect backups on Shared Backup deletion
+
+
+
+Set the `spec.useOwnerReferencesInBackup` field to define whether to garbage collect associated backups when a shared backup gets deleted. If set to `true`, backups are garbage collected when the shared backup gets deleted.
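+
+To confirm what a shared backup selected and produced, you can list the group-scoped resources. This is a sketch that assumes the standard plural resource names for these kinds and a group namespace of `default`:
+
+```shell
+# List shared backups and the per-control-plane backups they created
+kubectl get sharedbackups,backups -n default
+```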
+
+### Control plane selection
+
+To configure which control planes in a group you want to create a backup for, use the `spec.controlPlaneSelector` field. You can either use `labelSelectors` or the `names` of control planes directly. A control plane matches if any of the label selectors match.
+
+This example matches all control planes in the group that have `environment: production` as a label:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackup
+metadata:
+  name: my-backup
+spec:
+  controlPlaneSelector:
+    labelSelectors:
+      - matchLabels:
+          environment: production
+```
+
+You can use the more complex `matchExpressions` to match labels based on an expression. This example matches control planes that have the label `environment: production` or `environment: staging`:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackup
+metadata:
+  name: my-backup
+spec:
+  controlPlaneSelector:
+    labelSelectors:
+      - matchExpressions:
+          - { key: environment, operator: In, values: [production,staging] }
+```
+
+You can also specify the names of control planes directly:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackup
+metadata:
+  name: my-backup
+spec:
+  controlPlaneSelector:
+    names:
+      - controlplane-dev
+      - controlplane-staging
+      - controlplane-prod
+```
+
+## Create a manual backup
+
+[Backup][backup] is a [group-scoped][group-scoped-3] resource that causes a single backup to occur for a control plane in its corresponding group.
+
+Below is an example of a manual Backup of a control plane:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: Backup
+metadata:
+  name: my-backup
+  namespace: default
+spec:
+  configRef:
+    kind: SharedBackupConfig
+    name: default
+  controlPlane: my-awesome-ctp
+  deletionPolicy: Delete
+```
+
+The backup's `spec.deletionPolicy` defines backup deletion actions,
+including the deletion of the backup file from the bucket. The value
+defaults to `Orphan`. Set it to `Delete` to remove uploaded files
+from the bucket.
+For more information on the backup and restore process, review the [Spaces API documentation][spaces-api-documentation-1].
+
+
+### Choose a control plane to backup
+
+The `spec.controlPlane` field defines which control plane to execute a backup against.
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: Backup
+metadata:
+  name: my-backup
+  namespace: default
+spec:
+  controlPlane: my-awesome-ctp
+```
+
+If the control plane doesn't exist, the backup fails after multiple retry attempts.
+
+### Exclude resources from the backup
+
+The `spec.excludedResources` field is an array of resource names to exclude from the manual backup.
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: Backup
+metadata:
+  name: my-backup
+spec:
+  excludedResources:
+    - "xclusters.aws.platformref.upbound.io"
+    - "xdatabase.aws.platformref.upbound.io"
+    - "xrolepolicyattachment.iam.aws.crossplane.io"
+```
+
+:::warning
+You must specify resource names in lowercase "resource.group" format (for example, `xclusters.aws.platformref.upbound.io`). Using only the resource kind (for example, `XCluster`) isn't supported.
+:::
+
+### Set the time to live
+
+Set the `spec.ttl` field to define the time to live for the backup. After this time, the backup is eligible for garbage collection. If this field isn't set, the backup isn't garbage collected. The time to live is a duration, for example, `168h` for 7 days.
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: Backup
+metadata:
+  name: my-backup
+spec:
+  ttl: 168h # Backup is garbage collected after 7 days
+```
+
+## Restore a control plane from a backup
+
+You can restore a control plane's state from a backup. Below is an example of creating a new control plane from a previous backup called `restore-me`:
+
+
+```yaml
+apiVersion: spaces.upbound.io/v1beta1
+kind: ControlPlane
+metadata:
+  name: my-awesome-restored-ctp
+  namespace: default
+spec:
+  restore:
+    source:
+      kind: Backup
+      name: restore-me
+```
+
+
+[group-scoped]: /spaces/concepts/groups
+[group-scoped-1]: /spaces/concepts/groups
+[group-scoped-2]: /spaces/concepts/groups
+[group-scoped-3]: /spaces/concepts/groups
+[sharedbackupconfig]: /reference/apis/spaces-api/latest
+[thanos-object-storage]: https://thanos.io/tip/thanos/storage.md/
+[sharedbackupschedule]: /reference/apis/spaces-api/latest
+[cron-formatted]: https://en.wikipedia.org/wiki/Cron
+[spaces-api-documentation]: /reference/apis/spaces-api/v1_9
+[sharedbackup]: /reference/apis/spaces-api/latest
+[backup]: /reference/apis/spaces-api/latest
+[spaces-api-documentation-1]: /reference/apis/spaces-api/v1_9
+
+
+
diff --git a/spaces_versioned_docs/version-v1.13/howtos/cloud-spaces/_category_.json b/spaces_versioned_docs/version-v1.13/howtos/cloud-spaces/_category_.json
new file mode 100644
index 000000000..1e1869a38
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.13/howtos/cloud-spaces/_category_.json
@@ -0,0 +1,10 @@
+{
+  "label": "Cloud Spaces",
+  "position": 1,
+  "collapsed": true,
+  "customProps": {
+    "plan": "standard"
+  }
+}
+
+
diff --git a/spaces_versioned_docs/version-v1.13/howtos/cloud-spaces/dedicated-spaces-deployment.md b/spaces_versioned_docs/version-v1.13/howtos/cloud-spaces/dedicated-spaces-deployment.md
new file mode 100644
index 000000000..ebad9493e
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.13/howtos/cloud-spaces/dedicated-spaces-deployment.md
@@ -0,0 +1,33 @@
+---
+title: Dedicated Spaces
+sidebar_position: 4
+description: A guide to Upbound Dedicated Spaces
+plan: business
+---
+
+
+## Benefits
+
+Dedicated Spaces offer the following benefits:
+
+- **Single-tenancy.** A control plane space where Upbound guarantees you're the only tenant operating in the environment.
+- **Connectivity to your private network.** Establish secure network connections between your Dedicated Cloud Space running in Upbound and your own resources behind your private network.
+- **Reduced Overhead.** Offload day-to-day operational burdens to Upbound while focusing on your job of building your platform.
+
+## Architecture
+
+A Dedicated Space is a deployment of the Upbound Spaces software inside an
+Upbound-controlled cloud account and network. The control planes you run in a
+Dedicated Space belong to your organization as the environment's only tenant.
+
+The diagram below illustrates the high-level architecture of Upbound Dedicated Spaces:
+
+![Upbound Dedicated Spaces arch](/img/managed-arch-gcp.png)
+
+## How to get access to Dedicated Spaces
+
+If you're interested in Upbound Dedicated Spaces, contact
+[Upbound][contact-us]. We can chat more about your
+requirements and see if Dedicated Spaces are a good fit for you.
+ +[contact-us]: https://www.upbound.io/contact-us +[managed-space]: /spaces/howtos/self-hosted/managed-spaces-deployment diff --git a/spaces_versioned_docs/version-v1.13/howtos/cloud-spaces/gitops-on-upbound.md b/spaces_versioned_docs/version-v1.13/howtos/cloud-spaces/gitops-on-upbound.md new file mode 100644 index 000000000..fa59a8dce --- /dev/null +++ b/spaces_versioned_docs/version-v1.13/howtos/cloud-spaces/gitops-on-upbound.md @@ -0,0 +1,318 @@ +--- +title: GitOps with Upbound Control Planes +sidebar_position: 80 +description: An introduction to doing GitOps with control planes on Upbound Cloud Spaces +tier: "business" +--- + +:::info Deployment Model +This guide applies to **Upbound Cloud Spaces** (Dedicated and Managed Spaces). For self-hosted Spaces deployments, see [GitOps with ArgoCD in Self-Hosted Spaces](/spaces/howtos/self-hosted/gitops-with-argocd/). +::: + +GitOps is an approach for managing a system by declaratively describing desired resources' configurations in Git and using controllers to realize the desired state. Upbound's control planes are compatible with this pattern and it's strongly recommended you integrate GitOps in the platforms you build on Upbound. + + +## Integrate with Argo CD + + +[Argo CD][argo-cd] is a project in the Kubernetes ecosystem commonly used for GitOps. You can use it in tandem with Upbound control planes to achieve GitOps flows. The sections below explain how to integrate these tools with Upbound. + +### Generate a kubeconfig for your control plane + +Use the up CLI to [generate a kubeconfig][generate-a-kubeconfig] for your control plane. + +```bash +up ctx /// -f - > context.yaml +``` + +### Create an API token + + +You need a personal access token (PAT). You create PATs on a per-user basis in the Upbound Console. Go to [My Account - API tokens][my-account-api-tokens] and select Create New Token. Give the token a name and save the secret value to somewhere safe. + + +### Add the up CLI init container to Argo + +Create a new file called `up-plugin-values.yaml` and paste the following YAML: + +```yaml +controller: + volumes: + - name: up-plugin + emptyDir: {} + - name: up-home + emptyDir: {} + + volumeMounts: + - name: up-plugin + mountPath: /usr/local/bin/up + subPath: up + - name: up-home + mountPath: /home/argocd/.up + + initContainers: + - name: up-plugin + image: xpkg.upbound.io/upbound/up-cli:v0.39.0 + command: ["cp"] + args: + - /usr/local/bin/up + - /plugin/up + volumeMounts: + - name: up-plugin + mountPath: /plugin + +server: + volumes: + - name: up-plugin + emptyDir: {} + - name: up-home + emptyDir: {} + + volumeMounts: + - name: up-plugin + mountPath: /usr/local/bin/up + subPath: up + - name: up-home + mountPath: /home/argocd/.up + + initContainers: + - name: up-plugin + image: xpkg.upbound.io/upbound/up-cli:v0.39.0 + command: ["cp"] + args: + - /usr/local/bin/up + - /plugin/up + volumeMounts: + - name: up-plugin + mountPath: /plugin +``` + +### Install or upgrade Argo using the values file + +Install or upgrade Argo via Helm, including the values from the `up-plugin-values.yaml` file: + +```bash +helm upgrade --install -n argocd -f up-plugin-values.yaml --reuse-values argocd argo/argo-cd +``` + + +### Configure Argo CD + + +To configure Argo CD for Annotation resource tracking, edit the Argo CD ConfigMap in the Argo CD namespace. +Add `application.resourceTrackingMethod: annotation` to the data section as below. +This configuration turns off Argo CD auto pruning, preventing the deletion of Crossplane resources. 
+
+Next, configure the [auto respect RBAC for the Argo CD controller][auto-respect-rbac-for-the-argo-cd-controller].
+By default, Argo CD attempts to discover some Kubernetes resource types that don't exist in a control plane.
+You must configure Argo CD to respect the cluster's RBAC rules so that Argo CD can sync.
+Add `resource.respectRBAC: normal` to the data section as below.
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: argocd-cm
+data:
+  ...
+  application.resourceTrackingMethod: annotation
+  resource.respectRBAC: normal
+```
+
+:::tip
+The `resource.respectRBAC` configuration above tells Argo to respect RBAC for _all_ cluster contexts. If you're using an Argo CD instance to manage more than only control planes, you should consider changing the `clusters` string match for the configuration to apply only to control planes. For example, if every control plane context name followed the convention of being named `controlplane-`, you could set the string match to be `controlplane-*`.
+:::
+
+
+### Create a cluster context definition
+
+
+Replace the variables in the following manifest to configure a new Argo cluster context definition.
+
+To configure Argo for a control plane in a Connected Space, replace `stringData.server` with the ingress URL of the control plane. This URL is the output of `up ctx`.
+
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  name: my-control-plane
+  namespace: argocd
+  labels:
+    argocd.argoproj.io/secret-type: cluster
+type: Opaque
+stringData:
+  name: my-control-plane-context
+  server: https://.spaces.upbound.io/apis/spaces.upbound.io/v1beta1/namespaces//controlplanes//k8s
+  config: |
+    {
+      "execProviderConfig": {
+        "apiVersion": "client.authentication.k8s.io/v1",
+        "command": "up",
+        "args": [ "org", "token" ],
+        "env": {
+          "ORGANIZATION": "",
+          "UP_TOKEN": ""
+        }
+      },
+      "tlsClientConfig": {
+        "insecure": false,
+        "caData": ""
+      }
+    }
+```
+
+
+## GitOps for Upbound resources
+
+
+Like any other cloud service, you can drive the lifecycle of Upbound Cloud resources with Crossplane. This lets you establish GitOps flows to declaratively create and manage:
+
+- [control plane groups][control-plane-groups]
+- [control planes][control-planes]
+- [Upbound IAM resources][upbound-iam-resources]
+
+Use a control plane installed with [provider-upbound][provider-upbound] and [provider-kubernetes][provider-kubernetes] to achieve this.
+
+### Provider-upbound
+
+[Provider-upbound][provider-upbound-2] is a Crossplane provider built by Upbound to interact with Upbound resources. Use _provider-upbound_ to declaratively create and manage the lifecycle of IAM resources and repositories:
+
+- [Robots][robots] and their membership to teams
+- [Teams][teams]
+- [Repositories][repositories] and [permissions][permissions] on those repositories.
+
+:::tip
+This provider defines managed resources for control planes, their auth, and permissions. These resources are only applicable for customers who run in Upbound's **Legacy Spaces** control plane hosting environments. Other customers should use provider-kubernetes, explained below, to manage the lifecycle of control planes with Crossplane.
+:::
+
+### Provider-kubernetes
+
+[Provider-kubernetes][provider-kubernetes-3] is a Crossplane provider that defines an [Object][object] resource. Use _Objects_ as general-purpose resources to wrap _any_ Kubernetes resource for Crossplane to manage.
+
+Upbound [Space APIs][space-apis] are Kubernetes-like APIs that support most Kubernetes-style API concepts.
You can use kubectl or any other Kubernetes-compatible tooling to interact with the API. This means you can use _provider-kubernetes_ to drive interactions with Space APIs. + +:::warning +When interacting with a Cloud Space's API, the Kubernetes [watch][watch] feature **isn't implemented.** Argo CD requires _watch_ support to function as expected, meaning you can't point Argo directly at a Cloud Space until it's implemented. +::: + +Use _provider-kubernetes_ to declaratively drive interactions with all [Space APIs][space-apis-1]. Wrap the desired API resource in an _Object_. See the example below for a control plane: + +```yaml +apiVersion: kubernetes.crossplane.io/v1alpha2 +kind: Object +metadata: + name: my-controlplane +spec: + forProvider: + manifest: + apiVersion: spaces.upbound.io/v1beta1 + kind: ControlPlane + metadata: + name: my-controlplane + namespace: default + spec: + crossplane: + autoUpgrade: + channel: Rapid +``` + +[Control plane groups][control-plane-groups-2] are a special case because they technically map to an underlying Kubernetes namespace. You should create a `kind: namespace` with the `spaces.upbound.io/group` label to create a control plane group in a Space. See the example below: + +```yaml +apiVersion: kubernetes.crossplane.io/v1alpha2 +kind: Object +metadata: + name: group1 +spec: + forProvider: + manifest: + apiVersion: v1 + kind: Namespace + metadata: + name: group1 + labels: + spaces.upbound.io/group: "true" + spec: {} +``` + +### Configure auth for provider-kubernetes + +Like any other Crossplane provider, _provider-kubernetes_ requires a valid [ProviderConfig][providerconfig] to authenticate with Upbound before interacting with its APIs. Follow the steps below to configure auth for a ProviderConfig on a control plane that you want to use to interact with Upbound resources. + +1. Define an environment variable for the name of your Upbound org account. Use `up org list` to retrieve this value. +```ini +export UPBOUND_ACCOUNT="" +``` + +2. Create a [personal access token][personal-access-token] and store it as an environment variable. +```shell +export UPBOUND_TOKEN="" +``` + +3. Log on to Upbound. +```shell +up login +``` + +4. Create a kubeconfig for the desired Cloud Space instance you want to interact with. +```shell +export CONTROLPLANE_CONFIG=/tmp/controlplane-kubeconfig +KUBECONFIG=$CONTROLPLANE_CONFIG up ctx $UPBOUND_ACCOUNT/upbound-gcp-us-west-1 # Replace this path with whichever Cloud Space you want to communicate with. +``` + +5. On the control plane you want to use to interact with Upbound resources, create a secret containing the credentials: +```shell +kubectl -n crossplane-system create secret generic cluster-config --from-file=kubeconfig=$CONTROLPLANE_CONFIG +kubectl -n crossplane-system create secret generic upbound-credentials --from-literal=token=$UPBOUND_TOKEN +``` + +6. Create a ProviderConfig that references the credentials created in the prior step. Create this resource in your control plane: +```yaml +apiVersion: kubernetes.crossplane.io/v1alpha1 +kind: ProviderConfig +metadata: + name: default +spec: + credentials: + source: Secret + secretRef: + namespace: crossplane-system + name: cluster-config + key: kubeconfig + identity: + type: UpboundTokens + source: Secret + secretRef: + name: upbound-credentials + namespace: crossplane-system + key: token +``` + +You can now create _Objects_ in the control plane which wrap Space APIs. 
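+
+For instance, here's a sketch of an _Object_ that wraps another Space API, a `SharedBackupSchedule`. The schedule fields are illustrative and assume a SharedBackupConfig named `default` already exists in the group:
+
+```yaml
+apiVersion: kubernetes.crossplane.io/v1alpha2
+kind: Object
+metadata:
+  name: daily-backup-schedule
+spec:
+  forProvider:
+    manifest:
+      apiVersion: spaces.upbound.io/v1alpha1
+      kind: SharedBackupSchedule
+      metadata:
+        name: daily-schedule
+        namespace: default
+      spec:
+        schedule: "@daily"
+        configRef:
+          kind: SharedBackupConfig
+          name: default
+```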
+
+[generate-a-kubeconfig]: /manuals/cli/concepts/contexts
+[control-plane-groups]: /spaces/concepts/groups
+[control-planes]: /spaces/concepts/control-planes
+[upbound-iam-resources]: /manuals/platform/concepts/identity-management
+[space-apis]: /reference/apis/spaces-api/v1_9
+[space-apis-1]: /reference/apis/spaces-api/v1_9
+[control-plane-groups-2]: /spaces/concepts/groups
+
+
+[argo-cd]: https://argo-cd.readthedocs.io/en/stable/
+[my-account-api-tokens]: https://accounts.upbound.io/settings/tokens
+[auto-respect-rbac-for-the-argo-cd-controller]: https://argo-cd.readthedocs.io/en/stable/operator-manual/declarative-setup/#auto-respect-rbac-for-controller
+[spec-writeconnectionsecrettoref]: /reference/apis/spaces-api/latest
+[auto-respect-rbac-for-the-argo-cd-controller-1]: https://argo-cd.readthedocs.io/en/stable/operator-manual/declarative-setup/#auto-respect-rbac-for-controller
+[provider-upbound]: https://marketplace.upbound.io/providers/upbound/provider-upbound
+[provider-kubernetes]: https://marketplace.upbound.io/providers/upbound/provider-kubernetes
+[provider-upbound-2]: https://marketplace.upbound.io/providers/upbound/provider-upbound
+[robots]: https://marketplace.upbound.io/providers/upbound/provider-upbound/v0.8.0/resources/iam.upbound.io/Robot/v1alpha1
+[teams]: https://marketplace.upbound.io/providers/upbound/provider-upbound/v0.8.0/resources/iam.upbound.io/Team/v1alpha1
+[repositories]: https://marketplace.upbound.io/providers/upbound/provider-upbound/v0.8.0/resources/repository.upbound.io/Repository/v1alpha1
+[permissions]: https://marketplace.upbound.io/providers/upbound/provider-upbound/v0.8.0/resources/repository.upbound.io/Permission/v1alpha1
+[provider-kubernetes-3]: https://marketplace.upbound.io/providers/upbound/provider-kubernetes
+[object]: https://marketplace.upbound.io/providers/upbound/provider-kubernetes/v0.17.0/resources/kubernetes.crossplane.io/Object/v1alpha2
+[watch]: https://kubernetes.io/docs/reference/using-api/api-concepts/#watch-bookmarks
+[providerconfig]: https://marketplace.upbound.io/providers/upbound/provider-kubernetes/v0.17.0/resources/kubernetes.crossplane.io/ProviderConfig/v1alpha1
+[personal-access-token]: https://accounts.upbound.io/settings/tokens
diff --git a/spaces_versioned_docs/version-v1.13/howtos/control-plane-topologies.md b/spaces_versioned_docs/version-v1.13/howtos/control-plane-topologies.md
new file mode 100644
index 000000000..9020e5a41
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.13/howtos/control-plane-topologies.md
@@ -0,0 +1,566 @@
+---
+title: Control Plane Topologies
+sidebar_position: 15
+description: Configure scheduling of composites to remote control planes
+---
+
+:::info API Version Information
+This guide is for the Control Plane Topology feature, which is in **private preview**. For interested customers with access to this feature, it applies to v1.12+.
+
+For related API specifications and details on feature availability, see the [Spaces API Reference](../../../reference/apis/spaces-api/).
+:::
+
+:::important
+This feature is in private preview for select customers in Upbound Spaces. If you're interested in this deployment mode, please [contact us](https://www.upbound.io/support/contact).
+:::
+
+Upbound's _Control Plane Topology_ feature lets you build and deploy a platform
+of multiple control planes. These control planes work together for a unified platform
+experience.
+
+
+With the _Topology_ feature, you can install resource APIs that are
+reconciled by other control planes and configure the routing that occurs between
+control planes. You can also build compositions that reference other resources
+running on your control plane or elsewhere in Upbound.
+
+This guide explains how to use Control Plane Topology APIs to install and
+configure remote APIs and to build powerful compositions that reference other
+resources.
+
+## Benefits
+
+The Control Plane Topology feature provides the following benefits:
+
+* Decouple your platform architecture into independent offerings to improve your platform's software development lifecycle.
+* Install composite APIs from Configurations as CRDs which are fulfilled and reconciled by other control planes.
+* Route APIs to other control planes by configuring an _Environment_ resource, which defines a set of routable dimensions.
+
+## How it works
+
+
+Imagine the scenario where you want to let a user reference a subnet when creating a database instance. To your control plane, the `kind: database` and `kind: subnet` are independent resources. To you as the composition author, these resources have an important relationship. It may be that:
+
+- you don't want your user to ever be able to create a database without specifying a subnet.
+- you want to let them create a subnet when they create the database, if it doesn't exist.
+- you want to allow them to reuse a subnet that got created elsewhere or gets shared by another user.
+
+In each of these scenarios, you must resort to writing complex composition logic
+to handle each case. The problem is compounded when the resource exists in a
+context separate from the current control plane's context. Imagine a scenario
+where one control plane manages Database resources and a second control plane
+manages networking resources. With the _Topology_ feature, you can offload these
+concerns to Upbound machinery.
+
+
+![Control Plane Topology feature arch](/img/topology-arch.png)
+
+## Prerequisites
+
+Enable the Control Plane Topology feature in the Space you plan to run your control plane in:
+
+- Cloud Spaces: Not available yet
+- Connected Spaces: Space administrator must enable this feature
+- Disconnected Spaces: Space administrator must enable this feature
+
+
+
+## Compose resources with _ReferencedObjects_
+
+
+
+_ReferencedObject_ is a resource type available in an Upbound control plane that lets you reference other Kubernetes resources in Upbound.
+
+:::tip
+This feature is useful for composing resources that exist in a
+remote context, like another control plane. You can also use
+_ReferencedObjects_ to resolve references to any other Kubernetes object
+in the current control plane context. This could be a secret, another Crossplane
+resource, or more.
+:::
+
+### Declare the resource reference in your XRD
+
+To compose a _ReferencedObject_, you should start by adding a resource reference
+in your Composite Resource Definition (XRD). The convention for the resource
+reference follows the shape shown below:
+
+```yaml
+Ref:
+  type: object
+  properties:
+    apiVersion:
+      type: string
+      default: ""
+      enum: [ "" ]
+    kind:
+      type: string
+      default: ""
+      enum: [ "" ]
+    grants:
+      type: array
+      default: [ "Observe" ]
+      items:
+        type: string
+        enum: [ "Observe", "Create", "Update", "Delete", "*" ]
+    name:
+      type: string
+    namespace:
+      type: string
+  required:
+    - name
+```
+
+The name of the `Ref` field should reflect the kind of resource you want to reference.
The `apiVersion` and `kind` properties should be the API version and kind of the resource you want to reference.
+
+The `name` and `namespace` strings are inputs that let your users specify the resource instance.
+
+#### Grants
+
+The `grants` field is a special array that lets you give users the power to influence the behavior of the referenced resource. You can configure which of the available grants you let your user select and which it defaults to. Similar in behavior to [Crossplane management policies][crossplane-management-policies], each grant value does the following:
+
+- **Observe:** The composite may observe the state of the referenced resource.
+- **Create:** The composite may create the referenced resource if it doesn't exist.
+- **Update:** The composite may update the referenced resource.
+- **Delete:** The composite may delete the referenced resource.
+- **\*:** The composite has full control over the referenced resource.
+
+Here are some examples that show how it looks in practice:
+ +Show example for defining the reference to another composite resource + +```yaml +apiVersion: apiextensions.crossplane.io/v1 +kind: CompositeResourceDefinition +metadata: + name: xsqlinstances.database.platform.upbound.io +spec: + type: object + properties: + parameters: + type: object + properties: + networkRef: + type: object + properties: + apiVersion: + type: string + default: "networking.platform.upbound.io" + enum: [ "networking.platform.upbound.io" ] + grants: + type: array + default: [ "Observe" ] + items: + type: string + enum: [ "Observe" ] + kind: + type: string + default: "Network" + enum: [ "Network" ] + name: + type: string + namespace: + type: string + required: + - name +``` + +
+ + +
+Show example for defining the reference to a secret +```yaml +apiVersion: apiextensions.crossplane.io/v1 +kind: CompositeResourceDefinition +metadata: + name: xsqlinstances.database.platform.upbound.io +spec: + type: object + properties: + parameters: + type: object + properties: + secretRef: + type: object + properties: + apiVersion: + type: string + default: "v1" + enum: [ "v1" ] + grants: + type: array + default: [ "Observe" ] + items: + type: string + enum: [ "Observe", "Create", "Update", "Delete", "*" ] + kind: + type: string + default: "Secret" + enum: [ "Secret" ] + name: + type: string + namespace: + type: string + required: + - name +``` +
+ +### Manually add the jsonPath + +:::important +This step is a known limitation of the preview. We're working on tooling that +removes the need for authors to do this step. +::: + +During the preview timeframe of this feature, you must add an annotation by hand +to the XRD. In your XRD's `metadata.annotations`, set the +`references.upbound.io/schema` annotation. It should be a JSON string in the +following format: + +```json +{ + "apiVersion": "references.upbound.io/v1alpha1", + "kind": "ReferenceSchema", + "references": [ + { + "jsonPath": ".spec.parameters.secretRef", + "kinds": [ + { + "apiVersion": "v1", + "kind": "Secret" + } + ] + } + ] +} +``` + +Flatten this JSON into a string and set the annotation on your XRD. View the +example below for an illustration: + +
+Show example setting the references.upbound.io/schema annotation +```yaml +apiVersion: apiextensions.crossplane.io/v1 +kind: CompositeResourceDefinition +metadata: + name: xthings.networking.acme.com + annotations: + references.upbound.io/schema: '{"apiVersion":"references.upbound.io/v1alpha1","kind":"ReferenceSchema","references":[{"jsonPath":".spec.secretRef","kinds":[{"apiVersion":"v1","kind":"Secret"}]},{"jsonPath":".spec.configMapRef","kinds":[{"apiVersion":"v1","kind":"ConfigMap"}]}]}' +``` +
+ +
+Show example for setting multiples references in the references.upbound.io/schema annotation +```yaml +apiVersion: apiextensions.crossplane.io/v1 +kind: CompositeResourceDefinition +metadata: + name: xthings.networking.acme.com + annotations: + references.upbound.io/schema: '{"apiVersion":"references.upbound.io/v1alpha1","kind":"ReferenceSchema","references":[{"jsonPath":".spec.parameters.secretRef","kinds":[{"apiVersion":"v1","kind":"Secret"}]},{"jsonPath":".spec.parameters.configMapRef","kinds":[{"apiVersion":"v1","kind":"ConfigMap"}]}]}' +``` +
+ + +You can use a VSCode extension like [vscode-pretty-json][vscode-pretty-json] to make this task easier. + + +### Compose a _ReferencedObject_ + +To pair with the resource reference declared in your XRD, you must compose the referenced resource. Use the _ReferencedObject_ resource type to bring the resource into your composition. _ReferencedObject_ has the following schema: + +```yaml +apiVersion: references.upbound.io/v1alpha1 +kind: ReferencedObject +spec: + managementPolicies: + - Observe + deletionPolicy: Orphan + composite: + apiVersion: + kind: + name: + jsonPath: .spec.parameters.secretRef +``` + +The `spec.composite.apiVersion` and `spec.composite.kind` should match the API version and kind of the `compositeTypeRef` declared in your composition. The `spec.composite.name` should be the name of the composite resource instance. + +The `spec.composite.jsonPath` should be the path to the root of the resource ref you declared in your XRD. + +
+Show example for composing a resource reference to a secret + +```yaml +apiVersion: apiextensions.crossplane.io/v1 +kind: Composition +metadata: + name: demo-composition +spec: + compositeTypeRef: + apiVersion: networking.acme.com/v1alpha1 + kind: XThing + mode: Pipeline + pipeline: + - step: patch-and-transform + functionRef: + name: crossplane-contrib-function-patch-and-transform + input: + apiVersion: pt.fn.crossplane.io/v1beta1 + kind: Resources + resources: + - name: secret-ref-object + base: + apiVersion: references.upbound.io/v1alpha1 + kind: ReferencedObject + spec: + managementPolicies: + - Observe + deletionPolicy: Orphan + composite: + apiVersion: networking.acme.com/v1alpha1 + kind: XThing + name: TO_BE_PATCHED + jsonPath: .spec.parameters.secretRef + patches: + - type: FromCompositeFieldPath + fromFieldPath: metadata.name + toFieldPath: spec.composite.name +``` +
+
+When you declare a resource reference in your XRD, Upbound handles resolution of the desired resource.
+
+## Deploy APIs
+
+To configure routing resource requests between control planes, you need to deploy APIs in at least two control planes.
+
+### Deploy into a service-level control plane
+
+Package the APIs you build into a Configuration package and deploy it on a
+control plane in an Upbound Space. In Upbound, it's common to refer to the
+control plane where the Configuration package is deployed as a **service-level
+control plane**. This control plane runs the controllers that process the API
+requests and provision underlying resources. In a later section, you learn how
+you can use _Topology_ features to [configure routing][configure-routing].
+
+### Deploy as Remote APIs on a platform control plane
+
+You should use the same package source as deployed in the **service-level
+control planes**, but this time deploy the Configuration in a separate control
+plane as a _RemoteConfiguration_. The _RemoteConfiguration_ installs Kubernetes
+CustomResourceDefinitions for the APIs defined in the Configuration package, but
+no controllers get deployed.
+
+### Install a _RemoteConfiguration_
+
+_RemoteConfiguration_ is a resource type available in Upbound managed control
+planes that acts like a Crossplane [Configuration][configuration]
+package. Unlike standard Crossplane Configurations, which install XRDs,
+compositions, and functions into a desired control plane, _RemoteConfigurations_
+install only the CRDs for claimable composite resource types.
+
+#### Install directly
+
+Install a _RemoteConfiguration_ by defining the following and applying it to
+your control plane:
+
+```yaml
+apiVersion: pkg.upbound.io/v1alpha1
+kind: RemoteConfiguration
+metadata:
+  name: 
+spec:
+  package: 
+```
+
+#### Declare as a project dependency
+
+You can declare _RemoteConfigurations_ as dependencies in your control plane's
+[project file][project-file]. Use the up CLI to add the dependency, providing
+the `--remote` flag:
+
+```bash
+up dep add --remote
+```
+
+This command adds a declaration in the `spec.apiDependencies` stanza of your
+project's `upbound.yaml` as demonstrated below:
+
+```yaml
+apiVersion: meta.dev.upbound.io/v1alpha1
+kind: Project
+metadata:
+  name: service-controlplane
+spec:
+  apiDependencies:
+    - configuration: xpkg.upbound.io/upbound/remote-configuration
+      version: '>=v0.0.0'
+      dependsOn:
+        - provider: xpkg.upbound.io/upbound/provider-kubernetes
+          version: '>=v0.0.0'
+```
+
+Like a Configuration, a _RemoteConfigurationRevision_ gets created when the
+package gets installed on a control plane. Unlike Configurations, XRDs and
+compositions **don't** get installed by a _RemoteConfiguration_. Only the CRDs
+for claimable composite types get installed, and Crossplane thereafter manages
+their lifecycle. You can tell when a CRD got installed by a
+_RemoteConfiguration_ because it has the `internal.scheduling.upbound.io/remote:
+true` label:
+
+```yaml
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  name: things.networking.acme.com
+  labels:
+    internal.scheduling.upbound.io/remote: "true"
+```
+
+## Use an _Environment_ to route resources
+
+_Environment_ is a resource type available in Upbound control planes that works
+in tandem with resources installed by _RemoteConfigurations_. _Environment_ is a
+namespace-scoped resource that lets you configure how to route remote resources
+to other control planes by a set of user-defined dimensions.
+
+### Define a routing dimension
+
+To establish a routing dimension between two control planes, you must do two
+things:
+
+1. Annotate the service control plane with the name and value of a dimension.
+2. Configure an environment on another control plane with a dimension matching the field and value of the service control plane.
+
+The example below demonstrates the creation of a service control plane with a
+`region` dimension:
+
+```yaml
+apiVersion: spaces.upbound.io/v1beta1
+kind: ControlPlane
+metadata:
+  labels:
+    dimension.scheduling.upbound.io/region: "us-east-1"
+  name: prod-1
+  namespace: default
+spec:
+```
+
+Upbound's Spaces controller keeps an inventory of all declared dimensions and
+listens for control planes to route to them.
+
+### Create an _Environment_
+
+Next, create an _Environment_ on a separate control plane, referencing the
+dimension from before. The example below demonstrates routing all remote
+resource requests in the `default` namespace of the control plane based on a
+single `region` dimension:
+
+```yaml
+apiVersion: scheduling.upbound.io/v1alpha1
+kind: Environment
+metadata:
+  name: default
+  namespace: default
+spec:
+  dimensions:
+    region: us-east-1
+```
+
+You can specify whichever dimensions you want. The example below demonstrates
+multiple dimensions:
+
+```yaml
+apiVersion: scheduling.upbound.io/v1alpha1
+kind: Environment
+metadata:
+  name: default
+  namespace: default
+spec:
+  dimensions:
+    region: us-east-1
+    env: prod
+    offering: databases
+```
+
+For the routing controller to match, _all_ dimensions must match for a
+given service control plane.
+
+You can specify dimension overrides on a per-resource-group basis. This lets you
+configure default routing rules for a given _Environment_ and override routing
+on a per-offering basis.
+
+```yaml
+apiVersion: scheduling.upbound.io/v1alpha1
+kind: Environment
+metadata:
+  name: default
+  namespace: default
+spec:
+  dimensions:
+    region: us-east-1
+  resourceGroups:
+    - name: database.platform.upbound.io # database
+      dimensions:
+        region: "us-east-1"
+        env: "prod"
+        offering: "databases"
+    - name: networking.platform.upbound.io # networks
+      dimensions:
+        region: "us-east-1"
+        env: "prod"
+        offering: "networks"
+```
+
+### Confirm the configured route
+
+After you create an _Environment_ on a control plane, the selected routes get
+reported in the _Environment's_ `.status.resourceGroups`, as illustrated
+below:
+
+```yaml
+apiVersion: scheduling.upbound.io/v1alpha1
+kind: Environment
+metadata:
+  name: default
+...
+status:
+  resourceGroups:
+    - name: database.platform.upbound.io # database
+      proposed:
+        controlPlane: ctp-1
+        group: default
+        space: upbound-gcp-us-central1
+        dimensions:
+          region: "us-east-1"
+          env: "prod"
+          offering: "databases"
+```
+
+If you don't see a response in `.status.resourceGroups`, it indicates that no
+match was found or that an error occurred while establishing routing.
+
+:::tip
+There's no limit to the number of control planes you can route to. You can also
+stack routing and form your own topology of control planes, with multiple layers
+of routing.
+:::
+
+### Limitations
+
+
+Routing from one control plane to another is currently scoped to control planes
+that exist in a single Space. You can't route resource requests to control
+planes across a Space boundary.
+
+
+[project-file]: /manuals/cli/howtos/project
+[contact-us]: https://www.upbound.io/usage/support/contact
+[crossplane-management-policies]: https://docs.crossplane.io/latest/managed-resources/managed-resources/#managementpolicies
+[vscode-pretty-json]: https://marketplace.visualstudio.com/items?itemName=chrismeyers.vscode-pretty-json
+[configure-routing]: #use-an-environment-to-route-resources
+[configuration]: https://docs.crossplane.io/latest/packages/providers
diff --git a/spaces_versioned_docs/version-v1.13/howtos/ctp-connector.md b/spaces_versioned_docs/version-v1.13/howtos/ctp-connector.md
new file mode 100644
index 000000000..b2cc48c49
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.13/howtos/ctp-connector.md
@@ -0,0 +1,508 @@
+---
+title: Control Plane Connector
+weight: 80
+description: A guide for how to connect a Kubernetes app cluster to a control plane in Upbound using the Control Plane connector feature
+plan: "standard"
+---
+
+
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+). Control Plane Connector functionality is consistent across versions.
+
+For related API specifications, resources, and version compatibility details, see the [Spaces API Reference](../../../reference/apis/spaces-api/).
+:::
+
+Control Plane Connector connects arbitrary Kubernetes application clusters outside of
+Upbound Spaces to your control planes running in Upbound Spaces.
+This lets you interact with your control plane's API from the app cluster. The claim APIs and the namespaced XR APIs
+you define via CompositeResourceDefinitions (XRDs) in the control plane are available in
+your app cluster alongside Kubernetes workload APIs like Pod. Control Plane Connector
+enables the same experience as a locally installed Crossplane.
+
+![control plane connector operations flow](/img/ConnectorFlow.png)
+
+### Control Plane Connector operations
+
+Control Plane Connector leverages the [Kubernetes API AggregationLayer][kubernetes-api-aggregationlayer]
+to create an extension API server and serve the claim APIs and the namespaced XR APIs in the control plane. It
+discovers the claim APIs and the namespaced XR APIs available in the control plane and registers corresponding
+APIService resources on the app cluster. Those APIService resources refer to the
+extension API server of Control Plane Connector.
+
+The claim APIs and the namespaced XR APIs are available in your Kubernetes cluster, just like all native
+Kubernetes APIs.
+
+The Control Plane Connector processes every request targeting the claim APIs and the namespaced XR APIs and makes the
+relevant requests to the connected control plane.
+
+Only the connected control plane stores and processes all claims and namespaced XRs created in the app
+cluster, eliminating any storage use on the application cluster. The Control Plane
+Connector provisions a target namespace in the control plane for the app cluster and stores
+all claims and namespaced XRs in this target namespace.
+
+For managing the claims and namespaced XRs, the Control Plane Connector creates a unique identifier for a
+resource by combining input parameters from claims, including:
+- `metadata.name`
+- `metadata.namespace`
+- your cluster name
+
+
+It employs SHA-256 hashing to generate a hash value and then extracts the first
+16 characters of that hash. This ensures the resulting identifier remains within
+the 64-character limit in Kubernetes.
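+
+As a rough sketch of the scheme (assuming the inputs are joined exactly as in the worked example that follows; the connector's internal encoding may differ):
+
+```shell
+# Hash the combined claim name, namespace, and cluster name,
+# then keep the first 16 hex characters as the identifier suffix.
+printf '%s' 'my-bucket-x-test-x-00000000-0000-0000-0000-000000000000' \
+  | sha256sum | cut -c1-16
+```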
+
+
+
+For instance, if a claim named `my-bucket` exists in the `test` namespace in
+`cluster-dev`, the system calculates the SHA-256 hash from
+`my-bucket-x-test-x-00000000-0000-0000-0000-000000000000` and takes the first 16
+characters. The control plane side then names the claim `claim-c603e518969b413e`.
+
+For namespaced XRs, the process is similar, only the prefix is different.
+The name becomes `nxr-c603e518969b413e`.
+
+
+### Installation
+
+
+
+
+
+Log in with the up CLI:
+
+```bash
+up login
+```
+
+Connect your app cluster to a namespace in an Upbound control plane with `up controlplane connector install `. This command creates a user token and installs the Control Plane Connector to your cluster. It's recommended you create a values file called `connector-values.yaml` with the values below. Select the tab according to which environment your control plane is running in.
+
+
+
+
+
+
+```yaml
+upbound:
+  # This is your org account in Upbound e.g. the name displayed after executing `up org list`
+  account:
+  # This is a personal access token generated in the Upbound Console
+  token:
+
+spaces:
+  # The Spaces host your control plane runs in. For Upbound's GCP Cloud Space use
+  # upbound-gcp-us-west-1.spaces.upbound.io; for the AWS Cloud Space use upbound-aws-us-east-1.spaces.upbound.io
+  host: "upbound-gcp-us-west-1.spaces.upbound.io"
+  insecureSkipTLSVerify: true
+  controlPlane:
+    # The name of the control plane you want the Connector to attach to
+    name:
+    # The control plane group the control plane resides in
+    group:
+    # The namespace within the control plane to sync claims from the app cluster to. NOTE: This must be created before you install the connector.
+    claimNamespace:
+```
+
+
+
+
+
+1. Create a [kubeconfig][kubeconfig] for the control plane. Update your Upbound context to the path for your desired control plane.
+```bash
+up login
+up ctx /upbound-gcp-us-central-1/default/your-control-plane
+up ctx . -f - > context.yaml
+```
+
+2. Write it to a secret in the cluster where you plan to
+install the Control Plane Connector.
+```bash
+kubectl create secret generic my-controlplane-kubeconfig --from-file=context.yaml
+```
+
+3. Reference this secret in the
+`spaces.controlPlane.kubeconfigSecret` field below.
+
+```yaml
+spaces:
+  controlPlane:
+    # The namespace within the control plane to sync claims from the app cluster to. NOTE: This must be created before you install the connector.
+    claimNamespace:
+    kubeconfigSecret:
+      name: my-controlplane-kubeconfig
+      key: kubeconfig
+```
+
+
+
+
+
+
+Provide the values file above when you run the CLI command:
+
+
+```bash {copy-lines="3"}
+up controlplane connector install my-control-plane my-app-ns-1 --file=connector-values.yaml
+```
+
+The claim APIs and the namespaced XR APIs from your control plane are now visible in the cluster.
+You can verify this with `kubectl api-resources`.
+
+```bash
+kubectl api-resources
+```
+
+### Uninstall
+
+Disconnect an app cluster that you previously installed the Control Plane Connector on by
+running the following:
+
+```bash
+up ctp connector uninstall
+```
+
+This command uninstalls the helm chart for the Control Plane Connector from an app
+cluster. It moves any claims in the app cluster into the specified namespace in
+the control plane.
+
+:::tip
+Make sure your kubeconfig's current context is pointed at the app cluster you
+want to uninstall Control Plane Connector from.
+:::
+
+
+
+
+It's recommended you create a values file called `connector-values.yaml` with
+the values below. Select the tab according to which environment your
+control plane is running in.
+
+
+
+
+
+
+```yaml
+upbound:
+  # This is your org account in Upbound e.g. the name displayed after executing `up org list`
+  account:
+  # This is a personal access token generated in the Upbound Console
+  token:
+
+spaces:
+  # Upbound GCP US-West-1     upbound-gcp-us-west-1.spaces.upbound.io
+  # Upbound AWS US-East-1     upbound-aws-us-east-1.spaces.upbound.io
+  # Upbound GCP US-Central-1  upbound-gcp-us-central-1.spaces.upbound.io
+  host: ""
+  insecureSkipTLSVerify: true
+  controlPlane:
+    # The name of the control plane you want the Connector to attach to
+    name:
+    # The control plane group the control plane resides in
+    group:
+    # The namespace within the control plane to sync claims from the app cluster to.
+    # NOTE: This must be created before you install the connector.
+    claimNamespace:
+```
+
+
+
+
+Create a [kubeconfig][kubeconfig-1] for the
+control plane. Write it to a secret in the cluster where you plan to
+install the Control Plane Connector. Reference this secret in the
+`spaces.controlPlane.kubeconfigSecret` field below.
+
+```yaml
+spaces:
+  controlPlane:
+    # The namespace within the control plane to sync claims from the app cluster to. NOTE: This must be created before you install the connector.
+    claimNamespace:
+    kubeconfigSecret:
+      name: my-controlplane-kubeconfig
+      key: kubeconfig
+```
+
+
+
+
+
+
+Provide the values file above when you `helm install` the Control Plane Connector:
+
+
+```bash
+helm install --wait mcp-connector oci://xpkg.upbound.io/spaces-artifacts/mcp-connector -n kube-system -f connector-values.yaml
+```
+:::tip
+Create an API token from the Upbound user account settings page in the console by following [these instructions][these-instructions].
+:::
+
+### Uninstall
+
+You can uninstall Control Plane Connector with Helm by running the following:
+
+```bash
+helm uninstall mcp-connector
+```
+
+
+
+
+
+### Example usage
+
+This example creates a control plane using [Configuration
+EKS][configuration-eks]. `KubernetesCluster` is
+available as a claim API in your control plane. The following is [an
+example][an-example]
+object you can create in your control plane.
+
+```yaml
+apiVersion: k8s.starter.org/v1alpha1
+kind: KubernetesCluster
+metadata:
+  name: my-cluster
+  namespace: default
+spec:
+  id: my-cluster
+  parameters:
+    nodes:
+      count: 3
+      size: small
+    services:
+      operators:
+        prometheus:
+          version: "34.5.1"
+  writeConnectionSecretToRef:
+    name: my-cluster-kubeconfig
+```
+
+After connecting your Kubernetes app cluster to the control plane, you
+can create the `KubernetesCluster` object in your app cluster. Although your
+local cluster has an object, the actual resource lives in your managed control
+plane inside Upbound.
+
+```bash {copy-lines="3"}
+# Applying the claim YAML above.
+# kubectl is set up to talk with your Kubernetes cluster.
+kubectl apply -f claim.yaml
+
+
+kubectl get claim -A
+NAME         SYNCED   READY   CONNECTION-SECRET       AGE
+my-cluster   True     True    my-cluster-kubeconfig   2m
+```
+
+Once Kubernetes creates the object, view the console to see your object.
+
+![Claim by connector in console](/img/ClaimInConsole.png)
+
+You can interact with the object through your cluster just as if it
+lived in your cluster.
+
+### Migration to control planes
+
+This guide details the migration of a Crossplane installation to Upbound-managed
+control planes using the Control Plane Connector to manage claims on an application
+cluster.
+
+![migration flow application cluster to control plane](/img/ConnectorMigration.png)
+
+#### Export all resources
+
+Before proceeding, ensure that you have set the correct kubecontext for your application
+cluster.
+
+```bash
+up controlplane migration export --pause-before-export --output=my-export.tar.gz --yes
+```
+
+This command performs the following:
+- Pauses all claim, composite, and managed resources before export.
+- Scans the control plane for resource types.
+- Exports Crossplane and native resources.
+- Archives the exported state into `my-export.tar.gz`.
+
+Example output:
+```bash
+Exporting control plane state...
+  ✓ Pausing all claim resources before export... 1 resources paused! ⏸️
+  ✓ Pausing all composite resources before export... 7 resources paused! ⏸️
+  ✓ Pausing all managed resources before export... 34 resources paused! ⏸️
+  ✓ Scanning control plane for types to export... 231 types found! 👀
+  ✓ Exporting 231 Crossplane resources...125 resources exported! 📤
+  ✓ Exporting 3 native resources...19 resources exported! 📤
+  ✓ Archiving exported state... archived to "my-export.tar.gz"! 📦
+
+Successfully exported control plane state!
+```
+
+#### Import all resources
+
+Next, restore the exported resources into a new managed control plane, which
+serves as the destination for the Control Plane Connector.
+
+
+Log into Upbound and select the correct context:
+
+```bash
+up login
+up ctx
+up ctp create ctp-a
+```
+
+Output:
+```bash
+ctp-a created
+```
+
+Verify that the core Crossplane versions on the application cluster and the new managed
+control plane match.
+
+Use the following command to import the resources:
+```bash
+up controlplane migration import -i my-export.tar.gz \
+  --unpause-after-import \
+  --mcp-connector-cluster-id=my-appcluster \
+  --mcp-connector-claim-namespace=my-appcluster
+```
+
+This command:
+- Restores base resources
+- Waits for XRDs and packages to establish
+- Imports claims and XRs
+- Finalizes the import and resumes managed resources
+
+Note that `--mcp-connector-cluster-id` needs to be unique per application
+cluster, and `--mcp-connector-claim-namespace` is the namespace the system
+creates during the import.
+
+Example output:
+```bash
+Importing control plane state...
+  ✓ Reading state from the archive... Done! 👀
+  ✓ Importing base resources... 56 resources imported!📥
+  ✓ Waiting for XRDs... Established! ⏳
+  ✓ Waiting for Packages... Installed and Healthy! ⏳
+  ✓ Importing remaining resources... 88 resources imported! 📥
+  ✓ Finalizing import... Done! 🎉
+  ✓ Unpausing managed resources ... Done! ▶️
+
+Successfully imported control plane state!
+```
+
+#### Verify imported claims
+
+
+The Control Plane Connector renames all claims and adds additional labels to them.
+
+```bash
+kubectl get claim -A
+```
+
+Example output:
+```bash
+NAMESPACE       NAME                                                         SYNCED   READY   CONNECTION-SECRET             AGE
+my-appcluster   cluster.aws.platformref.upbound.io/claim-e708ff592b974f51   True     True    platform-ref-aws-kubeconfig   3m17s
+```
+
+Inspect the labels:
+```bash
+kubectl get -n my-appcluster cluster.aws.platformref.upbound.io/claim-e708ff592b974f51 -o yaml | yq .metadata.labels
+```
+
+Example output:
+```bash
+mcp-connector.upbound.io/app-cluster: my-appcluster
+mcp-connector.upbound.io/app-namespace: default
+mcp-connector.upbound.io/app-resource-name: example
+```
+
+#### Cleanup the app cluster
+
+Remove all Crossplane-related resources from the application cluster, including:
+
+- Managed Resources
+- Claims
+- Compositions
+- XRDs
+- Packages (Functions, Configurations, Providers)
+- Crossplane and all associated CRDs
+
+
+#### Install Control Plane Connector
+
+
+Follow the preceding installation guide and configure the `connector-values.yaml`:
+
+```yaml
+# NOTE: clusterID needs to match the --mcp-connector-cluster-id used in the import on the managed control plane
+clusterID: my-appcluster
+upbound:
+  account:
+  token:
+
+spaces:
+  host: ""
+  insecureSkipTLSVerify: true
+  controlPlane:
+    name:
+    group:
+    # NOTE: This is the --mcp-connector-claim-namespace used during the import to the control plane
+    claimNamespace:
+```
+Once the Control Plane Connector installs, verify that the resources exist in the application
+cluster:
+
+```bash
+kubectl api-resources | grep platform
+```
+
+Example output:
+```bash
+awslbcontrollers                aws.platform.upbound.io/v1alpha1       true    AWSLBController
+podidentities                   aws.platform.upbound.io/v1alpha1       true    PodIdentity
+sqlinstances                    aws.platform.upbound.io/v1alpha1       true    SQLInstance
+clusters                        aws.platformref.upbound.io/v1alpha1    true    Cluster
+osss                            observe.platform.upbound.io/v1alpha1   true    Oss
+apps                            platform.upbound.io/v1alpha1           true    App
+```
+
+Confirm the connector restored the claims from the control plane to the application cluster:
+
+```bash
+kubectl get claim -A
+```
+
+Example output:
+```bash
+NAMESPACE   NAME                                         SYNCED   READY   CONNECTION-SECRET             AGE
+default     cluster.aws.platformref.upbound.io/example   True     True    platform-ref-aws-kubeconfig   127m
+```
+
+With this guide, you migrated your Crossplane installation to Upbound-managed
+control planes, ensuring seamless integration with your application cluster
+using the Control Plane Connector.
+
+### Connect multiple app clusters to a control plane
+
+Claims are stored in a unique namespace in the Upbound control plane.
+Each connected cluster gets its own namespace in the control plane.
+
+![Multi-cluster architecture with control plane connector](/img/ConnectorMulticlusterArch.png)
+
+There's no limit on the number of clusters connected to a single control plane.
+Control plane operators can see all their infrastructure in a central control
+plane.
+
+Without control planes and Control Plane Connector, users have to install
+Crossplane and providers on each cluster. Each cluster requires provider
+configuration with the necessary credentials. With a single control plane and
+multiple clusters connected through Upbound tokens, you don't need to give out
+any cloud credentials to the clusters.
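+
+For illustration, here's a sketch of how the values files for two app clusters
+attached to the same control plane might differ. The names are hypothetical;
+only `clusterID` and `claimNamespace` change per cluster:
+
+```yaml
+# Cluster 1 (hypothetical names)
+clusterID: app-cluster-1
+spaces:
+  controlPlane:
+    name: central-control-plane
+    group: default
+    claimNamespace: app-cluster-1
+---
+# Cluster 2 -- same control plane, its own identity and claim namespace
+clusterID: app-cluster-2
+spaces:
+  controlPlane:
+    name: central-control-plane
+    group: default
+    claimNamespace: app-cluster-2
+```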
+
+
+[kubeconfig]: /manuals/cli/howtos/context-config/#generate-a-kubeconfig-for-a-control-plane-in-a-group
+[kubeconfig-1]:/spaces/concepts/control-planes/#connect-directly-to-your-control-plane
+[these-instructions]:/manuals/console/#create-a-personal-access-token
+[kubernetes-api-aggregationlayer]: https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation/
+[configuration-eks]: https://github.com/upbound/configuration-eks
+[an-example]: https://github.com/upbound/configuration-eks/blob/9f86b6d/.up/examples/cluster.yaml
diff --git a/spaces_versioned_docs/version-v1.13/howtos/debugging-a-ctp.md b/spaces_versioned_docs/version-v1.13/howtos/debugging-a-ctp.md
new file mode 100644
index 000000000..521271e40
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.13/howtos/debugging-a-ctp.md
@@ -0,0 +1,128 @@
+---
+title: Debugging issues on a control plane
+sidebar_position: 70
+description: A guide for how to debug resources on a control plane running in Upbound.
+---
+
+This guide provides troubleshooting guidance for how to identify and fix issues on a control plane.
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+). Debugging methods are consistent across versions.
+
+For related control plane specifications and version-specific features, see the [Spaces API Reference](../../../reference/apis/spaces-api/).
+:::
+
+## Start from Upbound Console
+
+
+The Upbound [Console][console] has a built-in control plane explorer experience
+that surfaces status and events for the resources on your control plane. The
+explorer is claim-based. Resources in this view exist only if they exist in the
+reference chain originating from a claim. This view is a helpful starting point
+if you are attempting to debug an issue originating from a claim.
+
+:::tip
+If you directly create Crossplane Managed Resources (`MR`s) or Composite
+Resources (`XR`s), they won't render in the explorer.
+:::
+
+### Example
+
+The example below uses the control plane explorer view to inspect why a claim for an EKS Cluster isn't healthy.
+
+#### Check the health status of claims
+
+From the API type card, two claims branch from it: one shows a healthy green icon, while the other shows an unhealthy red icon.
+
+![Use control plane explorer view to see status of claims](/img/debug-overview.png)
+
+Select `More details` on the unhealthy claim card and Upbound shows details for the claim.
+
+![Use control plane explorer view to see details of claims](/img/debug-claim-more-details.png)
+
+Looking at the three events for this claim:
+
+- **ConfigureCompositeResource**: this event indicates Upbound created the claimed Composite Resource (`XR`).
+
+- **BindCompositeResource**: this indicates the Composite Resource (`XR`) that's being "claimed" isn't ready yet. A claim doesn't show `HEALTHY` until the XR it references is ready.
+
+- **ConfigureCompositeResource**: the error saying, `cannot apply composite resource...the object has been modified; please apply your changes to the latest version and try again` is a generic event from Crossplane resources. It's safe to ignore this error.
+
+Next, look at the `status` field of the rendered YAML for the resource.
+
+![Use control plane explorer view to see status details of claims](/img/debug-claim-status.png)
+
+The status reports a similar message to the event stream: this claim is waiting for a Composite Resource to be ready. Based on this, investigate the Composite Resource referenced by this claim next.
+
+#### Check the health status of the Composite Resource
+
+
+The control plane explorer only shows the claim cards by default. Selecting the claim card renders the rest of the Crossplane resource tree associated with the selected claim.
+
+
+The previous claim expands into this screenshot:
+
+![Use control plane explorer view to expand tree of claim](/img/debug-claim-expansion.png)
+
+This renders the XR referenced by the claim (along with all its references). You can see the XR is showing the same unhealthy status icon in its card. Notice the XR itself has two nested XRs. One of the nested XRs shows a healthy green icon on its card, while the other shows an unhealthy red icon. Like the claim, a Composite Resource doesn't show healthy until all referenced resources also show healthy.
+
+#### Inspecting Managed Resources
+
+Selecting `more details` on one of the unhealthy Managed Resources shows the following:
+
+![Use control plane explorer view to view events for an MR](/img/debug-mr-event.png)
+
+This event reveals it's unhealthy because it's waiting on a reference to another Managed Resource. Searching the rendered YAML of the MR for this resource shows the following:
+
+![Use control plane explorer view to view status for an MR](/img/debug-mr-status.png)
+
+The rendered YAML shows this MR is referencing a sibling MR that shares the same controller. The same parent XR created both of these managed resources. Inspect the sibling MR to see what its status is.
+
+![Use control plane explorer view to view status for a sibling MR](/img/debug-mr-dependency-status.png)
+
+The sibling MR event stream shows the Provider processed the resource create request. Ignore the `CannotInitializeManagedResource` event. EKS clusters can take 15 minutes or more to provision in AWS. The root cause is that everything is fine -- all the resources are still provisioning. Waiting longer and then looking at the control plane explorer again shows all resources are healthy. For reference, below is an example status field for a resource that's healthy and provisioned.
+
+```yaml
+...
+status:
+  atProvider:
+    id: team-b-app-cluster-bhwfb-hwtgs-20230403135452772300000008
+  conditions:
+  - lastTransitionTime: '2023-04-03T13:56:35Z'
+    reason: Available
+    status: 'True'
+    type: Ready
+  - lastTransitionTime: '2023-04-03T13:54:02Z'
+    reason: ReconcileSuccess
+    status: 'True'
+    type: Synced
+  - lastTransitionTime: '2023-04-03T13:54:53Z'
+    reason: Success
+    status: 'True'
+    type: LastAsyncOperation
+  - lastTransitionTime: '2023-04-03T13:54:53Z'
+    reason: Finished
+    status: 'True'
+    type: AsyncOperation
+```
+
+### Control plane explorer limitations
+
+The control plane explorer view is currently designed around claims (`XRC`s). The control plane explorer doesn't inspect other Crossplane resources. To inspect other Crossplane resources, use the `up` CLI.
+
+Some examples of Crossplane resources that require the `up` CLI:
+
+- Managed Resources that aren't associated with a claim
+- Composite Resources that aren't associated with a claim
+- The status of _deleting_ resources
+- ProviderConfigs
+- Provider events
+
+## Use direct CLI access
+
+If your preference is to use a terminal instead of a GUI, Upbound supports direct access to the API server of the control plane. Use [`up ctx`][up-ctx] to connect directly to your control plane.
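+
+For example, a session inspecting resources the explorer doesn't surface might
+look like the following sketch; the context path and resource names are
+hypothetical:
+
+```bash
+# Point kubectl at the control plane (hypothetical path)
+up ctx acme/upbound-gcp-us-west-1/default/my-control-plane
+
+# List all managed resources, whether or not a claim references them
+kubectl get managed
+
+# Inspect ProviderConfigs and provider events (hypothetical names)
+kubectl get providerconfigs.aws.upbound.io
+kubectl describe provider.pkg.crossplane.io upbound-provider-aws-ec2
+```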
+
+
+[console]: /manuals/console/upbound-console
+[up-ctx]: /reference/cli-reference
diff --git a/spaces_versioned_docs/version-v1.13/howtos/managed-service.md b/spaces_versioned_docs/version-v1.13/howtos/managed-service.md
new file mode 100644
index 000000000..40b983a76
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.13/howtos/managed-service.md
@@ -0,0 +1,23 @@
+---
+title: Managed Upbound control planes
+description: "Learn about the managed service capabilities of a Space"
+sidebar_position: 10
+---
+
+Control planes in Upbound are fully isolated [Upbound Crossplane][uxp] instances
+that Upbound manages for you. This means Upbound handles:
+
+- the underlying lifecycle of infrastructure (compute, memory, and storage) required to power your instance.
+- scaling of the infrastructure.
+- the maintenance of the core Upbound Crossplane components that make up a control plane.
+
+This lets users focus on building their APIs and operating their control planes,
+while Upbound handles the rest. Each control plane has its own dedicated API
+server connecting users to their control plane.
+
+## Learn about Upbound control planes
+
+Read the [concept][ctp-concept] documentation to learn about Upbound control planes.
+
+[uxp]: /manuals/uxp/overview
+[ctp-concept]: /spaces/concepts/control-planes
\ No newline at end of file
diff --git a/spaces_versioned_docs/version-v1.13/howtos/mcp-connector-guide.md b/spaces_versioned_docs/version-v1.13/howtos/mcp-connector-guide.md
new file mode 100644
index 000000000..8a3866d07
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.13/howtos/mcp-connector-guide.md
@@ -0,0 +1,169 @@
+---
+title: Consume control plane APIs in an app cluster with control plane connector
+sidebar_position: 99
+description: A tutorial to connect a Kubernetes app cluster to a control plane in
+  an Upbound self-hosted Space using the control plane connector
+---
+
+In this tutorial, you learn how to configure a Kubernetes app cluster to communicate with a control plane in an Upbound self-hosted Space.
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Control plane connector functionality is consistent across versions.
+
+For related API specifications, resources, and version compatibility details, see the [Spaces API Reference](../../../../../reference/apis/spaces-api/).
+:::
+
+The [control plane connector][control-plane-connector] bridges your Kubernetes application clusters---running outside of Upbound---to your control planes running in Upbound. This allows you to interact with your control plane's API right from the app cluster. The claim APIs you define via `CompositeResourceDefinitions` are available alongside Kubernetes workload APIs like `Pod`. In effect, control plane connector provides the same experience as a locally installed Crossplane.
+
+## Prerequisites
+
+To complete this tutorial, you need the following:
+
+- Have already deployed an Upbound Space.
+- Have already deployed a Kubernetes cluster (referred to as the `app cluster`).
+
+## Create a control plane
+
+Create a new control plane in your self-hosted Space. Run the following command in a terminal:
+
+```bash
+up ctp create my-control-plane
+```
+
+Once the control plane is ready, connect to it.
+
+```bash
+up ctp connect my-control-plane
+```
+
+For convenience, install an Upbound [platform reference Configuration][platform-reference-configuration] from the marketplace. In production scenarios, replace this with your own Crossplane Configurations or compositions.
+ +```bash +up ctp configuration install xpkg.upbound.io/upbound/platform-ref-aws:v1.4.0 +``` + +## Fetch the control plane's connection details + +Run the following command in a terminal: + +```shell +kubectl get secret kubeconfig-my-control-plane -n default -o jsonpath='{.data.kubeconfig}' | base64 -d > kubeconfig-my-control-plane.yaml +``` + +This command saves the kubeconfig for the control plane to a file in your working directory. + +## Install control plane connector in your app cluster + +Switch contexts to your Kubernetes app cluster. To install the control plane connector in your app cluster, you must first provide a secret containing your control plane's kubeconfig at install-time. Run the following command in a terminal: + +:::important +Make sure the following commands are executed against your **app cluster**, not your control plane. +::: + +```bash +kubectl create secret generic kubeconfig-my-control-plane -n kube-system --from-file=kubeconfig=./kubeconfig-my-control-plane.yaml +``` + +Set the environment variable below to configure which namespace _in your control plane_ you wish to sync the app cluster's claims to. + +```shell +export CONNECTOR_CTP_NAMESPACE=app-cluster-1 +``` + +Install the Control Plane Connector in the app cluster and point it to your control plane. + +```bash +up ctp connector install my-control-plane $CONNECTOR_CTP_NAMESPACE --control-plane-secret=kubeconfig-my-control-plane +``` + +## Inspect your app cluster + +After you install Control Plane Connector in the app cluster, you can now see APIs which live on the control plane. You can confirm this is the case by running the following command on your app cluster: + +```bash {copy-lines="1"} +kubectl api-resources | grep upbound + +# The output should look like this: +sqlinstances aws.platform.upbound.io/v1alpha1 true SQLInstance +clusters aws.platformref.upbound.io/v1alpha1 true Cluster +osss observe.platform.upbound.io/v1alpha1 true Oss +apps platform.upbound.io/v1alpha1 true App +``` + +## Claim a database instance on your app cluster + +Create a database claim against the `SQLInstance` API and observe resources get created by your control plane. Apply the following resources to your app cluster: + +```yaml +cat < --output + ``` + + The command exports your existing Crossplane control plane configuration/state into an archive file. + +::: note +By default, the export command doesn't make any changes to your existing Crossplane control plane state, leaving it intact. Use the `--pause-before-export` flag to pause the reconciliation on managed resources before exporting the archive file. + +This safety mechanism ensures the control plane you migrate state to doesn't assume ownership of resources before you're ready. +::: + +2. Use the control plane [create command][create-command] to create a managed +control plane in Upbound: + + ```bash + up controlplane create my-controlplane + ``` + +3. Use [`up ctx`][up-ctx] to connect to the control plane created in the previous step: + + ```bash + up ctx "///my-controlplane" + ``` + + The command configures your local `kubeconfig` to connect to the control plane. + +4. Run the following command to import the archive file into the control plane: + + ```bash + up controlplane migration import --input + ``` + +:::note +By default, the import command leaves the control plane in an inactive state by pausing the reconciliation on managed +resources. This pause gives you an opportunity to review the imported configuration/state before activating the control plane. 
+Use the `--unpause-after-import` flag to change the default behavior and activate the control plane immediately after
+importing the archive file.
+:::
+
+
+
+5. Review and validate the imported configuration/state. When you are ready, activate your managed
+   control plane by running the following command:
+
+   ```bash
+   kubectl annotate managed --all crossplane.io/paused-
+   ```
+
+   At this point, you can delete the source Crossplane control plane.
+
+## CLI options
+
+### Filtering
+
+The migration tool captures the state of a control plane. The only filtering
+supported is by Kubernetes namespace and Kubernetes resource type.
+
+You can exclude namespaces using the `--exclude-namespaces` CLI option. This prevents the CLI from including unwanted resources in the export.
+
+```bash
+--exclude-namespaces=kube-system,kube-public,kube-node-lease,local-path-storage,...
+
+# A list of specific namespaces to exclude from the export. Defaults to 'kube-system', 'kube-public','kube-node-lease', and 'local-path-storage'.
+```
+
+You can exclude Kubernetes resource types by using the `--exclude-resources` CLI option:
+
+```bash
+--exclude-resources=EXCLUDE-RESOURCES,...
+
+# A list of resource types to exclude from the export in "resource.group" format. No resources are excluded by default.
+```
+
+For example, to exclude the CRDs installed by Crossplane functions (since they're not needed):
+
+```bash
+up controlplane migration export \
+  --exclude-resources=gotemplates.gotemplating.fn.crossplane.io,kclinputs.template.fn.crossplane.io
+```
+
+:::warning
+You must specify resource names in lowercase "resource.group" format (for example, `gotemplates.gotemplating.fn.crossplane.io`). Using only the resource kind (for example, `GoTemplate`) isn't supported.
+:::
+
+
+:::tip Function Input CRDs
+
+Exclude function input CRDs (`inputs.template.fn.crossplane.io`, `resources.pt.fn.crossplane.io`, `gotemplates.gotemplating.fn.crossplane.io`, `kclinputs.template.fn.crossplane.io`) from migration exports. Upbound automatically recreates these resources during import. Function input CRDs typically have owner references to function packages and may have restricted RBAC access. Upbound installs these CRDs during the import when function packages are restored.
+
+:::
+
+
+After export, users can also edit the archive file to only include necessary resources.
+
+### Export non-Crossplane resources
+
+Use the `--include-extra-resources=` CLI option to select other CRD types to include in the export.
+
+### Set the kubecontext
+
+Currently `--context` isn't supported in the migration CLI. You should be able to use the `--kubeconfig` CLI option to use a file that's set to the correct context. For example:
+
+```bash
+up controlplane migration export --kubeconfig 
+```
+
+Use this in tandem with `up ctx` to point the kubeconfig at the control plane you want to export:
+
+```bash
+up ctx --kubeconfig ~/.kube/config
+
+# To list the current context
+up ctx . --kubeconfig ~/.kube/config
+```
+
+## Export archive
+
+The migration CLI exports an archive upon successful completion. Below is an example export of a control plane that excludes several CRD types and skips the confirmation prompt. A file gets written to the working directory, unless you select another output file:
+ +View the example export + +```bash +$ up controlplane migration export --exclude-resources=gotemplates.gotemplating.fn.crossplane.io,kclinputs.template.fn.crossplane.io --yes +Exporting control plane state... +✓ Scanning control plane for types to export... 121 types found! 👀 +✓ Exporting 121 Crossplane resources...60 resources exported! 📤 +✓ Exporting 3 native resources...8 resources exported! 📤 +✓ Archiving exported state... archived to "xp-state.tar.gz"! 📦 +``` + +
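+
+To inspect the archive locally, you can unpack it with standard tools; a
+minimal sketch, assuming the default `xp-state.tar.gz` name shown above:
+
+```bash
+# Extract the export archive into a working directory for inspection
+mkdir xp-state && tar -xzf xp-state.tar.gz -C xp-state
+```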
+
+
+When an export occurs, a file named `xp-state.tar.gz` by default gets created in the working directory. You can extract the archive; the contents of the export are all plain-text YAML files.
+
+- Each CRD (for example `vpcs.ec2.aws.upbound.io`) gets its own directory,
+which contains:
+  - A `metadata.yaml` file that contains Kubernetes object metadata and a list
+    of the Kubernetes categories the resource belongs to
+  - A `cluster` directory that contains YAML manifests for all resources provisioned
+    using the CRD
+
+Sample contents for a cluster with a single `XNetwork` Composite from
+[configuration-aws-network][configuration-aws-network] is shown below:
+ +View the example cluster content + +```bash +├── compositionrevisions.apiextensions.crossplane.io +│ ├── cluster +│ │ ├── kcl.xnetworks.aws.platform.upbound.io-4ca6a8a.yaml +│ │ └── xnetworks.aws.platform.upbound.io-9859a34.yaml +│ └── metadata.yaml +├── configurations.pkg.crossplane.io +│ ├── cluster +│ │ └── configuration-aws-network.yaml +│ └── metadata.yaml +├── deploymentruntimeconfigs.pkg.crossplane.io +│ ├── cluster +│ │ └── default.yaml +│ └── metadata.yaml +├── export.yaml +├── functions.pkg.crossplane.io +│ ├── cluster +│ │ ├── crossplane-contrib-function-auto-ready.yaml +│ │ ├── crossplane-contrib-function-go-templating.yaml +│ │ └── crossplane-contrib-function-kcl.yaml +│ └── metadata.yaml +├── internetgateways.ec2.aws.upbound.io +│ ├── cluster +│ │ └── borrelli-backup-test-xgl4q.yaml +│ └── metadata.yaml +├── mainroutetableassociations.ec2.aws.upbound.io +│ ├── cluster +│ │ └── borrelli-backup-test-t2qh7.yaml +│ └── metadata.yaml +├── namespaces +│ └── cluster +│ ├── crossplane-system.yaml +│ ├── default.yaml +│ └── upbound-system.yaml +├── providerconfigs.aws.upbound.io +│ ├── cluster +│ │ └── default.yaml +│ └── metadata.yaml +├── providerconfigusages.aws.upbound.io +│ ├── cluster +│ │ ├── 0a2a3ec6-ef13-45f9-9cf0-63af7f4a6b6b.yaml +...redacted +│ │ └── f7092b0f-3a78-4bfe-82c8-57e5085a9b11.yaml +│ └── metadata.yaml +├── providers.pkg.crossplane.io +│ ├── cluster +│ │ ├── upbound-provider-aws-ec2.yaml +│ │ └── upbound-provider-family-aws.yaml +│ └── metadata.yaml +├── routes.ec2.aws.upbound.io +│ ├── cluster +│ │ └── borrelli-backup-test-dt9cj.yaml +│ └── metadata.yaml +├── routetableassociations.ec2.aws.upbound.io +│ ├── cluster +│ │ ├── borrelli-backup-test-mr2sd.yaml +│ │ ├── borrelli-backup-test-ngq5h.yaml +│ │ ├── borrelli-backup-test-nrkgg.yaml +│ │ └── borrelli-backup-test-wq752.yaml +│ └── metadata.yaml +├── routetables.ec2.aws.upbound.io +│ ├── cluster +│ │ └── borrelli-backup-test-dv4mb.yaml +│ └── metadata.yaml +├── secrets +│ └── namespaces +│ ├── crossplane-system +│ │ ├── cert-token-signing-gateway-pub.yaml +│ │ ├── mxp-hostcluster-certs.yaml +│ │ ├── package-pull-secret.yaml +│ │ └── xgql-tls.yaml +│ └── upbound-system +│ └── aws-creds.yaml +├── securitygrouprules.ec2.aws.upbound.io +│ ├── cluster +│ │ ├── borrelli-backup-test-472f4.yaml +│ │ └── borrelli-backup-test-qftmw.yaml +│ └── metadata.yaml +├── securitygroups.ec2.aws.upbound.io +│ ├── cluster +│ │ └── borrelli-backup-test-w5jch.yaml +│ └── metadata.yaml +├── storeconfigs.secrets.crossplane.io +│ ├── cluster +│ │ └── default.yaml +│ └── metadata.yaml +├── subnets.ec2.aws.upbound.io +│ ├── cluster +│ │ ├── borrelli-backup-test-8btj6.yaml +│ │ ├── borrelli-backup-test-gbmrm.yaml +│ │ ├── borrelli-backup-test-m7kh7.yaml +│ │ └── borrelli-backup-test-nttt5.yaml +│ └── metadata.yaml +├── vpcs.ec2.aws.upbound.io +│ ├── cluster +│ │ └── borrelli-backup-test-7hwgh.yaml +│ └── metadata.yaml +└── xnetworks.aws.platform.upbound.io +├── cluster +│ └── borrelli-backup-test.yaml +└── metadata.yaml +43 directories, 87 files +``` + +
+ + +The `export.yaml` file contains metadata about the export, including the configuration of the export, Crossplane information, and what's included in the export bundle. + +
+ +View the export + +```yaml +version: v1alpha1 +exportedAt: 2025-01-06T17:39:53.173222Z +options: + excludedNamespaces: + - kube-system + - kube-public + - kube-node-lease + - local-path-storage + includedResources: + - namespaces + - configmaps + - secrets + excludedResources: + - gotemplates.gotemplating.fn.crossplane.io + - kclinputs.template.fn.crossplane.io +crossplane: + distribution: universal-crossplane + namespace: crossplane-system + version: 1.17.3-up.1 + featureFlags: + - --enable-provider-identity + - --enable-environment-configs + - --enable-composition-functions + - --enable-usages +stats: + total: 68 + nativeResources: + configmaps: 0 + namespaces: 3 + secrets: 5 + customResources: + amicopies.ec2.aws.upbound.io: 0 + amilaunchpermissions.ec2.aws.upbound.io: 0 + amis.ec2.aws.upbound.io: 0 + availabilityzonegroups.ec2.aws.upbound.io: 0 + capacityreservations.ec2.aws.upbound.io: 0 + carriergateways.ec2.aws.upbound.io: 0 + compositeresourcedefinitions.apiextensions.crossplane.io: 0 + compositionrevisions.apiextensions.crossplane.io: 2 + compositions.apiextensions.crossplane.io: 0 + configurationrevisions.pkg.crossplane.io: 0 + configurations.pkg.crossplane.io: 1 +...redacted +``` + +
+
+### Skipped resources
+
+Along with the resources excluded via CLI options, the following resources aren't
+included in the backup:
+
+- The `kube-root-ca.crt` ConfigMap, since this is cluster-specific
+- Resources directly managed via Helm (resources from ArgoCD's Helm implementation, which templates
+Helm resources and then applies them, do get included in the backup). The migration creates the exclusion list by looking for:
+  - Any resource with the label `"app.kubernetes.io/managed-by" == "Helm"`
+  - Kubernetes Secrets with the label prefix `helm.sh/release`. For example, `helm.sh/release.v1`
+- Resources installed via a Crossplane package. These have an `ownerReference` with
+a prefix `pkg.crossplane.io`. The expectation is that during import, the Crossplane Package Manager bears responsibility for installing the resources.
+- Crossplane Locks: Any `Lock.pkg.crossplane.io` resource isn't included in the
+export.
+
+## Restore
+
+The following is an example of a successful import run. At the end of the import, all Managed Resources are in a paused state.
+ +View the migration import + +```bash +$ up controlplane migration import +Importing control plane state... +✓ Reading state from the archive... Done! 👀 +✓ Importing base resources... 18 resources imported! 📥 +✓ Waiting for XRDs... Established! ⏳ +✓ Waiting for Packages... Installed and Healthy! ⏳ +✓ Importing remaining resources... 50 resources imported! 📥 +✓ Finalizing import... Done! 🎉 +``` + +
+ +Your scenario may involve migrating resources which already exist through other automation on the platform. When executing an import in these circumstances, the importer applies the new manifests to the cluster. If the resource already exists, the restore sets fields to what's in the backup. + +The importer restores all resources in the export archive. Managed Resources get imported with the `crossplane.io/paused: "true"` annotation set. Use the `--unpause-after-import` CLI argument to automatically un-pause resources that got +paused during backup, or remove the annotation manually. + +### Restore order + +The importer restores based on Kubernetes types. The restore order doesn't include parent/child relationships. + +Because Crossplane Composites create new Managed Resources if not present on the cluster, all +Claims, Composites and Managed Resources get imported in a paused state. You can un-pause them after the restore completes. + +The first step of import is installing Base Resources into the cluster. These resources (such has +packages and XRDs) must be ready before proceeding with the import. +Base Resources are: + +- Kubernetes Resources + - ConfigMaps + - Namespaces + - Secrets +- Crossplane Resources + - ControllerConfigs: `controllerconfigs.pkg.crossplane.io` + - DeploymentRuntimeConfigs: `deploymentruntimeconfigs.pkg.crossplane.io` + - StoreConfigs: `storeconfigs.secrets.crossplane.io` +- Crossplane Packages + - Providers: `providers.pkg.crossplane.io` + - Functions: `functions.pkg.crossplane.io` + - Configurations: `configurations.pkg.crossplane.io` + +Restore waits for the base resources to be `Ready` before moving on to the next step. Next, restore walks through the archive and restores all the manifests present. + +During import, the `crossplane.io/paused` annotation gets added to Managed Resources, Claims +and Composites. + +To manually un-pause managed resources after an import, remove the annotation by running: + +```bash +kubectl annotate managed --all crossplane.io/paused- +``` + +You can also run import again with the `--unpause-after-import` flag to remove the annotations. + +```bash +up controlplane migration import --unpause-after-import +``` + +### Restoring resource status + +The importer applies the status of all resources during import. The importer determines if the CRD version has a status field defined based on the stored CRD version. + + +[cli-command]: /reference/cli-reference +[up-cli]: /reference/cli-reference +[up-cli-1]: /manuals/cli/overview +[create-command]: /reference/cli-reference +[up-ctx]: /reference/cli-reference +[configuration-aws-network]: https://marketplace.upbound.io/configurations/upbound/configuration-aws-network diff --git a/spaces_versioned_docs/version-v1.13/howtos/observability.md b/spaces_versioned_docs/version-v1.13/howtos/observability.md new file mode 100644 index 000000000..8fc5c3278 --- /dev/null +++ b/spaces_versioned_docs/version-v1.13/howtos/observability.md @@ -0,0 +1,395 @@ +--- +title: Observability +sidebar_position: 50 +description: A guide for how to use the integrated observability pipeline feature + in a Space. +plan: "enterprise" +--- + + + +This guide explains how to configure observability in Upbound Spaces. Upbound +provides integrated observability features built on +[OpenTelemetry][opentelemetry] to collect, process, and export logs, metrics, +and traces. + +Upbound Spaces offers two levels of observability: + +1. 
**Space-level observability** - Observes the cluster infrastructure where Spaces software is installed (Self-Hosted only) +2. **Control plane observability** - Observes workloads running within individual control planes + + + + + +:::info API Version Information & Version Selector +This guide applies to all supported versions (v1.9-v1.15+). Observability features evolved: + +- **v1.9-v1.12**: SharedTelemetryConfig available (alpha until v1.11) +- **v1.11+**: Observability promoted to stable with logs export support +- **v1.14+**: Both space-level and control-plane observability GA + +**View API Reference for Your Version**: +| Version | Status | Link | +|---------|--------|------| +| v1.15 | Latest | [Spaces API v1.15](../../../next/reference/apis/spaces-api/) | +| v1.14 | Supported | [Spaces API v1.14](../../../1.14/reference/apis/spaces-api/) | +| v1.13 | Supported | [Spaces API v1.13](../../../1.13/reference/apis/spaces-api/) | +| v1.12 | Supported | [Spaces API v1.12](../../../1.12/reference/apis/spaces-api/) | +| v1.11 | Supported | [Spaces API v1.11](../../../1.11/reference/apis/spaces-api/) | +| v1.10 | Limited | [Spaces API v1.10](../../../1.10/reference/apis/spaces-api/) | +| v1.9 | EOL | [Spaces API v1.9](../../../1.9/reference/apis/spaces-api/) | + +For version support policy and feature availability, see and . +::: + +:::important +**Space-level observability** (available since v1.6.0, GA in v1.14.0): +- Disabled by default +- Requires manual enablement and configuration +- Self-Hosted Spaces only + +**Control plane observability** (available since v1.13.0, GA in v1.14.0): +- Enabled by default +- No additional configuration required +::: + + + + +## Prerequisites + + +**Control plane observability** is enabled by default. No additional setup is +required. + + + +### Self-hosted Spaces + +1. **Enable the observability feature** when installing Spaces: + ```bash + up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \ + ... + --set "observability.enabled=true" + ``` + +Set `features.alpha.observability.enabled=true` instead if using Spaces version +before `v1.14.0`. + +2. **Install OpenTelemetry Operator** (required for Space-level observability): + ```bash + kubectl apply -f https://github.com/open-telemetry/opentelemetry-operator/releases/download/v0.116.0/opentelemetry-operator.yaml + ``` + + :::important + If running Spaces `v1.11` or later, use OpenTelemetry Operator `v0.110.0` or later due to breaking changes. + ::: + + +## Space-level Observability + +Space-level observability is only available for self-hosted Spaces and allows +administrators to observe the cluster infrastructure. + +### Configuration + +Configure Space-level observability using the `spacesCollector` value in your +Spaces Helm chart: + +```yaml +observability: + spacesCollector: + config: + exporters: + otlphttp: + endpoint: "" + headers: + api-key: YOUR_API_KEY + exportPipeline: + logs: + - otlphttp + metrics: + - otlphttp +``` + +This configuration exports metrics and logs from: + +- Crossplane installation +- Spaces infrastructure (controller, API, router, etc.) + +### Router metrics + +The Spaces router uses Envoy as a reverse proxy and automatically exposes +metrics when you enable Space-level observability. 
These metrics provide +visibility into: + +- Traffic routing to control planes and services +- Request status codes, timeouts, and retries +- Circuit breaker state preventing cascading failures +- Client connection patterns and request volume +- Request latency (P50, P95, P99) + +For more information about available metrics, example queries, and how to enable +this feature, see the [Space-level observability guide][space-level-o11y]. + +## Control plane observability + +Control plane observability collects telemetry data from workloads running +within individual control planes using `SharedTelemetryConfig` resources. + +The pipeline deploys [OpenTelemetry Collectors][opentelemetry-collectors] per +control plane, defined by a `SharedTelemetryConfig` at the group level. +Collectors pass data to external observability backends. + +:::important +From Spaces `v1.13` and beyond, telemetry only includes user-facing control +plane workloads (Crossplane, providers, functions). + +Self-hosted users can include system workloads (`api-server`, `etcd`) by setting +`observability.collectors.includeSystemTelemetry=true` in Helm. +::: + +:::important +Spaces validates `SharedTelemetryConfig` resources before applying them by +sending telemetry to configured exporters. self-hosted Spaces, ensure that +`spaces-controller` can reach the exporter endpoints. +::: + +### `SharedTelemetryConfig` + +`SharedTelemetryConfig` is a group-scoped custom resource that defines telemetry +configuration for control planes. + +#### New Relic example + +```yaml +apiVersion: observability.spaces.upbound.io/v1alpha1 +kind: SharedTelemetryConfig +metadata: + name: newrelic + namespace: default +spec: + controlPlaneSelector: + labelSelectors: + - matchLabels: + org: foo + exporters: + otlphttp: + endpoint: https://otlp.nr-data.net + headers: + api-key: YOUR_API_KEY + exportPipeline: + metrics: [otlphttp] + traces: [otlphttp] + logs: [otlphttp] +``` + +#### Datadog Example + +```yaml +apiVersion: observability.spaces.upbound.io/v1alpha1 +kind: SharedTelemetryConfig +metadata: + name: datadog + namespace: default +spec: + controlPlaneSelector: + labelSelectors: + - matchLabels: + org: foo + exporters: + datadog: + api: + site: ${DATADOG_SITE} + key: ${DATADOG_API_KEY} + exportPipeline: + metrics: [datadog] + traces: [datadog] + logs: [datadog] +``` + +### Control plane selection + +Use `spec.controlPlaneSelector` to specify which control planes should use the +telemetry configuration. + +#### Label-based selection + +```yaml +spec: + controlPlaneSelector: + labelSelectors: + - matchLabels: + environment: production +``` + +#### Expression-based selection + +```yaml +spec: + controlPlaneSelector: + labelSelectors: + - matchExpressions: + - { key: environment, operator: In, values: [production,staging] } +``` + +#### Name-based selection + +```yaml +spec: + controlPlaneSelector: + names: + - controlplane-dev + - controlplane-staging + - controlplane-prod +``` + +### Manage sensitive data + +:::important +Available from Spaces `v1.10` +::: + +Store sensitive data in Kubernetes secrets and reference them in your +`SharedTelemetryConfig`: + +1. **Create the secret:** + ```bash + kubectl create secret generic sensitive -n \ + --from-literal=apiKey='YOUR_API_KEY' + ``` + +2. 
**Reference in SharedTelemetryConfig:** + ```yaml + apiVersion: observability.spaces.upbound.io/v1alpha1 + kind: SharedTelemetryConfig + metadata: + name: newrelic + spec: + configPatchSecretRefs: + - name: sensitive + key: apiKey + path: exporters.otlphttp.headers.api-key + controlPlaneSelector: + labelSelectors: + - matchLabels: + org: foo + exporters: + otlphttp: + endpoint: https://otlp.nr-data.net + headers: + api-key: dummy # Replaced by secret value + exportPipeline: + metrics: [otlphttp] + traces: [otlphttp] + logs: [otlphttp] + ``` + +### Telemetry processing + +:::important +Available from Spaces `v1.11` +::: + +Configure processing pipelines to transform telemetry data using the [transform +processor][transform-processor]. + +#### Add labels to metrics + +```yaml +spec: + processors: + transform: + error_mode: ignore + metric_statements: + - context: datapoint + statements: + - set(attributes["newLabel"], "someLabel") + processorPipeline: + metrics: [transform] +``` + +#### Remove labels + +From metrics: +```yaml +processors: + transform: + metric_statements: + - context: datapoint + statements: + - delete_key(attributes, "kubernetes_namespace") +``` + +From logs: +```yaml +processors: + transform: + log_statements: + - context: log + statements: + - delete_key(attributes, "log.file.name") +``` + +#### Modify log messages + +```yaml +processors: + transform: + log_statements: + - context: log + statements: + - set(attributes["original"], body) + - set(body, Concat(["log message:", body], " ")) +``` + +### Monitor status + +Check the status of your `SharedTelemetryConfig`: + +```bash +kubectl get stc +NAME SELECTED FAILED PROVISIONED AGE +datadog 1 0 1 63s +``` + +- `SELECTED`: Number of control planes selected +- `FAILED`: Number of control planes that failed provisioning +- `PROVISIONED`: Number of successfully running collectors + +For detailed status information: + +```bash +kubectl describe stc +``` + +## Supported exporters + +Both Space-level and control plane observability support: +- `datadog` -. Datadog integration +- `otlphttp` - General-purpose exporter (used by New Relic, among others) +- `debug` -. troubleshooting + +## Considerations + +- **Control plane conflicts**: Each control plane can only use one `SharedTelemetryConfig`. Multiple configs selecting the same control plane conflict. +- **Custom collector image**: Both Space-level and control plane observability use the same custom OpenTelemetry Collector image with supported exporters. +- **Resource scope**: `SharedTelemetryConfig` resources are group-scoped, allowing different telemetry configurations per group. + +For more advanced configuration options, review the [Helm chart +reference][helm-chart-reference] and [OpenTelemetry Transformation Language +documentation][opentelemetry-transformation-language]. 
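+
+When troubleshooting an export pipeline, a minimal configuration that uses the
+built-in `debug` exporter can help confirm that collectors deploy and select
+the intended control planes before wiring up a real backend. A sketch, with a
+hypothetical control plane name:
+
+```yaml
+apiVersion: observability.spaces.upbound.io/v1alpha1
+kind: SharedTelemetryConfig
+metadata:
+  name: debug-pipeline
+  namespace: default
+spec:
+  controlPlaneSelector:
+    names:
+      - controlplane-dev   # hypothetical control plane
+  exporters:
+    debug:
+      verbosity: detailed  # log exported telemetry to the collector's stdout
+  exportPipeline:
+    logs: [debug]
+    metrics: [debug]
+```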
+
+
+[opentelemetry]: https://opentelemetry.io/
+[opentelemetry-collectors]: https://opentelemetry.io/docs/collector/
+[opentelemetry-collector-configuration]: https://opentelemetry.io/docs/collector/configuration/#exporters
+[opentelemetry-operator]: https://opentelemetry.io/docs/kubernetes/operator/
+[transform-processor]: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/transformprocessor/README.md
+[opentelemetry-transformation-language]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl
+[space-level-o11y]: /spaces/howtos/self-hosted/space-observability
+[helm-chart-reference]: /reference/helm-reference
+[opentelemetry-transformation-language-functions]: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/ottl/ottlfuncs/README.md
+[opentelemetry-transformation-language-contexts]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl/contexts
+[guide-on-ottl]: https://betterstack.com/community/guides/observability/ottl/#a-brief-overview-of-the-ottl-grammar
diff --git a/spaces_versioned_docs/version-v1.13/howtos/query-api.md b/spaces_versioned_docs/version-v1.13/howtos/query-api.md
new file mode 100644
index 000000000..78163de2f
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.13/howtos/query-api.md
@@ -0,0 +1,320 @@
+---
+title: Query API
+sidebar_position: 40
+description: Use the `up` CLI to query objects and resources
+---
+
+
+
+
+Upbound's Query API allows users to inspect objects and resources within their control planes. The read-only `up alpha query` and `up alpha get` CLI commands allow you to gather information on your control planes quickly and efficiently. These commands follow the [`kubectl` conventions][kubectl-conventions] for filtering, sorting, and retrieving information from your Space.
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+). Query API is available in Cloud Spaces since v1.6 and Self-Hosted since v1.8.
+
+For detailed deployment procedures, see [Deploy Query API infrastructure - Self-Hosted](./self-hosted/query-api.md).
+:::
+
+
+
+## Using the Query API
+
+
+The Query API allows you to retrieve control plane information faster than traditional `kubectl` commands. This feature lets you debug your Crossplane resources with the CLI or within the Upbound Console's enhanced management views.
+
+### Query within a single control plane
+
+Use the `up alpha get` command to retrieve information about objects within the current control plane context. This command uses the **Query** endpoint and targets the current control plane.
+
+To switch between control plane groups, use the [`up ctx`][up-ctx] command and change to your desired context with the interactive prompt, or specify your control plane path directly:
+
+```shell
+up ctx ///
+```
+
+You can query within a single control plane with the [`up alpha get` command][up-alpha-get-command] to return more information about a given object within the current kubeconfig context.
+
+The `up alpha get` command can query resource types and aliases to return objects in your control plane.
+ +```shell +up alpha get managed +NAME READY SYNCED AGE +custom-account1-5bv5j-sa True True 15m +custom-cluster1-bq6dk-net True True 15m +custom-account1-5bv5j-subnet True True 15m +custom-cluster1-bq6dk-nodepool True True 15m +custom-cluster1-bq6dk-cluster True True 15m +custom-account1-5bv5j-net True True 15m +custom-cluster1-bq6dk-subnet True True 15m +custom-cluster1-bq6dk-sa True True 15m +``` + +The [`-A` flag][a-flag] queries for objects across all namespaces. + +```shell +up alpha get configmaps -A +NAMESPACE NAME AGE +crossplane-system uxp-versions-config 18m +crossplane-system universal-crossplane-config 18m +crossplane-system kube-root-ca.crt 18m +upbound-system kube-root-ca.crt 18m +kube-system kube-root-ca.crt 18m +kube-system coredns 18m +default kube-root-ca.crt 18m +kube-node-lease kube-root-ca.crt 18m +kube-public kube-root-ca.crt 18m +kube-system kube-apiserver-legacy-service-account-token-tracking 18m +kube-system extension-apiserver-authentication 18m +``` + +To query for [multiple resource types][multiple-resource-types], you can add the name or alias for the resource as a comma separated string. + +```shell +up alpha get providers,providerrevisions + +NAME HEALTHY REVISION IMAGE STATE DEP-FOUND DEP-INSTALLED AGE +providerrevision.pkg.crossplane.io/crossplane-contrib-provider-nop-ecc25c121431 True 1 xpkg.upbound.io/crossplane-contrib/provider-nop:v0.2.1 Active 18m +NAME INSTALLED HEALTHY PACKAGE AGE +provider.pkg.crossplane.io/crossplane-contrib-provider-nop True True xpkg.upbound.io/crossplane-contrib/provider-nop:v0.2.1 18m +``` + +### Query multiple control planes + +The [`up alpha query` command][up-alpha-query-command] returns a list of objects of any kind within all the control planes in your Space. This command uses either the **SpaceQuery** or **GroupQuery** endpoints depending on your query scope. The `-A` flag switches the query context from the group level to the entire Space + +The `up alpha query` command accepts resources and aliases to return objects across your group or Space. + +```shell +up alpha query crossplane + +NAME ESTABLISHED OFFERED AGE +compositeresourcedefinition.apiextensions.crossplane.io/xnetworks.platform.acme.co True True 20m +compositeresourcedefinition.apiextensions.crossplane.io/xaccountscaffolds.platform.acme.co True True 20m + + +NAME XR-KIND XR-APIVERSION AGE +composition.apiextensions.crossplane.io/xaccountscaffolds.platform.acme.co XAccountScaffold platform.acme.co/v1alpha1 20m +composition.apiextensions.crossplane.io/xnetworks.platform.acme.co XNetwork platform.acme.co/v1alpha1 20m + + +NAME REVISION XR-KIND XR-APIVERSION AGE +compositionrevision.apiextensions.crossplane.io/xaccountscaffolds.platform.acme.co-5ae9da5 1 XAccountScaffold platform.acme.co/v1alpha1 20m +compositionrevision.apiextensions.crossplane.io/xnetworks.platform.acme.co-414ce80 1 XNetwork platform.acme.co/v1alpha1 20m + +NAME READY SYNCED AGE +nopresource.nop.crossplane.io/custom-cluster1-bq6dk-subnet True True 19m +nopresource.nop.crossplane.io/custom-account1-5bv5j-net True True 19m + +## Output truncated... + +``` + + +The [`--sort-by` flag][sort-by-flag] allows you to return information to your specifications. You can construct your sort order in a JSONPath expression string or integer. 
+ + +```shell +up alpha query crossplane -A --sort-by="{.metadata.name}" + +CONTROLPLANE NAME AGE +default/test deploymentruntimeconfig.pkg.crossplane.io/default 10m + +CONTROLPLANE NAME AGE TYPE DEFAULT-SCOPE +default/test storeconfig.secrets.crossplane.io/default 10m Kubernetes crossplane-system +``` + +To query for multiple resource types, you can add the name or alias for the resource as a comma separated string. + +```shell +up alpha query namespaces,configmaps -A + +CONTROLPLANE NAME AGE +default/test namespace/upbound-system 15m +default/test namespace/crossplane-system 15m +default/test namespace/kube-system 16m +default/test namespace/default 16m + +CONTROLPLANE NAMESPACE NAME AGE +default/test crossplane-system configmap/uxp-versions-config 15m +default/test crossplane-system configmap/universal-crossplane-config 15m +default/test crossplane-system configmap/kube-root-ca.crt 15m +default/test upbound-system configmap/kube-root-ca.crt 15m +default/test kube-system configmap/coredns 16m +default/test default configmap/kube-root-ca.crt 16m + +## Output truncated... + +``` + +The Query API also allows you to return resource types with specific [label columns][label-columns]. + +```shell +up alpha query composite -A --label-columns=crossplane.io/claim-namespace + +CONTROLPLANE NAME SYNCED READY COMPOSITION AGE CLAIM-NAMESPACE +query-api-test/test xeks.argo.discover.upbound.io/test-k7xbk False xeks.argo.discover.upbound.io 51d default + +CONTROLPLANE NAME EXTERNALDNS SYNCED READY COMPOSITION AGE CLAIM-NAMESPACE +spaces-clusters/controlplane-query-api-test-spaces-playground xexternaldns.externaldns.platform.upbound.io/spaces-cluster-0-xd8v2-lhnl7 6.34.2 True True xexternaldns.externaldns.platform.upbound.io 19d default +default/query-api-test xexternaldns.externaldns.platform.upbound.io/space-awg-kine-f7dxq-nkk2q 6.34.2 True True xexternaldns.externaldns.platform.upbound.io 55d default + +## Output truncated... + +``` + +### Query API request format + +The CLI can also return a version of your query request with the [`--debug` flag][debug-flag]. This flag returns the API spec request for your query. + +```shell +up alpha query composite -A -d + +apiVersion: query.spaces.upbound.io/v1alpha1 +kind: SpaceQuery +metadata: + creationTimestamp: null +spec: + cursor: true + filter: + categories: + - composite + controlPlane: {} + limit: 500 + objects: + controlPlane: true + table: {} + page: {} +``` + +For more complex queries, you can interact with the Query API like a Kubernetes-style API by creating a query and applying it with `kubectl`. + +The example below is a query for `claim` resources in every control plane from oldest to newest and returns specific information about those claims. + + +```yaml +apiVersion: query.spaces.upbound.io/v1alpha1 +kind: SpaceQuery +spec: + filter: + categories: + - claim + order: + - creationTimestamp: Asc + cursor: true + count: true + objects: + id: true + controlPlane: true + object: + kind: true + apiVersion: true + metadata: + name: true + uid: true + spec: + containers: + image: true +``` + + +The Query API is served by the Spaces API endpoint. You can use `up ctx` to +switch the kubectl context to the Spaces API ingress. After that, you can use +`kubectl create` and receive the `response` for your query parameters. 
+ + +```shell +kubectl create -f spaces-query.yaml -o yaml +``` + +Your `response` should look similar to this example: + +```yaml {copy-lines="none"} +apiVersion: query.spaces.upbound.io/v1alpha1 +kind: SpaceQuery +metadata: + creationTimestamp: "2024-08-08T14:41:46Z" + name: default +response: + count: 3 + cursor: + next: "" + page: 0 + pageSize: 100 + position: 0 + objects: + - controlPlane: + name: query-api-test + namespace: default + id: default/query-api-test/823b2781-7e70-4d91-a6f0-ee8f455d67dc + object: + apiVersion: spaces.platform.upbound.io/v1alpha1 + kind: Space + metadata: + name: space-awg-kine + resourceVersion: "803868" + uid: 823b2781-7e70-4d91-a6f0-ee8f455d67dc + spec: {} + - controlPlane: + name: test-1 + namespace: test + id: test/test-1/08a573dd-851a-42cc-a600-b6f6ed37ee8d + object: + apiVersion: argo.discover.upbound.io/v1alpha1 + kind: EKS + metadata: + name: test-1 + resourceVersion: "4270320" + uid: 08a573dd-851a-42cc-a600-b6f6ed37ee8d + spec: {} + - controlPlane: + name: controlplane-query-api-test-spaces-playground + namespace: spaces-clusters + id: spaces-clusters/controlplane-query-api-test-spaces-playground/b5a6770f-1f85-4d09-8990-997c84bd4159 + object: + apiVersion: spaces.platform.upbound.io/v1alpha1 + kind: Space + metadata: + name: spaces-cluster-0 + resourceVersion: "1408337" + uid: b5a6770f-1f85-4d09-8990-997c84bd4159 + spec: {} +``` + + +## Query API Explorer + + + +import CrdDocViewer from '@site/src/components/CrdViewer'; + +### Query + +The Query resource allows you to query objects in a single control plane. + + + +### GroupQuery + +The GroupQuery resource allows you to query objects across a group of control planes. + + + +### SpaceQuery + +The SpaceQuery resource allows you to query objects across all control planes in a space. + + + + + + +[documentation]: /spaces/howtos/self-hosted/query-api +[up-ctx]: /reference/cli-reference +[up-alpha-get-command]: /reference/cli-reference +[a-flag]: /reference/cli-reference +[multiple-resource-types]: /reference/cli-reference +[up-alpha-query-command]: /reference/cli-reference +[sort-by-flag]: /reference/cli-reference +[label-columns]: /reference/cli-reference +[debug-flag]: /reference/cli-reference +[kubectl-conventions]: https://kubernetes.io/docs/reference/kubectl/generated/kubectl_get/ diff --git a/spaces_versioned_docs/version-v1.13/howtos/secrets-management.md b/spaces_versioned_docs/version-v1.13/howtos/secrets-management.md new file mode 100644 index 000000000..88e730ae5 --- /dev/null +++ b/spaces_versioned_docs/version-v1.13/howtos/secrets-management.md @@ -0,0 +1,719 @@ +--- +title: Secrets Management +sidebar_position: 20 +description: A guide for how to configure synchronizing external secrets into control + planes in a Space. +--- + +Upbound's _Shared Secrets_ is a built in secrets management feature that +provides an integrated way to manage secrets across your platform. It allows you +to store sensitive data like passwords and certificates for your managed control +planes as secrets in an external secret store. + +This feature is a wrapper around the [External Secrets Operator (ESO)][external-secrets-operator-eso] that pulls secrets from external vaults and distributes them across your platform. + +:::info API Version Information +This guide applies to all supported versions (v1.9-v1.14+). The SharedSecretStore and SharedExternalSecret CRDs have been stable since v1.9. + +For API specifications and configuration details, see the [Spaces API Reference](../../../reference/apis/spaces-api/). 
For version compatibility details, see the release notes for your Spaces version.
+:::
+
+## Benefits
+
+The Shared Secrets feature allows you to:
+
+* Access secrets from a variety of external secret stores without operational overhead
+* Configure synchronization for multiple control planes in a group
+* Store and manage all your secrets centrally
+* Use Shared Secrets across all Upbound environments (Cloud and Disconnected Spaces)
+* Synchronize secrets across groups of control planes while maintaining clear security boundaries
+* Manage secrets at scale programmatically while ensuring proper isolation and access control
+
+## Understanding the Architecture
+
+The Shared Secrets feature uses a hierarchical approach to centrally manage
+secrets and effectively control their distribution.
+
+![Shared Secrets workflow diagram](/img/shared-secrets-workflow.png)
+
+1. The flow begins at the group level, where you define your secret sources and distribution rules
+2. These rules automatically create corresponding resources in your control planes
+3. In each control plane, specific namespaces receive the secrets
+4. Changes at the group level automatically propagate through this chain
+
+## Component configuration
+
+Upbound Shared Secrets consists of two components:
+
+1. **SharedSecretStore**: Defines connections to external secret providers
+2. **SharedExternalSecret**: Specifies which secrets to synchronize and where
+
+
+### Connect to an External Vault
+
+
+The `SharedSecretStore` component is the connection point to your external
+secret vaults. It provisions ClusterSecretStore resources into control planes
+within the group.
+
+
+#### AWS Secrets Manager
+
+
+
+In this example, you'll create a `SharedSecretStore` that connects to AWS
+Secrets Manager in `us-west-2`, grants access to all control planes labeled with
+`environment: production`, and makes the secrets available in the `default` and
+`crossplane-system` namespaces.
+
+
+You can configure access to AWS Secrets Manager using static credentials or
+workload identity.
+
+:::important
+While the underlying ESO API supports more auth methods, static credentials are currently the only supported auth method in Cloud Spaces.
+:::
+
+##### Static credentials
+
+1. Use the AWS CLI to create access credentials.
+
+
+2. Save the access credentials in a file:
+```shell
+# Create a text file with AWS credentials
+cat > aws-credentials.txt << EOF
+[default]
+aws_access_key_id = 
+aws_secret_access_key = 
+EOF
+```
+
+3. Next, store the access credentials in a secret in the namespace that should have access to the `SharedSecretStore`.
+```shell
+kubectl create secret \
+  generic aws-credentials \
+  -n default \
+  --from-file=creds=./aws-credentials.txt
+```
+
+4. Create a `SharedSecretStore` custom resource file called `secretstore.yaml`.
+   Paste the following configuration:
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: aws-secrets
+spec:
+  # Define which control planes should receive this configuration
+  controlPlaneSelector:
+    labelSelectors:
+      - matchLabels:
+          environment: production
+
+  # Define which namespaces within those control planes can access secrets
+  namespaceSelector:
+    names:
+      - default
+      - crossplane-system
+
+  # Configure the connection to AWS Secrets Manager
+  provider:
+    aws:
+      service: SecretsManager
+      region: us-west-2
+      auth:
+        secretRef:
+          accessKeyIDSecretRef:
+            name: aws-credentials
+            key: access-key-id
+          secretAccessKeySecretRef:
+            name: aws-credentials
+            key: secret-access-key
+```
+
+
+
+##### Workload Identity with IRSA
+
+
+
+You can also use AWS IAM Roles for Service Accounts (IRSA) depending on your
+organization's needs:
+
+1. Ensure you have deployed the Spaces software into an IRSA-enabled EKS cluster.
+2. Follow the AWS instructions to create an IAM OIDC provider with your EKS OIDC
+   provider URL.
+3. Determine the Spaces-generated `controlPlaneID` of your control plane:
+```shell
+kubectl get controlplane -o jsonpath='{.status.controlPlaneID}'
+```
+
+4. Create an IAM trust policy in your AWS account to match the control plane.
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Principal": {
+        "Federated": "arn:aws:iam:::oidc-provider/"
+      },
+      "Action": "sts:AssumeRoleWithWebIdentity",
+      "Condition": {
+        "StringEquals": {
+          ":aud": "sts.amazonaws.com",
+          ":sub": [
+            "system:serviceaccount:mxp--system:external-secrets-controller"
+          ]
+        }
+      }
+    }
+  ]
+}
+```
+
+5. Update your Spaces deployment to annotate the SharedSecrets service account
+   with the role ARN.
+```shell
+up space upgrade ... \
+  --set controlPlanes.sharedSecrets.serviceAccount.customAnnotations."eks\.amazonaws\.com/role-arn"=""
+```
+
+6. Create a SharedSecretStore and reference the SharedSecrets service account:
+```yaml {copy-lines="all"}
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: aws-sm
+  namespace: default
+spec:
+  provider:
+    aws:
+      service: SecretsManager
+      region: 
+      auth:
+        jwt:
+          serviceAccountRef:
+            name: external-secrets-controller
+  controlPlaneSelector:
+    names:
+      - 
+  namespaceSelector:
+    names:
+      - default
+```
+
+When you create a `SharedSecretStore`, the underlying mechanism:
+
+1. Applies at the group level
+2. Determines which control planes should receive this configuration by the `controlPlaneSelector`
+3. Automatically creates a ClusterSecretStore inside each identified control plane
+4. Maintains a connection in each control plane with the ClusterSecretStore
+   credentials and configuration from the parent SharedSecretStore
+
+Upbound automatically generates a ClusterSecretStore in each matching control
+plane when you create a SharedSecretStore.
+
+```yaml {copy-lines="none"}
+# Automatically created in each matching control plane
+apiVersion: external-secrets.io/v1beta1
+kind: ClusterSecretStore
+metadata:
+  name: aws-secrets # Name matches the parent SharedSecretStore
+spec:
+  provider:
+    upboundspaces:
+      storeRef:
+        name: aws-secrets
+```
+
+When you create a SharedSecretStore, the controller replaces the provider with
+a special provider called `upboundspaces`. This provider references the
+SharedSecretStore object in the Spaces API. This avoids copying the actual cloud
+credentials from Spaces to each control plane.
+
+This workflow lets you configure the store connection once at the group level
+and automatically propagates it to each control plane. Individual control
+planes can use the store without exposure to the group-level configuration, and
+updates to the parent SharedSecretStore propagate to all child
+ClusterSecretStores.
+
+
+#### Azure Key Vault
+
+
+:::important
+While the underlying ESO API supports more auth methods, static credentials are currently the only supported auth method in Cloud Spaces.
+:::
+
+##### Static credentials
+
+1. Use the Azure CLI to create a service principal.
+2. Save the service principal credentials in a file:
+```json
+{
+  "appId": "myAppId",
+  "displayName": "myServicePrincipalName",
+  "password": "myServicePrincipalPassword",
+  "tenant": "myTenantId"
+}
+```
+
+3. Store the credentials as a Kubernetes secret:
+```shell
+kubectl create secret \
+  generic azure-secret-sp \
+  -n default \
+  --from-file=creds=./azure-credentials.json
+```
+
+4. Create a SharedSecretStore referencing these credentials:
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: azure-kv
+spec:
+  provider:
+    azurekv:
+      tenantId: ""
+      vaultUrl: ""
+      authSecretRef:
+        clientId:
+          name: azure-secret-sp
+          key: ClientID
+        clientSecret:
+          name: azure-secret-sp
+          key: ClientSecret
+  controlPlaneSelector:
+    names:
+      - 
+  namespaceSelector:
+    names:
+      - default
+```
+
+##### Workload Identity
+
+
+You can also use Entra Workload Identity Federation to access Azure Key Vault
+without needing to manage secrets.
+
+To use Entra Workload ID with AKS:
+
+
+1. Deploy the Spaces software into a [workload identity-enabled AKS cluster][workload-identity-enabled-aks-cluster].
+2. Retrieve the OIDC issuer URL of the AKS cluster:
+```shell
+az aks show --name "" \
+  --resource-group "" \
+  --query "oidcIssuerProfile.issuerUrl" \
+  --output tsv
+```
+
+3. Use the Azure CLI to make a managed identity:
+```shell
+az identity create \
+  --name "" \
+  --resource-group "" \
+  --location "" \
+  --subscription ""
+```
+
+4. Look up the managed identity's client ID:
+```shell
+az identity show \
+  --resource-group "" \
+  --name "" \
+  --query 'clientId' \
+  --output tsv
+```
+
+5. Update your Spaces deployment to annotate the SharedSecrets service account with the associated Entra application client ID from the previous step:
+```shell
+up space upgrade ... \
+  --set controlPlanes.sharedSecrets.serviceAccount.customAnnotations."azure\.workload\.identity/client-id"="" \
+  --set-string controlPlanes.sharedSecrets.pod.customLabels."azure\.workload\.identity/use"="true"
+```
+
+6. Determine the Spaces-generated `controlPlaneID` of your control plane. When you deploy a `kind: controlplane` in a Space, the Spaces software deploys a set of pods in a new namespace following the format `mxp--system`.
+```shell
+kubectl get controlplane -o jsonpath='{.status.controlPlaneID}'
+```
+
+7. Create a federated identity credential.
+```shell
+FEDERATED_IDENTITY_CREDENTIAL_NAME=
+USER_ASSIGNED_IDENTITY_NAME=
+RESOURCE_GROUP=
+AKS_OIDC_ISSUER=
+CONTROLPLANE_ID=
+az identity federated-credential create --name ${FEDERATED_IDENTITY_CREDENTIAL_NAME} --identity-name "${USER_ASSIGNED_IDENTITY_NAME}" --resource-group "${RESOURCE_GROUP}" --issuer "${AKS_OIDC_ISSUER}" --subject system:serviceaccount:"mxp-${CONTROLPLANE_ID}-system:external-secrets-controller" --audience api://AzureADTokenExchange
+```
+
+8. Assign the `Key Vault Secrets User` role to the user-assigned managed identity that you created earlier.
This step gives the managed identity permission to read secrets from the key vault:
+```shell
+az role assignment create \
+  --assignee-object-id "${IDENTITY_PRINCIPAL_ID}" \
+  --role "Key Vault Secrets User" \
+  --scope "${KEYVAULT_RESOURCE_ID}" \
+  --assignee-principal-type ServicePrincipal
+```
+
+:::important
+You must manually restart a workload's pod when you add the annotation to the running pod's service account. The Entra workload identity mutating admission webhook requires a restart to inject the necessary environment.
+:::
+
+9. Create a `SharedSecretStore`. Replace `vaultUrl` with the URL of your Azure Key Vault instance. Replace `identityId` with the client ID of the managed identity created earlier:
+```yaml {copy-lines="all"}
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: azure-kv
+spec:
+  provider:
+    azurekv:
+      authType: WorkloadIdentity
+      vaultUrl: ""
+  controlPlaneSelector:
+    names:
+      - 
+  namespaceSelector:
+    names:
+      - default
+```
+
+
+
+#### Google Cloud Secret Manager
+
+
+
+You can configure access to Google Cloud Secret Manager using static credentials or workload identity. Below are instructions for configuring either. See the [ESO provider API][eso-provider-api] for more information.
+
+:::important
+While the underlying ESO API supports more auth methods, static credentials are currently the only supported auth method in Cloud Spaces.
+:::
+
+##### Static credentials
+
+1. Use the [GCP CLI][gcp-cli] to create access credentials.
+2. Save the output in a file called `gcp-credentials.json`.
+3. Store the access credentials in a secret in the same namespace as the `SharedSecretStore`.
+   ```shell {label="kube-create-secret",copy-lines="all"}
+   kubectl create secret \
+     generic gcpsm-secret \
+     -n default \
+     --from-file=creds=./gcp-credentials.json
+   ```
+
+4. Create a `SharedSecretStore`, referencing the secret created earlier. Replace `projectID` with your GCP Project ID:
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: gcp-sm
+spec:
+  provider:
+    gcpsm:
+      auth:
+        secretRef:
+          secretAccessKeySecretRef:
+            name: gcpsm-secret
+            key: creds
+      projectID: 
+  controlPlaneSelector:
+    names:
+      - 
+  namespaceSelector:
+    names:
+      - default
+```
+
+:::tip
+The example above maps a Shared Secret Store into a single namespace of a single control plane. Read [control plane selection][control-plane-selection] and [namespace selection][namespace-selection] to learn how to map into one or more namespaces of one or more control planes.
+:::
+
+
+##### Workload identity with Service Accounts to IAM Roles
+
+
+To configure, grant the `roles/iam.workloadIdentityUser` role to the Kubernetes
+service account in the control plane namespace to impersonate the IAM service
+account.
+
+1. Ensure you've deployed Spaces on a [Workload Identity Federation-enabled][workload-identity-federation-enabled] GKE cluster.
+2. Determine the Spaces-generated `controlPlaneID` of your control plane. When you deploy a `kind: controlplane` in a Space, the Spaces software deploys a set of pods in a new namespace following the format `mxp--system`.
+```shell
+kubectl get controlplane -o jsonpath='{.status.controlPlaneID}'
+```
+
+3. Create a GCP IAM service account with the [GCP CLI][gcp-cli-1]:
+```shell
+gcloud iam service-accounts create \
+  --project=
+```
+
+4.
Grant the IAM service account the role to access GCP Secret Manager:
+```shell
+SA_NAME=
+IAM_SA_PROJECT_ID=
+gcloud projects add-iam-policy-binding "${IAM_SA_PROJECT_ID}" \
+  --member "serviceAccount:${SA_NAME}@${IAM_SA_PROJECT_ID}.iam.gserviceaccount.com" \
+  --role roles/secretmanager.secretAccessor
+```
+
+5. When you enable the Shared Secrets feature, a service account gets created in each control plane for the External Secrets Operator. Apply a [GCP IAM policy binding][gcp-iam-policy-binding] to associate this service account with the desired GCP IAM role.
+```shell
+PROJECT_ID=
+PROJECT_NUMBER=
+CONTROLPLANE_ID=
+gcloud projects add-iam-policy-binding projects/${PROJECT_ID} \
+  --role "roles/iam.workloadIdentityUser" \
+  --member=principal://iam.googleapis.com/projects/${PROJECT_NUMBER}/locations/global/workloadIdentityPools/${PROJECT_ID}.svc.id.goog/subject/ns/mxp-${CONTROLPLANE_ID}-system/sa/external-secrets-controller
+```
+
+6. Update your Spaces deployment to annotate the SharedSecrets service account with the GCP IAM service account's identifier:
+```shell
+up space upgrade ... \
+  --set controlPlanes.sharedSecrets.serviceAccount.customAnnotations."iam\.gke\.io/gcp-service-account"=""
+```
+
+7. Create a `SharedSecretStore`. Replace `projectID` with your GCP Project ID:
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: gcp-sm
+spec:
+  provider:
+    gcpsm:
+      projectID: 
+  controlPlaneSelector:
+    names:
+      - 
+  namespaceSelector:
+    names:
+      - default
+```
+
+:::tip
+The example above maps a Shared Secret Store into a single namespace of a single control plane. Read [control plane selection][control-plane-selection-1] and [namespace selection][namespace-selection-2] to learn how to map into one or more namespaces of one or more control planes.
+:::
+
+### Manage your secret distribution
+
+After you create your SharedSecretStore, you can define which secrets to
+distribute using SharedExternalSecret:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedExternalSecret
+metadata:
+  name: database-credentials
+  namespace: default
+spec:
+  # Select the same control planes as your SharedSecretStore
+  controlPlaneSelector:
+    labelSelectors:
+      - matchLabels:
+          environment: production
+
+  externalSecretSpec:
+    refreshInterval: 1h
+    secretStoreRef:
+      name: aws-secrets  # References the SharedSecretStore name
+      kind: ClusterSecretStore
+    target:
+      name: db-credentials
+    data:
+      - secretKey: username
+        remoteRef:
+          key: prod/database/credentials
+          property: username
+      - secretKey: password
+        remoteRef:
+          key: prod/database/credentials
+          property: password
+```
+
+This configuration:
+
+* Pulls database credentials from your external secret provider
+* Creates secrets in all production control planes
+* Refreshes the secrets every hour
+* Creates a secret called `db-credentials` in each control plane
+
+When you create a SharedExternalSecret at the group level, Upbound's system
+creates a template for the corresponding ClusterExternalSecrets in each selected
+control plane.
+
+The example below simulates the ClusterExternalSecret that Upbound creates:
+
+```yaml
+# Inside each matching control plane:
+apiVersion: external-secrets.io/v1beta1
+kind: ClusterExternalSecret
+metadata:
+  name: database-credentials
+spec:
+  refreshInterval: 1h
+  secretStoreRef:
+    name: aws-secrets
+    kind: ClusterSecretStore
+  data:
+    - secretKey: username
+      remoteRef:
+        key: prod/database/credentials
+        property: username
+```
+
+The hierarchy in this configuration is:
+
+1.
SharedExternalSecret (group level) defines what secrets to distribute
+2. ClusterExternalSecret (control plane level) manages the distribution within
+   each control plane
+3. Kubernetes Secrets (namespace level) are created in specified namespaces
+
+
+#### Control plane selection
+
+To configure which control planes in a group you want to project a SecretStore into, use the `spec.controlPlaneSelector` field. You can either use `labelSelectors` or the `names` of a control plane directly. A control plane matches if any of the label selectors match.
+
+This example matches all control planes in the group that have `environment: production` as a label:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: my-secret-store
+spec:
+  controlPlaneSelector:
+    labelSelectors:
+      - matchLabels:
+          environment: production
+```
+
+You can use the more complex `matchExpressions` to match labels based on an expression. This example matches control planes that have label `environment: production` or `environment: staging`:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: my-secret-store
+spec:
+  controlPlaneSelector:
+    labelSelectors:
+      - matchExpressions:
+          - { key: environment, operator: In, values: [production,staging] }
+```
+
+You can also specify the names of control planes directly:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: my-secret-store
+spec:
+  controlPlaneSelector:
+    names:
+      - controlplane-dev
+      - controlplane-staging
+      - controlplane-prod
+```
+
+
+#### Namespace selection
+
+To configure which namespaces **within each matched control plane** to project the secret store into, use the `spec.namespaceSelector` field. The projected secret store only appears in the namespaces matching the provided selector. You can either use `labelSelectors` or the `names` of namespaces directly. A namespace matches if any of the label selectors match.
+
+**For all control planes matched by** `spec.controlPlaneSelector`, this example matches all namespaces in each selected control plane that have `team: team1` as a label:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: my-secret-store
+spec:
+  namespaceSelector:
+    labelSelectors:
+      - matchLabels:
+          team: team1
+```
+
+You can use the more complex `matchExpressions` to match labels based on an expression. This example matches namespaces that have label `team: team1` or `team: team2`:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: my-secret-store
+spec:
+  namespaceSelector:
+    labelSelectors:
+      - matchExpressions:
+          - { key: team, operator: In, values: [team1,team2] }
+```
+
+You can also specify the names of namespaces directly:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: my-secret-store
+spec:
+  namespaceSelector:
+    names:
+      - team1-namespace
+      - team2-namespace
+```
+
+## Configure secrets directly in a control plane
+
+
+The above explains how to use group-scoped resources to project secrets into multiple control planes. You can also use ESO API types directly in a control plane, as you would in standalone Crossplane or Kubernetes.
+
+
+See the [ESO documentation][eso-documentation] for a full guide on using the API types.
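+
+For example, here's a minimal sketch of declaring the ESO API types inside a
+single control plane. The store name, credential secret, region, and remote
+key are illustrative, and the sketch assumes an `aws-credentials` secret
+already exists in the `default` namespace:
+
+```yaml
+# A namespaced SecretStore defined directly in one control plane.
+apiVersion: external-secrets.io/v1beta1
+kind: SecretStore
+metadata:
+  name: aws-secrets
+  namespace: default
+spec:
+  provider:
+    aws:
+      service: SecretsManager
+      region: us-west-2
+      auth:
+        secretRef:
+          accessKeyIDSecretRef:
+            name: aws-credentials
+            key: access-key-id
+          secretAccessKeySecretRef:
+            name: aws-credentials
+            key: secret-access-key
+---
+# An ExternalSecret that materializes a Kubernetes Secret from the store.
+apiVersion: external-secrets.io/v1beta1
+kind: ExternalSecret
+metadata:
+  name: db-credentials
+  namespace: default
+spec:
+  refreshInterval: 1h
+  secretStoreRef:
+    name: aws-secrets
+    kind: SecretStore
+  target:
+    name: db-credentials
+  data:
+    - secretKey: password
+      remoteRef:
+        key: prod/database/credentials
+        property: password
+```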
+
+## Best practices
+
+When you configure secrets management in your Upbound environment, keep the
+following best practices in mind:
+
+**Use consistent labeling schemes** across your control planes for predictable
+and manageable secret distribution.
+
+**Organize your secrets** in your external provider using a hierarchical
+structure that mirrors your control plane organization.
+
+**Set appropriate refresh intervals** based on your security requirements and the
+nature of the secrets.
+
+**Use namespace selection sparingly** to limit secret distribution to only the
+namespaces that need them.
+
+**Use separate tokens for each environment.** Keep them in distinct
+SharedSecretStores. Users could bypass SharedExternalSecret selectors by
+creating ClusterExternalSecrets directly in control planes, which grants access
+to all secrets available to that token.
+
+**Document your secret management architecture**, including which control planes
+should receive which secrets.
+
+[control-plane-selection]: #control-plane-selection
+[namespace-selection]: #namespace-selection
+[control-plane-selection-1]: #control-plane-selection
+[namespace-selection-2]: #namespace-selection
+
+[external-secrets-operator-eso]: https://external-secrets.io
+[workload-identity-enabled-aks-cluster]: https://learn.microsoft.com/en-us/azure/aks/workload-identity-deploy-cluster
+[eso-provider-api]: https://external-secrets.io/latest/provider/google-secrets-manager/
+[gcp-cli]: https://cloud.google.com/iam/docs/creating-managing-service-account-keys
+[workload-identity-federation-enabled]: https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity#enable_on_clusters_and_node_pools
+[gcp-cli-1]: https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity#kubernetes-sa-to-iam
+[gcp-iam-policy-binding]: https://cloud.google.com/sdk/gcloud/reference/iam/service-accounts/add-iam-policy-binding
+[eso-documentation]: https://external-secrets.io/latest/introduction/getting-started/
diff --git a/spaces_versioned_docs/version-v1.13/howtos/self-hosted/_category_.json b/spaces_versioned_docs/version-v1.13/howtos/self-hosted/_category_.json
new file mode 100644
index 000000000..5bf23bb0a
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.13/howtos/self-hosted/_category_.json
@@ -0,0 +1,11 @@
+{
+  "label": "Self-Hosted Spaces",
+  "position": 2,
+  "collapsed": true,
+  "customProps": {
+    "plan": "business"
+  }
+
+}
+
+
diff --git a/spaces_versioned_docs/version-v1.13/howtos/self-hosted/administer-features.md b/spaces_versioned_docs/version-v1.13/howtos/self-hosted/administer-features.md
new file mode 100644
index 000000000..ce878014e
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.13/howtos/self-hosted/administer-features.md
@@ -0,0 +1,121 @@
+---
+title: Administer features
+sidebar_position: 12
+description: Enable and disable features in Spaces
+---
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Feature flags and their availability may vary by version.
+
+For detailed feature availability across versions, see the release notes for your Spaces version.
+:::
+
+This guide shows how to enable or disable features in your self-hosted Space.
+
+## Shared secrets
+
+**Status:** Preview
+
+This feature is enabled by default in Cloud Spaces.
+
+To enable this feature in a self-hosted Space, set
+`features.alpha.sharedSecrets.enabled=true` when installing the Space:
+
+```bash
+up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
+  ...
+  --set "features.alpha.sharedSecrets.enabled=true" \
+```
+
+
+## Observability
+
+**Status:** GA
+**Available from:** Spaces v1.13+
+
+This feature is enabled by default in Cloud Spaces.
+
+
+
+To enable this feature in a self-hosted Space, set
+`observability.enabled=true` (`features.alpha.observability.enabled=true` before `v1.14.0`) when installing the Space:
+
+```bash
+up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
+  ...
+  --set "observability.enabled=true" \
+```
+
+The observability feature collects telemetry data from user-facing control
+plane workloads like:
+
+* Crossplane
+* Providers
+* Functions
+
+Self-hosted Spaces users can add control plane system workloads, such as the
+`api-server` and `etcd`, by setting the
+`observability.collectors.includeSystemTelemetry` Helm flag to true.
+
+### Sensitive data
+
+To avoid exposing sensitive data in the `SharedTelemetryConfig` resource, use
+Kubernetes secrets to store the sensitive data and reference the secret in the
+`SharedTelemetryConfig` resource.
+
+Create the secret in the same namespace/group as the `SharedTelemetryConfig`
+resource. The example below uses `kubectl create secret` to create a new secret:
+
+```bash
+kubectl create secret generic sensitive -n \
+  --from-literal=apiKey='YOUR_API_KEY'
+```
+
+Next, reference the secret in the `SharedTelemetryConfig` resource:
+
+```yaml
+apiVersion: observability.spaces.upbound.io/v1alpha1
+kind: SharedTelemetryConfig
+metadata:
+  name: newrelic
+spec:
+  configPatchSecretRefs:
+    - name: sensitive
+      key: apiKey
+      path: exporters.otlphttp.headers.api-key
+  controlPlaneSelector:
+    labelSelectors:
+      - matchLabels:
+          org: foo
+  exporters:
+    otlphttp:
+      endpoint: https://otlp.nr-data.net
+      headers:
+        api-key: dummy # This value is replaced by the secret value, can be omitted
+  exportPipeline:
+    metrics: [otlphttp]
+    traces: [otlphttp]
+    logs: [otlphttp]
+```
+
+The `configPatchSecretRefs` field in the `spec` specifies the secret `name`,
+`key`, and `path` values to inject the secret value into the
+`SharedTelemetryConfig` resource.
+
+## Shared backups
+
+As of Spaces `v1.12.0`, this feature is enabled by default.
+
+To disable it in a self-hosted Space, pass `features.alpha.sharedBackup.enabled=false` as a Helm chart value:
+`--set "features.alpha.sharedBackup.enabled=false"`
+
+## Query API
+
+**Status:** Preview
+
+The Query API is available in the Cloud Space offering and enabled by default.
+
+The Query API is required for self-hosted deployments with connected Spaces. See the
+related [documentation][documentation]
+to enable this feature.
+
+[documentation]: /spaces/howtos/query-api/
diff --git a/spaces_versioned_docs/version-v1.13/howtos/self-hosted/attach-detach.md b/spaces_versioned_docs/version-v1.13/howtos/self-hosted/attach-detach.md
new file mode 100644
index 000000000..1465921cf
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.13/howtos/self-hosted/attach-detach.md
@@ -0,0 +1,198 @@
+---
+title: Connect or disconnect a Space
+sidebar_position: 12
+description: Enable and connect self-hosted Spaces to the Upbound console
+---
+:::info API Version Information
+This guide applies to **Spaces v1.8.0 and later** (Self-Hosted only). Connection to the Upbound console requires the Query API and RBAC to be enabled.
+
+For Query API setup details, see [Deploy Query API infrastructure](./query-api.md).
+:::
+
+:::important
+This feature is in preview.
Starting in Spaces `v1.8.0` and later, you must +deploy and [enable the Query API][enable-the-query-api] and [enable Upbound +RBAC][enable-upbound-rbac] to connect a Space to Upbound. +::: + +[Upbound][upbound] allows you to connect self-hosted Spaces and enables a streamlined operations and debugging experience in your Console. + +## Usage + +### Connect + +Before you begin, make sure you have: + +- An existing Upbound [organization][organization] in Upbound SaaS. +- The `up` CLI installed and logged into your organization +- `kubectl` installed with the kubecontext of your self-hosted Space cluster. +- A `token.json` license, provided by your Upbound account representative. +- You enabled the [Query API][query-api] in the self-hosted Space. + +Create a new `UPBOUND_SPACE_NAME`. If you don't create a name, `up` automatically generates one for you: + +```ini +export UPBOUND_SPACE_NAME=your-self-hosted-space +``` + +#### With up CLI + +:::tip +The command tries to connect the Space to the org account context pointed at by your `up` CLI profile. Make sure you've logged into Upbound SaaS with `up login -a ` before trying to connect the Space. +::: + +Connect the Space to the Console: + +```bash +up space connect "${UPBOUND_SPACE_NAME}" +``` + +This command installs a Connect agent, creates a service account, and configures permissions in your Upbound cloud organization in the `upbound-system` namespace of your Space. + +#### With Helm + +Export your Upbound org account name to an environment variable called `UPBOUND_ORG_NAME`. You can see this value by running `up org list` after logging on to Upbound. + +```ini +export UPBOUND_ORG_NAME=your-org-name +``` + +Create a new robot token and export it to an environment variable called `UPBOUND_TOKEN`: + +```bash +up robot create "${UPBOUND_SPACE_NAME}" --description="Robot used for authenticating Space '${UPBOUND_SPACE_NAME}' with Upbound Connect" +export UPBOUND_TOKEN=$(up robot token create "$UPBOUND_SPACE_NAME" "$UPBOUND_SPACE_NAME" --file - | jq -r '.token') +``` + +:::note +Follow the [`jq` installation guide][jq-install] if your machine doesn't include +it by default. +::: + +Create a secret containing the robot token: + +```bash +kubectl create secret -n upbound-system generic connect-token --from-literal=token=${UPBOUND_TOKEN} +``` + +Specify your username and password for the helm OCI registry: + +```bash +jq -r .token $SPACES_TOKEN_PATH | helm registry login xpkg.upbound.io -u $(jq -r .accessId $SPACES_TOKEN_PATH) --password-stdin +``` + +In the same cluster where you installed the Spaces software, install the Upbound connect agent with your token secret. + +```bash +helm -n upbound-system upgrade --install agent \ + oci://xpkg.upbound.io/spaces-artifacts/agent \ + --version "0.0.0-441.g68777b9" \ + --set "image.repository=xpkg.upbound.io/spaces-artifacts/agent" \ + --set "registration.image.repository=xpkg.upbound.io/spaces-artifacts/register-init" \ + --set "imagePullSecrets[0].name=upbound-pull-secret" \ + --set "registration.enabled=true" \ + --set "space=${UPBOUND_SPACE_NAME}" \ + --set "organization=${UPBOUND_ORG_NAME}" \ + --set "tokenSecret=connect-token" \ + --wait +``` + + +#### View your Space in the Console + + +Go to the [Upbound Console][upbound-console], log in, and choose the newly connected Space from the Space selector dropdown. + +![A screenshot of the Upbound Console space selector dropdown](/img/attached-space.png) + +:::note +You can only connect a self-hosted Space to a single organization at a time. 
+
+:::
+
+### Disconnect
+
+#### With up CLI
+
+To disconnect a self-hosted Space or a deleted self-hosted Space, run the following command:
+
+```bash
+up space disconnect "${UPBOUND_SPACE_NAME}"
+```
+
+If the Space still exists, this command uninstalls the Connect agent and deletes the associated service account and permissions.
+
+#### With Helm
+
+To disconnect a self-hosted Space or a deleted self-hosted Space, run the following command:
+
+```bash
+helm delete -n upbound-system agent
+```
+
+Clean up the robot token you created for this self-hosted Space:
+
+```bash
+up robot delete "${UPBOUND_SPACE_NAME}" --force
+```
+
+## Security model
+
+### Architecture
+
+![An architectural diagram of a self-hosted Space attached to Upbound](/img/console-attach-architecture.jpg)
+
+:::note
+This diagram illustrates a self-hosted Space running in AWS connected to the global Upbound Console. The same model applies to a Space running in AKS, GKE, or other Kubernetes environments.
+:::
+
+### Data path
+
+Upbound uses a Pub/Sub model over TLS to communicate between Upbound's global
+console and your self-hosted Space. A self-hosted Space establishes a secure
+connection with `connect.upbound.io`, `api.upbound.io`, and `auth.upbound.io` and subscribes to an
+endpoint.
+
+:::important
+Add `connect.upbound.io`, `api.upbound.io`, and `auth.upbound.io` to your organization's list of
+allowed endpoints.
+:::
+
+The
+Upbound Console communicates with the Space through that endpoint. The data flow
+is:
+
+1. Users sign in to the Upbound Console, redirecting to authenticate with an organization's configured Identity Provider via SSO.
+2. Once authenticated, actions in the Console, like listing control planes or specific resource types from a control plane, generate requests. These requests post as messages to the Upbound Connect service.
+3. A user's self-hosted Space polls the Upbound Connect service periodically for new messages, verifies the authenticity of each message, and fulfills the contained request.
+4. A user's self-hosted Space returns the results of the request to the Upbound Connect service, and the Console renders the results in the user's browser session.
+
+**Upbound never stores data originating from a self-hosted Space.** The data is transient and only exposed in the user's browser session. The Console needs this data to render your resources and control planes in the UI.
+
+### Data transmitted
+
+Users interact with the Upbound Console to generate request queries to the Upbound Connect service while exploring, managing, or debugging a self-hosted Space. These requests send data back to the user's browser session in the Console, including:
+
+* Metadata for the Space
+* Metadata for control planes in the Space
+* Configuration manifests for various resource types within your Space: Crossplane managed resources, composite resources, composite resource claims, Upbound shared secrets, Upbound shared backups, Crossplane providers, ProviderConfigs, Configurations, and Crossplane Composite Functions.
+
+:::important
+This data only concerns resource configuration. The data _inside_ the managed
+resources in your Space isn't visible at any point.
+:::
+
+**Upbound can't see your data.** Upbound doesn't have access to session-based data rendered for your users in the Upbound Console. Upbound has no information about your self-hosted Space, other than that you've connected a self-hosted Space.
+
+### Threat vectors
+
+Only users with editor or administrative permissions can make changes through the Console, like creating or deleting control planes or groups.
+
+
+[enable-the-query-api]: /spaces/howtos/self-hosted/query-api
+[enable-upbound-rbac]: /manuals/platform/concepts/authorization/upbound-rbac
+[upbound]: /manuals/console/upbound-console
+[organization]: /manuals/platform/concepts/identity-management/organizations
+[query-api]: /spaces/howtos/self-hosted/query-api
+[jq-install]: https://jqlang.org/download/
+
+[upbound-console]: https://console.upbound.io
diff --git a/spaces_versioned_docs/version-v1.13/howtos/self-hosted/billing.md b/spaces_versioned_docs/version-v1.13/howtos/self-hosted/billing.md
new file mode 100644
index 000000000..145ff9f03
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.13/howtos/self-hosted/billing.md
@@ -0,0 +1,307 @@
+---
+title: Self-Hosted Space Billing
+sidebar_position: 50
+description: A guide for how billing works in an Upbound Space
+---
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Billing models may evolve between versions; see Capacity Licensing for alternative models.
+
+For capacity-based licensing details and reference specifications, see [Capacity Licensing](/spaces/howtos/self-hosted/capacity-licensing).
+:::
+
+Spaces are a self-hosting feature of Upbound's [flagship product][flagship-product] for platform teams to deploy control planes in their self-managed environments. You can install Spaces into any Kubernetes cluster in your own cloud account, on-premises data center, or on the edge. The pricing is usage-based and requires an Upbound account and subscription. The billing unit is a `Loop`.
+
+
+:::info
+This guide describes the traditional usage-based billing model using object storage. For disconnected or air-gapped environments, consider [Capacity Licensing](/spaces/howtos/self-hosted/capacity-licensing), which provides a simpler fixed-capacity model with local usage tracking.
+:::
+
+## Billing details
+
+Spaces **aren't connected** to Upbound's global service. To enable proper billing, the Spaces software ships a controller whose responsibility is to collect billing data from your Spaces deployment. The collection and storage of your billing data happens entirely within your environment; no data is automatically emitted back to Upbound's global service. This data gets written to object storage of your choice. AWS, Azure, and GCP are currently supported. The Spaces software exports billing usage data every ~15 seconds.
+
+Spaces customers must periodically provide the billing data to Upbound. Contact your Upbound sales representative to learn more.
+
+
+
+## AWS S3
+
+
+
+Configure billing to write to an S3 bucket by providing the following values at install-time. Create an S3 bucket if you don't already have one.
+
+### IAM policy
+
+You must create an IAM policy and attach it to the IAM user (for static credentials) or IAM role (for assumed
+roles).
+
+The policy example below enables the necessary S3 permissions:
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Sid": "EnableS3Permissions",
+      "Effect": "Allow",
+      "Action": [
+        "s3:PutObject",
+        "s3:GetObject",
+        "s3:ListBucket",
+        "s3:DeleteObject"
+      ],
+      "Resource": [
+        "arn:aws:s3:::your-bucket-name/*",
+        "arn:aws:s3:::your-bucket-name"
+      ]
+    },
+    {
+      "Sid": "ListBuckets",
+      "Effect": "Allow",
+      "Action": "s3:ListAllMyBuckets",
+      "Resource": "*"
+    }
+  ]
+}
+```
+
+### Authentication with static credentials
+
+In your Spaces install cluster, create a secret in the `upbound-system`
+namespace. This secret must contain the keys `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`.
+
+```bash
+kubectl create secret generic billing-credentials -n upbound-system \
+  --from-literal=AWS_ACCESS_KEY_ID= \
+  --from-literal=AWS_SECRET_ACCESS_KEY=
+```
+
+Install the Space software, providing the billing details alongside the other required values.
+
+
+
+
+
+```bash {hl_lines="2-6"}
+helm -n upbound-system upgrade --install spaces ... \
+  --set "billing.enabled=true" \
+  --set "billing.storage.provider=aws" \
+  --set "billing.storage.aws.region=" \
+  --set "billing.storage.aws.bucket=" \
+  --set "billing.storage.secretRef.name=billing-credentials"
+  ...
+```
+
+
+
+
+
+```bash {hl_lines="2-6"}
+up space init ... \
+  --set "billing.enabled=true" \
+  --set "billing.storage.provider=aws" \
+  --set "billing.storage.aws.region=" \
+  --set "billing.storage.aws.bucket=" \
+  --set "billing.storage.secretRef.name=billing-credentials"
+  ...
+```
+
+
+
+
+
+
+
+### Authentication with an IAM role
+
+
+To use short-lived credentials with an assumed IAM role, create an IAM role with
+an established trust relationship to the `vector` service account in all `mxp-*-system`
+namespaces.
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Principal": {
+        "Federated": "arn:aws:iam::12345678912:oidc-provider/oidc.eks.eu-west-2.amazonaws.com/id/YOUROIDCPROVIDERID"
+      },
+      "Action": "sts:AssumeRoleWithWebIdentity",
+      "Condition": {
+        "StringLike": {
+          "oidc.eks.eu-west-2.amazonaws.com/id/YOUROIDCPROVIDERID:sub": "system:serviceaccount:mxp-*-system:vector"
+        }
+      }
+    }
+  ]
+}
+```
+
+For more information about workload identities, review the [Workload-identity
+Configuration documentation][workload-identity-configuration-documentation].
+
+
+
+
+
+```bash {hl_lines="2-7"}
+helm -n upbound-system upgrade --install spaces ... \
+  --set "billing.enabled=true" \
+  --set "billing.storage.provider=aws" \
+  --set "billing.storage.aws.region=" \
+  --set "billing.storage.aws.bucket=" \
+  --set "billing.storage.secretRef.name=" \
+  --set "controlPlanes.vector.serviceAccount.customAnnotations[eks.amazonaws.com/role-arn]="
+  ...
+```
+
+
+
+
+
+```bash {hl_lines="2-7"}
+up space init ... \
+  --set "billing.enabled=true" \
+  --set "billing.storage.provider=aws" \
+  --set "billing.storage.aws.region=" \
+  --set "billing.storage.aws.bucket=" \
+  --set "billing.storage.secretRef.name=" \
+  --set "controlPlanes.vector.serviceAccount.customAnnotations[eks.amazonaws.com/role-arn]="
+  ...
+```
+
+
+
+
+
+
+*Note*: You must set `billing.storage.secretRef.name` to an empty string when using an assumed role.
+
+
+## Azure blob storage
+
+Configure billing to write to a blob in Azure by providing the following values at install-time. Create a storage account and container if you don't already have one.
+
+Then, on the cluster where you installed the Spaces software, create a secret in `upbound-system`.
This secret must contain keys `AZURE_TENANT_ID`, `AZURE_CLIENT_ID`, and `AZURE_CLIENT_SECRET`. Make sure to replace the values with details generated from your Azure account. + +```bash +kubectl create secret generic billing-credentials -n upbound-system \ + --from-literal=AZURE_TENANT_ID= \ + --from-literal=AZURE_CLIENT_ID= \ + --from-literal=AZURE_CLIENT_SECRET= +``` + +Install the Space software, providing the billing details to the other required values. + + + + + + +```bash {hl_lines="2-6"} +helm -n upbound-system upgrade --install spaces ... \ + --set "billing.enabled=true" \ + --set "billing.storage.provider=azure" \ + --set "billing.storage.azure.storageAccount=" \ + --set "billing.storage.azure.container=" \ + --set "billing.storage.secretRef.name=billing-credentials" + ... +``` + + + + + +```bash {hl_lines="2-6"} +up space init ... \ + --set "billing.enabled=true" \ + --set "billing.storage.provider=azure" \ + --set "billing.storage.azure.storageAccount=" \ + --set "billing.storage.azure.container=" \ + --set "billing.storage.secretRef.name=billing-credentials" + ... +``` + + + + + + + +## GCP Cloud Storage Buckets + + +Configure billing to write to a Cloud Storage bucket in GCP by providing the following values at install-time. Create a bucket if you don't already have one. + +Then, on the cluster where you installed the Spaces software, create a secret in `upbound-system`. This secret must contain the key `google_application_credentials`. Make sure to replace the value with a GCP service account key JSON generated from your GCP account. + +```bash +kubectl create secret generic billing-credentials -n upbound-system \ + --from-literal=google_application_credentials= +``` + +Install the Space software, providing the billing details to the other required values. + + + + + + +```bash {hl_lines="2-5"} +helm -n upbound-system upgrade --install spaces ... \ + --set "billing.enabled=true" \ + --set "billing.storage.provider=gcp" \ + --set "billing.storage.gcp.bucket=" \ + --set "billing.storage.secretRef.name=billing-credentials" + ... +``` + + + + + +```bash {hl_lines="2-5"} +up space init ... \ + --set "billing.enabled=true" \ + --set "billing.storage.provider=gcp" \ + --set "billing.storage.gcp.bucket=" \ + --set "billing.storage.secretRef.name=billing-credentials" + ... +``` + + + + + +## Export billing data to send to Upbound + +To prepare the billing data to send to Upbound, do the following: + +Ensure the current context of your kubeconfig points at the Spaces cluster. Then, run the [export][export] command. + + +:::important +Your current CLI must have read access to the bucket to run this command. +::: + + +The example below exports billing data stored in AWS: + +```bash +up space billing export --provider=aws \ + --bucket=spaces-billing-bucket \ + --account=your-upbound-org \ + --billing-month=2024-07 \ + --force-incomplete +``` + +The command creates a billing report that's zipped up in your current working directory. Send the output to your Upbound sales representative. + + +You can find full instructions and command options in the up [CLI reference][cli-reference] docs. 
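+
+Before exporting, you can optionally confirm that the Spaces software is writing
+usage data to your bucket. Here's a quick sketch with the AWS CLI, assuming read
+access to the bucket and the same `spaces-billing-bucket` name as the export
+example above:
+
+```bash
+# List all billing objects and print a total object count and size
+aws s3 ls s3://spaces-billing-bucket/ --recursive --human-readable --summarize
+```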
+ + +[export]: /reference/cli-reference +[cli-reference]: /reference/cli-reference +[flagship-product]: https://www.upbound.io/platform +[workload-identity-configuration-documentation]: https://docs.upbound.io/operate/accounts/authentication/oidc-configuration diff --git a/spaces_versioned_docs/version-v1.13/howtos/self-hosted/capacity-licensing.md b/spaces_versioned_docs/version-v1.13/howtos/self-hosted/capacity-licensing.md new file mode 100644 index 000000000..a1dc6c101 --- /dev/null +++ b/spaces_versioned_docs/version-v1.13/howtos/self-hosted/capacity-licensing.md @@ -0,0 +1,591 @@ +--- +title: Capacity Licensing +sidebar_position: 60 +description: A guide for capacity-based licensing in self-hosted Spaces +plan: "enterprise" +--- + + + + + +This guide explains how to configure and monitor capacity-based licensing in +self-hosted Upbound Spaces. Capacity licensing provides a simplified billing +model for disconnected or air-gapped environments where automated usage +reporting isn't possible. + +:::info +Spaces `v1.15` and later support Capacity Licensing as an +alternative to the traditional usage-based billing model described in the +[Self-Hosted Space Billing][space-billing] guide. +::: + +## Overview + +Capacity licensing allows organizations to purchase a fixed capacity of +resources upfront. The Spaces software tracks usage locally and provides +visibility into consumption against your purchased capacity, all without +requiring external connectivity to Upbound's services. + +### Key concepts + +- **Resource Hours**: The primary billing unit representing all resources + managed by Crossplane over time. This includes managed resources, + composites (XRs), claims (XRCs), and all composed resources - essentially + everything Crossplane manages. The system aggregates resource counts over each + hour using trapezoidal integration to accurately account for changes in + resource count throughout the hour. +- **Operations**: The number of Operations invoked by Crossplane. +- **License Capacity**: The total amount of resource hours and operations included in your license. +- **Usage Tracking**: Continuous monitoring of consumption with real-time utilization percentages. + +### How it works + +1. Upbound provides you with a license file containing your purchased capacity +2. You configure a `SpaceLicense` in your Spaces cluster +3. The metering system automatically: + - Collects measurements from all control planes every minute + - Aggregates usage data into hourly intervals + - Stores usage data in a local PostgreSQL database + - Updates the `SpaceLicense` status with current consumption + +## Prerequisites + +### PostgreSQL database + +Capacity licensing requires a PostgreSQL database to store usage measurements. You can use: + +- An existing PostgreSQL instance +- A managed PostgreSQL service (AWS RDS, Azure Database, Google Cloud SQL) +- A PostgreSQL instance deployed in your cluster + +The database must be: + +- Accessible from the Spaces cluster +- Configured with a dedicated database and credentials + +#### Example: Deploy PostgreSQL with CloudNativePG + +If you don't have an existing PostgreSQL instance, you can deploy one in your +cluster using [CloudNativePG] (CNPG). CNPG is a Kubernetes operator that +manages PostgreSQL clusters. + +1. Install the CloudNativePG operator: + +```bash +kubectl apply -f https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/release-1.24/releases/cnpg-1.24.1.yaml +``` + +2. 
Create a PostgreSQL cluster for metering: + +```yaml +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: metering-postgres + namespace: upbound-system +spec: + instances: 1 + imageName: ghcr.io/cloudnative-pg/postgresql:16 + bootstrap: + initdb: + database: metering + owner: metering + postInitApplicationSQL: + - ALTER ROLE "metering" CREATEROLE; + storage: + size: 5Gi + # Optional: Configure resources for production use + # resources: + # requests: + # memory: "512Mi" + # cpu: "500m" + # limits: + # memory: "1Gi" + # cpu: "1000m" +--- +apiVersion: v1 +kind: Secret +metadata: + name: metering-postgres-app + namespace: upbound-system + labels: + cnpg.io/reload: "true" +stringData: + username: metering + password: "your-secure-password-here" +type: kubernetes.io/basic-auth +``` + +```bash +kubectl apply -f metering-postgres.yaml +``` + +3. Wait for the cluster to be ready: + +```bash +kubectl wait --for=condition=ready cluster/metering-postgres -n upbound-system --timeout=5m +``` + +4. You can access the PostgreSQL cluster at `metering-postgres-rw.upbound-system.svc.cluster.local:5432`. + +:::tip +For production deployments, consider: +- Increasing `instances` to 3 for high availability +- Configuring [backups] to object storage +- Setting appropriate resource requests and limits +- Using a dedicated storage class with good I/O performance +::: + +### License file + +Contact your Upbound sales representative to obtain a license file for your organization. The license file contains: +- Your unique license ID +- Purchased capacity (resource hours and operations) +- License validity period +- Any usage restrictions (such as cluster UUID pinning) + +## Configuration + +### Step 1: Create database credentials secret + +Create a Kubernetes secret containing your PostgreSQL password using the pgpass format: + +```bash +# Create a pgpass file with format: hostname:port:database:username:password +# Note: The database name and username must be 'metering' +# For CNPG clusters, use the read-write service endpoint: -rw..svc.cluster.local +echo "metering-postgres-rw.upbound-system.svc.cluster.local:5432:metering:metering:your-secure-password-here" > pgpass + +# Create the secret +kubectl create secret generic metering-postgres-credentials \ + -n upbound-system \ + --from-file=pgpass=pgpass + +# Clean up the pgpass file +rm pgpass +``` + +The secret must contain a single key: +- **`pgpass`**: PostgreSQL password file in the format `hostname:port:metering:metering:password` + +:::note +The database name and username are fixed as `metering`. Ensure your PostgreSQL instance has a database named `metering` with a user `metering` that has appropriate permissions. + +If you deployed PostgreSQL using CNPG as shown in the example above, the password should match what you set in the `metering-postgres-app` secret. +::: + +:::tip +For production environments, consider using external secret management solutions: +- [External Secrets Operator][eso] +- Cloud-specific secret managers (AWS Secrets Manager, Azure Key Vault, GCP Secret Manager) +::: + +### Step 2: Enable metering in Spaces + +Enable the metering feature when installing or upgrading Spaces: + + + + + +```bash {hl_lines="2-7"} +helm -n upbound-system upgrade --install spaces ... 
\ + --set "metering.enabled=true" \ + --set "metering.storage.postgres.connection.url=metering-postgres-rw.upbound-system.svc.cluster.local:5432" \ + --set "metering.storage.postgres.connection.credentials.secret.name=metering-postgres-credentials" \ + --set "metering.interval=1m" \ + --set "metering.workerCount=10" \ + --set "metering.aggregationInterval=1h" \ + --set "metering.measurementRetentionDays=30" + ... +``` + + + + + +```bash {hl_lines="2-7"} +up space init ... \ + --set "metering.enabled=true" \ + --set "metering.storage.postgres.connection.url=metering-postgres-rw.upbound-system.svc.cluster.local:5432" \ + --set "metering.storage.postgres.connection.credentials.secret.name=metering-postgres-credentials" \ + --set "metering.interval=1m" \ + --set "metering.workerCount=10" \ + --set "metering.aggregationInterval=1h" \ + --set "metering.measurementRetentionDays=30" + ... +``` + + + + + +#### Configuration options + +| Option | Default | Description | +|--------|---------|-------------| +| `metering.enabled` | `false` | Enable the metering feature | +| `metering.storage.postgres.connection.url` | - | PostgreSQL host and port (format: `host:port`, required) | +| `metering.storage.postgres.connection.credentials.secret.name` | - | Name of the secret containing PostgreSQL credentials (required) | +| `metering.storage.postgres.connection.sslmode` | `require` | SSL mode for PostgreSQL connection (`disable`, `allow`, `prefer`, `require`, `verify-ca`, `verify-full`) | +| `metering.storage.postgres.connection.ca.name` | - | Name of the secret containing CA certificate for TLS connections (optional) | +| `metering.interval` | `1m` | How often to collect measurements from control planes | +| `metering.workerCount` | `10` | Number of parallel workers for measurement collection | +| `metering.aggregationInterval` | `1h` | How often to aggregate measurements into hourly usage data | +| `metering.measurementRetentionDays` | `30` | Days to retain raw measurements (0 = indefinite) | + + +#### Database sizing and retention + +The metering system uses two PostgreSQL tables to track usage: + +**Raw measurements table** (`measurements`): +- Stores point-in-time snapshots collected every measurement interval (default: 1 minute) +- One row per control plane per interval +- Affected by the `measurementRetentionDays` setting +- Used for detailed auditing and troubleshooting + +**Aggregated usage table** (`hourly_usage`): +- Stores hourly aggregated resource hours and operations per license +- One row per hour per license +- Never deleted (required for accurate license tracking) +- Grows much slower than raw measurements + +##### Storage sizing guidelines + +Estimate your PostgreSQL storage needs based on these factors: + + +| Deployment Size | Control Planes | Measurement Interval | Retention Days | Raw Measurements | Indexes & Overhead | Total Storage | +|----------------|----------------|---------------------|----------------|------------------|-------------------|---------------| +| Small | 10 | 1m | 30 | ~85 MB | ~40 MB | **~125 MB** | +| Medium | 50 | 1m | 30 | ~430 MB | ~215 MB | **~645 MB** | +| Large | 200 | 1m | 30 | ~1.7 GB | ~850 MB | **~2.5 GB** | +| Large (90-day retention) | 200 | 1m | 90 | ~5.2 GB | ~2.6 GB | **~7.8 GB** | + +The aggregated hourly usage table adds minimal overhead (~50 KB per year per license). 
+ +**Formula for custom calculations**: +``` +Daily measurements per control plane = (24 * 60) / interval_minutes +Total rows = control_planes × daily_measurements × retention_days +Storage (MB) ≈ (total_rows × 200 bytes) / 1,048,576 × 1.5 (with indexes) +``` + +##### Retention behavior + +The `measurementRetentionDays` setting controls retention of raw measurement data: + +- **Default: 30 days** - Balances audit capabilities with storage efficiency +- **Set to 0**: Disables cleanup, retains all raw measurements indefinitely +- **Cleanup runs**: Every aggregation interval (default: hourly) +- **What's kept forever**: Aggregated hourly usage data (needed for license tracking) +- **What's cleaned up**: Raw point-in-time measurements older than retention period + +**Recommendations**: +- **30 days**: For most troubleshooting and short-term auditing +- **60 to 90 days**: For environments requiring extended audit trails +- **Unlimited (0)**: Only for environments with ample storage or specific compliance requirements + +:::note +Increasing retention period linearly increases storage requirements for raw measurements. The aggregated hourly data is always retained regardless of this setting. +::: + +### Step 3: Apply your license + +Use the `up` CLI to apply your license file: + +```bash +up space license apply /path/to/license.json +``` + +This command automatically: +- Creates a secret containing your license file in the `upbound-system` namespace +- Creates the `SpaceLicense` resource configured to use that secret + +:::tip +You can specify a different namespace for the license secret using the `--namespace` flag: +```bash +up space license apply /path/to/license.json --namespace my-namespace +``` +::: + +
+<details>
+<summary>Alternative: Manual kubectl approach</summary>
+
+If you prefer not to use the `up` CLI, you can manually create the resources:
+
+1. Create the license secret:
+
+```bash
+kubectl create secret generic space-license \
+  -n upbound-system \
+  --from-file=license.json=/path/to/license.json
+```
+
+2. Create the SpaceLicense resource:
+
+```yaml
+apiVersion: admin.spaces.upbound.io/v1alpha1
+kind: SpaceLicense
+metadata:
+  name: space
+spec:
+  secretRef:
+    name: space-license
+    namespace: upbound-system
+    key: license.json
+```
+
+```bash
+kubectl apply -f spacelicense.yaml
+```
+
+:::important
+You **must** name the `SpaceLicense` resource `space`. This resource is a singleton and only one can exist in the cluster.
+:::
+
+</details>
+ +## Monitoring usage + +### Check license status + +Use the `up` CLI to view your license details and current usage: + +```bash +up space license show +``` + +Example output: + +``` +Spaces License Status: Valid (License is valid) + +Created: 2024-01-01T00:00:00Z +Expires: 2025-01-01T00:00:00Z + +Plan: enterprise + +Resource Hour Limit: 1000000 +Operation Limit: 500000 + +Enabled Features: +- spaces +- query-api +- backup-restore +``` + +The output shows: +- License validity status and any validation messages +- Creation and expiration dates +- Your commercial plan tier +- Capacity limits for resource hours and operations +- Enabled features in your license +- Any restrictions (such as cluster UUID pinning) + +
+<details>
+<summary>Alternative: View detailed status with kubectl</summary>
+
+For detailed information including usage statistics, use kubectl:
+
+```bash
+kubectl get spacelicense space -o yaml
+```
+
+Example output showing usage data:
+
+```yaml
+apiVersion: admin.spaces.upbound.io/v1alpha1
+kind: SpaceLicense
+metadata:
+  name: space
+spec:
+  secretRef:
+    name: space-license
+    namespace: upbound-system
+status:
+  conditions:
+  - type: LicenseValid
+    status: "True"
+    reason: Valid
+    message: "License is valid"
+  id: "lic_abc123xyz"
+  plan: "enterprise"
+  capacity:
+    resourceHours: 1000000
+    operations: 500000
+  usage:
+    resourceHours: 245680
+    operations: 12543
+    resourceHoursUtilization: "24.57%"
+    operationsUtilization: "2.51%"
+    firstMeasurement: "2024-01-15T10:00:00Z"
+    lastMeasurement: "2024-02-10T14:30:00Z"
+  createdAt: "2024-01-01T00:00:00Z"
+  expiresAt: "2025-01-01T00:00:00Z"
+  enabledFeatures:
+  - "spaces"
+  - "query-api"
+  - "backup-restore"
+```
+
+</details>
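+
+For scripted checks or alerting, you can pull individual usage fields from the
+`SpaceLicense` status with JSONPath. A minimal sketch, assuming the status
+fields shown in the example above:
+
+```bash
+# Print the utilization percentages reported in the SpaceLicense status
+kubectl get spacelicense space \
+  -o jsonpath='{.status.usage.resourceHoursUtilization}{"\n"}{.status.usage.operationsUtilization}{"\n"}'
+```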
+
+### Understanding the status fields
+
+| Field | Description |
+|-------|-------------|
+| `status.id` | Unique license identifier |
+| `status.plan` | Your commercial plan (community, standard, enterprise) |
+| `status.capacity` | Total capacity included in your license |
+| `status.usage.resourceHours` | Total resource hours consumed |
+| `status.usage.operations` | Total operations performed |
+| `status.usage.resourceHoursUtilization` | Percentage of resource hours capacity used |
+| `status.usage.operationsUtilization` | Percentage of operations capacity used |
+| `status.usage.firstMeasurement` | When usage tracking began |
+| `status.usage.lastMeasurement` | Most recent usage update |
+| `status.expiresAt` | License expiration date |
+
+### Monitor with kubectl
+
+Watch your license utilization in real-time:
+
+```bash
+kubectl get spacelicense space -w
+```
+
+Short output format:
+
+```
+NAME    PLAN         VALID   REASON   AGE
+space   enterprise   True    Valid    45d
+```
+
+## Managing licenses
+
+### Updating your license
+
+To update your license with a new license file (for example, when renewing or upgrading capacity), apply the new license:
+
+```bash
+up space license apply /path/to/new-license.json
+```
+
+This command replaces the existing license secret and updates the SpaceLicense resource.
+
+### Removing a license
+
+To remove a license:
+
+```bash
+up space license remove
+```
+
+This command:
+- Prompts for confirmation before proceeding
+- Removes the license secret
+
+To skip the confirmation prompt, use the `--force` flag:
+
+```bash
+up space license remove --force
+```
+
+## Troubleshooting
+
+### License not updating
+
+If the license status doesn't update with usage data:
+
+1. **Check metering controller logs**:
+   ```bash
+   kubectl logs -n upbound-system deployment/spaces-controller -c metering
+   ```
+
+2. **Check if the system captures your measurements**:
+
+   ```bash
+   # Connect to PostgreSQL and query the measurements table
+   kubectl exec -it <postgres-pod> -- psql -U metering -d metering \
+     -c "SELECT COUNT(*) FROM measurements WHERE timestamp > NOW() - INTERVAL '1 hour';"
+   ```
+
+### High utilization warnings
+
+If you're approaching your capacity limits:
+
+1. **Review resource usage** by control plane to identify high consumers
+2. **Contact your Upbound sales representative** to discuss capacity expansion
+3. **Optimize managed resources** by cleaning up unused resources
+
+### License validation failures
+
+If your license shows as invalid:
+
+1. **Check expiration date**: `kubectl get spacelicense space -o jsonpath='{.status.expiresAt}'`
+2. **Verify license file integrity**: Ensure the secret contains valid JSON
+3. **Check for cluster UUID restrictions**: Upbound pins some licenses to
+   specific clusters
+4. **Review controller logs** for detailed error messages
+
+## Differences from traditional billing
+
+### Capacity licensing
+
+- ✅ Works in disconnected environments
+- ✅ Provides real-time usage visibility
+- ✅ No manual data export required
+- ✅ Requires PostgreSQL database
+- ✅ Fixed capacity model
+
+### Traditional billing (object storage)
+
+- ❌ Requires periodic manual export
+- ❌ Delayed visibility into usage
+- ✅ Works with S3/Azure Blob/GCS
+- ❌ Requires cloud storage access
+- ✅ Pay-as-you-go model
+
+## Best practices
+
+### Database management
+
+1. **Regular backups**: Back up your metering database regularly to preserve usage history
+2. **Monitor database size**: Set appropriate retention periods to manage storage growth
+3. 
**Use managed databases**: Consider managed PostgreSQL services for production
+4. **Connection pooling**: Use connection pooling for better performance at scale
+
+### License management
+
+1. **Monitor utilization**: Set up alerts before reaching 80% capacity
+2. **Plan renewals early**: Start renewal discussions 60 days before expiration
+3. **Track grace periods**: Note the `gracePeriodEndsAt` date for planning
+4. **Secure license files**: Treat license files as sensitive credentials
+
+### Operational monitoring
+
+1. **Set up dashboards**: Create Grafana dashboards for usage trends
+2. **Enable alerting**: Configure alerts for high utilization and expiration
+3. **Regular audits**: Periodically review usage patterns across control planes
+4. **Capacity planning**: Use historical data to predict future capacity needs
+
+## Next steps
+
+- Learn about [Observability] to monitor your Spaces deployment
+- Explore [Backup and Restore][backup-restore] to protect your control plane data
+- Review [Self-Hosted Space Billing][space-billing] for the traditional billing model
+- Contact [Upbound Sales][sales] to discuss capacity licensing options
+
+
+[space-billing]: /spaces/howtos/self-hosted/billing
+[CloudNativePG]: https://cloudnative-pg.io/
+[backups]: https://cloudnative-pg.io/documentation/current/backup_recovery/
+[backup-restore]: /spaces/howtos/backup-and-restore
+[sales]: https://www.upbound.io/contact
+[eso]: https://external-secrets.io/
+[Observability]: /spaces/howtos/observability
+
+
diff --git a/spaces_versioned_docs/version-v1.13/howtos/self-hosted/certs.md b/spaces_versioned_docs/version-v1.13/howtos/self-hosted/certs.md
new file mode 100644
index 000000000..e517c250e
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.13/howtos/self-hosted/certs.md
@@ -0,0 +1,274 @@
+---
+title: Istio Ingress Gateway With Custom Certificates
+sidebar_position: 20
+description: Install self hosted spaces using istio ingress gateway in a Kind cluster
+---
+
+:::important
+Prerequisites
+
+- Spaces Token available in a file
+- `docker login xpkg.upbound.io -u <access-id> -p <token>`
+- [`istioctl`][istioctl] installation
+- `jq` installation
+:::
+
+This document describes the installation of a self hosted space on an example `kind`
+cluster along with Istio Ingress Gateway and certificates. The service mesh and certificates
+installation is transferable to self hosted spaces in arbitrary clouds.
+
+## Create a kind cluster
+
+```shell
+# Single control-plane node labeled ingress-ready, with host ports 80/443
+# for the Istio ingress gateway installed below.
+cat <<EOF | kind create cluster --config=-
+kind: Cluster
+apiVersion: kind.x-k8s.io/v1alpha4
+nodes:
+- role: control-plane
+  kubeadmConfigPatches:
+  - |
+    kind: InitConfiguration
+    nodeRegistration:
+      kubeletExtraArgs:
+        node-labels: "ingress-ready=true"
+  extraPortMappings:
+  - containerPort: 80
+    hostPort: 80
+    protocol: TCP
+  - containerPort: 443
+    hostPort: 443
+    protocol: TCP
+EOF
+```
+
+## Install Istio
+
+:::important
+This is an example and not recommended for use in production.
+:::
+
+1. Create the `istio-values.yaml` file
+
+```shell
+cat > istio-values.yaml << 'EOF'
+apiVersion: install.istio.io/v1alpha1
+kind: IstioOperator
+spec:
+  hub: gcr.io/istio-release
+  components:
+    ingressGateways:
+    - enabled: true
+      name: istio-ingressgateway
+      k8s:
+        nodeSelector:
+          ingress-ready: "true"
+        overlays:
+        - apiVersion: apps/v1
+          kind: Deployment
+          name: istio-ingressgateway
+          patches:
+          - path: spec.template.spec.containers.[name:istio-proxy].ports
+            value:
+            - containerPort: 8080
+              hostPort: 80
+            - containerPort: 8443
+              hostPort: 443
+EOF
+```
+
+2. Install istio via `istioctl`
+
+```shell
+istioctl install -f istio-values.yaml
+```
+
+## Create a self-signed Certificate via cert-manager
+
+:::important
+This Certificate manifest creates a self-signed certificate for a proof of concept
+environment and isn't recommended for production use cases.
+:::
+
+1. 
Create the upbound-system namespace
+
+```shell
+kubectl create namespace upbound-system
+```
+
+2. Create a self-signed certificate
+
+```shell
+cat <<EOF | kubectl apply -f -
+apiVersion: cert-manager.io/v1
+kind: Issuer
+metadata:
+  # Illustrative issuer name; the secret name below must match the Spaces values
+  name: selfsigned-issuer
+  namespace: upbound-system
+spec:
+  selfSigned: {}
+---
+apiVersion: cert-manager.io/v1
+kind: Certificate
+metadata:
+  name: example-tls-cert
+  namespace: upbound-system
+spec:
+  secretName: example-tls-secret
+  dnsNames:
+  - proxy.upbound-127.0.0.1.nip.io
+  issuerRef:
+    name: selfsigned-issuer
+    kind: Issuer
+EOF
+```
+
+## Create an Istio Gateway and VirtualService
+
+Configure an Istio Gateway and VirtualService to use TLS passthrough.
+
+```shell
+cat <<EOF | kubectl apply -f -
+apiVersion: networking.istio.io/v1beta1
+kind: Gateway
+metadata:
+  name: spaces-gateway
+  namespace: istio-system
+spec:
+  selector:
+    istio: ingressgateway
+  servers:
+  - port:
+      number: 443
+      name: tls
+      protocol: TLS
+    tls:
+      mode: PASSTHROUGH
+    hosts:
+    - "proxy.upbound-127.0.0.1.nip.io"
+---
+apiVersion: networking.istio.io/v1beta1
+kind: VirtualService
+metadata:
+  name: spaces-router
+  namespace: istio-system
+spec:
+  hosts:
+  - "proxy.upbound-127.0.0.1.nip.io"
+  gateways:
+  - spaces-gateway
+  tls:
+  - match:
+    - port: 443
+      sniHosts:
+      - "proxy.upbound-127.0.0.1.nip.io"
+    route:
+    - destination:
+        # Assumption: the spaces-router service terminates TLS on port 8443
+        host: spaces-router.upbound-system.svc.cluster.local
+        port:
+          number: 8443
+EOF
+```
+
+## Install Spaces
+
+1. Create the `spaces-values.yaml` file
+
+```shell
+cat > spaces-values.yaml << 'EOF'
+# Configure spaces-router to use the TLS secret created by cert-manager.
+externalTLS:
+  tlsSecret:
+    name: example-tls-secret
+  caBundleSecret:
+    name: example-tls-secret
+    key: ca.crt
+ingress:
+  provision: false
+  # Allow Istio Ingress Gateway to communicate to the spaces-router
+  namespaceLabels:
+    kubernetes.io/metadata.name: istio-system
+  podLabels:
+    app: istio-ingressgateway
+    istio: ingressgateway
+EOF
+```
+
+2. Set the required environment variables
+
+```shell
+# Update these according to your account/token file
+export SPACES_TOKEN_PATH=<path-to-spaces-token-file>
+export UPBOUND_ACCOUNT=<your-upbound-account>
+# Replace SPACES_ROUTER_HOST with your Spaces ingress hostname
+export SPACES_ROUTER_HOST="proxy.upbound-127.0.0.1.nip.io"
+export SPACES_VERSION="1.14.1"
+```
+
+3. Create an image pull secret for Spaces
+
+```shell
+kubectl -n upbound-system create secret docker-registry upbound-pull-secret \
+  --docker-server=https://xpkg.upbound.io \
+  --docker-username="$(jq -r .accessId $SPACES_TOKEN_PATH)" \
+  --docker-password="$(jq -r .token $SPACES_TOKEN_PATH)"
+```
+
+4. Install the Spaces helm chart
+
+```shell
+# Login to xpkg.upbound.io
+jq -r .token $SPACES_TOKEN_PATH | helm registry login xpkg.upbound.io -u $(jq -r .accessId $SPACES_TOKEN_PATH) --password-stdin
+
+# Install spaces helm chart
+helm -n upbound-system upgrade --install spaces \
+  oci://xpkg.upbound.io/spaces-artifacts/spaces \
+  --version "${SPACES_VERSION}" \
+  --set "ingress.host=${SPACES_ROUTER_HOST}" \
+  --set "account=${UPBOUND_ACCOUNT}" \
+  --set "authentication.hubIdentities=true" \
+  --set "authorization.hubRBAC=true" \
+  --wait -f spaces-values.yaml
+```
+
+## Validate the installation
+
+Successfully using the `up` command to interact with your self hosted space validates the
+certificate installation.
+
+- `up ctx .`
+
+You can also issue control plane creation, list and deletion commands.
+
+- `up ctp create cert-test`
+- `up ctp list`
+- `up ctx disconnected/kind-kind/default/cert-test && kubectl get namespace`
+- `up ctp delete cert-test`
+
+:::note
+If `up` can't connect to your control plane, follow [this guide to create a new profile][up-profile].
+:::
+
+## Troubleshooting
+
+Examine your certificate with `openssl`:
+
+```shell
+openssl s_client -connect proxy.upbound-127.0.0.1.nip.io:443 -showcerts
+```
+
+[istioctl]: https://istio.io/latest/docs/ops/diagnostic-tools/istioctl/
+[up-profile]: /manuals/cli/howtos/profile-config/
diff --git a/spaces_versioned_docs/version-v1.13/howtos/self-hosted/configure-ha.md b/spaces_versioned_docs/version-v1.13/howtos/self-hosted/configure-ha.md
new file mode 100644
index 000000000..ddf36c55e
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.13/howtos/self-hosted/configure-ha.md
@@ -0,0 +1,450 @@
+---
+title: Production Scaling and High Availability
+description: Configure your Self-Hosted Space for production
+sidebar_position: 5
+---
+
+This guide explains how to configure an existing Upbound Space deployment for
+production operation at scale.
+
+Use this guide when you're ready to deploy production scaling, high availability,
+and monitoring in your Space. 
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments.
+
+For API specifications on ControlPlane resources, configurations, and version compatibility details, see the [Spaces API Reference](../../../../../reference/apis/spaces-api/).
+:::
+
+## Prerequisites
+
+Before you begin scaling your Spaces deployment, make sure you have:
+
+* A working Space deployment
+* Cluster administrator access
+* An understanding of load patterns and growth in your organization
+* Familiarity with node affinity, tainting, and Horizontal Pod Autoscaling
+  (HPA)
+
+## Production scaling strategy
+
+In this guide, you will:
+
+* Create dedicated node pools for different component types
+* Configure high-availability to ensure there are no single points of failure
+* Set dynamic scaling for variable workloads
+* Optimize your storage and component operations
+* Monitor your deployment health and performance
+
+## Spaces architecture
+
+The basic Spaces workflow follows the pattern below:
+
+![Spaces workflow][spaces-workflow]
+
+## Node architecture
+
+You can mitigate resource contention and improve reliability by separating system
+components into dedicated node pools.
+
+### `etcd` dedicated nodes
+
+`etcd` performance directly impacts your entire Space, so isolate it for
+consistent performance.
+
+1. Create a dedicated `etcd` node pool
+
+   **Requirements:**
+   - **Minimum**: 3 nodes for HA
+   - **Instance type**: General purpose with high network throughput/low latency
+   - **Storage**: High performance storage (`etcd` is I/O sensitive)
+
+2. Taint `etcd` nodes to reserve them
+
+   ```bash
+   kubectl taint nodes <node-name> target=etcd:NoSchedule
+   ```
+
+3. Configure `etcd` storage
+
+   `etcd` is sensitive to storage I/O performance. Review the [`etcd` scaling
+   documentation][scaling]
+   for specific storage guidance.
+
+### API server dedicated nodes
+
+API servers handle all control plane requests and should run on dedicated
+infrastructure.
+
+1. Create dedicated API server nodes
+
+   **Requirements:**
+   - **Minimum**: 2 nodes for HA
+   - **Instance type**: Compute-optimized, memory-optimized, or general-purpose
+   - **Scaling**: Scale vertically based on API server load patterns
+
+2. Taint API server nodes
+
+   ```bash
+   kubectl taint nodes <node-name> target=apiserver:NoSchedule
+   ```
+
+### Configure cluster autoscaling
+
+Enable cluster autoscaling for all node pools.
+
+For AWS EKS clusters, Upbound recommends using [`Karpenter`][karpenter] for
+improved bin-packing and instance type selection.
+
+For GCP GKE clusters, follow the [GKE autoscaling][gke-autoscaling] guide.
+
+For Azure AKS clusters, follow the [AKS autoscaling][aks-autoscaling] guide.
+
+## Configure high availability
+
+Ensure control plane components can survive node and zone failures.
+
+### Enable high availability mode
+
+1. Configure control planes for high availability
+
+   ```yaml
+   controlPlanes:
+     ha:
+       enabled: true
+   ```
+
+   This configures control plane pods to run with multiple replicas and
+   associated pod disruption budgets.
+
+### Configure component distribution
+
+1. 
Set up API server pod distribution + + ```yaml + controlPlanes: + vcluster: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: target + operator: In + values: + - apiserver + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: app + operator: In + values: + - vcluster + topologyKey: "kubernetes.io/hostname" + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - vcluster + topologyKey: topology.kubernetes.io/zone + weight: 100 + ``` + +2. Configure `etcd` pod distribution + + ```yaml + controlPlanes: + etcd: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: target + operator: In + values: + - etcd + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: app + operator: In + values: + - vcluster-etcd + topologyKey: "kubernetes.io/hostname" + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - vcluster-etcd + topologyKey: topology.kubernetes.io/zone + weight: 100 + ``` + +### Configure tolerations + +Allow control plane pods to schedule on the tainted dedicated nodes (available +in Spaces v1.14+). + +1. Add tolerations for `etcd` pods + + ```yaml + controlPlanes: + etcd: + tolerations: + - key: "target" + operator: "Equal" + value: "etcd" + effect: "NoSchedule" + ``` + +2. Add tolerations for API server pods + + ```yaml + controlPlanes: + vcluster: + tolerations: + - key: "target" + operator: "Equal" + value: "apiserver" + effect: "NoSchedule" + ``` + + +## Configure autoscaling for Spaces components + + +Set up the Spaces system components to handle variable load automatically. + +### Scale API and `apollo` services + +1. Configure minimum replicas for availability + + ```yaml + api: + replicaCount: 2 + + features: + alpha: + apollo: + enabled: true + replicaCount: 2 + ``` + + Both services support horizontal and vertical scaling based on load patterns. + +### Configure router autoscaling + +The `spaces-router` is the entry point for all traffic and needs intelligent +scaling. + + +1. Enable Horizontal Pod Autoscaler + + ```yaml + router: + hpa: + enabled: true + minReplicas: 2 + maxReplicas: 8 + targetCPUUtilizationPercentage: 80 + targetMemoryUtilizationPercentage: 80 + ``` + +2. Monitor scaling factors + + **Router scaling behavior:** + - **Vertical scaling**: Scales based on number of control planes + - **Horizontal scaling**: Scales based on request volume + - **Resource monitoring**: Monitor CPU and memory usage + + + +### Configure controller scaling + +The `spaces-controller` manages Space-level resources and requires vertical +scaling. + +1. Configure adequate resources with headroom + + ```yaml + controller: + resources: + requests: + cpu: "500m" + memory: "1Gi" + limits: + cpu: "2000m" + memory: "4Gi" + ``` + + **Important**: The controller can spike when reconciling large numbers of + control planes, so provide adequate headroom for resource spikes. + +## Set up production storage + + +### Configure Query API database + + +1. 
Use a managed PostgreSQL database
+
+   **Recommended services:**
+   - [AWS RDS][rds]
+   - [Google Cloud SQL][gke-sql]
+   - [Azure Database for PostgreSQL][aks-sql]
+
+   **Requirements:**
+   - Minimum 400 IOPS performance
+
+## Monitoring
+
+Monitor key metrics to ensure healthy scaling and identify issues quickly.
+
+### Control plane health
+
+Track these `spaces-controller` metrics:
+
+1. **Total control planes**
+
+   ```
+   spaces_control_plane_exists
+   ```
+
+   Tracks the total number of control planes in the system.
+
+2. **Degraded control planes**
+
+   ```
+   spaces_control_plane_degraded
+   ```
+
+   Returns control planes that aren't in a `Synced`, `Ready`, and
+   `Healthy` state.
+
+3. **Stuck control planes**
+
+   ```
+   spaces_control_plane_stuck
+   ```
+
+   Control planes stuck in a provisioning state.
+
+4. **Deletion issues**
+
+   ```
+   spaces_control_plane_deletion_stuck
+   ```
+
+   Control planes stuck during deletion.
+
+### Alerting
+
+Configure alerts for critical scaling and health metrics:
+
+- **High error rates**: Alert when 4xx/5xx response rates exceed thresholds
+- **Control plane health**: Alert when degraded or stuck control planes exceed acceptable counts
+
+## Architecture overview
+
+**Spaces System Components:**
+
+- **`spaces-router`**: Entry point for all endpoints, dynamically builds routes to control plane API servers
+- **`spaces-controller`**: Reconciles Space-level resources, serves webhooks, works with `mxp-controller` for provisioning
+- **`spaces-api`**: API for managing groups, control planes, shared secrets, and telemetry objects (accessed only through spaces-router)
+- **`spaces-apollo`**: Hosts the Query API, connects to PostgreSQL database populated by `apollo-syncer` pods
+
+**Control Plane Components (per control plane):**
+- **`mxp-controller`**: Handles provisioning tasks, serves webhooks, installs UXP and `XGQL`
+- **`XGQL`**: GraphQL API powering console views
+- **`kube-state-metrics`**: Collects usage metrics for billing (updated by `mxp-controller` when CRDs change)
+- **`vector`**: Works with `kube-state-metrics` to send usage data to external storage for billing
+- **`apollo syncer`**: Syncs `etcd` data into PostgreSQL for the Query API
+
+### `up ctx` workflow
+
+![up ctx workflow diagram][up-ctx-workflow]
+
+### Access a control plane API server via kubectl
+
+![kubectl workflow diagram][kubectl]
+
+### Query API/Apollo
+
+![query API workflow diagram][query-api]
+
+## See also
+
+* [Upbound Spaces deployment requirements][deployment]
+* [Upbound `etcd` scaling resources][scaling]
+
+[up-ctx-workflow]: /img/up-ctx-workflow.png
+[kubectl]: /img/kubectl-workflow.png
+[query-api]: /img/query-api-workflow.png
+[spaces-workflow]: /img/up-basic-flow.png
+[rds]: https://aws.amazon.com/rds/postgresql/
+[gke-sql]: https://cloud.google.com/kubernetes-engine/docs/tutorials/stateful-workloads/postgresql
+[aks-sql]: https://learn.microsoft.com/en-us/azure/aks/deploy-postgresql-ha?tabs=azuredisk
+[deployment]: https://docs.upbound.io/spaces/howtos/self-hosted/deployment-reqs/
+[karpenter]: https://docs.aws.amazon.com/eks/latest/best-practices/karpenter.html
+[gke-autoscaling]: https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-autoscaler
+[aks-autoscaling]: https://learn.microsoft.com/en-us/azure/aks/cluster-autoscaler-overview
+[scaling]: https://docs.upbound.io/deploy/self-hosted-spaces/scaling-resources#scaling-etcd-storage
diff --git a/spaces_versioned_docs/version-v1.13/howtos/self-hosted/controllers.md 
b/spaces_versioned_docs/version-v1.13/howtos/self-hosted/controllers.md
new file mode 100644
index 000000000..692740638
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.13/howtos/self-hosted/controllers.md
@@ -0,0 +1,389 @@
+---
+title: Controllers
+weight: 250
+description: A guide to how to wrap and deploy an Upbound controller into control planes on Upbound.
+---
+
+:::important
+This feature is in private preview for select customers in Upbound Spaces. If you're interested in this feature, please [contact us](https://www.upbound.io/contact-us).
+:::
+
+Upbound's _Controllers_ feature lets you build and deploy control plane software from the Kubernetes ecosystem. With the _Controllers_ feature, you're not limited to just managing resource types defined by Crossplane. Now you can create resources from _CustomResourceDefinitions_ defined by other Kubernetes ecosystem tooling.
+
+This guide explains how to bundle and deploy control plane software from the Kubernetes ecosystem on a control plane in Upbound.
+
+## Benefits
+
+The Controllers feature provides the following benefits:
+
+* Deploy control plane software from the Kubernetes ecosystem.
+* Use your control plane's package manager to handle the lifecycle of the control plane software and define dependencies between packages.
+* Build powerful compositions that combine both Crossplane and Kubernetes _CustomResources_.
+
+## How it works
+
+A _Controller_ is a package type that bundles control plane software from the Kubernetes ecosystem. Examples of such software include:
+
+- Kubernetes policy engines
+- CI/CD tooling
+- Your own private custom controllers defined by your organization
+
+You build a _Controller_ package by wrapping a helm chart along with its requisite _CustomResourceDefinitions_. Your _Controller_ package gets pushed to an OCI registry, and from there you can apply it to a control plane like you would any other Crossplane package. Your control plane's package manager is responsible for managing the lifecycle of the software once applied.
+
+## Prerequisites
+
+Enable the Controllers feature in the Space you plan to run your control plane in:
+
+- Cloud Spaces: Not available yet
+- Connected Spaces: Space administrator must enable this feature
+- Disconnected Spaces: Space administrator must enable this feature
+
+Packaging a _Controller_ requires [up CLI][cli] `v0.39.0` or later.
+
+## Build a _Controller_ package
+
+_Controllers_ are a package type that get administered by your control plane's package manager.
+
+### Prepare the package
+
+To define a _Controller_, you need a Helm chart. This guide assumes the control plane software you want to build into a _Controller_ already has a Helm chart available.
+
+Start by making a working directory to assemble the necessary parts:
+
+```shell
+mkdir controller-package
+cd controller-package
+```
+
+Inside the working directory, pull the Helm chart:
+
+```shell
+export CHART_REPOSITORY=<chart-repository>
+export CHART_NAME=<chart-name>
+export CHART_VERSION=<chart-version>
+
+helm pull $CHART_NAME --repo $CHART_REPOSITORY --version $CHART_VERSION
+```
+
+Be sure to update the Helm chart repository, name, and version with your own. 
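+
+For example, with hypothetical values for the Kyverno policy-engine chart:
+
+```shell
+# Illustrative chart coordinates -- substitute your own
+export CHART_REPOSITORY=https://kyverno.github.io/kyverno
+export CHART_NAME=kyverno
+export CHART_VERSION=3.2.6
+
+helm pull $CHART_NAME --repo $CHART_REPOSITORY --version $CHART_VERSION
+```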
+
+Move the Helm chart into its own folder:
+
+```shell
+mkdir helm
+mv $CHART_NAME-$CHART_VERSION.tgz helm/chart.tgz
+```
+
+Unpack the CRDs from the Helm chart into their own directory:
+
+```shell
+export RELEASE_NAME=<release-name>
+export RELEASE_NAMESPACE=<release-namespace>
+
+mkdir crds
+helm template $RELEASE_NAME helm/chart.tgz -n $RELEASE_NAMESPACE --include-crds | \
+  yq e 'select(.kind == "CustomResourceDefinition")' - | \
+  yq -s '("crds/" + .metadata.name + ".yaml")' -
+```
+Be sure to update the Helm release name and namespace with your own.
+
+:::info
+The instructions above assume your CRDs get deployed as part of your Helm chart. If they're deployed another way, you need to manually copy your CRDs instead.
+:::
+
+Create a `crossplane.yaml` with your controller metadata:
+
+```yaml
+cat <<EOF > crossplane.yaml
+apiVersion: meta.pkg.upbound.io/v1alpha1
+kind: Controller
+metadata:
+  annotations:
+    friendly-name.meta.crossplane.io: Controller <friendly-name>
+    meta.crossplane.io/description: |
+      A brief description of what the controller does.
+    meta.crossplane.io/license: Apache-2.0
+    meta.crossplane.io/maintainer: <maintainer>
+    meta.crossplane.io/readme: |
+      An explanation of your controller.
+    meta.crossplane.io/source: <source-repository-url>
+  name: <controller-name>
+spec:
+  packagingType: Helm
+  helm:
+    releaseName: <release-name>
+    releaseNamespace: <release-namespace>
+    # Value overrides for the helm release can be provided below.
+    # values:
+    #   foo: bar
+EOF
+```
+
+Your controller's file structure should look like this:
+
+```ini
+.
+├── crds
+│   ├── your-crd.yaml
+│   ├── second-crd.yaml
+│   └── another-crd.yaml
+├── crossplane.yaml
+└── helm
+    └── chart.tgz
+```
+
+### Package and push the _Controller_
+
+At the root of your controller's working directory, build the contents into an xpkg:
+
+```shell
+up xpkg build
+```
+
+This causes an xpkg to get saved to your current directory with a name like `controller-f7091386b4c0.xpkg`.
+
+Push the package to your desired OCI registry:
+
+```shell
+export UPBOUND_ACCOUNT=<your-upbound-account>
+export CONTROLLER_NAME=<controller-name>
+export CONTROLLER_VERSION=<controller-version>
+export XPKG_FILENAME=<xpkg-filename>
+
+up xpkg push xpkg.upbound.io/$UPBOUND_ACCOUNT/$CONTROLLER_NAME:$CONTROLLER_VERSION -f $XPKG_FILENAME
+```
+
+## Deploy a _Controller_ package
+
+:::important
+_Controllers_ are only installable on control planes running Crossplane `v1.19.0` or later.
+:::
+
+Set your kubecontext to the desired control plane in Upbound. Change the package path to the OCI registry you pushed it to. Then, deploy the _Controller_ directly:
+
+```shell
+export CONTROLLER_NAME=<controller-name>
+export CONTROLLER_VERSION=<controller-version>
+
+cat <<EOF | kubectl apply -f -
+# Assumption: the Controller package API group -- verify against your Spaces release
+apiVersion: pkg.upbound.io/v1alpha1
+kind: Controller
+metadata:
+  name: $CONTROLLER_NAME
+spec:
+  package: xpkg.upbound.io/$UPBOUND_ACCOUNT/$CONTROLLER_NAME:$CONTROLLER_VERSION
+EOF
+```
+
+## Example: controller-argocd
+
+The following example packages the Argo CD Helm chart as a _Controller_ named
+`controller-argocd`. Create a `crossplane.yaml` with the Argo CD controller
+metadata:
+
+```yaml
+cat <<EOF > crossplane.yaml
+apiVersion: meta.pkg.upbound.io/v1alpha1
+kind: Controller
+metadata:
+  annotations:
+    friendly-name.meta.crossplane.io: Controller ArgoCD
+    meta.crossplane.io/description: |
+      The ArgoCD Controller enables continuous delivery and declarative configuration
+      management for Kubernetes applications using GitOps principles.
+    meta.crossplane.io/license: Apache-2.0
+    meta.crossplane.io/maintainer: Upbound Maintainers
+    meta.crossplane.io/readme: |
+      ArgoCD is a declarative GitOps continuous delivery tool for Kubernetes that
+      follows the GitOps methodology to manage infrastructure and application
+      configurations.
+    meta.crossplane.io/source: https://github.com/argoproj/argo-cd
+  name: argocd
+spec:
+  packagingType: Helm
+  helm:
+    releaseName: argo-cd
+    releaseNamespace: argo-system
+    # values:
+    #   foo: bar
+EOF
+```
+
+Your controller's file structure should look like this:
+
+```ini
+. 
+├── crds
+│   ├── applications.argoproj.io.yaml
+│   ├── applicationsets.argoproj.io.yaml
+│   └── appprojects.argoproj.io.yaml
+├── crossplane.yaml
+└── helm
+    └── chart.tgz
+```
+
+### Package and push controller-argocd
+
+At the root of your controller's working directory, build the contents into an xpkg:
+
+```shell
+up xpkg build
+```
+
+This causes an xpkg to get saved to your current directory with a name like `argocd-f7091386b4c0.xpkg`.
+
+Push the package to your desired OCI registry:
+
+```shell
+export UPBOUND_ACCOUNT=<your-upbound-account>
+export CONTROLLER_NAME=controller-argocd
+export CONTROLLER_VERSION=v7.8.8
+export XPKG_FILENAME=<xpkg-filename>
+
+up xpkg push --create xpkg.upbound.io/$UPBOUND_ACCOUNT/$CONTROLLER_NAME:$CONTROLLER_VERSION -f $XPKG_FILENAME
+```
+
+### Deploy controller-argocd to a control plane
+
+Set your kubecontext to the desired control plane in Upbound. Change the package path to the OCI registry you pushed it to. Then, deploy the _Controller_ directly:
+
+```shell
+cat <<EOF | kubectl apply -f -
+# Assumption: the Controller package API group -- verify against your Spaces release
+apiVersion: pkg.upbound.io/v1alpha1
+kind: Controller
+metadata:
+  name: controller-argocd
+spec:
+  package: xpkg.upbound.io/$UPBOUND_ACCOUNT/controller-argocd:v7.8.8
+EOF
+```
+
+## Frequently asked questions
+
+<details>
+<summary>Can I package any software or are there any prerequisites to be a Controller?</summary>
+
+We define a *Controller* as software that has at least one Custom Resource Definition (CRD) and a Kubernetes controller for that CRD. This is the minimum requirement to be a *Controller*. We have some checks to enforce this at packaging time.
+
+</details>
+ +
+<details>
+<summary>How can I package my software as a Controller?</summary>
+
+Currently, we support Helm charts as the underlying package format for *Controllers*. As long as you have a Helm chart, you can package it as a *Controller*.
+
+If you don't have a Helm chart, you can't deploy the software. We may extend this to support other packaging formats like Kustomize in the future.
+
+</details>
+ +
+<details>
+<summary>Can I package Crossplane XRDs/Compositions as a Helm chart to deploy as a Controller?</summary>
+
+This is not recommended. For packaging Crossplane XRDs and Compositions, we recommend using the `Configuration` package format. A Helm chart containing only Crossplane XRDs/Compositions does not qualify as a *Controller*.
+
+</details>
+ +
+<details>
+<summary>How can I override the Helm values when deploying a Controller?</summary>
+
+Overriding the Helm values is possible at two levels:
+- During packaging time, in the package manifest file.
+- At runtime, using a `ControllerRuntimeConfig` resource (similar to Crossplane `DeploymentRuntimeConfig`).
+
+</details>
+ +
+<details>
+<summary>How can I configure the helm release name and namespace for the controller?</summary>
+
+Right now, it is not possible to configure this at runtime. The package author configures the release name and namespace during packaging, so they are hardcoded inside the package. Unlike a regular application deployed by a Helm chart, a *Controller* can only be deployed once in a given control plane, so relying on predefined release names and namespaces should be acceptable. We may consider exposing these in `ControllerRuntimeConfig` later, but we would like to keep this opinionated unless there are strong reasons to do so.
+
+</details>
+ +
+<details>
+<summary>Can I deploy more than one instance of a Controller package?</summary>
+
+No, this is not possible. Remember, a *Controller* package introduces CRDs, which are cluster-scoped objects. Just like one cannot deploy more than one instance of the same Crossplane Provider package today, it is not possible to deploy more than one instance of a *Controller*.
+
+</details>
+ +
+<details>
+<summary>Do I need a specific Crossplane version to run Controllers?</summary>
+
+Yes, you need to use Crossplane v1.19.0 or later to use *Controllers*. This is because of the changes in the Crossplane codebase to support third-party package formats in dependencies.
+
+Spaces `v1.12.0` supports Crossplane `v1.19` in the *Rapid* release channel.
+
+</details>
+ +
+<details>
+<summary>Can I deploy Controllers outside of an Upbound control plane? With UXP?</summary>
+
+No, *Controllers* are a proprietary package format and are only available for control planes running in Spaces hosting environments in Upbound.
+
+</details>
+
+[cli]: /manuals/uxp/overview
+
diff --git a/spaces_versioned_docs/version-v1.13/howtos/self-hosted/ctp-audit-logs.md b/spaces_versioned_docs/version-v1.13/howtos/self-hosted/ctp-audit-logs.md
new file mode 100644
index 000000000..52f52c776
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.13/howtos/self-hosted/ctp-audit-logs.md
@@ -0,0 +1,549 @@
+---
+title: Control plane audit logging
+---
+
+This guide explains how to enable and configure audit logging for control planes
+in Self-Hosted Upbound Spaces.
+
+Starting in Spaces `v1.14.0`, each control plane contains an API server that
+supports audit log collection. You can use audit logging to track creation,
+updates, and deletions of Crossplane resources. Control plane audit logs
+use observability features to collect audit logs with `SharedTelemetryConfig` and
+send logs to an OpenTelemetry (`OTEL`) collector.
+
+:::info API Version Information
+This guide applies to **Spaces v1.14.0 and later** (Self-Hosted only). Audit logging is not available in earlier versions.
+
+For API specifications on observability resources and details on how observability has evolved across versions, see the [Spaces API Reference - v1.14](../../../../../1.14/reference/apis/spaces-api/).
+:::
+
+## Prerequisites
+
+Before you begin, make sure you have:
+
+* Spaces `v1.14.0` or greater
+* Admin access to your Spaces host cluster
+* `kubectl` configured to access the host cluster
+* `helm` installed
+* `yq` installed
+* `up` CLI installed and logged in to your organization
+
+## Enable observability
+
+Observability graduated to General Availability in `v1.14.0` but is disabled by
+default.
+
+### Before `v1.14`
+
+To enable the GA Observability feature, upgrade your Spaces installation to `v1.14.0`
+or later and update your installation setting to the new flag:
+
+```diff
+helm upgrade spaces upbound/spaces -n upbound-system \
+- --set "features.alpha.observability.enabled=true"
++ --set "observability.enabled=true"
+```
+
+### After `v1.14`
+
+To enable the GA Observability feature for `v1.14.0` and later, pass the feature
+flag:
+
+```sh
+helm upgrade spaces upbound/spaces -n upbound-system \
+  --set "observability.enabled=true"
+```
+
+To confirm Observability is enabled, run the `helm get values` command:
+
+```shell
+helm get values --namespace upbound-system spaces | yq .observability
+```
+
+Your output should return:
+
+```shell-noCopy
+enabled: true
+```
+
+## Install an observability backend
+
+:::note
+If you already have an observability backend in your environment, skip to the
+next section.
+:::
+
+For this guide, you'll use Grafana's `docker-otel-lgtm` bundle to validate audit log
+generation. In production environments, configure a dedicated observability
+backend like Datadog, Splunk, or an enterprise-grade Grafana stack.
+
+First, make sure your `kubectl` context points to your Spaces host cluster:
+
+```shell
+kubectl config current-context
+```
+
+The output should return your cluster name.
+
+Next, install `docker-otel-lgtm` as a deployment; you'll use port-forwarding to
+connect to Grafana. 
Create a manifest file and paste the +following configuration: + +```yaml title="otel-lgtm.yaml" +apiVersion: v1 +kind: Namespace +metadata: + name: observability +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: otel-lgtm + name: otel-lgtm + namespace: observability +spec: + ports: + - name: grpc + port: 4317 + protocol: TCP + targetPort: 4317 + - name: http + port: 4318 + protocol: TCP + targetPort: 4318 + - name: grafana + port: 3000 + protocol: TCP + targetPort: 3000 + selector: + app: otel-lgtm +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: otel-lgtm + labels: + app: otel-lgtm + namespace: observability +spec: + replicas: 1 + selector: + matchLabels: + app: otel-lgtm + template: + metadata: + labels: + app: otel-lgtm + spec: + containers: + - name: otel-lgtm + image: grafana/otel-lgtm + ports: + - containerPort: 4317 + - containerPort: 4318 + - containerPort: 3000 +``` + +Next, apply the manifest: + +```shell +kubectl apply --filename otel-lgtm.yaml +``` + +Your output should return the resources: + +```shell +namespace/observability created + service/otel-lgtm created + deployment.apps/otel-lgtm created +``` + +To verify your resources deployed, use `kubectl get` to display resources with +an `ACTIVE` or `READY` status. + +Next, forward the Grafana port: + +```shell +kubectl port-forward svc/otel-lgtm --namespace observability 3000:3000 +``` + +Now you can access the Grafana UI at http://localhost:3000. + + +## Create an audit-enabled control plane + +To enable audit logging for a control plane, you need to label it so the +`SharedTelemetryConfig` can identify and apply audit settings. This section +creates a new control plane with the `audit-enabled: "true"` label. The +`audit-enabled: "true"` label marks this control plane for audit logging. The +`SharedTelemetryConfig` (created in the next section) finds control planes with +this label and enables audit logging on them. + +Create a new manifest file and paste the configuration below: + +
+```yaml title="ctp-audit.yaml" +apiVersion: v1 +kind: Namespace +metadata: + name: audit-test +--- +apiVersion: spaces.upbound.io/v1beta1 +kind: ControlPlane +metadata: + labels: + audit-enabled: "true" + name: ctp1 + namespace: audit-test +spec: + writeConnectionSecretToRef: + name: kubeconfig-ctp1 + namespace: audit-test +``` +
+ +The `metadata.labels` section contains the `audit-enabled` setting. + +Apply the manifest: + +```shell +kubectl apply --filename ctp-audit.yaml +``` + +Confirm your control plane reaches the `READY` status: + +```shell +kubectl get --filename ctp-audit.yaml +``` + +## Create a `SharedTelemetryConfig` + +The `SharedTelemetryConfig` applies to all control plane objects in a namespace +and enables audit logging and routes logs to your `OTEL` endpoint. + +Create a `SharedTelemetryConfig` manifest file and paste the configuration +below: + +
+```yaml title="sharedtelemetryconfig.yaml" +apiVersion: observability.spaces.upbound.io/v1alpha1 +kind: SharedTelemetryConfig +metadata: + name: apiserver-audit + namespace: audit-test +spec: + apiServer: + audit: + enabled: true + exporters: + otlphttp: + endpoint: http://otel-lgtm.observability:4318 + exportPipeline: + logs: [otlphttp] + controlPlaneSelector: + labelSelectors: + - matchLabels: + audit-enabled: "true" +``` +
+This configuration:
+
+* Sets `apiServer.audit.enabled` to `true`
+* Configures the `otlphttp` exporter to point to the `docker-otel-lgtm` service
+* Uses `controlPlaneSelector` to match any control plane in the namespace with the `audit-enabled` label set to `true`
+
+:::note
+You can configure the `SharedTelemetryConfig` to select control planes in
+several ways. For more information on control plane selection, see the [control
+plane selection][ctp-selection] documentation.
+:::
+
+Apply the `SharedTelemetryConfig`:
+
+```shell
+kubectl apply --filename sharedtelemetryconfig.yaml
+```
+
+Confirm the configuration selected the control plane:
+
+```shell
+kubectl get --filename sharedtelemetryconfig.yaml
+```
+
+The output should return `SELECTED` as `1` and `VALIDATED` as `TRUE`.
+
+For more detailed status information, use `kubectl get`:
+
+```shell
+kubectl get --filename sharedtelemetryconfig.yaml --output yaml | yq .status
+```
+
+## Generate and monitor audit events
+
+You enabled telemetry on your new control plane and can now generate events to
+test the audit logging. This guide uses the `nop-provider` to simulate resource
+operations.
+
+Switch your `up` context to the new control plane:
+
+```shell
+up ctx <organization>/<space>/<group>/<control-plane>
+```
+
+Create a new Provider manifest:
+
+```yaml title="provider-nop.yaml"
+apiVersion: pkg.crossplane.io/v1
+kind: Provider
+metadata:
+  name: crossplane-contrib-provider-nop
+spec:
+  package: xpkg.upbound.io/crossplane-contrib/provider-nop:v0.4.0
+```
+
+Apply the provider manifest:
+
+```shell
+kubectl apply --filename provider-nop.yaml
+```
+
+Verify the provider installed and returns `HEALTHY` status as `TRUE`.
+
+Apply an example resource to kick off event generation:
+
+```shell
+kubectl apply --filename https://raw.githubusercontent.com/crossplane-contrib/provider-nop/refs/heads/main/examples/nopresource.yaml
+```
+
+In your Grafana dashboard, navigate to **Drilldown** > **Logs** under the
+Grafana menu.
+
+Filter for `controlplane-audit` log messages.
+
+Create a query to find `create` events on `nopresources` by filtering:
+
+* The `verb` field for `create` events
+* The `objectRef_resource` field to match the Kind `nopresources`
+
+Review the audit log results. The log stream displays:
+
+* The client applying the create operation
+* The resource kind
+* Client details
+* The response code
+
+Expand the example below for an audit log entry:
+
+<details>
+<summary>Audit log entry</summary>
+
+```json
+{
+  "level": "Metadata",
+  "auditID": "51bbe609-14ad-4874-be78-1289c10d506a",
+  "stage": "ResponseComplete",
+  "requestURI": "/apis/nop.crossplane.io/v1alpha1/nopresources?fieldManager=kubectl-client-side-apply&fieldValidation=Strict",
+  "verb": "create",
+  "user": {
+    "username": "kubernetes-admin",
+    "groups": ["system:masters", "system:authenticated"]
+  },
+  "impersonatedUser": {
+    "username": "upbound:spaces:host:masterclient",
+    "groups": [
+      "system:authenticated",
+      "upbound:controlplane:admin",
+      "upbound:spaces:host:system:masters"
+    ]
+  },
+  "sourceIPs": ["10.244.0.135", "127.0.0.1"],
+  "userAgent": "kubectl/v1.32.2 (darwin/arm64) kubernetes/67a30c0",
+  "objectRef": {
+    "resource": "nopresources",
+    "name": "example",
+    "apiGroup": "nop.crossplane.io",
+    "apiVersion": "v1alpha1"
+  },
+  "responseStatus": { "metadata": {}, "code": 201 },
+  "requestReceivedTimestamp": "2025-09-19T23:03:24.540067Z",
+  "stageTimestamp": "2025-09-19T23:03:24.557583Z",
+  "annotations": {
+    "authorization.k8s.io/decision": "allow",
+    "authorization.k8s.io/reason": "RBAC: allowed by ClusterRoleBinding \"controlplane-admin\" of ClusterRole \"controlplane-admin\" to Group \"upbound:controlplane:admin\""
+  }
+}
+```
+
+</details>
+ +## Customize the audit policy + +Spaces `v1.14.0` includes a default audit policy. You can customize this policy +by creating a configuration file and passing the values to +`observability.collectors.apiServer.auditPolicy` in the helm values file. + +An example custom audit policy: + +```yaml +observability: + controlPlanes: + apiServer: + auditPolicy: | + apiVersion: audit.k8s.io/v1 + kind: Policy + rules: + # ============================================================================ + # RULE 1: Exclude health check and version endpoints + # ============================================================================ + - level: None + nonResourceURLs: + - '/healthz*' + - '/readyz*' + - /version + # ============================================================================ + # RULE 2: ConfigMaps - Write operations only + # ============================================================================ + - level: Metadata + resources: + - group: "" + resources: + - configmaps + verbs: + - create + - update + - patch + - delete + omitStages: + - RequestReceived + - ResponseStarted + # ============================================================================ + # RULE 3: Secrets - ALL operations + # ============================================================================ + - level: Metadata + resources: + - group: "" + resources: + - secrets + verbs: + - get + - list + - watch + - create + - update + - patch + - delete + omitStages: + - RequestReceived + - ResponseStarted + # ============================================================================ + # RULE 4: Global exclusion of read-only operations + # ============================================================================ + - level: None + verbs: + - get + - list + - watch + # ========================================================================== + # RULE 5: Exclude standard Kubernetes resources from write operation logging + # ========================================================================== + - level: None + resources: + - group: "" + - group: "apps" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "storage.k8s.io" + - group: "batch" + - group: "autoscaling" + - group: "metrics.k8s.io" + - group: "node.k8s.io" + - group: "scheduling.k8s.io" + - group: "coordination.k8s.io" + - group: "discovery.k8s.io" + - group: "events.k8s.io" + - group: "flowcontrol.apiserver.k8s.io" + - group: "internal.apiserver.k8s.io" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "admissionregistration.k8s.io" + verbs: + - create + - update + - patch + - delete + # ============================================================================ + # RULE 6: Catch-all for ALL custom resources and any missed resources + # ============================================================================ + - level: Metadata + verbs: + - create + - update + - patch + - delete + omitStages: + - RequestReceived + - ResponseStarted + # ============================================================================ + # RULE 7: Final catch-all - exclude everything else + # ============================================================================ + - level: None + omitStages: + - RequestReceived + - ResponseStarted +``` +You can apply this policy during Spaces installation or upgrade using the helm values file. + +Audit policies use rules evaluated in order from top to bottom where the first +matching rule applies. 
Control plane audit policies follow Kubernetes conventions and use the
+following logging levels:
+
+* **None** - Don't log events matching this rule
+* **Metadata** - Log request metadata (user, timestamp, resource, verb) but not request or response bodies
+* **Request** - Log metadata and request body but not response body
+* **RequestResponse** - Log metadata, request body, and response body
+
+For more information, review the Kubernetes [Auditing] documentation.
+
+## Disable audit logging
+
+You can disable audit logging on a control plane by removing it from the
+`SharedTelemetryConfig` selector or by deleting the `SharedTelemetryConfig`.
+
+### Disable for specific control planes
+
+Remove the `audit-enabled` label from control planes that should stop sending audit logs:
+
+```bash
+kubectl label controlplane <control-plane-name> --namespace <namespace> audit-enabled-
+```
+
+The `SharedTelemetryConfig` no longer selects this control plane, and audit log collection stops.
+
+### Disable for all control planes
+
+Delete the `SharedTelemetryConfig` to stop audit logging for all control planes it manages:
+
+```bash
+kubectl delete sharedtelemetryconfig <name> --namespace <namespace>
+```
+
+[ctp-selection]: /spaces/howtos/observability/#control-plane-selection
+[Auditing]: https://kubernetes.io/docs/tasks/debug/debug-cluster/audit/
diff --git a/spaces_versioned_docs/version-v1.13/howtos/self-hosted/declarative-ctps.md b/spaces_versioned_docs/version-v1.13/howtos/self-hosted/declarative-ctps.md
new file mode 100644
index 000000000..2c3e5331b
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.13/howtos/self-hosted/declarative-ctps.md
@@ -0,0 +1,110 @@
+---
+title: Declaratively create control planes
+sidebar_position: 99
+description: A tutorial to configure a Space with Argo to declaratively create and
+  manage control planes
+---
+
+In this tutorial, you learn how to configure [Argo CD][argo-cd] to communicate with a self-hosted Space. This flow allows you to declaratively create and manage control planes from Git. Argo CD is a continuous delivery tool for Kubernetes that you can use to drive GitOps flows for your control plane infrastructure.
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments.
+
+For API specifications on ControlPlane resources, their declarative creation, and version compatibility details, see the [Spaces API Reference](../../../../../reference/apis/spaces-api/).
+:::
+
+## Prerequisites
+
+To complete this tutorial, you need the following:
+
+- Have already deployed an Upbound Space.
+- Have already deployed an instance of Argo CD on a Kubernetes cluster.
+
+## Connect your Space to Argo CD
+
+Fetch the kubeconfig for the Space cluster, the Kubernetes cluster where you installed the Upbound Spaces software. You must add the Space cluster as a context to Argo.
+
+```shell
+export SPACES_CLUSTER_SERVER="https://url"
+export SPACES_CLUSTER_NAME="cluster"
+```
+
+Switch contexts to the Kubernetes cluster where you've installed Argo. Create a secret on the Argo cluster whose data contains the connection details of the Space cluster.
+
+:::important
+Make sure the following commands are executed against your **Argo** cluster, not your Space cluster.
+:::
+
+Run the following command in a terminal:
+
+```yaml
+cat <<EOF | kubectl apply -f -
+apiVersion: v1
+kind: Secret
+metadata:
+  # Illustrative names -- adjust to match your Argo CD installation
+  name: spaces-cluster
+  namespace: argocd
+  labels:
+    argocd.argoproj.io/secret-type: cluster
+type: Opaque
+stringData:
+  name: ${SPACES_CLUSTER_NAME}
+  server: ${SPACES_CLUSTER_SERVER}
+  # Populate the connection credentials from the Space cluster kubeconfig
+  config: |
+    {
+      "tlsClientConfig": {
+        "insecure": false
+      }
+    }
+EOF
+```
+
diff --git a/spaces_versioned_docs/version-v1.13/howtos/self-hosted/deployment-reqs.md b/spaces_versioned_docs/version-v1.13/howtos/self-hosted/deployment-reqs.md
new file mode 100644
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.13/howtos/self-hosted/deployment-reqs.md
@@ -0,0 +1,353 @@
+When you install a Crossplane provider on a control plane, memory gets consumed
+according to the number of custom resources it defines. 
Upbound [Official Provider families][official-provider-families] provide higher fidelity control
+to platform teams to install providers for only the resources they need,
+reducing the bloat of needlessly installing unused custom resources. Still, you
+must factor provider memory usage into your calculations to ensure you've
+rightsized the memory available in your Spaces cluster.
+
+:::important
+Be careful not to conflate `managed resource` with `custom resource definition`.
+The former is an "instance" of an external resource in Crossplane, while the
+latter defines the API schema of that resource.
+:::
+
+It's estimated that each custom resource definition consumes ~3 MB of memory.
+The calculation is:
+
+```bash
+number_of_managed_resources_defined_in_provider x 3 MB = memory_required
+```
+
+For example, if you plan to use [provider-aws-ec2][provider-aws-ec2], [provider-aws-s3][provider-aws-s3], and [provider-aws-iam][provider-aws-iam], the resulting calculation is:
+
+```bash
+provider-aws-ec2: 98 x 3 MB = 294 MB
+provider-aws-s3: 23 x 3 MB = 69 MB
+provider-aws-iam: 22 x 3 MB = 66 MB
+---
+total memory: 429 MB
+```
+
+In this scenario, you should budget ~430 MB of memory for provider usage on this control plane.
+
+:::tip
+Do this calculation for each provider you plan to install on your control plane.
+Then do this calculation for each control plane you plan to run in your Space.
+:::
+
+#### Total memory usage
+
+Add the memory usage from the previous sections. Given the preceding examples,
+they result in a recommendation to budget ~1 GB memory for each control plane
+you plan to run in the Space.
+
+:::important
+
+The 1 GB recommendation is an example.
+You should input your own provider requirements to arrive at a final number for
+your own deployment.
+
+:::
+
+### CPU considerations
+
+#### Managed resource CPU usage
+
+The number of managed resources under management by a control plane is the largest contributing factor for CPU usage in a Space. CPU usage scales linearly according to the number of managed resources under management by your control plane. In Upbound's testing, CPU usage requirements _do_ vary from provider to provider. Using the Upbound Official Provider families as a baseline:
+
+| Provider | MR create operation (CPU core seconds) | MR update or reconciliation operation (CPU core seconds) |
+| ---- | ---- | ---- |
+| provider-family-aws | 10 | 2 to 3 |
+| provider-family-gcp | 7 | 1.5 |
+| provider-family-azure | 7 to 10 | 1.5 to 3 |
+
+When resources are in a non-ready state, Crossplane providers reconcile often (as fast as every 15 seconds). Once a resource reaches `READY`, each Crossplane provider defaults to a 10 minute poll interval. Given this, a 16-core machine has `16x10x60 = 9600` CPU core seconds available. Interpreting this table:
+
+- A single control plane that needs to create 100 AWS MRs concurrently would consume 1000 CPU core seconds, or about 1.5 cores.
+- A single control plane that continuously reconciles 100 AWS MRs once they've reached a `READY` state would consume 300 CPU core seconds, or a little under half a core.
+
+Since `provider-family-aws` has the highest recorded numbers for CPU time required, you can use that as an upper limit in your calculations.
+
+Using these calculations and extrapolating values, given a 16 core machine, it's recommended you don't exceed a single control plane managing 1000 MRs. Suppose you plan to run 10 control planes, each managing 1000 MRs. 
You want to make sure your node pool has capacity for 160 cores. If you are using a machine type that has 16 cores per machine, that would mean having a node pool of size 10. If you are using a machine type that has 32 cores per machine, that would mean having a node pool of size 5.
+
+#### Cloud API latency
+
+Oftentimes, you are using Crossplane providers to talk to external cloud APIs. Those external cloud APIs often have global API rate limits (examples: [Azure limits][azure-limits], [AWS EC2 limits][aws-ec2-limits]).
+
+For Crossplane providers built on [Upjet][upjet] (such as Upbound Official Provider families), these providers use Terraform under the covers. They expose some knobs (such as `--max-reconcile-rate`) you can use to tweak reconciliation rates.
+
+### Resource buffers
+
+The guidance in the preceding sections explains how to calculate CPU and memory usage requirements for:
+
+- a set of control planes in a Space
+- tuned to the number of providers you plan to use
+- according to the number of managed resource instances you plan to have managed by your control planes
+
+Upbound recommends budgeting an extra buffer of 20% to your resource capacity calculations. The numbers shared in the preceding sections don't account for peaks or surges since they're based on average measurements. Upbound recommends budgeting this buffer to absorb them.
+
+## Deploying more than one Space
+
+You are welcome to deploy more than one Space. You just need to make sure you have a 1:1 mapping of Space to Kubernetes clusters. Spaces are by their nature constrained to a single Kubernetes cluster, and clusters are regional entities. If you want to offer control planes in multiple cloud environments or multiple public clouds entirely, these are justifications for deploying more than one Space.
+
+## Cert-manager
+
+A Spaces deployment uses the [Certificate Custom Resource] from cert-manager to
+provision certificates within the Space. This establishes a nice API boundary
+between what your platform may need and the Certificate requirements of a
+Space.
+
+In the event you would like more control over the issuing Certificate Authority
+for your deployment or the deployment of cert-manager itself, this guide is for
+you.
+
+### Deploying
+
+An Upbound Space deployment doesn't have any special requirements for the
+cert-manager deployment itself. The only expectation is that cert-manager and
+the corresponding Custom Resources exist in the cluster.
+
+You should be free to install cert-manager in the cluster in any way that makes
+sense for your organization. You can find some [installation ideas] in the
+cert-manager docs.
+
+### Issuers
+
+A default Upbound Space install includes a [ClusterIssuer]. This `ClusterIssuer`
+is a `selfSigned` issuer that other certificates are minted from. You have a
+couple of options available to you for changing the default deployment of the
+Issuer:
+1. Changing the issuer name.
+2. Providing your own ClusterIssuer.
+
+#### Changing the issuer name
+
+The `ClusterIssuer` name is controlled by the `certificates.space.clusterIssuer`
+Helm property. You can adjust this during installation by providing the
+following parameter (assuming your new name is 'SpaceClusterIssuer'):
+```shell
+--set "certificates.space.clusterIssuer=SpaceClusterIssuer"
+```
+
+#### Providing your own ClusterIssuer
+
+To provide your own `ClusterIssuer`, you need to first set up your own
+`ClusterIssuer` in the cluster. 
+The cert-manager docs have a variety of options
+for providing your own. See the [Issuer Configuration] docs for more details.
+
+Once you have your own `ClusterIssuer` set up in the cluster, you need to turn
+off the deployment of the `ClusterIssuer` included in the Spaces deployment.
+To do that, provide the following parameter during installation:
+```shell
+--set "certificates.provision=false"
+```
+
+##### Considerations
+If your `ClusterIssuer` has a name that's different from the default name that
+the Spaces installation expects ('spaces-selfsigned'), you need to also specify
+your `ClusterIssuer` name during install using:
+```shell
+--set "certificates.space.clusterIssuer=<your-issuer-name>"
+```
+
+## Ingress
+
+To route requests from an external client (kubectl, Argo CD, and so on) to a
+control plane, a Spaces deployment includes a default [Ingress] manifest. To
+ease getting-started scenarios, the current `Ingress` includes
+configurations (properties and annotations) that assume you installed the
+commonly used [ingress-nginx ingress controller] in the cluster. This section
+walks you through using a different `Ingress`, if that's something your
+organization needs.
+
+### Default manifest
+
+An example of the `Ingress` manifest included in a Spaces install
+is below:
+
+```yaml
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: mxe-router-ingress
+  namespace: upbound-system
+  annotations:
+    nginx.ingress.kubernetes.io/use-regex: "true"
+    nginx.ingress.kubernetes.io/ssl-redirect: "false"
+    nginx.ingress.kubernetes.io/proxy-send-timeout: "3600"
+    nginx.ingress.kubernetes.io/proxy-read-timeout: "3600"
+    nginx.ingress.kubernetes.io/proxy-request-buffering: "off"
+    nginx.ingress.kubernetes.io/proxy-body-size: "0"
+    nginx.ingress.kubernetes.io/proxy-http-version: "1.1"
+    nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
+    nginx.ingress.kubernetes.io/proxy-ssl-verify: "on"
+    nginx.ingress.kubernetes.io/proxy-ssl-secret: "upbound-system/mxp-hostcluster-certs"
+    nginx.ingress.kubernetes.io/proxy-ssl-name: spaces-router
+    nginx.ingress.kubernetes.io/configuration-snippet: |
+      more_set_headers "X-Request-Id: $req_id";
+      more_set_headers "Request-Id: $req_id";
+      more_set_headers "Audit-Id: $req_id";
+spec:
+  ingressClassName: nginx
+  tls:
+  - hosts:
+    - {{ .Values.ingress.host }}
+    secretName: mxe-router-tls
+  rules:
+  - host: {{ .Values.ingress.host }}
+    http:
+      paths:
+      - path: "/v1/controlPlanes"
+        pathType: Prefix
+        backend:
+          service:
+            name: spaces-router
+            port:
+              name: http
+```
+
+The notable pieces are:
+
+1. Namespace
+
+   This property represents the namespace that the spaces-router is deployed to.
+   In most cases this is `upbound-system`.
+
+2. proxy-ssl-* annotations
+
+   The spaces-router pod terminates TLS using certificates located in the
+   mxp-hostcluster-certs `Secret` located in the `upbound-system` `Namespace`.
+
+3. proxy-* annotations
+
+   Requests coming into the ingress-controller vary depending on what the
+   client is requesting. For example, `kubectl get crds` has different
+   connection requirements than a watch such as `kubectl get pods -w`. The
+   ingress-controller is configured to account for either scenario.
+
+4. configuration-snippets
+
+   These commands add headers to the incoming requests that help with telemetry
+   and diagnosing problems within the system.
+
+5. Rules
+
+   Requests coming into the control planes use a `/v1/controlPlanes` prefix and
+   need to be routed to the spaces-router.
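+
+To inspect the manifest as deployed in your Space (a quick check, assuming the
+default name and namespace shown above):
+
+```shell
+kubectl get ingress mxe-router-ingress -n upbound-system -o yaml
+```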
+
+### Using a different ingress manifest
+
+Operators can choose to use an `Ingress` manifest and ingress controller that
+makes the most sense for their organization. To turn off deploying the default
+`Ingress` manifest, provide the following parameter during installation:
+```shell
+--set "ingress.provision=false"
+```
+
+#### Considerations
+
+Operators need to take the following considerations into account when
+disabling the default `Ingress` deployment:
+
+1. Ensure the custom `Ingress` manifest is placed in the same namespace as the
+`spaces-router` pod.
+2. Ensure that the ingress is configured to use the `spaces-router` as a secure
+backend and that the secret used is the mxp-hostcluster-certs secret.
+3. Ensure that the ingress is configured to handle long-lived connections.
+4. Ensure that the routing rule sends requests prefixed with
+`/v1/controlPlanes` to the `spaces-router` using the `http` port.
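+
+As a sketch of these considerations combined, a custom `Ingress` might look
+like the following. It assumes the default `upbound-system` namespace and an
+nginx-class controller; the `spaces.example.com` host is a hypothetical
+placeholder, and you should adapt the annotations to your controller of choice:
+
+```yaml
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: custom-spaces-ingress
+  namespace: upbound-system   # same namespace as the spaces-router pod (1)
+  annotations:
+    nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"   # secure backend (2)
+    nginx.ingress.kubernetes.io/proxy-ssl-secret: "upbound-system/mxp-hostcluster-certs"
+    nginx.ingress.kubernetes.io/proxy-ssl-name: spaces-router
+    nginx.ingress.kubernetes.io/proxy-read-timeout: "3600"  # long-lived connections (3)
+    nginx.ingress.kubernetes.io/proxy-send-timeout: "3600"
+spec:
+  ingressClassName: nginx
+  rules:
+  - host: spaces.example.com
+    http:
+      paths:
+      - path: "/v1/controlPlanes"   # route the control plane prefix (4)
+        pathType: Prefix
+        backend:
+          service:
+            name: spaces-router
+            port:
+              name: http
+```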
+
+
+
+
+
+[cert-manager]: https://cert-manager.io/
+[Certificate Custom Resource]: https://cert-manager.io/docs/usage/certificate/
+[ClusterIssuer]: https://cert-manager.io/docs/concepts/issuer/
+[ingress-nginx ingress controller]: https://kubernetes.github.io/ingress-nginx/deploy/
+[installation ideas]: https://cert-manager.io/docs/installation/
+[Ingress]: https://kubernetes.io/docs/concepts/services-networking/ingress/
+[Issuer Configuration]: https://cert-manager.io/docs/configuration/
+[official-provider-families]: /manuals/packages/providers/provider-families
+[aws-eks]: https://aws.amazon.com/eks/
+[google-cloud-gke]: https://cloud.google.com/kubernetes-engine
+[microsoft-aks]: https://azure.microsoft.com/en-us/products/kubernetes-service
+[upbound-account]: https://www.upbound.io/register/?utm_source=docs&utm_medium=cta&utm_campaign=docs_spaces
+[provider-aws-ec2]: https://marketplace.upbound.io/providers/upbound/provider-aws-ec2
+[provider-aws-s3]: https://marketplace.upbound.io/providers/upbound/provider-aws-s3
+[provider-aws-iam]: https://marketplace.upbound.io/providers/upbound/provider-aws-iam
+[azure-limits]: https://learn.microsoft.com/en-us/azure/azure-resource-manager/management/request-limits-and-throttling
+[aws-ec2-limits]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/throttling.html#throttling-limits-rate-based
+[upjet]: https://github.com/upbound/upjet
diff --git a/spaces_versioned_docs/version-v1.13/howtos/self-hosted/dr.md b/spaces_versioned_docs/version-v1.13/howtos/self-hosted/dr.md
new file mode 100644
index 000000000..67ecbfecf
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.13/howtos/self-hosted/dr.md
@@ -0,0 +1,412 @@
+---
+title: Disaster Recovery
+sidebar_position: 13
+description: Configure Space-wide backups for disaster recovery.
+---
+
+:::info API Version Information
+This guide applies to **Spaces v1.9.0 and later** (Self-Hosted only). Space Backups is an alpha feature that's enabled by default starting in v1.14.0.
+
+- **v1.9.0-v1.13.x**: Alpha feature (requires manual enablement)
+- **v1.14.0+**: Enabled by default
+
+For control plane backups, see [Backup and Restore](../backup-and-restore.md).
+:::
+
+:::important
+For Connected and Disconnected Spaces, this feature requires Spaces `v1.9.0` and, starting with `v1.14.0`, Spaces enables it by default.
+
+To enable it on versions earlier than `v1.14.0`, set `features.alpha.spaceBackup.enabled=true` when you install Spaces.
+
+```bash
+up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
+  ...
+  --set "features.alpha.spaceBackup.enabled=true"
+```
+:::
+
+Upbound's _Space Backups_ is a built-in Space-wide backup and restore feature. This guide explains how to configure Space Backups and how to restore from one in case of disaster recovery.
+
+This feature is meant for Space administrators. Group or control plane users can leverage [Shared Backups][shared-backups] to back up and restore their control planes.
+
+## Benefits
+The Space Backups feature provides the following benefits:
+
+* Automatic backups for all resources in a Space and all resources in control planes, without any operational overhead.
+* Backup schedules.
+* Selectors to specify resources to back up.
+
+## Prerequisites
+
+Enable the Space Backups feature in the Space:
+
+- Cloud Spaces: Not accessible to users.
+- Connected Spaces: A Space administrator must enable this feature.
+- Disconnected Spaces: A Space administrator must enable this feature.
+
+## Configure a Space Backup Config
+
+[SpaceBackupConfig][spacebackupconfig] is a cluster-scoped resource. This resource configures the storage details and provider. Whenever a backup executes (either by schedule or manually initiated), it references a SpaceBackupConfig to tell it where to store the snapshot.
+
+
+### Backup config provider
+
+
+The `spec.objectStorage.provider` and `spec.objectStorage.config` fields configure:
+
+* The object storage provider
+* The path to the provider
+* The credentials needed to communicate with the provider
+
+You can only set one provider. Upbound currently supports AWS, Azure, and GCP as providers.
+
+
+`spec.objectStorage.config` is a freeform map of configuration options for the object storage provider. See [Thanos object storage][thanos-object-storage] for more information on the formats for each supported cloud provider. `spec.objectStorage.bucket` and `spec.objectStorage.provider` override the required values in the config.
+
+
+#### AWS as a storage provider
+
+This example demonstrates how to use AWS as a storage provider for your backups:
+
+```yaml
+apiVersion: admin.spaces.upbound.io/v1alpha1
+kind: SpaceBackupConfig
+metadata:
+  name: default
+spec:
+  objectStorage:
+    provider: AWS
+    bucket: spaces-backup-bucket
+    config:
+      endpoint: s3.eu-west-2.amazonaws.com
+      region: eu-west-2
+    credentials:
+      source: Secret
+      secretRef:
+        name: bucket-creds
+        namespace: upbound-system
+        key: creds
+```
+
+This example assumes you've already created an S3 bucket called
+`spaces-backup-bucket` in the `eu-west-2` AWS region. To access the bucket,
+define the account credentials as a Secret in the specified Namespace
+(`upbound-system` in this example).
+
+#### Azure as a storage provider
+
+This example demonstrates how to use Azure as a storage provider for your backups:
+
+```yaml
+apiVersion: admin.spaces.upbound.io/v1alpha1
+kind: SpaceBackupConfig
+metadata:
+  name: default
+spec:
+  objectStorage:
+    provider: Azure
+    bucket: upbound-backups
+    config:
+      storage_account: upbackupstore
+      container: upbound-backups
+      endpoint: blob.core.windows.net
+    credentials:
+      source: Secret
+      secretRef:
+        name: bucket-creds
+        namespace: upbound-system
+        key: creds
+```
+
+
+This example assumes you've already created an Azure storage account called
+`upbackupstore` and a blob container `upbound-backups`. To access the container,
+define the account credentials as a Secret in the specified Namespace
+(`upbound-system` in this example).
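+
+In each example, the referenced Secret holds the object storage credentials
+under the `creds` key. As a sketch, you could create it like this — the
+`./credentials.json` path is a hypothetical placeholder, and the expected
+content format depends on your provider (see the [Thanos object
+storage][thanos-object-storage] docs referenced above):
+
+```shell
+kubectl create secret generic bucket-creds -n upbound-system \
+  --from-file=creds=./credentials.json
+```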
+
+#### GCP as a storage provider
+
+This example demonstrates how to use Google Cloud Storage as a storage provider for your backups:
+
+```yaml
+apiVersion: admin.spaces.upbound.io/v1alpha1
+kind: SpaceBackupConfig
+metadata:
+  name: default
+spec:
+  objectStorage:
+    provider: GCP
+    bucket: spaces-backup-bucket
+    credentials:
+      source: Secret
+      secretRef:
+        name: bucket-creds
+        namespace: upbound-system
+        key: creds
+```
+
+
+This example assumes you've already created a Cloud Storage bucket called
+`spaces-backup-bucket` and a service account with access to this bucket. Define
+the service account key file as a Secret in the specified Namespace
+(`upbound-system` in this example).
+
+
+## Configure a Space Backup Schedule
+
+
+[SpaceBackupSchedule][spacebackupschedule] is a cluster-scoped resource. This resource defines a backup schedule for the whole Space.
+
+Below is an example of a Space Backup Schedule that runs every day. It backs up all groups with `environment: production` labels and, in those groups, all control planes with `backup: please` labels.
+
+```yaml
+apiVersion: admin.spaces.upbound.io/v1alpha1
+kind: SpaceBackupSchedule
+metadata:
+  name: daily-schedule
+spec:
+  schedule: "@daily"
+  configRef:
+    kind: SpaceBackupConfig
+    name: default
+  match:
+    groups:
+      labelSelectors:
+      - matchLabels:
+          environment: production
+    controlPlanes:
+      labelSelectors:
+      - matchLabels:
+          backup: please
+```
+
+### Define a schedule
+
+The `spec.schedule` field is a [Cron-formatted][cron-formatted] string. Some common examples are below:
+
+| Entry             | Description                                                                                          |
+| ----------------- | ---------------------------------------------------------------------------------------------------- |
+| `@hourly`         | Run once an hour.                                                                                    |
+| `@daily`          | Run once a day.                                                                                      |
+| `@weekly`         | Run once a week.                                                                                     |
+| `0 0/4 * * *`     | Run every 4 hours.                                                                                   |
+| `0/15 * * * 1-5`  | Run every 15 minutes, Monday through Friday.                                                         |
+| `@every 1h30m10s` | Run every 1 hour, 30 minutes, and 10 seconds. Hour is the largest measurement of time for `@every`.  |
+
+### Suspend a schedule
+
+Use the `spec.suspend` field to suspend the schedule. A suspended schedule creates no new backups, but allows running backups to complete.
+
+```yaml
+apiVersion: admin.spaces.upbound.io/v1alpha1
+kind: SpaceBackupSchedule
+metadata:
+  name: daily-schedule
+spec:
+  suspend: true
+...
+```
+
+### Garbage collect backups when the schedule gets deleted
+
+Set `spec.useOwnerReferencesInBackup` to `true` to garbage collect the associated `SpaceBackup` resources when their `SpaceBackupSchedule` gets deleted.
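+
+For example (a minimal sketch, mirroring the schedule above):
+
+```yaml
+apiVersion: admin.spaces.upbound.io/v1alpha1
+kind: SpaceBackupSchedule
+metadata:
+  name: daily-schedule
+spec:
+  useOwnerReferencesInBackup: true # backups are deleted with the schedule
+...
+```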
+
+### Set the time to live
+
+Set the `spec.ttl` field to define the time to live for the backup. After this time, the backup is eligible for garbage collection. If this field isn't set, the backup isn't garbage collected.
+
+The time to live is a duration, for example, `168h` for 7 days.
+
+```yaml
+apiVersion: admin.spaces.upbound.io/v1alpha1
+kind: SpaceBackupSchedule
+metadata:
+  name: daily-schedule
+spec:
+  ttl: 168h # Backup is garbage collected after 7 days
+...
+```
+
+## Selecting Space resources to back up
+
+By default, a SpaceBackup selects all groups and, for each of them, all control planes, secrets, and any other group-scoped resources.
+
+By setting `spec.match`, you can include only specific groups, control planes, secrets, or other Space resources in the backup.
+
+By setting `spec.exclude`, you can filter some matched Space API resources out of the backup.
+
+### Including Space resources in a backup
+
+Different fields are available to include resources based on labels or names:
+- `spec.match.groups` to include only some groups in the backup.
+- `spec.match.controlPlanes` to include only some control planes in the backup.
+- `spec.match.secrets` to include only some secrets in the backup.
+- `spec.match.extras` to include only some extra resources in the backup.
+
+```yaml
+apiVersion: admin.spaces.upbound.io/v1alpha1
+kind: SpaceBackup
+metadata:
+  name: my-backup
+spec:
+  configRef:
+    kind: SpaceBackupConfig
+    name: default
+  match:
+    groups:
+      labelSelectors:
+      - matchLabels:
+          environment: production
+    controlPlanes:
+      labelSelectors:
+      - matchLabels:
+          backup: please
+    secrets:
+      names:
+      - my-secret
+    extras:
+    - apiGroup: "spaces.upbound.io"
+      kind: "SharedBackupConfig"
+      names:
+      - my-shared-backup
+```
+
+### Excluding Space resources from the backup
+
+Use the `spec.exclude` field to exclude matched Space API resources from the backup.
+
+Different fields are available to exclude resources based on labels or names:
+- `spec.exclude.groups` to exclude some groups from the backup.
+- `spec.exclude.controlPlanes` to exclude some control planes from the backup.
+- `spec.exclude.secrets` to exclude some secrets from the backup.
+- `spec.exclude.extras` to exclude some extra resources from the backup.
+
+```yaml
+apiVersion: admin.spaces.upbound.io/v1alpha1
+kind: SpaceBackup
+metadata:
+  name: my-backup
+spec:
+  ttl: 168h # Backup is garbage collected after 7 days
+  configRef:
+    kind: SpaceBackupConfig
+    name: default
+  match:
+    groups:
+      labelSelectors:
+      - matchLabels:
+          environment: production
+  exclude:
+    groups:
+      names:
+      - not-this-one-please
+```
+
+### Exclude resources in control planes' backups
+
+By default, a Space backup includes all resources in each selected control plane.
+
+Use the `spec.controlPlaneBackups.excludedResources` field to exclude resources from control planes' backups.
+
+```yaml
+apiVersion: admin.spaces.upbound.io/v1alpha1
+kind: SpaceBackup
+metadata:
+  name: my-backup
+spec:
+  ttl: 168h # Backup is garbage collected after 7 days
+  configRef:
+    kind: SpaceBackupConfig
+    name: default
+  controlPlaneBackups:
+    excludedResources:
+    - secrets
+    - buckets.s3.aws.upbound.io
+```
+
+## Create a manual backup
+
+[SpaceBackup][spacebackup] is a cluster-scoped resource that causes a single backup to occur for the whole Space.
+
+Below is an example of a manual SpaceBackup:
+
+```yaml
+apiVersion: admin.spaces.upbound.io/v1alpha1
+kind: SpaceBackup
+metadata:
+  name: my-backup
+spec:
+  configRef:
+    kind: SpaceBackupConfig
+    name: default
+  deletionPolicy: Delete
+```
+
+
+The backup's `spec.deletionPolicy` defines backup deletion actions,
+including the deletion of the backup file from the bucket. The `deletionPolicy`
+value defaults to `Orphan`. Set it to `Delete` to remove uploaded files
+in the bucket.
+For more information on the backup and restore process, review the [Spaces API documentation][spaces-api-documentation].
+
+### Set the time to live
+
+Set the `spec.ttl` field to define the time to live for the backup. After this time, the backup is eligible for garbage collection. If this field isn't set, the backup isn't garbage collected. The time to live is a duration, for example, `168h` for 7 days.
+
+```yaml
+apiVersion: admin.spaces.upbound.io/v1alpha1
+kind: SpaceBackup
+metadata:
+  name: my-backup
+spec:
+  ttl: 168h # Backup is garbage collected after 7 days
+...
+```
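+
+After applying a manual backup, you can watch its progress with a standard
+`kubectl get` on the resource. The `my-backup.yaml` file is assumed to contain
+the manifest above, and column output varies by version:
+
+```shell
+kubectl apply -f my-backup.yaml
+kubectl get spacebackups.admin.spaces.upbound.io my-backup
+```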
+
+## Restore from a space backup
+
+Space Backup and Restore focuses only on disaster recovery. The restore procedure assumes a new Space installation with no existing resources. The restore procedure is idempotent, so in case of failures you can run it multiple times without side effects.
+
+To restore a Space from an existing Space Backup, follow these steps:
+
+1. Install Spaces from scratch as needed.
+2. Create a `SpaceBackupConfig` as needed to access the SpaceBackup from the object storage, for example named `my-backup-config`.
+3. Select the backup you want to restore from, for example `my-backup`.
+4. Run the following command to restore the Space:
+
+```shell
+export SPACE_BACKUP_CONFIG=my-backup-config
+export SPACE_BACKUP=my-backup
+kubectl exec -ti -n upbound-system deployments/spaces-controller -c spaces -- hyperspace restore $SPACE_BACKUP $SPACE_BACKUP_CONFIG
+```
+
+### Restore specific control planes
+
+:::important
+This feature is available from Spaces v1.11.
+:::
+
+Instead of restoring the whole Space, you can choose to restore specific control planes
+from a backup using the `--controlplanes` flag. You can also use
+the `--skip-space-restore` flag to skip restoring Space objects.
+This allows Spaces admins to restore individual control planes without
+needing to restore the entire Space.
+
+```shell
+export SPACE_BACKUP_CONFIG=my-backup-config
+export SPACE_BACKUP=my-backup
+kubectl exec -ti -n upbound-system deployments/spaces-controller -c spaces \
+  -- hyperspace restore $SPACE_BACKUP $SPACE_BACKUP_CONFIG --controlplanes default/ctp1,default/ctp2 --skip-space-restore
+```
+
+
+[shared-backups]: /spaces/howtos/self-hosted/workload-id/backup-restore-config/
+[spacebackupconfig]: /reference/apis/spaces-api/v1_9
+[thanos-object-storage]: https://thanos.io/tip/thanos/storage.md/
+[spacebackupschedule]: /reference/apis/spaces-api/v1_9
+[cron-formatted]: https://en.wikipedia.org/wiki/Cron
+[spacebackup]: /reference/apis/spaces-api/v1_9
+[spaces-api-documentation]: /reference/apis/spaces-api/v1_9
+
diff --git a/spaces_versioned_docs/version-v1.13/howtos/self-hosted/gitops-with-argocd.md b/spaces_versioned_docs/version-v1.13/howtos/self-hosted/gitops-with-argocd.md
new file mode 100644
index 000000000..004247a10
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.13/howtos/self-hosted/gitops-with-argocd.md
@@ -0,0 +1,142 @@
+---
+title: GitOps with ArgoCD in Self-Hosted Spaces
+sidebar_position: 80
+description: Set up GitOps workflows with Argo CD in self-hosted Spaces
+plan: "business"
+---
+
+:::info Deployment Model
+This guide applies to **self-hosted Spaces** deployments. For Upbound Cloud Spaces, see [GitOps with Upbound Control Planes](/spaces/howtos/cloud-spaces/gitops-on-upbound/).
+:::
+
+GitOps is an approach to managing a system by declaratively describing desired resource configurations in Git and using controllers to realize the desired state. Upbound control planes are compatible with this pattern, and Upbound strongly recommends integrating GitOps in the platforms you build on Upbound.
+
+
+## Integrate with Argo CD
+
+
+[Argo CD][argo-cd] is a project in the Kubernetes ecosystem commonly used for
+GitOps. You can use it in tandem with Upbound control planes to achieve GitOps
+flows. The sections below explain how to integrate these tools with Upbound.
+
+### Configure connection secrets for control planes
+
+You can configure control planes to write their connection details to a secret.
+Do this by setting the
+[`spec.writeConnectionSecretToRef`][spec-writeconnectionsecrettoref] field in a
+control plane manifest. For example:
+
+```yaml
+apiVersion: spaces.upbound.io/v1beta1
+kind: ControlPlane
+metadata:
+  name: ctp1
+  namespace: default
+spec:
+  writeConnectionSecretToRef:
+    name: kubeconfig-ctp1
+    namespace: default
+```
+
+
+### Configure Argo CD
+
+
+To configure Argo CD for annotation-based resource tracking, edit the Argo CD
+ConfigMap in the Argo CD namespace. Add `application.resourceTrackingMethod:
+annotation` to the data section as below.
+
+Next, configure the [auto respect RBAC for the Argo CD
+controller][auto-respect-rbac-for-the-argo-cd-controller-1]. By default, Argo CD
+attempts to discover some Kubernetes resource types that don't exist in a
+control plane. You must configure Argo CD to respect the cluster's RBAC rules so
+that Argo CD can sync. Add `resource.respectRBAC: normal` to the data section as
+below.
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: argocd-cm
+data:
+  ...
+  application.resourceTrackingMethod: annotation
+  resource.respectRBAC: normal
+```
+
+:::tip
+The `resource.respectRBAC` configuration above tells Argo to respect RBAC for
+_all_ cluster contexts. If you're using an Argo CD instance to manage more than
+only control planes, you should consider changing the `clusters` string match
+for the configuration to apply only to control planes. For example, if every
+control plane context name followed the convention of being named
+`controlplane-<name>`, you could set the string match to be `controlplane-*`.
+:::
+
+
+### Create a cluster context definition
+
+
+Once the control plane is ready, extract the following values from the secret
+containing the kubeconfig:
+
+```bash
+kubeconfig_content=$(kubectl get secrets kubeconfig-ctp1 -n default -o jsonpath='{.data.kubeconfig}' | base64 -d)
+server=$(echo "$kubeconfig_content" | grep 'server:' | awk '{print $2}')
+bearer_token=$(echo "$kubeconfig_content" | grep 'token:' | awk '{print $2}')
+ca_data=$(echo "$kubeconfig_content" | grep 'certificate-authority-data:' | awk '{print $2}')
+```
+
+Generate a new secret in the cluster where you installed Argo, using the prior
+values extracted. The Secret below follows Argo CD's declarative cluster-secret
+format; the secret name and `argocd` namespace are placeholders to adjust to
+your installation:
+
+```yaml
+cat <<EOF | kubectl apply -f -
+apiVersion: v1
+kind: Secret
+metadata:
+  name: ctp1-cluster
+  namespace: argocd
+  labels:
+    argocd.argoproj.io/secret-type: cluster
+stringData:
+  name: controlplane-ctp1
+  server: ${server}
+  config: |
+    {
+      "bearerToken": "${bearer_token}",
+      "tlsClientConfig": {
+        "insecure": false,
+        "caData": "${ca_data}"
+      }
+    }
+EOF
+```
+
+import GlobalLanguageSelector, { CodeBlock } from '@site/src/components/GlobalLanguageSelector';
+
+
+
+
+:::important
+This feature is only available for select Business Critical customers. You can't
+set up your own Managed Space without the assistance of Upbound. If you're
+interested in this deployment mode, please [contact us][contact].
+:::
+
+
+
+A Managed Space deployed on AWS is a single-tenant deployment of a control plane
+space in your AWS organization in an isolated sub-account. With Managed Spaces,
+you can use the same API, CLI, and Console that Upbound offers, with the benefit
+of running entirely in a cloud account that you own and Upbound manages for you.
+
+The following guide walks you through setting up a Managed Space in your AWS
+organization. If you have any questions while working through this guide,
+contact your Upbound Account Representative for help.
+
+
+
+
+
+A Managed Space deployed on GCP is a single-tenant deployment of a control plane
+space in your GCP organization in an isolated project.
+With Managed Spaces, you
+can use the same API, CLI, and Console that Upbound offers, with the benefit of
+running entirely in a cloud account that you own and Upbound manages for you.
+
+The following guide walks you through setting up a Managed Space in your GCP
+organization. If you have any questions while working through this guide,
+contact your Upbound Account Representative for help.
+
+
+
+
+## Managed Space on your cloud architecture
+
+
+
+A Managed Space is a deployment of the Upbound Spaces software inside an
+Upbound-controlled sub-account in your AWS cloud environment. The Spaces
+software runs in this sub-account, orchestrated by Kubernetes. Backups and
+billing data get stored inside bucket or blob storage in the same sub-account.
+The control planes deployed and controlled by the Spaces software run on the
+Kubernetes cluster that's deployed into the sub-account.
+
+The diagram below illustrates the high-level architecture of Upbound Managed Spaces:
+
+![Upbound Managed Spaces arch](/img/managed-arch-aws.png)
+
+The Spaces software gets deployed on an EKS Cluster in the region of your
+choice. This EKS cluster is where your control planes ultimately run.
+Upbound also deploys two buckets: one for the collection of billing data and one
+for control plane backups.
+
+Upbound doesn't have access to other sub-accounts or your organization-level
+settings in your cloud environment. Outside of your cloud organization, Upbound
+runs the Upbound Console, which includes the Upbound API and web application,
+including the dashboard you see at `console.upbound.io`. By default, all
+connections are encrypted, but public. Optionally, you can use private network
+connectivity through [AWS PrivateLink][aws-privatelink].
+
+
+
+
+
+
+A Managed Space is a deployment of the Upbound Spaces software inside an
+Upbound-controlled project in your GCP cloud environment. The Spaces software
+runs in this project, orchestrated by Kubernetes. Backups and billing data get
+stored inside bucket or blob storage in the same project. The control planes
+deployed and controlled by the Spaces software run on the Kubernetes cluster
+that's deployed into the project.
+
+The diagram below illustrates the high-level architecture of Upbound Managed Spaces:
+
+![Upbound Managed Spaces arch](/img/managed-arch-gcp.png)
+
+The Spaces software gets deployed on a GKE Cluster in the region of your choice.
+This GKE cluster is where your control planes ultimately run. Upbound also
+deploys two cloud buckets: one for the collection of billing data and one for
+control plane backups.
+
+Upbound doesn't have access to other projects or your organization-level
+settings in your cloud environment. Outside of your cloud organization, Upbound
+runs the Upbound Console, which includes the Upbound API and web application,
+including the dashboard you see at `console.upbound.io`. By default, all
+connections are encrypted, but public. Optionally, you can use private network
+connectivity through [GCP Private Service
+Connect][gcp-private-service-connect].
+
+
+
+## Prerequisites
+
+- An organization created on Upbound
+
+
+
+- You should have a preexisting AWS organization to complete this guide.
+- You must create a new AWS sub-account. Read the [AWS documentation][aws-documentation] to learn how to create a new sub-account in an existing organization on AWS.
+
+After you provide the sub-account information to Upbound, **don't change it
+any further.** Any changes made to the sub-account or the resources created by
+Upbound for the purposes of the Managed Space deployment void the SLA you have
+with Upbound. If you want to make configuration changes, contact your Upbound
+Solutions Architect.
+
+
+
+
+
+- You should have a preexisting GCP organization with an active Cloud Billing account to complete this guide.
+- You must create a new GCP project. Read the [GCP documentation][gcp-documentation] to learn how to create a new project in an existing organization on GCP.
+
+After you provide the project information to Upbound, **don't change it any
+further.** Any changes made to the project or the resources created by Upbound
+for the purposes of the Managed Space deployment void the SLA you have with
+Upbound. If you want to make configuration changes, contact your Upbound
+Solutions Architect.
+
+
+
+
+
+## Set up cross-account management
+
+Upbound supports using AWS Key Management Service with cross-account IAM
+permissions. This enables the isolation of keys so the infrastructure operated
+by Upbound has limited access to symmetric keys.
+
+In the KMS key's account, apply the baseline key policy:
+
+```json
+{
+  "Sid": "Allow Upbound to use this key",
+  "Effect": "Allow",
+  "Principal": {
+    "AWS": ["[Managed Space sub-account ID]"]
+  },
+  "Action": ["kms:Encrypt", "kms:Decrypt", "kms:ReEncrypt*", "kms:GenerateDataKey*", "kms:DescribeKey"],
+  "Resource": "*"
+}
+```
+
+You need another key policy to let the sub-account create persistent resources
+with the KMS key:
+
+```json
+{
+  "Sid": "Allow attachment of persistent resources for an Upbound Managed Space",
+  "Effect": "Allow",
+  "Principal": {
+    "AWS": "[Managed Space sub-account ID]"
+  },
+  "Action": ["kms:CreateGrant", "kms:ListGrants", "kms:RevokeGrant"],
+  "Resource": "*",
+  "Condition": {
+    "Bool": {
+      "kms:GrantIsForAWSResource": "true"
+    }
+  }
+}
+```
+
+### Configure PrivateLink
+
+By default, all connections to the Upbound Console are encrypted, but public.
+AWS PrivateLink is a feature that allows VPC peering whereby your traffic
+doesn't traverse the public internet. To have this configured, contact your
+Upbound Account Representative.
+
+
+
+
+
+## Enable APIs
+
+Enable the following APIs in the new project:
+
+- Kubernetes Engine API
+- Cloud Resource Manager API
+- Compute Engine API
+- Cloud DNS API
+
+:::tip
+Read how to enable APIs in a GCP project [here][here].
+:::
+
+## Create a service account
+
+Create a service account in the new project. Name the service account
+`upbound-sa`. Give the service account the following roles:
+
+- Compute Admin
+- Project IAM Admin
+- Service Account Admin
+- DNS Administrator
+- Editor
+
+Select the service account you just created. Select **Keys**. Add a new key and
+select JSON. The key gets downloaded to your machine. Save this for later.
+
+## Create a DNS Zone
+
+Create a DNS Zone and set the **Zone type** to `Public`.
+
+### Configure Private Service Connect
+
+By default, all connections to the Upbound Console are encrypted, but public.
+GCP Private Service Connect is a feature that allows VPC peering whereby your
+traffic doesn't traverse the public internet. To have this configured, contact
+your Upbound Account Representative.
+
+
+
+## Provide information to Upbound
+
+Once these policies get attached to the key, tell your Upbound Account
+Representative, providing them the following:
+
+
+
+- The full ARN of the KMS key.
+- The name of the organization that you created in Upbound. Use the up CLI command, `up org list`, to see this information.
+- Confirmation of which region in AWS you want the deployment to target.
+
+
+
+
+
+- The service account JSON key
+- The NS records associated with the DNS name created in the last step.
+- The name of the organization that you created in Upbound. Use the up CLI command, `up org list`, to see this information.
+- Confirmation of which region in GCP you want the deployment to target.
+
+
+
+Once Upbound has this information, the request gets processed within a business day.
+
+## Use your Managed Space
+
+Once the Managed Space gets deployed, you can see it in the Space selector when browsing your environment on [`console.upbound.io`][console-upbound-io].
+
+
+
+
+[contact]: https://www.upbound.io/contact-us
+[aws-privatelink]: #configure-privatelink
+[aws-documentation]: https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_create.html#orgs_manage_accounts_create-new
+[gcp-private-service-connect]: #configure-private-service-connect
+[gcp-documentation]: https://cloud.google.com/resource-manager/docs/creating-managing-organization
+[here]: https://cloud.google.com/apis/docs/getting-started#enabling_apis
+[console-upbound-io]: https://console.upbound.io/
diff --git a/spaces_versioned_docs/version-v1.13/howtos/self-hosted/oidc-configuration.md b/spaces_versioned_docs/version-v1.13/howtos/self-hosted/oidc-configuration.md
new file mode 100644
index 000000000..cbef4dc42
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.13/howtos/self-hosted/oidc-configuration.md
@@ -0,0 +1,289 @@
+---
+title: Configure OIDC
+sidebar_position: 20
+description: Configure OIDC in your Space
+---
+:::important
+This guide is only applicable for administrators who've deployed self-hosted Spaces. For general RBAC in Upbound, read [Upbound RBAC][upbound-rbac].
+:::
+
+Upbound uses the Kubernetes [Structured Authentication Configuration][structured-auth-config] to validate OIDC tokens sent to the API. Upbound stores this
+configuration as a `ConfigMap` and passes it to the Upbound router
+component during installation with Helm.
+
+This guide walks you through how to create and apply an authentication
+configuration to validate Upbound with an external identity provider. Each
+section focuses on a specific part of the configuration file.
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments.
+
+For related platform authentication features, see the [Platform manual](../../../../platform/).
+:::
+
+## Creating the `AuthenticationConfiguration` file
+
+First, create a file called `config.yaml` with an `AuthenticationConfiguration`
+kind. The `AuthenticationConfiguration` is the initial authentication structure
+necessary for Upbound to communicate with your chosen identity provider.
+
+```yaml
+apiVersion: apiserver.config.k8s.io/v1beta1
+kind: AuthenticationConfiguration
+jwt:
+- issuer:
+    url: oidc-issuer-url
+    audiences:
+    - oidc-client-id
+  claimMappings: # optional
+    username:
+      claim: oidc-username-claim
+      prefix: oidc-username-prefix
+    groups:
+      claim: oidc-groups-claim
+      prefix: oidc-groups-prefix
+```
+
+
+For detailed configuration options, including CEL-based token validation,
+review the feature [documentation][structured-auth-config].
+
+
+The `AuthenticationConfiguration` allows you to configure multiple JWT
+authenticators as separate issuers.
+
+### Configure an issuer
+
+The `jwt` array requires an `issuer` specification and typically contains:
+
+- A `username` claim mapping
+- A `groups` claim mapping
+
+Optionally, the configuration may also include:
+
+- A set of claim validation rules
+- A set of user validation rules
+
+The `issuer` URL must be unique across all configured authenticators.
+
+```yaml
+issuer:
+  url: https://example.com
+  discoveryUrl: https://discovery.example.com/.well-known/openid-configuration
+  certificateAuthority: |-
+    <PEM-encoded CA bundle>
+  audiences:
+  - client-id-a
+  - client-id-b
+  audienceMatchPolicy: MatchAny
+```
+
+By default, the authenticator assumes the OIDC Discovery URL is
+`{issuer.url}/.well-known/openid-configuration`. Most identity providers follow
+this structure, and you can omit the `discoveryUrl` field. To use a separate
+discovery service, specify the full path to the discovery endpoint in this
+field.
+
+If the CA for the issuer isn't public, provide the PEM encoded CA for the Discovery URL.
+
+At least one of the `audiences` entries must match the `aud` claim in the JWT.
+For OIDC tokens, this is the Client ID of the application attempting to access
+the Upbound API. Having multiple values set allows the same configuration to
+apply to multiple client applications, for example the `kubectl` CLI and an
+Internal Developer Portal.
+
+If you specify multiple `audiences`, `audienceMatchPolicy` must equal `MatchAny`.
+
+### Configure `claimMappings`
+
+#### Username claim mapping
+
+By default, the authenticator uses the `sub` claim as the user name. To override this, either:
+
+- specify *both* `claim` and `prefix`. `prefix` may be explicitly set to the empty string.
+
+or
+
+- specify a CEL `expression` to calculate the user name.
+
+```yaml
+claimMappings:
+  username:
+    claim: "sub"
+    prefix: "keycloak"
+    # ...or, alternatively, use a CEL expression:
+    expression: 'claims.username + ":external-user"'
+```
+
+
+#### Groups claim mapping
+
+By default, this configuration doesn't map groups, unless you either:
+
+- specify both `claim` and `prefix`. `prefix` may be explicitly set to the empty string.
+
+or
+
+- specify a CEL `expression` that returns a string or list of strings.
+
+
+```yaml
+claimMappings:
+  groups:
+    claim: "groups"
+    prefix: ""
+    # ...or, alternatively, use a CEL expression:
+    expression: 'claims.roles.split(",")'
+```
+
+
+### Validation rules
+
+
+Validation rules are outside the scope of this document. Review the
+[documentation][structured-auth-config] for more information. Examples include
+using CEL expressions to validate authentication such as:
+
+
+- Validating that a token claim has a specific value
+- Validating that a token has a limited lifetime
+- Ensuring usernames and groups don't contain reserved prefixes
+
+## Required claims
+
+To interact with Space and ControlPlane APIs, users must have the `upbound.io/aud` claim set to one of the following:
+
+| Upbound.io Audience                                       | Notes                                                       |
+| --------------------------------------------------------- | ----------------------------------------------------------- |
+| `[]`                                                       | No access to Space-level or ControlPlane APIs               |
+| `['upbound:spaces:api']`                                   | This identity is only for Space-level APIs                  |
+| `['upbound:spaces:controlplanes']`                         | This identity is only for ControlPlane APIs                 |
+| `['upbound:spaces:api', 'upbound:spaces:controlplanes']`   | This identity is for both Space-level and ControlPlane APIs |
+
+
+You can set this claim in two ways:
+
+- In the identity provider, mapped into the ID token.
+- Injected in the authenticator with the `jwt.claimMappings.extra` array.
+
+For example:
+
+```yaml
+apiVersion: apiserver.config.k8s.io/v1beta1
+kind: AuthenticationConfiguration
+jwt:
+- issuer:
+    url: https://keycloak:8443/realms/master
+    certificateAuthority: |-
+      <PEM-encoded CA bundle>
+    audiences:
+    - master-realm
+    audienceMatchPolicy: MatchAny
+  claimMappings:
+    username:
+      claim: "preferred_username"
+      prefix: "keycloak:"
+    groups:
+      claim: "groups"
+      prefix: ""
+    extra:
+    - key: 'upbound.io/aud'
+      valueExpression: "['upbound:spaces:controlplanes', 'upbound:spaces:api']"
+```
+
+## Install the `AuthenticationConfiguration`
+
+Once you create an `AuthenticationConfiguration` file, store this file as a
+`ConfigMap` in the host cluster for the Upbound Space:
+
+```sh
+kubectl create configmap <configmap-name> -n upbound-system --from-file=config.yaml=./path/to/config.yaml
+```
+
+
+To enable OIDC authentication and disable Upbound IAM when installing the Space,
+reference the configuration and pass an empty value to the Upbound IAM issuer
+parameter:
+
+
+```sh
+up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
+  ...
+  --set "authentication.structuredConfig=<configmap-name>" \
+  --set "router.controlPlane.extraArgs[0]=--upbound-iam-issuer-url="
+```
+
+## Configure RBAC
+
+
+In this scenario, the external identity provider handles authentication, but
+permissions for Spaces and ControlPlane APIs use standard RBAC objects.
+
+### Spaces APIs
+
+The Spaces APIs include:
+
+```yaml
+- apiGroups:
+  - spaces.upbound.io
+  resources:
+  - controlplanes
+  - sharedexternalsecrets
+  - sharedsecretstores
+  - backups
+  - backupschedules
+  - sharedbackups
+  - sharedbackupconfigs
+  - sharedbackupschedules
+- apiGroups:
+  - observability.spaces.upbound.io
+  resources:
+  - sharedtelemetryconfigs
+```
+
+### ControlPlane APIs
+
+
+
+Crossplane specifies three [roles][crossplane-managed-clusterroles] for a
+ControlPlane: admin, editor, and viewer. These map to the verbs `admin`, `edit`,
+and `view` on the `controlplanes/k8s` resource in the `spaces.upbound.io` API
+group.
+
+
+### Control access
+
+The `groups` claim in the `AuthenticationConfiguration` allows you to control
+resource access when you create a `ClusterRoleBinding`. A `ClusterRole` defines
+the role's permissions, and a `ClusterRoleBinding` grants them to a set of subjects.
+
+The example below grants `admin` permissions for all control planes to members of
+the `ctp-admins` group:
+
+```yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: allow-ctp-admin
+rules:
+- apiGroups:
+  - spaces.upbound.io
+  resources:
+  - controlplanes/k8s
+  verbs:
+  - admin
+```
+
+The corresponding `ClusterRoleBinding` binds the role to the `ctp-admins` group:
+
+```yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: allow-ctp-admin
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: allow-ctp-admin
+subjects:
+- apiGroup: rbac.authorization.k8s.io
+  kind: Group
+  name: ctp-admins
+```
+
+[structured-auth-config]: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#using-authentication-configuration
+[crossplane-managed-clusterroles]: https://github.com/crossplane/crossplane/blob/master/design/design-doc-rbac-manager.md#managed-rbac-clusterroles
+[upbound-rbac]: /manuals/platform/concepts/authorization/upbound-rbac
diff --git a/spaces_versioned_docs/version-v1.13/howtos/self-hosted/proxies-config.md b/spaces_versioned_docs/version-v1.13/howtos/self-hosted/proxies-config.md
new file mode 100644
index 000000000..3802e4cb0
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.13/howtos/self-hosted/proxies-config.md
@@ -0,0 +1,31 @@
+---
+title: Proxied configuration
+sidebar_position: 20
+description: Configure Upbound within a proxied environment
+---
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments in proxied environments. Proxy configuration procedures are consistent across versions.
+:::
+
+
+
+When you install Upbound with Helm in a proxied environment, update the specified registry settings to point at your internal registry.
+
+
+
+```bash
+helm -n upbound-system upgrade --install spaces \
+  oci://xpkg.upbound.io/spaces-artifacts/spaces \
+  --version "${SPACES_VERSION}" \
+  --set "ingress.host=${SPACES_ROUTER_HOST}" \
+  --set "account=${UPBOUND_ACCOUNT}" \
+  --set "authentication.hubIdentities=true" \
+  --set "authorization.hubRBAC=true" \
+  --set "registry=registry.company.corp/spaces" \
+  --set "controlPlanes.uxp.registryOverride=registry.company.corp/xpkg.upbound.io" \
+  --set "controlPlanes.uxp.repository=registry.company.corp/spaces" \
+  --wait
+```
diff --git a/spaces_versioned_docs/version-v1.13/howtos/self-hosted/query-api.md b/spaces_versioned_docs/version-v1.13/howtos/self-hosted/query-api.md
new file mode 100644
index 000000000..c112e9001
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.13/howtos/self-hosted/query-api.md
@@ -0,0 +1,396 @@
+---
+title: Deploy Query API infrastructure
+weight: 130
+description: Query API
+aliases:
+  - /all-spaces/self-hosted-spaces/query-api
+  - /self-hosted-spaces/query-api
+  - all-spaces/self-hosted-spaces/query-api
+---
+
+
+
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+). Query API has evolved across versions:
+
+- **Cloud Spaces**: Available since v1.6 (enabled by default)
+- **Self-Hosted**: Available since v1.8 (requires manual enablement)
+:::
+
+:::important
+
+This feature is in preview. The Query API is available in Cloud Spaces since `v1.6`, where it's enabled by default.
+
+Since `v1.8.0`, it's also a requirement for connecting a Space. It's off by default in self-hosted Spaces; see below to enable it.
+
+:::
+
+Upbound's Query API allows users to inspect objects and resources within their control planes. The read-only `up alpha query` and `up alpha get` CLI commands let you gather information on your control planes quickly and efficiently. These commands follow the [`kubectl` conventions][kubectl-conventions] for filtering, sorting, and retrieving information from your Space.
+
+The Query API requires a PostgreSQL database to store the data. You can use the default PostgreSQL instance provided by Upbound or bring your own PostgreSQL instance.
+
+## Managed setup
+
+:::tip
+If you don't have specific requirements for your setup, Upbound recommends following this approach.
+:::
+
+To enable this feature, set `features.alpha.apollo.enabled=true` and `apollo.apollo.storage.postgres.create=true` when installing Spaces.
+
+You also need to install CloudNativePG (`CNPG`) to provide the PostgreSQL instance. You can let the `up` CLI do this for you, or install it manually.
+
+For more customization, see the [Helm chart reference][helm-chart-reference]. You can modify the number
+of PostgreSQL instances, pooling instances, storage size, and more.
+
+If you have specific requirements not addressed in the Helm chart, see the [PostgreSQL setup][postgresql-setup] section below for more information on bringing your own instance.
+
+### Using the up CLI
+
+Before you begin, make sure you have the most recent version of the [`up` CLI][up-cli-installed] installed.
+
+To enable this feature, set `features.alpha.apollo.enabled=true` and `apollo.apollo.storage.postgres.create=true` when installing Spaces:
+
+```bash
+up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
+  ...
+  --set "features.alpha.apollo.enabled=true" \
+  --set "apollo.apollo.storage.postgres.create=true"
+```
+
+`up space init` and `up space upgrade` install CloudNativePG automatically, if needed.
+
+### Helm chart
+
+If you are installing the Helm chart in some other way, you can manually install CloudNativePG in one of the [supported ways][supported-ways], for example:
+
+```shell
+kubectl apply --server-side -f \
+  https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.24.1/cnpg-1.24.1.yaml
+kubectl rollout status -n cnpg-system deployment cnpg-controller-manager -w --timeout 120s
+```
+
+Next, install the Spaces Helm chart with the necessary values, for example:
+
+```shell
+helm -n upbound-system upgrade --install spaces \
+  oci://xpkg.upbound.io/spaces-artifacts/spaces \
+  --version "${SPACES_VERSION}" \
+  ...
+  --set "features.alpha.apollo.enabled=true" \
+  --set "apollo.apollo.storage.postgres.create=true" \
+  --wait
+```
+
+## Self-hosted PostgreSQL configuration
+
+
+If your workflow requires more customization, you can provide your own
+PostgreSQL instance and configure credentials manually.
+
+Using your own PostgreSQL instance requires careful architecture consideration.
+Review the architecture and requirements guidelines.
+
+### Architecture
+
+Besides a PostgreSQL database, the Query API architecture uses two components:
+
+* **Apollo Syncers**: Watch `etcd` for changes and sync them to PostgreSQL. One, or more, per control plane.
+* **Apollo Server**: Serves the Query API out of the data in PostgreSQL. One, or more, per Space.
+
+The default setup also uses the `PgBouncer` connection pooler to manage connections from the syncers.
+```mermaid
+graph LR
+    User[User]
+
+    subgraph Cluster["Cluster (Spaces)"]
+        direction TB
+        Apollo[apollo]
+
+        subgraph ControlPlanes["Control Planes"]
+            APIServer[API Server]
+            Syncer[apollo-syncer]
+        end
+    end
+
+    PostgreSQL[(PostgreSQL)]
+
+    User -->|requests| Apollo
+
+    Apollo -->|connects| PostgreSQL
+    Apollo -->|creates schemas & users| PostgreSQL
+
+    Syncer -->|watches| APIServer
+    Syncer -->|writes| PostgreSQL
+
+    PostgreSQL -->|data| Apollo
+
+    style PostgreSQL fill:#e1f5ff,stroke:#333,stroke-width:2px,color:#000
+    style Apollo fill:#ffe1e1,stroke:#333,stroke-width:2px,color:#000
+    style Cluster fill:#f0f0f0,stroke:#333,stroke-width:2px,color:#000
+    style ControlPlanes fill:#fff,stroke:#666,stroke-width:1px,stroke-dasharray: 5 5,color:#000
+```
+
+
+Each component needs to connect to the PostgreSQL database.
+
+In the event of database issues, you can provide a new database and the syncers
+automatically repopulate the data.
+
+### Requirements
+
+* A PostgreSQL 16 instance or cluster.
+* A database, for example named `upbound`.
+* **Optional**: A dedicated user for the Apollo Syncers, for example named `syncer`. Otherwise, the Spaces Controller generates a dedicated set of credentials per syncer with the necessary permissions.
+* A dedicated **superuser or admin account** for the Apollo Server.
+* **Optional**: A connection pooler, like PgBouncer, to manage connections from the Apollo Syncers. If you didn't provide the optional users, you might have to configure the pooler to allow users to connect using the same credentials as PostgreSQL.
+* **Optional**: A read replica for the Apollo Syncers to connect to, reducing load on the primary database; this might cause a slight delay in the data being available through the Query API.
+
+Below are examples of setups to get you started. You can mix and match the examples to suit your needs.
+
+### In-cluster setup
+
+:::tip
+
+If you don't have strong opinions on your setup, but still want full control of
+the resources created for some unsupported customizations, Upbound recommends
+the in-cluster setup.
+
+:::
+
+For more customization than the managed setup, you can use CloudNativePG for
+PostgreSQL in the same cluster.
+
+For in-cluster setup, manually deploy the operator in one of the [supported ways][supported-ways-1], for example:
+
+```shell
+kubectl apply --server-side -f \
+  https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.24.1/cnpg-1.24.1.yaml
+kubectl rollout status -n cnpg-system deployment cnpg-controller-manager -w --timeout 120s
+```
+
+Then create a `Cluster` and `Pooler` in the `upbound-system` namespace. The
+manifest below is a minimal sketch — names, sizes, and instance counts are
+illustrative; see the [CloudNativePG documentation][cloudnativepg-documentation]
+for storage and [replication][postgresql-cluster-2] options, and the
+[Pooler docs][pooler] for pooler customization:
+
+```shell
+kubectl create ns upbound-system
+
+kubectl apply -f - <<EOF
+# Minimal example PostgreSQL cluster
+apiVersion: postgresql.cnpg.io/v1
+kind: Cluster
+metadata:
+  name: spaces-db
+  namespace: upbound-system
+spec:
+  instances: 3
+  storage:
+    size: 20Gi
+---
+# A PgBouncer pooler in front of the cluster for the syncer connections
+apiVersion: postgresql.cnpg.io/v1
+kind: Pooler
+metadata:
+  name: spaces-db-pooler
+  namespace: upbound-system
+spec:
+  cluster:
+    name: spaces-db
+  instances: 3
+  type: rw
+  pgbouncer:
+    poolMode: session
+EOF
+```
+
+### External setup
+
+
+:::tip
+
+If you want to run your PostgreSQL instance outside the cluster, but are fine with credentials being managed by the `apollo` user, this is the suggested way to proceed.
+
+:::
+
+When using this setup, you must manually create the required Secrets in the
+`upbound-system` namespace. The `apollo` user must have permissions to create
+schemas and users.
+
+```shell
+
+kubectl create ns upbound-system
+
+# A Secret containing the necessary credentials to connect to the PostgreSQL instance
+kubectl create secret generic spaces-apollo-pg-app -n upbound-system \
+  --from-literal=password=supersecret
+
+# A Secret containing the necessary CA certificate to verify the connection to the PostgreSQL instance
+kubectl create secret generic spaces-apollo-pg-ca -n upbound-system \
+  --from-file=ca.crt=/path/to/ca.crt
+```
+
+Next, install Spaces with the necessary settings:
+
+```shell
+export PG_URL=your-postgres-host:5432
+export PG_POOLED_URL=your-pgbouncer-host:5432 # this could be the same as above
+
+helm upgrade --install ... \
+  --set "features.alpha.apollo.enabled=true" \
+  --set "apollo.apollo.storage.postgres.create=false" \
+  --set "apollo.apollo.storage.postgres.connection.url=$PG_URL" \
+  --set "apollo.apollo.storage.postgres.connection.credentials.secret.name=spaces-apollo-pg-app" \
+  --set "apollo.apollo.storage.postgres.connection.credentials.format=basicauth" \
+  --set "apollo.apollo.storage.postgres.connection.ca.name=spaces-apollo-pg-ca" \
+  --set "apollo.apollo.storage.postgres.connection.syncer.url=$PG_POOLED_URL"
+```
+
+### External setup with all custom credentials
+
+For custom credentials with the Apollo Syncers or Server, create the Secrets in the
+`upbound-system` namespace:
+
+```shell
+export APOLLO_SYNCER_USER=syncer
+export APOLLO_SERVER_USER=apollo
+
+kubectl create ns upbound-system
+
+# A Secret containing the necessary credentials to connect to the PostgreSQL instance
+kubectl create secret generic spaces-apollo-pg-app -n upbound-system \
+  --from-literal=password=supersecret
+
+# A Secret containing the necessary CA certificate to verify the connection to the PostgreSQL instance
+kubectl create secret generic spaces-apollo-pg-ca -n upbound-system \
+  --from-file=ca.crt=/path/to/ca.crt
+
+# A Secret containing the necessary credentials for the Apollo Syncers to connect to the PostgreSQL instance.
+# These will be used by all Syncers in the Space.
+kubectl create secret generic spaces-apollo-pg-syncer -n upbound-system \
+  --from-literal=username=$APOLLO_SYNCER_USER \
+  --from-literal=password=supersecret
+
+# A Secret containing the necessary credentials for the Apollo Server to connect to the PostgreSQL instance.
+kubectl create secret generic spaces-apollo-pg-apollo -n upbound-system \
+  --from-literal=username=$APOLLO_SERVER_USER \
+  --from-literal=password=supersecret
+```
+
+Next, install Spaces with the necessary settings. The first group of `syncer.*`
+flags configures the syncers' credentials, and the `apollo.*` group the
+server's:
+
+```shell
+export PG_URL=your-postgres-host:5432
+export PG_POOLED_URL=your-pgbouncer-host:5432 # this could be the same as above
+
+helm ... \
+  --set "features.alpha.apollo.enabled=true" \
+  --set "apollo.apollo.storage.postgres.create=false" \
+  --set "apollo.apollo.storage.postgres.connection.url=$PG_URL" \
+  --set "apollo.apollo.storage.postgres.connection.credentials.secret.name=spaces-apollo-pg-app" \
+  --set "apollo.apollo.storage.postgres.connection.credentials.format=basicauth" \
+  --set "apollo.apollo.storage.postgres.connection.ca.name=spaces-apollo-pg-ca" \
+  --set "apollo.apollo.storage.postgres.connection.syncer.url=$PG_POOLED_URL" \
+  --set "apollo.apollo.storage.postgres.connection.syncer.credentials.format=basicauth" \
+  --set "apollo.apollo.storage.postgres.connection.syncer.credentials.user=$APOLLO_SYNCER_USER" \
+  --set "apollo.apollo.storage.postgres.connection.syncer.credentials.secret.name=spaces-apollo-pg-syncer" \
+  --set "apollo.apollo.storage.postgres.connection.apollo.credentials.format=basicauth" \
+  --set "apollo.apollo.storage.postgres.connection.apollo.credentials.user=$APOLLO_SERVER_USER" \
+  --set "apollo.apollo.storage.postgres.connection.apollo.credentials.secret.name=spaces-apollo-pg-apollo" \
+  --set "apollo.apollo.storage.postgres.connection.apollo.url=$PG_POOLED_URL"
+```
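+
+Before or after installing, you can sanity-check connectivity to the instance
+with `psql` — a hypothetical check, assuming network access and the
+credentials and `upbound` database described above:
+
+```shell
+psql "host=your-postgres-host port=5432 user=apollo dbname=upbound sslmode=verify-full" -c 'SELECT 1;'
+```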
+
+## Using the Query API
+
+
+See the [Query API documentation][query-api-documentation] for more information on how to use the Query API.
+
+
+
+
+[postgresql-setup]: #self-hosted-postgresql-configuration
+[up-cli-installed]: /manuals/cli/overview
+[query-api-documentation]: /spaces/howtos/query-api
+
+[helm-chart-reference]: /reference/helm-reference
+[kubectl-conventions]: https://kubernetes.io/docs/reference/kubectl/generated/kubectl_get/
+[supported-ways]: https://cloudnative-pg.io/documentation/current/installation_upgrade/
+[supported-ways-1]: https://cloudnative-pg.io/documentation/current/installation_upgrade/
+[cloudnativepg-documentation]: https://cloudnative-pg.io/documentation/1.24/storage/#configuration-via-a-pvc-template
+[postgresql-cluster]: https://cloudnative-pg.io/documentation/1.24/resource_management/
+[pooler]: https://cloudnative-pg.io/documentation/1.24/connection_pooling/#pod-templates
+[postgresql-cluster-2]: https://cloudnative-pg.io/documentation/1.24/replication/
+[pooler-3]: https://cloudnative-pg.io/documentation/1.24/connection_pooling/#high-availability-ha
+[postgresql-cluster-4]: https://cloudnative-pg.io/documentation/1.24/operator_capability_levels/#override-of-operand-images-through-the-crd
+[pooler-5]: https://cloudnative-pg.io/documentation/1.24/connection_pooling/#pod-templates
+[cloudnativepg-documentation-6]: https://cloudnative-pg.io/documentation/1.24/postgresql_conf/
diff --git a/spaces_versioned_docs/version-v1.13/howtos/self-hosted/scaling-resources.md b/spaces_versioned_docs/version-v1.13/howtos/self-hosted/scaling-resources.md
new file mode 100644
index 000000000..7bb04d2c2
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.13/howtos/self-hosted/scaling-resources.md
@@ -0,0 +1,184 @@
+---
+title: Scaling vCluster and etcd Resources
+weight: 950
+description: A guide for scaling vCluster and etcd resources in self-hosted Spaces
+aliases:
+  - /all-spaces/self-hosted-spaces/scaling-resources
+  - /spaces/scaling-resources
+---
+
+In large workloads or control plane migrations, you may encounter
+performance-impacting resource constraints. This guide explains how to scale
+vCluster and `etcd` resources for optimal performance in your self-hosted Space.
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Scaling procedures are consistent across versions.
+:::
+
+## Signs of resource constraints
+
+You may need to scale your vCluster or `etcd` resources if you observe:
+
+- API server timeout errors such as `http: Handler timeout`
+- Error messages about `too many requests` and requests to `try again later`
+- Operations like provider installation failing with errors like `cannot apply provider package secret`
+- vCluster pods experiencing continuous restarts
+- Degraded API performance at high resource volume
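+
+To check for these signs, you can inspect pod restarts and live resource
+consumption — a quick sketch, assuming the metrics server is installed and
+that the vCluster and `etcd` pod names contain those strings:
+
+```shell
+kubectl get pods -A | grep -E 'vcluster|etcd'
+kubectl top pods -A --containers | grep -E 'vcluster|etcd'
+```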
+
+## Scaling vCluster resources
+
+The vCluster component handles Kubernetes API requests for your control planes.
+Deployments with multiple control planes or providers may exceed default
+resource allocations.
+
+```yaml
+# Default settings
+controlPlanes.vcluster.resources.limits.cpu: "3000m"
+controlPlanes.vcluster.resources.limits.memory: "3960Mi"
+controlPlanes.vcluster.resources.requests.cpu: "170m"
+controlPlanes.vcluster.resources.requests.memory: "1320Mi"
+```
+
+For larger workloads, like migrating from an existing control plane with several
+providers, increase these resource limits in your Spaces `values.yaml` file.
+
+```yaml
+controlPlanes:
+  vcluster:
+    resources:
+      limits:
+        cpu: "4000m"    # Increase to 4 cores
+        memory: "6Gi"   # Increase to 6GB memory
+      requests:
+        cpu: "500m"     # Increase baseline CPU request
+        memory: "2Gi"   # Increase baseline memory request
+```
+
+## Scaling `etcd` storage
+
+Kubernetes API performance depends heavily on `etcd`, which can become an IOPS
+(input/output operations per second) bottleneck. Upbound allocates `50Gi`
+volumes for `etcd` in cloud environments to ensure adequate IOPS performance.
+
+```yaml
+# Default setting
+controlPlanes.etcd.persistence.size: "5Gi"
+```
+
+For production environments or when migrating large control planes, increase
+`etcd` volume size and specify an appropriate storage class:
+
+```yaml
+controlPlanes:
+  etcd:
+    persistence:
+      size: "50Gi"                  # Recommended for production
+      storageClassName: "fast-ssd"  # Use a high-performance storage class
+```
+
+### Storage class considerations
+
+For AWS:
+- Use GP3 volumes with adequate IOPS
+- For AWS GP3 volumes, IOPS scale with volume size (3000 IOPS baseline)
+- For optimal performance, provision at least 32Gi to support up to 16,000 IOPS
+
+For GCP and Azure:
+- Use SSD-based persistent disk types for optimal performance
+- Consider premium storage options for high-throughput workloads
+
+## Scaling Crossplane resources
+
+Crossplane manages provider resources in your control planes. You may need to increase provider resources for larger deployments:
+
+```yaml
+# Default settings
+controlPlanes.uxp.resourcesCrossplane.requests.cpu: "370m"
+controlPlanes.uxp.resourcesCrossplane.requests.memory: "400Mi"
+```
+
+For environments with many providers or managed resources:
+
+```yaml
+controlPlanes:
+  uxp:
+    resourcesCrossplane:
+      limits:
+        cpu: "1000m"      # Add CPU limit
+        memory: "1Gi"     # Add memory limit
+      requests:
+        cpu: "500m"       # Increase CPU request
+        memory: "512Mi"   # Increase memory request
+```
+
+## High availability configuration
+
+For production environments, enable High Availability mode to ensure resilience:
+
+```yaml
+controlPlanes:
+  ha:
+    enabled: true
+```
+
+## Best practices for migration scenarios
+
+When migrating from existing control planes into a self-hosted Space:
+
+1. **Pre-scale resources**: Scale up resources before performing the migration
+2. **Monitor resource usage**: Watch resource consumption during and after migration with `kubectl top pods` (see the sketch after this list)
+3. **Scale incrementally**: If issues persist, increase resources incrementally until performance stabilizes
+4. **Consider storage performance**: `etcd` is sensitive to storage I/O performance
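+
+A minimal monitoring sketch for step 2, assuming your control plane's host
+namespace follows the `mxp-<GUID>-system` pattern:
+
+```bash
+# Watch per-container consumption while the migration runs
+kubectl top pods -n mxp-<GUID>-system --containers
+```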
+
+## Helm values configuration
+
+Apply these settings through your Spaces Helm values file:
+
+```yaml
+controlPlanes:
+  vcluster:
+    resources:
+      limits:
+        cpu: "4000m"
+        memory: "6Gi"
+      requests:
+        cpu: "500m"
+        memory: "2Gi"
+  etcd:
+    persistence:
+      size: "50Gi"
+      storageClassName: "gp3"  # Use your cloud provider's fast storage class
+  uxp:
+    resourcesCrossplane:
+      limits:
+        cpu: "1000m"
+        memory: "1Gi"
+      requests:
+        cpu: "500m"
+        memory: "512Mi"
+  ha:
+    enabled: true  # for production environments
+```
+
+Apply the configuration using Helm:
+
+```bash
+helm upgrade --install spaces oci://xpkg.upbound.io/spaces-artifacts/spaces \
+  -f values.yaml \
+  -n upbound-system
+```
+
+## Considerations
+
+- **Provider count**: Each provider adds resource overhead; consider using provider families to optimize resource usage
+- **Managed resources**: The number of managed resources impacts CPU usage more than memory
+- **Vertical pod autoscaling**: Consider using vertical pod autoscaling in Kubernetes to automatically adjust resources based on usage
+- **Storage performance**: Storage performance is as important as capacity for `etcd`
+- **Network latency**: Low-latency connections between components improve performance
+
+
diff --git a/spaces_versioned_docs/version-v1.13/howtos/self-hosted/self-hosted-spaces-deployment.md b/spaces_versioned_docs/version-v1.13/howtos/self-hosted/self-hosted-spaces-deployment.md
new file mode 100644
index 000000000..e549e3939
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.13/howtos/self-hosted/self-hosted-spaces-deployment.md
@@ -0,0 +1,461 @@
+---
+title: Deployment Workflow
+sidebar_position: 3
+description: A quickstart guide for Upbound Spaces
+tier: "business"
+---
+import GlobalLanguageSelector, { CodeBlock } from '@site/src/components/GlobalLanguageSelector';
+
+
+
+This guide deploys a self-hosted Upbound cluster in AWS.
+
+
+
+This guide deploys a self-hosted Upbound cluster in Azure.
+
+
+
+This guide deploys a self-hosted Upbound cluster in GCP.
+
+
+
+Disconnected Spaces allows you to host control planes in your preferred environment.
+
+## Prerequisites
+
+To get started deploying your own Disconnected Space, you need:
+
+- An Upbound organization account string, provided by your Upbound account representative
+- A `token.json` license, provided by your Upbound account representative
+
+
+- An AWS account and the AWS CLI
+
+
+- An Azure account and the Azure CLI
+
+
+- A GCP account and the GCP CLI
+
+
+:::important
+Disconnected Spaces is a business-critical feature of Upbound and requires a license token to successfully complete the installation. [Contact Upbound][contact-upbound] if you want to try out Upbound with Disconnected Spaces.
+:::
+
+## Provision the hosting environment
+
+### Create a cluster
+
+
+Configure the name and target region you want the EKS cluster deployed to.
+
+```ini
+export SPACES_CLUSTER_NAME=upbound-space-quickstart
+export SPACES_REGION=us-east-1
+```
+
+Provision a 3-node cluster using eksctl.
+
+```bash
+cat <
+```
+
+
+
+Configure the name and target region you want the AKS cluster deployed to.
+
+```ini
+export SPACES_RESOURCE_GROUP_NAME=upbound-space-quickstart
+export SPACES_CLUSTER_NAME=upbound-space-quickstart
+export SPACES_LOCATION=westus
+```
+
+Provision a new Azure resource group.
+ +```bash +az group create --name ${SPACES_RESOURCE_GROUP_NAME} --location ${SPACES_LOCATION} +``` + +Provision a 3-node cluster. + +```bash +az aks create -g ${SPACES_RESOURCE_GROUP_NAME} -n ${SPACES_CLUSTER_NAME} \ + --enable-managed-identity \ + --node-count 3 \ + --node-vm-size Standard_D4s_v4 \ + --enable-addons monitoring \ + --enable-msi-auth-for-monitoring \ + --generate-ssh-keys \ + --network-plugin kubenet \ + --network-policy calico +``` + +Get the kubeconfig of your AKS cluster. + +```bash +az aks get-credentials --resource-group ${SPACES_RESOURCE_GROUP_NAME} --name ${SPACES_CLUSTER_NAME} +``` + + + + + +Configure the name and target region you want the GKE cluster deployed to. + +```ini +export SPACES_PROJECT_NAME=upbound-spaces-project +export SPACES_CLUSTER_NAME=upbound-spaces-quickstart +export SPACES_LOCATION=us-west1-a +``` + +Create a new project and set it as the current project. + +```bash +gcloud projects create ${SPACES_PROJECT_NAME} +gcloud config set project ${SPACES_PROJECT_NAME} +``` + +Provision a 3-node cluster. + +```bash +gcloud container clusters create ${SPACES_CLUSTER_NAME} \ + --enable-network-policy \ + --num-nodes=3 \ + --zone=${SPACES_LOCATION} \ + --machine-type=e2-standard-4 +``` + +Get the kubeconfig of your GKE cluster. + +```bash +gcloud container clusters get-credentials ${SPACES_CLUSTER_NAME} --zone=${SPACES_LOCATION} +``` + + + +## Configure the pre-install + +### Set your Upbound organization account details + +Set your Upbound organization account string as an environment variable for use in future steps + +```ini +export UPBOUND_ACCOUNT= +``` + +### Set up pre-install configurations + +Export the path of the license token JSON file provided by your Upbound account representative. + +```ini {copy-lines="2"} +# Change the path to where you saved the token. +export SPACES_TOKEN_PATH="/path/to/token.json" +``` + +Set the version of Spaces software you want to install. + +```ini +export SPACES_VERSION= +``` + +Set the router host and cluster type. The `SPACES_ROUTER_HOST` is the domain name that's used to access the control plane instances. It's used by the ingress controller to route requests. + +```ini +export SPACES_ROUTER_HOST="proxy.upbound-127.0.0.1.nip.io" +``` + +:::important +Make sure to replace the placeholder text in `SPACES_ROUTER_HOST` and provide a real domain that you own. +::: + + +## Install the Spaces software + + +### Install cert-manager + +Install cert-manager. + +```bash +kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.11.0/cert-manager.yaml +kubectl wait deployment -n cert-manager cert-manager-webhook --for condition=Available=True --timeout=360s +``` + + + +### Install ALB Load Balancer + +```bash +helm install aws-load-balancer-controller aws-load-balancer-controller --namespace kube-system \ + --repo https://aws.github.io/eks-charts \ + --set clusterName=${SPACES_CLUSTER_NAME} \ + --set serviceAccount.create=false \ + --set serviceAccount.name=aws-load-balancer-controller \ + --wait +``` + + + +### Install ingress-nginx + +Starting with Spaces v1.10.0, you need to configure the ingress-nginx +controller to allow SSL-passthrough mode. You can do so by passing the +`--enable-ssl-passthrough=true` command-line option to the controller. 
+The following Helm install command enables this with the `controller.extraArgs` +parameter: + + + +```bash +helm upgrade --install ingress-nginx ingress-nginx \ + --create-namespace --namespace ingress-nginx \ + --repo https://kubernetes.github.io/ingress-nginx \ + --version 4.12.1 \ + --set 'controller.service.type=LoadBalancer' \ + --set 'controller.extraArgs.enable-ssl-passthrough=true' \ + --set 'controller.service.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-type=external' \ + --set 'controller.service.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-scheme=internet-facing' \ + --set 'controller.service.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-nlb-target-type=ip' \ + --set 'controller.service.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-healthcheck-protocol=http' \ + --set 'controller.service.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-healthcheck-path=/healthz' \ + --set 'controller.service.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-healthcheck-port=10254' \ + --wait +``` + + + + + +```bash +helm upgrade --install ingress-nginx ingress-nginx \ + --create-namespace --namespace ingress-nginx \ + --repo https://kubernetes.github.io/ingress-nginx \ + --version 4.12.1 \ + --set 'controller.service.type=LoadBalancer' \ + --set 'controller.extraArgs.enable-ssl-passthrough=true' \ + --set 'controller.service.annotations.service\.beta\.kubernetes\.io/azure-load-balancer-health-probe-request-path=/healthz' \ + --wait +``` + + + + + +```bash +helm upgrade --install ingress-nginx ingress-nginx \ + --create-namespace --namespace ingress-nginx \ + --repo https://kubernetes.github.io/ingress-nginx \ + --version 4.12.1 \ + --set 'controller.service.type=LoadBalancer' \ + --set 'controller.extraArgs.enable-ssl-passthrough=true' \ + --wait +``` + + + +### Install Upbound Spaces software + +Create an image pull secret so that the cluster can pull Upbound Spaces images. + +```bash +kubectl create ns upbound-system +kubectl -n upbound-system create secret docker-registry upbound-pull-secret \ + --docker-server=https://xpkg.upbound.io \ + --docker-username="$(jq -r .accessId $SPACES_TOKEN_PATH)" \ + --docker-password="$(jq -r .token $SPACES_TOKEN_PATH)" +``` + +Log in with Helm to be able to pull chart images for the installation commands. + +```bash +jq -r .token $SPACES_TOKEN_PATH | helm registry login xpkg.upbound.io -u $(jq -r .accessId $SPACES_TOKEN_PATH) --password-stdin +``` + +Install the Spaces software. + +```bash +helm -n upbound-system upgrade --install spaces \ + oci://xpkg.upbound.io/spaces-artifacts/spaces \ + --version "${SPACES_VERSION}" \ + --set "ingress.host=${SPACES_ROUTER_HOST}" \ + --set "account=${UPBOUND_ACCOUNT}" \ + --set "authentication.hubIdentities=true" \ + --set "authorization.hubRBAC=true" \ + --wait +``` + +### Create a DNS record + +:::important +If you chose to create a public ingress, you also need to create a DNS record for the load balancer of the public facing ingress. Do this before you create your first control plane. +::: + +Create a DNS record for the load balancer of the public facing ingress. 
To get the address for the Ingress, run the following: + + + +```bash +kubectl get ingress \ + -n upbound-system mxe-router-ingress \ + -o jsonpath='{.status.loadBalancer.ingress[0].hostname}' +``` + + + + + +```bash +kubectl get ingress \ + -n upbound-system mxe-router-ingress \ + -o jsonpath='{.status.loadBalancer.ingress[0].ip}' +``` + + + + + +```bash +kubectl get ingress \ + -n upbound-system mxe-router-ingress \ + -o jsonpath='{.status.loadBalancer.ingress[0].ip}' +``` + + + +If the preceding command doesn't return a load balancer address then your provider may not have allocated it yet. Once it's available, add a DNS record for the `ROUTER_HOST` to point to the given load balancer address. If it's an IPv4 address, add an A record. If it's a domain name, add a CNAME record. + +## Configure the up CLI + +With your kubeconfig pointed at the Kubernetes cluster where you installed +Upbound Spaces, create a new profile in the `up` CLI. This profile interacts +with your Space: + +```bash +up profile create --use ${SPACES_CLUSTER_NAME} --type=disconnected --organization ${UPBOUND_ACCOUNT} +``` + +Optionally, log in to your Upbound account using the new profile so you can use the Upbound Marketplace with this profile as well: + +```bash +up login +``` + + +## Connect to your Space + + +Use `up ctx` to create a kubeconfig context pointed at your new Space: + +```bash +up ctx disconnected/$(kubectl config current-context) +``` + +## Create your first control plane + +You can now create a control plane with the `up` CLI: + +```bash +up ctp create ctp1 +``` + +You can also create a control plane with kubectl: + +```yaml +cat < +```yaml +observability: + spacesCollector: + env: + - name: API_KEY + valueFrom: + secretKeyRef: + name: my-secret + key: api-key + config: + exporters: + otlphttp: + endpoint: "" + headers: + api-key: ${env:API_KEY} + exportPipeline: + logs: + - otlphttp + metrics: + - otlphttp + traces: + - otlphttp +``` + + +You can export metrics, logs, and traces from your Crossplane installation, Spaces +infrastructure (controller, API, router, etc.), provider-helm, and +provider-kubernetes. + +### Router metrics + +The Spaces router component uses Envoy as a reverse proxy and exposes detailed +metrics about request handling, circuit breakers, and connection pooling. +Upbound collects these metrics in your Space after you enable Space-level +observability. + +Envoy metrics in Upbound include: + +- **Upstream cluster metrics** - Request status codes, timeouts, retries, and latency for traffic to control planes and services +- **Circuit breaker metrics** - Connection and request circuit breaker state for both `DEFAULT` and `HIGH` priority levels +- **Downstream listener metrics** - Client connections and requests received +- **HTTP connection manager metrics** - End-to-end HTTP request processing and latency + +For a complete list of available router metrics and example PromQL queries, see the [Router metrics reference][router-ref]. + +### Router tracing + +The Spaces router generates distributed traces through OpenTelemetry integration, +providing end-to-end visibility into request flow across the system. Use these +traces to debug latency issues, understand request paths, and correlate errors +across services. 
+
+The router uses:
+
+- **Protocol**: OTLP (OpenTelemetry Protocol) over gRPC
+- **Service name**: `spaces-router`
+- **Transport**: TLS-encrypted connection to telemetry collector
+
+#### Trace configuration
+
+Enable tracing and configure the sampling rate with the following Helm values:
+
+```yaml
+observability:
+  enabled: true
+  tracing:
+    enabled: true
+    sampling:
+      rate: 0.1 # Sample 10% of new traces (0.0-1.0)
+```
+
+The sampling behavior depends on whether a parent trace context exists:
+
+- **With parent context**: If a `traceparent` header is present, the parent's
+  sampling decision is respected, enabling proper distributed tracing across services.
+- **Root spans**: For new traces without a parent, Envoy samples based on
+  `x-request-id` hashing. The default sampling rate is 10%.
+
+#### TLS configuration for external collectors
+
+To send traces to an external OTLP collector, configure the endpoint and TLS settings:
+
+```yaml
+observability:
+  enabled: true
+  tracing:
+    enabled: true
+    endpoint: "otlp-gateway.example.com"
+    port: 443
+    tls:
+      caBundleSecretRef: "custom-ca-secret"
+```
+
+If `caBundleSecretRef` is set, the router uses the CA bundle from the referenced
+Kubernetes secret. The secret must contain a key named `ca.crt` with the
+PEM-encoded CA bundle. If not set, the router uses the Spaces CA for the
+in-cluster collector.
+
+#### Custom trace tags
+
+The router adds custom tags to every span to enable filtering and grouping by
+control plane:
+
+| Tag | Source | Description |
+|-----|--------|-------------|
+| `controlplane.id` | `x-upbound-mxp-id` header | Control plane UUID |
+| `controlplane.name` | `x-upbound-mxp-host` header | Internal vcluster hostname |
+| `hostcluster.id` | `x-upbound-hostcluster-id` header | Host cluster identifier |
+
+These tags enable queries like "show all slow requests to control plane X" or
+"find errors for control planes in host cluster Y."
+
+#### Example trace
+
+The following example shows the attributes from a successful GET request:
+
+```text
+Span: ingress
+├─ Service: spaces-router
+├─ Duration: 8.025ms
+├─ Attributes:
+│  ├─ http.method: GET
+│  ├─ http.status_code: 200
+│  ├─ upstream_cluster: ctp-b2b37aaa-ee55-492c-ba0c-4d561a6325fa-api-cluster
+│  ├─ controlplane.id: b2b37aaa-ee55-492c-ba0c-4d561a6325fa
+│  ├─ controlplane.name: vcluster.mxp-b2b37aaa-ee55-492c-ba0c-4d561a6325fa-system
+│  └─ response_size: 1827
+```
+
+## Available metrics
+
+Space-level observability collects metrics from multiple infrastructure components:
+
+### Infrastructure component metrics
+
+- Crossplane controller metrics
+- Spaces controller, API, and router metrics
+- Provider metrics (provider-helm, provider-kubernetes)
+
+### Router metrics
+
+The router component exposes Envoy proxy metrics for monitoring traffic flow and
+service health.
+Key metric categories include:
+
+- `envoy_cluster_upstream_rq_*` - Upstream request metrics (status codes, timeouts, retries, latency)
+- `envoy_cluster_circuit_breakers_*` - Circuit breaker state and capacity
+- `envoy_listener_downstream_*` - Client connection and request metrics
+- `envoy_http_downstream_*` - HTTP request processing metrics
+
+Example query to monitor total request rate:
+
+```promql
+sum(rate(envoy_cluster_upstream_rq_total{job="spaces-router-envoy"}[5m]))
+```
+
+Example query for P95 latency:
+
+```promql
+histogram_quantile(
+  0.95,
+  sum by (le) (
+    rate(envoy_cluster_upstream_rq_time_bucket{job="spaces-router-envoy"}[5m])
+  )
+)
+```
+
+For detailed router metrics documentation and more query examples, see the [Router metrics reference][router-ref].
+
+
+## OpenTelemetryCollector image
+
+
+Control plane (`SharedTelemetry`) and Space observability deploy the same custom
+OpenTelemetry Collector image. The OpenTelemetry Collector image supports the
+`otlphttp`, `datadog`, and `debug` exporters.
+
+For more information on observability configuration, review the [Helm chart reference][helm-chart-reference].
+
+## Observability in control planes
+
+Read the [observability documentation][observability-documentation] to learn
+about the features Upbound offers for collecting telemetry from control planes.
+
+
+## Router metrics reference {#router-ref}
+
+To avoid overwhelming observability tools with hundreds of Envoy metrics, an
+allow-list filters metrics to only the following metric families.
+
+### Upstream cluster metrics
+
+Metrics tracking requests sent from Envoy to configured upstream clusters.
+Individual control planes, spaces-api, and other services are each considered
+an upstream cluster. Use these metrics to monitor service health, identify
+upstream errors, and measure backend latency.
+
+| Metric | Description |
+|--------|-------------|
+| `envoy_cluster_upstream_rq_xx_total` | HTTP status codes (2xx, 3xx, 4xx, 5xx) with label `envoy_response_code_class` |
+| `envoy_cluster_upstream_rq_timeout_total` | Requests that timed out waiting for upstream |
+| `envoy_cluster_upstream_rq_retry_limit_exceeded_total` | Requests that exhausted retry attempts |
+| `envoy_cluster_upstream_rq_total` | Total upstream requests |
+| `envoy_cluster_upstream_rq_time_bucket` | Latency histogram (for P50/P95/P99 calculations) |
+| `envoy_cluster_upstream_rq_time_sum` | Sum of request durations |
+| `envoy_cluster_upstream_rq_time_count` | Count of requests |
+
+### Circuit breaker metrics
+
+Metrics tracking circuit breaker state and remaining capacity. Circuit breakers
+prevent cascading failures by limiting connections and concurrent requests to
+unhealthy upstreams. Two priority levels exist: `DEFAULT` for watch requests and
+`HIGH` for API requests.
+
+| Name | Description |
+|--------|-------------|
+| `envoy_cluster_circuit_breakers_default_cx_open` | `DEFAULT` priority connection circuit breaker open (gauge) |
+| `envoy_cluster_circuit_breakers_default_rq_open` | `DEFAULT` priority request circuit breaker open (gauge) |
+| `envoy_cluster_circuit_breakers_default_remaining_cx` | Available `DEFAULT` priority connections (gauge) |
+| `envoy_cluster_circuit_breakers_default_remaining_rq` | Available `DEFAULT` priority request slots (gauge) |
+| `envoy_cluster_circuit_breakers_high_cx_open` | `HIGH` priority connection circuit breaker open (gauge) |
+| `envoy_cluster_circuit_breakers_high_rq_open` | `HIGH` priority request circuit breaker open (gauge) |
+| `envoy_cluster_circuit_breakers_high_remaining_cx` | Available `HIGH` priority connections (gauge) |
+| `envoy_cluster_circuit_breakers_high_remaining_rq` | Available `HIGH` priority request slots (gauge) |
+
+### Downstream listener metrics
+
+Metrics tracking requests received from clients such as kubectl and API consumers.
+Use these metrics to monitor client connection patterns, overall request volume,
+and responses sent to external users.
+
+| Name | Description |
+|--------|-------------|
+| `envoy_listener_downstream_rq_xx_total` | HTTP status codes for responses sent to clients |
+| `envoy_listener_downstream_rq_total` | Total requests received from clients |
+| `envoy_listener_downstream_cx_total` | Total connections from clients |
+| `envoy_listener_downstream_cx_active` | Currently active client connections (gauge) |
+
+### HTTP connection manager metrics
+
+Metrics from Envoy's HTTP connection manager tracking end-to-end request
+processing. These metrics provide a comprehensive view of the HTTP request
+lifecycle including status codes and client-perceived latency.
+
+| Name | Description |
+|--------|-------------|
+| `envoy_http_downstream_rq_xx` | HTTP status codes (note: no `_total` suffix for this metric family) |
+| `envoy_http_downstream_rq_total` | Total HTTP requests received |
+| `envoy_http_downstream_rq_time_bucket` | Downstream request latency histogram |
+| `envoy_http_downstream_rq_time_sum` | Sum of downstream request durations |
+| `envoy_http_downstream_rq_time_count` | Count of downstream requests |
+
+[router-ref]: #router-ref
+[observability-documentation]: /spaces/howtos/observability
+[opentelemetry-collector]: https://opentelemetry.io/docs/collector/
+[opentelemetry-operator]: https://opentelemetry.io/docs/kubernetes/operator/
+[helm-chart-reference]: /reference/helm-reference
diff --git a/spaces_versioned_docs/version-v1.13/howtos/self-hosted/spaces-management.md b/spaces_versioned_docs/version-v1.13/howtos/self-hosted/spaces-management.md
new file mode 100644
index 000000000..3df61c306
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.13/howtos/self-hosted/spaces-management.md
@@ -0,0 +1,219 @@
+---
+title: Interacting with Disconnected Spaces
+sidebar_position: 10
+description: Common operations in Spaces
+---
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Spaces management procedures are consistent across versions.
+
+For version compatibility details, see the documentation for your Spaces version.
+:::
+
+## Spaces management
+
+### Create a Space
+
+To install an Upbound Space into a cluster, it's recommended that you dedicate an entire Kubernetes cluster for the Space. You can use [up space init][up-space-init] to install an Upbound Space.
+Below is an example:
+
+```bash
+up space init "v1.9.0"
+```
+:::tip
+For a full guide to get started with Spaces, read the [quickstart][quickstart] guide.
+:::
+
+You can also install the helm chart for Spaces directly. For a Spaces install to succeed, you must first install and configure some prerequisites. These include:
+
+- UXP
+- provider-helm and provider-kubernetes
+- cert-manager
+
+Furthermore, the Spaces chart requires a pull secret, which Upbound must provide to you.
+
+```bash
+helm -n upbound-system upgrade --install spaces \
+  oci://xpkg.upbound.io/spaces-artifacts/spaces \
+  --version "v1.9.0" \
+  --set "ingress.host=your-host.com" \
+  --set "clusterType=eks" \
+  --set "account=your-upbound-account" \
+  --wait
+```
+For a complete tutorial of the helm install, read one of the deployment guides for [AWS][aws], [Azure][azure], or [GCP][gcp], which cover the step-by-step process.
+
+### Upgrade a Space
+
+To upgrade a Space from one version to the next, use [up space upgrade][up-space-upgrade]. Spaces supports upgrading from version `x.N.*` to version `x.N+1.*`.
+
+```bash
+up space upgrade "v1.9.0"
+```
+
+You can also upgrade a Space by manually bumping the Helm chart version. Before
+upgrading, review the release notes for any breaking changes or
+special requirements:
+
+1. Review the release notes for the target version in the [Spaces Release Notes][spaces-release-notes]
+2. Upgrade the Space by updating the helm chart version:
+
+```bash
+helm -n upbound-system upgrade spaces \
+  oci://xpkg.upbound.io/spaces-artifacts/spaces \
+  --version "v1.9.0" \
+  --reuse-values \
+  --wait
+```
+
+For major version upgrades or configuration changes, extract your current values
+and adjust:
+
+```bash
+# Extract current values to a file
+helm -n upbound-system get values spaces > spaces-values.yaml
+
+# Upgrade with modified values
+helm -n upbound-system upgrade spaces \
+  oci://xpkg.upbound.io/spaces-artifacts/spaces \
+  --version "v1.9.0" \
+  -f spaces-values.yaml \
+  --wait
+```
+
+### Downgrade a Space
+
+To roll back a Space from one version to the previous, use [up space upgrade][up-space-upgrade-1]. Spaces supports downgrading from version `x.N.*` to version `x.N-1.*`.
+
+```bash
+up space upgrade --rollback
+```
+
+You can also downgrade a Space manually using Helm by specifying an earlier version:
+
+```bash
+helm -n upbound-system upgrade spaces \
+  oci://xpkg.upbound.io/spaces-artifacts/spaces \
+  --version "v1.8.0" \
+  --reuse-values \
+  --wait
+```
+
+When downgrading, make sure to:
+1. Check the [release notes][release-notes] for specific downgrade instructions
+2. Verify compatibility between the downgraded Space and any control planes
+3. Back up any critical data before proceeding
+
+### Uninstall a Space
+
+To uninstall a Space from a Kubernetes cluster, use [up space destroy][up-space-destroy]. A destroy operation uninstalls core components and orphans control planes and their associated resources.
+
+```bash
+up space destroy
+```
+
+## Control plane management
+
+You can manage control planes in a Space via the [up CLI][up-cli] or the Spaces-local Kubernetes API. When you install a Space, it defines a new API type, `kind: ControlPlane`, that you can use to create and manage control planes in the Space.
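+
+For example, you can confirm the Space serves this API with a quick,
+illustrative check:
+
+```bash
+# List the API types registered under the spaces.upbound.io group
+kubectl api-resources --api-group=spaces.upbound.io
+```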
+
+### Create a control plane
+
+To create a control plane in a Space using `up`, run the following:
+
+```bash
+up ctp create ctp1
+```
+
+You can also declare a new control plane like the example below and apply it to your Spaces cluster:
+
+```yaml
+apiVersion: spaces.upbound.io/v1beta1
+kind: ControlPlane
+metadata:
+  name: ctp1
+  namespace: default
+spec:
+  writeConnectionSecretToRef:
+    name: kubeconfig-ctp1
+    namespace: default
+```
+
+This manifest:
+
+- Creates a new control plane in the space called `ctp1`.
+- Publishes the kubeconfig to connect to the control plane to a secret in the Spaces cluster, called `kubeconfig-ctp1`
+
+### Connect to a control plane
+
+To connect to a control plane in a Space using `up`, run the following:
+
+```bash
+up ctp connect new-control-plane
+```
+
+The command changes your kubeconfig's current context to the control plane you specify. If you want to change your kubeconfig back to a previous context, run:
+
+```bash
+up ctp disconnect
+```
+
+If you configured your control plane to publish connection details, you can also access it this way. Once the control plane is ready, use the secret (containing connection details) to connect to the API server of your control plane.
+
+```bash
+kubectl get secret <secret-name> -n default -o jsonpath='{.data.kubeconfig}' | base64 -d > /tmp/<control-plane-name>.yaml
+```
+
+Reference the kubeconfig whenever you want to interact directly with the API server of the control plane (vs the Space's API server):
+
+```bash
+kubectl get providers --kubeconfig=/tmp/<control-plane-name>.yaml
+```
+
+### Configure a control plane
+
+Spaces offers a built-in feature that allows you to connect a control plane to a Git source. This experience is like when a control plane runs in [Upbound's SaaS environment][upbound-s-saas-environment]. Upbound recommends using the built-in Git integration to drive configuration of your control planes in a Space.
+
+Learn more in the [Spaces Git integration][spaces-git-integration] documentation.
+
+### List control planes
+
+To list all control planes in a Space using `up`, run the following:
+
+```bash
+up ctp list
+```
+
+Or you can use Kubernetes-style semantics to list the control planes:
+
+```bash
+kubectl get controlplanes
+```
+
+
+### Delete a control plane
+
+To delete a control plane in a Space using `up`, run the following:
+
+```bash
+up ctp delete ctp1
+```
+
+Or you can use Kubernetes-style semantics to delete the control plane:
+
+```bash
+kubectl delete controlplane ctp1
+```
+
+
+[up-space-init]: /reference/cli-reference
+[quickstart]: /
+[aws]: /spaces/howtos/self-hosted/self-hosted-spaces-deployment
+[azure]: /spaces/howtos/self-hosted/self-hosted-spaces-deployment
+[gcp]: /spaces/howtos/self-hosted/self-hosted-spaces-deployment
+[up-space-upgrade]: /reference/cli-reference
+[spaces-release-notes]: /reference/release-notes/spaces
+[up-space-upgrade-1]: /reference/cli-reference
+[release-notes]: /reference/release-notes/spaces
+[up-space-destroy]: /reference/cli-reference
+[up-cli]: /reference/cli-reference
+[upbound-s-saas-environment]: /spaces/howtos/self-hosted/spaces-management
+[spaces-git-integration]: /spaces/howtos/self-hosted/gitops
diff --git a/spaces_versioned_docs/version-v1.13/howtos/self-hosted/troubleshooting.md b/spaces_versioned_docs/version-v1.13/howtos/self-hosted/troubleshooting.md
new file mode 100644
index 000000000..8d1ca6517
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.13/howtos/self-hosted/troubleshooting.md
@@ -0,0 +1,132 @@
+---
+title: Troubleshooting
+sidebar_position: 100
+description: A guide for troubleshooting an issue that occurs in a Space
+---
+
+Use the guidance below to find solutions for issues you encounter when deploying and using an Upbound Space. Use these tips as a supplement to the observability metrics discussed in the [Observability][observability] page.
+
+## General tips
+
+Most issues fall into two general categories:
+
+1. issues with the Spaces management plane
+2. issues on a control plane
+
+If your control plane doesn't reach a `Ready` state, it's indicative of the former. If your control plane is in a created and running state, but resources aren't reconciling, it's indicative of the latter.
+
+### Spaces component layout
+
+Run `kubectl get pods -A` against the cluster hosting a Space. You should see a variety of pods across several namespaces.
It should look something like this: + +```bash +NAMESPACE NAME READY STATUS RESTARTS AGE +cert-manager cert-manager-6d6769565c-mc5df 1/1 Running 0 25m +cert-manager cert-manager-cainjector-744bb89575-nw4fg 1/1 Running 0 25m +cert-manager cert-manager-webhook-759d6dcbf7-ps4mq 1/1 Running 0 25m +ingress-nginx ingress-nginx-controller-7f8ccfccc6-6szlp 1/1 Running 0 25m +kube-system coredns-5d78c9869d-4p477 1/1 Running 0 26m +kube-system coredns-5d78c9869d-pdxt6 1/1 Running 0 26m +kube-system etcd-kind-control-plane 1/1 Running 0 26m +kube-system kindnet-8s7pq 1/1 Running 0 26m +kube-system kube-apiserver-kind-control-plane 1/1 Running 0 26m +kube-system kube-controller-manager-kind-control-plane 1/1 Running 0 26m +kube-system kube-proxy-l68r8 1/1 Running 0 26m +kube-system kube-scheduler-kind-control-plane 1/1 Running 0 26m +local-path-storage local-path-provisioner-6bc4bddd6b-qsdjt 1/1 Running 0 26m +mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system coredns-5dc69d6447-f56rh-x-kube-system-x-vcluster 1/1 Running 0 21m +mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system crossplane-6b6d67bc66-6b8nx-x-upbound-system-x-vcluster 1/1 Running 0 20m +mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system crossplane-rbac-manager-78f6fc7cb4-pjkhc-x-upbound-s-12253c3c4e 1/1 Running 0 20m +mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system kube-state-metrics-7f8f4dcc5b-8p8c4 1/1 Running 0 22m +mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system mxp-gateway-68f546b9c8-xnz5j-x-upbound-system-x-vcluster 1/1 Running 0 20m +mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system mxp-ksm-config-54655667bb-hv9br 1/1 Running 0 22m +mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system mxp-readyz-5f7f97d967-b98bw 1/1 Running 0 22m +mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system otlp-collector-56d7d46c8d-g5sh5-x-upbound-system-x-vcluster 1/1 Running 0 20m +mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system vcluster-67c9fb8959-ppb2m 1/1 Running 0 22m +mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system vcluster-api-6bfbccc49d-ffgpj 1/1 Running 0 22m +mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system vcluster-controller-7cc6855656-8c46b 1/1 Running 0 22m +mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system vcluster-etcd-0 1/1 Running 0 22m +mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system vector-754b494b84-wljw4 1/1 Running 0 22m +mxp-system mxp-charts-chartmuseum-7587f77558-8tltb 1/1 Running 0 23m +upbound-system crossplane-b4dc7b4c9-6hjh5 1/1 Running 0 25m +upbound-system crossplane-contrib-provider-helm-ce18dd03e6e4-7945d8985-4gcwr 1/1 Running 0 24m +upbound-system crossplane-contrib-provider-kubernetes-1f1e32c1957d-577756gs2x4 1/1 Running 0 24m +upbound-system crossplane-rbac-manager-d8cb49cbc-gbvvf 1/1 Running 0 25m +upbound-system spaces-controller-6647677cf9-5zl5q 1/1 Running 0 24m +upbound-system spaces-router-bc78c96d7-kzts2 2/2 Running 0 24m +``` + +What you are seeing is: + +- Pods in the `upbound-system` namespace are components required to run the management plane of the Space. This includes the `spaces-controller`, `spaces-router`, and install of UXP. +- Pods in the `mxp-{GUID}-system` namespace are components that collectively power a control plane. Notable call outs include pod names that look like `vcluster-api-{GUID}` and `vcluster-controller-{GUID}`, which are integral components of a control plane. +- Pods in other notable namespaces, including `cert-manager` and `ingress-nginx`, are prerequisite components that support a Space's successful operation. 
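+
+To map these namespaces back to control planes, you can cross-reference the
+namespace GUIDs against the control planes in the Space (an illustrative check;
+the GUID-to-UID correspondence is an assumption to verify in your environment):
+
+```bash
+# List the per-control-plane host namespaces
+kubectl get ns -o name | grep '^namespace/mxp-'
+
+# Match the GUIDs against the control planes in the Space
+kubectl get controlplanes.spaces.upbound.io -A \
+  -o custom-columns='NAMESPACE:.metadata.namespace,NAME:.metadata.name,UID:.metadata.uid'
+```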
+
+
+### Troubleshooting tips for the Spaces management plane
+
+Start by getting the status of all the pods in a Space:
+
+1. Make sure the current context of your kubeconfig points at the Kubernetes cluster hosting your Space
+2. Get the status of all the pods in the Space:
+```bash
+kubectl get pods -A
+```
+3. Scan the `Status` column to see if any of the pods report a status besides `Running`.
+4. Scan the `Restarts` column to see if any of the pods have restarted.
+5. If you notice a Status other than `Running` or see pods that restarted, you should investigate their events by running
+```bash
+kubectl describe pod <pod-name> -n <namespace>
+```
+
+Next, inspect the status of objects and releases:
+
+1. Make sure the current context of your kubeconfig points at the Kubernetes cluster hosting your Space
+2. Inspect the objects in your Space. If any are unhealthy, describe those objects to get the events:
+```bash
+kubectl get objects
+```
+3. Inspect the releases in your Space. If any are unhealthy, describe those releases to get the events:
+```bash
+kubectl get releases
+```
+
+### Troubleshooting tips for control planes in a Space
+
+General troubleshooting in a control plane starts by fetching the events of the control plane:
+
+1. Make sure the current context of your kubeconfig points at the Kubernetes cluster hosting your Space
+2. Run the following to fetch your control planes.
+```bash
+kubectl get ctp
+```
+3. Describe the control plane by providing its name, found in the preceding instruction.
+```bash
+kubectl describe controlplanes.spaces.upbound.io <control-plane-name>
+```
+
+## Issues
+
+
+### Your control plane is stuck in a 'creating' state
+
+#### Error: unknown field "ports" in io.k8s.api.networking.v1.NetworkPolicySpec
+
+The Spaces software emits this error when it attempts to install a Helm release named `control-plane-host-policies`. The full error is:
+
+_CannotCreateExternalResource failed to install release: unable to build kubernetes objects from release manifest: error validating "": error validating data: ValidationError(NetworkPolicy.spec): unknown field "ports" in io.k8s.api.networking.v1.NetworkPolicySpec_
+
+This error may be caused by running a Space on an earlier version of Kubernetes than is supported (`v1.26` or later). To resolve this issue, upgrade the host Kubernetes cluster to `v1.26` or later.
+
+### Your Spaces install fails
+
+#### Error: You tried to install a Space on a previous Crossplane installation
+
+If you try to install a Space on an existing cluster that previously had Crossplane or UXP on it, you may encounter errors. Due to how the Spaces installer tests for the presence of UXP, it may detect orphaned CRDs that weren't cleaned up by the previous uninstall of Crossplane. You may need to manually [remove old Crossplane CRDs][remove-old-crossplane-crds] for the installer to properly detect the UXP prerequisite.
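+
+A quick, illustrative way to check for leftover CRDs before reinstalling:
+
+```bash
+# Any output here means a previous install left Crossplane CRDs behind
+kubectl get crds -o name | grep 'crossplane.io'
+```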
+
+
+[observability]: /spaces/howtos/observability
+[remove-old-crossplane-crds]: https://docs.crossplane.io/latest/guides/uninstall-crossplane/
diff --git a/spaces_versioned_docs/version-v1.13/howtos/self-hosted/use-argo.md b/spaces_versioned_docs/version-v1.13/howtos/self-hosted/use-argo.md
new file mode 100644
index 000000000..d58f7db44
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.13/howtos/self-hosted/use-argo.md
@@ -0,0 +1,228 @@
+---
+title: Use ArgoCD Plugin
+sidebar_position: 15
+description: A guide for integrating Argo with control planes in a Space.
+aliases:
+  - /all-spaces/self-hosted-spaces/use-argo
+  - /deploy/disconnected-spaces/use-argo-flux
+  - /all-spaces/self-hosted-spaces/use-argo-flux
+  - /connect/use-argo
+---
+
+
+:::info API Version Information
+This guide covers the ArgoCD Plugin, a preview feature available in all supported versions (v1.9-v1.14+) for self-hosted deployments.
+
+For details on GitOps patterns and related features across versions, see [Automation and GitOps Overview](/spaces/howtos/automation-and-gitops/overview/).
+:::
+
+:::important
+This feature is in preview and is off by default. To enable it, set `features.alpha.argocdPlugin.enabled=true` when installing Spaces:
+
+```bash
+up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
+  ... \
+  --set "features.alpha.argocdPlugin.enabled=true"
+```
+:::
+
+Spaces provides an optional plugin to assist with integrating a control plane in a Space with Argo CD. You must enable the plugin for the entire Space at Spaces install or upgrade time. The plugin's job is to propagate the connection details of each control plane in a Space to Argo CD. By default, Upbound stores these connection details in a Kubernetes secret named after the control plane. To run Argo CD across multiple namespaces, Upbound recommends enabling the `features.alpha.argocdPlugin.useUIDFormatForCTPSecrets` flag to use a UID-based format for secret names to avoid conflicts.
+
+:::tip
+For general guidance on integrating Upbound with GitOps flows, see [GitOps with Control Planes][gitops-with-control-planes].
+:::
+
+## On cluster Argo CD
+
+If you are running Argo CD on the same cluster as the Space, run the following to enable the plugin:
+
+```bash {hl_lines="3-4"}
+up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
+  --set "account=${UPBOUND_ACCOUNT}" \
+  --set "features.alpha.argocdPlugin.enabled=true" \
+  --set "features.alpha.argocdPlugin.useUIDFormatForCTPSecrets=true" \
+  --set "features.alpha.argocdPlugin.target.secretNamespace=argocd"
+```
+
+```bash {hl_lines="7-8"}
+helm -n upbound-system upgrade --install spaces \
+  oci://xpkg.upbound.io/spaces-artifacts/spaces \
+  --version "${SPACES_VERSION}" \
+  --set "ingress.host=${SPACES_ROUTER_HOST}" \
+  --set "account=${UPBOUND_ACCOUNT}" \
+  --set "features.alpha.argocdPlugin.enabled=true" \
+  --set "features.alpha.argocdPlugin.useUIDFormatForCTPSecrets=true" \
+  --set "features.alpha.argocdPlugin.target.secretNamespace=argocd" \
+  --wait
+```
+
+The important flags are:
+
+- `features.alpha.argocdPlugin.enabled=true`
+- `features.alpha.argocdPlugin.useUIDFormatForCTPSecrets=true`
+- `features.alpha.argocdPlugin.target.secretNamespace=argocd`
+
+The first flag enables the feature, the second switches control plane secret names to a UID-based format to avoid conflicts, and the third indicates the namespace on the cluster where you installed Argo CD.
+
+Be sure to [configure Argo][configure-argo] after it's installed.
+
+## External cluster Argo CD
+
+If you are running Argo CD on an external cluster from where you installed your Space, you need to provide some extra flags:
+
+```bash {hl_lines="3-7"}
+up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
+  --set "account=${UPBOUND_ACCOUNT}" \
+  --set "features.alpha.argocdPlugin.enabled=true" \
+  --set "features.alpha.argocdPlugin.useUIDFormatForCTPSecrets=true" \
+  --set "features.alpha.argocdPlugin.target.secretNamespace=argocd" \
+  --set "features.alpha.argocdPlugin.target.externalCluster.enabled=true" \
+  --set "features.alpha.argocdPlugin.target.externalCluster.secret.name=my-argo-cluster" \
+  --set "features.alpha.argocdPlugin.target.externalCluster.secret.key=kubeconfig"
+```
+
+```bash {hl_lines="7-11"}
+helm -n upbound-system upgrade --install spaces \
+  oci://xpkg.upbound.io/spaces-artifacts/spaces \
+  --version "${SPACES_VERSION}" \
+  --set "ingress.host=${SPACES_ROUTER_HOST}" \
+  --set "account=${UPBOUND_ACCOUNT}" \
+  --set "features.alpha.argocdPlugin.enabled=true" \
+  --set "features.alpha.argocdPlugin.useUIDFormatForCTPSecrets=true" \
+  --set "features.alpha.argocdPlugin.target.secretNamespace=argocd" \
+  --set "features.alpha.argocdPlugin.target.externalCluster.enabled=true" \
+  --set "features.alpha.argocdPlugin.target.externalCluster.secret.name=my-argo-cluster" \
+  --set "features.alpha.argocdPlugin.target.externalCluster.secret.key=kubeconfig" \
+  --wait
+```
+
+The extra flags are:
+
+- `features.alpha.argocdPlugin.target.externalCluster.enabled=true`
+- `features.alpha.argocdPlugin.target.externalCluster.secret.name=my-argo-cluster`
+- `features.alpha.argocdPlugin.target.externalCluster.secret.key=kubeconfig`
+
+These flags tell the plugin (running in Spaces) where your Argo CD instance is. After you've done this at install time, you also need to create a `Secret` on the Spaces cluster. This secret must contain a kubeconfig pointing to your Argo CD instance. The secret needs to be in the same namespace as the `spaces-controller`, which is `upbound-system`.
+
+Once you enable the plugin and configure it, the plugin automatically propagates connection details for your control planes to your Argo CD instance. You can then target the control plane and use Argo to sync Crossplane-related objects to it.
+
+Be sure to [configure Argo][configure-argo-1] after it's installed.
+
+## Configure Argo
+
+Argo's default configuration causes it to try to query for resource kinds that don't exist in control planes. You should configure Argo's [general configmap][general-configmap] to include the resource group/kinds which make sense in the context of control planes. For example, the concept of `nodes` isn't exposed in control planes.
+
+To configure Argo CD, connect to the cluster where you've installed it and edit the configmap:
+
+```bash
+kubectl edit configmap argocd-cm -n argocd
+```
+
+Adjust the resource inclusions and exclusions under the `data` field of the configmap:
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: argocd-cm
+  namespace: argocd
+data:
+  resource.exclusions: |
+    - apiGroups:
+      - "*"
+      kinds:
+      - "*"
+      clusters:
+      - "*"
+  resource.inclusions: |
+    - apiGroups:
+      - "*"
+      kinds:
+      - Provider
+      - Configuration
+      clusters:
+      - "*"
+```
+
+The preceding configuration causes Argo to exclude syncing **all** resource group/kinds for **all** control planes, except Crossplane `Providers` and `Configurations`. You're encouraged to adjust the `resource.inclusions` to include the types that make sense for your control plane, such as an `XRD` you've built with Crossplane. You're also encouraged to customize the `clusters` pattern to selectively apply these exclusions/inclusions to control planes (for example, `control-plane-prod-*`).
+
+## Control plane connection secrets
+
+To deploy control planes through Argo CD, you need to configure the `writeConnectionSecretToRef` field in your control plane spec. This field specifies where to store the control plane's `kubeconfig` and makes connection details available to Argo CD.
+
+### Basic Configuration
+
+In your control plane manifest, include the `writeConnectionSecretToRef` field:
+
+```yaml
+apiVersion: spaces.upbound.io/v1beta1
+kind: ControlPlane
+metadata:
+  name: my-control-plane
+  namespace: my-control-plane-group
+spec:
+  writeConnectionSecretToRef:
+    name: kubeconfig-my-control-plane
+    namespace: my-control-plane-group
+  # ... other control plane configuration
+```
+
+### Parameters
+
+The `writeConnectionSecretToRef` field requires two parameters:
+
+- `name`: A unique name for the secret containing the kubeconfig (`kubeconfig-my-control-plane`)
+- `namespace`: The Kubernetes namespace where you store the secret, which must match the metadata namespace. The system copies it into the `argocd` namespace when you set the `features.alpha.argocdPlugin.target.secretNamespace=argocd` configuration parameter.
+
+Control plane labels automatically propagate to the connection secret, which allows you to use label selectors in Argo CD for automated discovery and management.
+
+This configuration enables Argo CD to automatically discover and manage resources on your control planes.
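+
+As a sketch of the end-to-end flow, the following Argo CD `Application` targets
+a control plane the plugin registered. The destination `name`, the repository
+URL, and the path are assumptions; substitute your own values:
+
+```bash
+kubectl apply -n argocd -f - <<EOF
+apiVersion: argoproj.io/v1alpha1
+kind: Application
+metadata:
+  name: ctp1-config
+  namespace: argocd
+spec:
+  project: default
+  source:
+    repoURL: https://github.com/example/control-plane-config.git
+    targetRevision: main
+    path: .
+  destination:
+    name: ctp1 # the cluster name from the plugin-created secret
+  syncPolicy:
+    automated: {}
+EOF
+```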
+
+
+[gitops-with-control-planes]: /spaces/howtos/cloud-spaces/gitops
+[configure-argo]: #configure-argo
+[configure-argo-1]: #configure-argo
+[general-configmap]: https://argo-cd.readthedocs.io/en/stable/operator-manual/argocd-cm-yaml/
diff --git a/spaces_versioned_docs/version-v1.13/howtos/self-hosted/workload-id/_category_.json b/spaces_versioned_docs/version-v1.13/howtos/self-hosted/workload-id/_category_.json
new file mode 100644
index 000000000..c5ecc93f6
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.13/howtos/self-hosted/workload-id/_category_.json
@@ -0,0 +1,11 @@
+{
+  "label": "Workload Identity Configuration",
+  "position": 2,
+  "collapsed": true,
+  "customProps": {
+    "plan": "business"
+  }
+}
diff --git a/spaces_versioned_docs/version-v1.13/howtos/self-hosted/workload-id/backup-restore-config.md b/spaces_versioned_docs/version-v1.13/howtos/self-hosted/workload-id/backup-restore-config.md
new file mode 100644
index 000000000..935ca69ec
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.13/howtos/self-hosted/workload-id/backup-restore-config.md
@@ -0,0 +1,384 @@
+---
+title: Backup and Restore Workload ID
+weight: 1
+description: Configure workload identity for Spaces Backup and Restore
+---
+import GlobalLanguageSelector, { CodeBlock } from '@site/src/components/GlobalLanguageSelector';
+
+
+
+Workload-identity authentication lets you use access policies to grant your
+self-hosted Space cluster access to your cloud providers. Workload identity
+authentication grants temporary AWS credentials to your Kubernetes pod based on
+a service account. Assigning IAM roles and service accounts allows the pod to
+assume the IAM role dynamically and much more securely than static credentials.
+
+This guide walks you through creating an IAM trust role policy and applying it
+to your EKS cluster to handle backup and restore storage.
+
+
+
+Workload-identity authentication lets you use access policies to grant your
+self-hosted Space cluster access to your cloud providers. Workload identity
+authentication grants temporary Azure credentials to your Kubernetes pod based on
+a service account. Assigning managed identities and service accounts allows the pod to
+authenticate with Azure resources dynamically and much more securely than static credentials.
+
+This guide walks you through creating a managed identity and federated credential for your AKS
+cluster to handle backup and restore storage.
+
+
+
+Workload-identity authentication lets you use access policies to grant your
+self-hosted Space cluster access to your cloud providers. Workload identity
+authentication grants temporary GCP credentials to your Kubernetes pod based on
+a service account. Assigning IAM roles and service accounts allows the pod to
+access cloud resources dynamically and much more securely than static credentials.
+
+This guide walks you through configuring workload identity for your GKE
+cluster to handle backup and restore storage.
+
+
+
+## Prerequisites
+
+To set up workload identity, you'll need:
+
+- A self-hosted Space cluster
+- Administrator access in your cloud provider
+- Helm and `kubectl`
+
+## About the backup and restore component
+
+The `mxp-controller` component handles backup and restore workloads. It needs to
+access your cloud storage to store and retrieve backups. By default, this
+component runs in each control plane's host namespace.
+
+## Configuration
+
+Upbound supports workload-identity configurations in AWS with IAM Roles for
+Service Accounts and EKS pod identity association.
+
+#### IAM Roles for Service Accounts (IRSA)
+
+With IRSA, you can associate a Kubernetes service account in an EKS cluster with
+an AWS IAM role. Upbound authenticates workloads with that service account as
+the IAM role using temporary credentials instead of static role credentials.
+IRSA relies on the AWS STS `AssumeRoleWithWebIdentity` API to exchange OIDC ID
+tokens for the IAM role's temporary credentials. IRSA uses the
+`eks.amazonaws.com/role-arn` annotation to link the service account and the IAM
+role.
+
+First, create an IAM role with appropriate permissions to access your S3 bucket:
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Action": [
+        "s3:GetObject",
+        "s3:PutObject",
+        "s3:ListBucket",
+        "s3:DeleteObject"
+      ],
+      "Resource": [
+        "arn:aws:s3:::${YOUR_BACKUP_BUCKET}",
+        "arn:aws:s3:::${YOUR_BACKUP_BUCKET}/*"
+      ]
+    }
+  ]
+}
+```
+
+Next, ensure your EKS cluster has an OIDC identity provider:
+
+```shell
+eksctl utils associate-iam-oidc-provider --cluster ${YOUR_CLUSTER_NAME} --approve
+```
+
+Configure the IAM role trust policy with the namespace for each
+provisioned control plane.
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Principal": {
+        "Federated": "arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:oidc-provider/${YOUR_OIDC_PROVIDER}"
+      },
+      "Action": "sts:AssumeRoleWithWebIdentity",
+      "Condition": {
+        "StringEquals": {
+          "${YOUR_OIDC_PROVIDER}:aud": "sts.amazonaws.com",
+          "${YOUR_OIDC_PROVIDER}:sub": "system:serviceaccount:${YOUR_NAMESPACE}:mxp-controller"
+        }
+      }
+    }
+  ]
+}
```
+
+When you install or upgrade the Spaces Helm chart, pass the `--set` flag with
+the parameters for the Backup and Restore component:
+
+```shell
+--set controlPlanes.mxpController.serviceAccount.annotations."eks\.amazonaws\.com/role-arn"="${SPACES_BR_IAM_ROLE_ARN}"
+```
+
+This command allows the backup and restore component to authenticate with your
+dedicated IAM role in your EKS cluster environment.
+
+#### EKS pod identities
+
+Upbound also supports EKS Pod Identity configuration. EKS Pod Identities allow
+you to create a pod identity association with your Kubernetes namespace, a
+service account, and an IAM role, which allows the EKS control plane to
+automatically handle the credential exchange.
+
+First, create an IAM role with appropriate permissions to access your S3 bucket:
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Action": [
+        "s3:GetObject",
+        "s3:PutObject",
+        "s3:ListBucket",
+        "s3:DeleteObject"
+      ],
+      "Resource": [
+        "arn:aws:s3:::${YOUR_BACKUP_BUCKET}",
+        "arn:aws:s3:::${YOUR_BACKUP_BUCKET}/*"
+      ]
+    }
+  ]
+}
+```
+
+When you install or upgrade your Space with Helm, add the backup/restore values:
+
+```shell
+helm upgrade spaces spaces-helm-chart \
+  --set "billing.enabled=true" \
+  --set "backup.enabled=true" \
+  --set "backup.storage.provider=aws" \
+  --set "backup.storage.aws.region=${YOUR_AWS_REGION}" \
+  --set "backup.storage.aws.bucket=${YOUR_BACKUP_BUCKET}"
+```
+
+After Upbound provisions your control plane, create a Pod Identity Association
+with the `aws` CLI:
+
+```shell
+aws eks create-pod-identity-association \
+  --cluster-name ${YOUR_CLUSTER_NAME} \
+  --namespace ${YOUR_CONTROL_PLANE_NAMESPACE} \
+  --service-account mxp-controller \
+  --role-arn arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:role/backup-restore-role
+```
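+
+You can confirm the association exists (an illustrative check):
+
+```shell
+aws eks list-pod-identity-associations \
+  --cluster-name ${YOUR_CLUSTER_NAME} \
+  --namespace ${YOUR_CONTROL_PLANE_NAMESPACE}
+```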
+
+
+
+Upbound supports workload-identity configurations in Azure with Azure's built-in
+workload identity feature.
+
+#### Prepare your cluster
+
+First, enable the OIDC issuer and workload identity in your AKS cluster:
+
+```shell
+az aks update --resource-group ${YOUR_RESOURCE_GROUP} --name ${YOUR_AKS_CLUSTER_NAME} --enable-oidc-issuer --enable-workload-identity
+```
+
+Next, find and store the OIDC issuer URL as an environment variable:
+
+```shell
+export AKS_OIDC_ISSUER="$(az aks show --name ${YOUR_AKS_CLUSTER_NAME} --resource-group ${YOUR_RESOURCE_GROUP} --query "oidcIssuerProfile.issuerUrl" --output tsv)"
+```
+
+#### Create a User-Assigned Managed Identity
+
+Create a new managed identity to associate with the backup and restore component:
+
+```shell
+az identity create --name backup-restore-identity --resource-group ${YOUR_RESOURCE_GROUP} --location ${YOUR_LOCATION}
+```
+
+Retrieve the client ID and store it as an environment variable:
+
+```shell
+export USER_ASSIGNED_CLIENT_ID="$(az identity show --name backup-restore-identity --resource-group ${YOUR_RESOURCE_GROUP} --query clientId -otsv)"
+```
+
+Grant the managed identity you created access to your Azure Storage account:
+
+```shell
+az role assignment create \
+  --role "Storage Blob Data Contributor" \
+  --assignee ${USER_ASSIGNED_CLIENT_ID} \
+  --scope /subscriptions/${YOUR_SUBSCRIPTION_ID}/resourceGroups/${YOUR_RESOURCE_GROUP}/providers/Microsoft.Storage/storageAccounts/${YOUR_STORAGE_ACCOUNT}
+```
+
+#### Apply the managed identity role
+
+When you install or upgrade the Spaces Helm chart, pass the `--set` flags for
+the backup and restore component:
+
+```shell
+--set controlPlanes.mxpController.serviceAccount.annotations."azure\.workload\.identity/client-id"="${YOUR_USER_ASSIGNED_CLIENT_ID}"
+--set controlPlanes.mxpController.pod.customLabels."azure\.workload\.identity/use"="true"
+```
+
+#### Create a Federated Identity credential
+
+```shell
+az identity federated-credential create \
+  --name backup-restore-federated-identity \
+  --identity-name backup-restore-identity \
+  --resource-group ${YOUR_RESOURCE_GROUP} \
+  --issuer ${AKS_OIDC_ISSUER} \
+  --subject system:serviceaccount:${YOUR_CONTROL_PLANE_NAMESPACE}:mxp-controller
+```
+
+
+
+Upbound supports workload-identity configurations in GCP with IAM principal
+identifiers and service account impersonation.
+ +#### Prepare your cluster + +First, enable Workload Identity Federation on your GKE cluster: + +```shell +gcloud container clusters update ${YOUR_CLUSTER_NAME} \ + --workload-pool=${YOUR_PROJECT_ID}.svc.id.goog \ + --region=${YOUR_REGION} +``` + +#### Create a Google Service Account + +Create a service account for the backup and restore component: + +```shell +gcloud iam service-accounts create backup-restore-sa \ + --display-name "Backup Restore Service Account" \ + --project ${YOUR_PROJECT_ID} +``` + +Grant the service account access to your Google Cloud Storage bucket: + +```shell +gcloud projects add-iam-policy-binding ${YOUR_PROJECT_ID} \ + --member "serviceAccount:backup-restore-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com" \ + --role "roles/storage.objectAdmin" +``` + +#### Configure Workload Identity + +Create an IAM binding to grant the Kubernetes service account access to the Google service account: + +```shell +gcloud iam service-accounts add-iam-policy-binding \ + backup-restore-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com \ + --role roles/iam.workloadIdentityUser \ + --member "serviceAccount:${YOUR_PROJECT_ID}.svc.id.goog[${YOUR_CONTROL_PLANE_NAMESPACE}/mxp-controller]" +``` + +#### Apply the service account configuration + +In your control plane, pass the `--set` flag with the Spaces Helm chart +parameters for the backup and restore component: + +```shell +--set controlPlanes.mxpController.serviceAccount.annotations."iam\.gke\.io/gcp-service-account"="backup-restore-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com" +``` + + + +## Verify your configuration + +After you apply the configuration use `kubectl` to verify the service account +has the correct annotation: + +```shell +kubectl get serviceaccount mxp-controller -n ${YOUR_CONTROL_PLANE_NAMESPACE} -o yaml +``` + +Verify the `mxp-controller` pod is running: + +```shell +kubectl get pods -n ${YOUR_CONTROL_PLANE_NAMESPACE} | grep mxp-controller +``` + +## Restart workload + +You must manually restart a workload's pod when you add the workload identity annotations to the running pod's service account. + + + +This restart enables the EKS pod identity webhook to inject the necessary +environment for using IRSA. + + + + + +This restart enables the workload identity webhook to inject the necessary +environment for using Azure workload identity. + + + + + +This restart enables the workload identity webhook to inject the necessary +environment for using GCP workload identity. + + + +```shell +kubectl rollout restart deployment mxp-controller -n ${YOUR_CONTROL_PLANE_NAMESPACE} +``` + +## Use cases + + +Configuring backup and restore with workload identity eliminates the need for +static credentials in your cluster and the overhead of credential rotation. +These benefits are helpful in: + +* Disaster recovery scenarios +* Control plane migration +* Compliance requirements +* Rollbacks after unsuccessful upgrades + +## Next steps + +Now that you have a workload identity configured for the backup and restore +component, visit the [Backup Configuration][backup-restore-guide] documentation. 
+
+Other workload identity guides are:
+* [Billing][billing]
+* [Shared Secrets][secrets]
+
+[backup-restore-guide]: /spaces/howtos/backup-and-restore
+[billing]: /spaces/howtos/self-hosted/workload-id/billing-config
+[secrets]: /spaces/howtos/self-hosted/workload-id/eso-config
diff --git a/spaces_versioned_docs/version-v1.13/howtos/self-hosted/workload-id/billing-config.md b/spaces_versioned_docs/version-v1.13/howtos/self-hosted/workload-id/billing-config.md
new file mode 100644
index 000000000..323a6122f
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.13/howtos/self-hosted/workload-id/billing-config.md
@@ -0,0 +1,454 @@
+---
+title: Billing Workload ID
+weight: 1
+description: Configure workload identity for Spaces Billing
+---
+import GlobalLanguageSelector, { CodeBlock } from '@site/src/components/GlobalLanguageSelector';
+
+
+
+
+
+
+Workload-identity authentication lets you use access policies to grant your
+self-hosted Space cluster access to your cloud providers. Workload identity
+authentication grants temporary AWS credentials to your Kubernetes pod based on
+a service account. Assigning IAM roles and service accounts allows the pod to
+assume the IAM role dynamically and much more securely than static credentials.
+
+This guide walks you through creating an IAM role trust policy and applying it to your EKS
+cluster for billing in your Space cluster.
+
+
+
+
+
+Workload-identity authentication lets you use access policies to grant your
+self-hosted Space cluster access to your cloud providers. Workload identity
+authentication grants temporary Azure credentials to your Kubernetes pod based on
+a service account. Assigning managed identities and service accounts allows the pod to
+authenticate with Azure resources dynamically and much more securely than static credentials.
+
+This guide walks you through creating a managed identity and federated credential for your AKS
+cluster for billing in your Space cluster.
+
+
+
+
+
+Workload-identity authentication lets you use access policies to grant your
+self-hosted Space cluster access to your cloud providers. Workload identity
+authentication grants temporary GCP credentials to your Kubernetes pod based on
+a service account. Assigning IAM roles and service accounts allows the pod to
+access cloud resources dynamically and much more securely than static
+credentials.
+
+This guide walks you through configuring workload identity for your GKE
+cluster's billing component.
+
+
+
+## Prerequisites
+
+
+To set up workload identity, you'll need:
+
+
+- A self-hosted Space cluster
+- Administrator access in your cloud provider
+- Helm and `kubectl`
+
+## About the billing component
+
+The `vector.dev` component handles billing metrics collection in Spaces. It
+stores account data in your cloud storage. By default, this component runs in
+each control plane's host namespace.
+
+## Configuration
+
+
+
+Upbound supports workload-identity configurations in AWS with IAM Roles for
+Service Accounts and EKS pod identity association.
+
+#### IAM Roles for Service Accounts (IRSA)
+
+With IRSA, you can associate a Kubernetes service account in an EKS cluster with
+an AWS IAM role. Upbound authenticates workloads with that service account as
+the IAM role using temporary credentials instead of static role credentials.
+IRSA relies on the AWS STS `AssumeRoleWithWebIdentity` API to exchange OIDC ID
+tokens for the IAM role's temporary credentials. IRSA uses the
+`eks.amazonaws.com/role-arn` annotation to link the service account and the IAM
+role.
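+
+As a rough sketch (not a step to apply), the end state of the IRSA steps below
+is the `vector` service account carrying the role annotation, with the IAM role
+created in the next step:
+
+```yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: vector
+  namespace: ${YOUR_CONTROL_PLANE_NAMESPACE}
+  annotations:
+    # IAM role the pod assumes through AssumeRoleWithWebIdentity
+    eks.amazonaws.com/role-arn: arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:role/${YOUR_BILLING_ROLE_NAME}
+```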
+
+**Create an IAM role and trust policy**
+
+First, create an IAM role with appropriate permissions to access your S3 bucket:
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Action": [
+        "s3:GetObject",
+        "s3:PutObject",
+        "s3:ListBucket",
+        "s3:DeleteObject"
+      ],
+      "Resource": [
+        "arn:aws:s3:::${YOUR_BILLING_BUCKET}",
+        "arn:aws:s3:::${YOUR_BILLING_BUCKET}/*"
+      ]
+    }
+  ]
+}
+```
+
+You must configure the IAM role trust policy with an exact match for each
+provisioned control plane. An example of a trust policy for a single control
+plane is below:
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Principal": {
+        "Federated": "arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:oidc-provider/${YOUR_OIDC_PROVIDER}"
+      },
+      "Action": "sts:AssumeRoleWithWebIdentity",
+      "Condition": {
+        "StringEquals": {
+          "${YOUR_OIDC_PROVIDER}:aud": "sts.amazonaws.com",
+          "${YOUR_OIDC_PROVIDER}:sub": "system:serviceaccount:${YOUR_NAMESPACE}:vector"
+        }
+      }
+    }
+  ]
+}
+```
+
+**Configure the EKS OIDC provider**
+
+Next, ensure your EKS cluster has an OIDC identity provider:
+
+```shell
+eksctl utils associate-iam-oidc-provider --cluster ${YOUR_CLUSTER_NAME} --approve
+```
+
+**Apply the IAM role**
+
+In your control plane, pass the `--set` flag with the Spaces Helm chart
+parameters for the Billing component:
+
+```shell
+--set "billing.enabled=true"
+--set "billing.storage.provider=aws"
+--set "billing.storage.aws.region=${YOUR_AWS_REGION}"
+--set "billing.storage.aws.bucket=${YOUR_BILLING_BUCKET}"
+--set "billing.storage.secretRef.name="
+--set controlPlanes.vector.serviceAccount.customAnnotations."eks\.amazonaws\.com/role-arn"="arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:role/${YOUR_BILLING_ROLE_NAME}"
+```
+
+:::important
+You **must** set the `billing.storage.secretRef.name` to an empty string to
+enable workload identity for the billing component.
+:::
+
+#### EKS pod identities
+
+Upbound also supports EKS Pod Identity configuration. EKS Pod Identities allow
+you to create a pod identity association with your Kubernetes namespace, a
+service account, and an IAM role, which allows the EKS control plane to
+automatically handle the credential exchange.
+
+**Create an IAM role**
+
+First, create an IAM role with appropriate permissions to access your S3 bucket:
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Action": [
+        "s3:GetObject",
+        "s3:PutObject",
+        "s3:ListBucket"
+      ],
+      "Resource": [
+        "arn:aws:s3:::${YOUR_BILLING_BUCKET}",
+        "arn:aws:s3:::${YOUR_BILLING_BUCKET}/*"
+      ]
+    }
+  ]
+}
+```
+
+**Configure your Space with Helm**
+
+When you install or upgrade your Space with Helm, add the billing values:
+
+```shell
+helm upgrade spaces spaces-helm-chart \
+  --set "billing.enabled=true" \
+  --set "billing.storage.provider=aws" \
+  --set "billing.storage.aws.region=${YOUR_AWS_REGION}" \
+  --set "billing.storage.aws.bucket=${YOUR_BILLING_BUCKET}" \
+  --set "billing.storage.secretRef.name="
+```
+
+**Create a Pod Identity Association**
+
+After Upbound provisions your control plane, create a Pod Identity Association
+with the `aws` CLI:
+
+```shell
+aws eks create-pod-identity-association \
+  --cluster-name ${YOUR_CLUSTER_NAME} \
+  --namespace ${YOUR_CONTROL_PLANE_NAMESPACE} \
+  --service-account vector \
+  --role-arn arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:role/${YOUR_BILLING_ROLE_NAME}
+```
+
+
+
+
+
+Upbound supports workload-identity configurations in Azure with Azure's built-in
+workload identity feature.
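+
+The steps below annotate the `vector` service account with the managed
+identity's client ID and label its pod to opt in to token injection. As a
+rough sketch (not a step to apply), the end state looks like this, with the
+values coming from later steps:
+
+```yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: vector
+  namespace: ${YOUR_CONTROL_PLANE_NAMESPACE}
+  annotations:
+    azure.workload.identity/client-id: ${SPACES_BILLING_APP_ID}
+# The vector pod additionally carries the label:
+#   azure.workload.identity/use: "true"
+```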
+ +First, enable the OIDC issuer and workload identity in your AKS cluster: + +```shell +az aks update --resource-group ${YOUR_RESOURCE_GROUP} --name ${YOUR_AKS_CLUSTER_NAME} --enable-oidc-issuer --enable-workload-identity +``` + +Next, find and store the OIDC issuer URL as an environment variable: + +```shell +export AKS_OIDC_ISSUER="$(az aks show --name ${YOUR_AKS_CLUSTER_NAME} --resource-group ${YOUR_RESOURCE_GROUP} --query "oidcIssuerProfile.issuerUrl" --output tsv)" +``` + +Create a new managed identity to associate with the billing component: + +```shell +az identity create --name billing-identity --resource-group ${YOUR_RESOURCE_GROUP} --location ${YOUR_LOCATION} +``` + +Retrieve the client ID and store it as an environment variable: + +```shell +export USER_ASSIGNED_CLIENT_ID="$(az identity show --name billing-identity --resource-group ${YOUR_RESOURCE_GROUP} --query clientId -otsv)" +``` + +Grant the managed identity you created to access your Azure Storage account: + +```shell +az role assignment create --role "Storage Blob Data Contributor" --assignee $USER_ASSIGNED_CLIENT_ID --scope /subscriptions/${YOUR_SUBSCRIPTION_ID}/resourceGroups/${YOUR_RESOURCE_GROUP}/providers/Microsoft.Storage/storageAccounts/${YOUR_STORAGE_ACCOUNT} +``` + +In your control plane, pass the `--set` flag with the Spaces Helm chart +parameters for the billing component: + +```shell +--set "billing.enabled=true" +--set "billing.storage.provider=azure" +--set "billing.storage.azure.storageAccount=${SPACES_BILLING_STORAGE_ACCOUNT}" +--set "billing.storage.azure.container=${SPACES_BILLING_STORAGE_CONTAINER}" +--set "billing.storage.secretRef.name=" +--set controlPlanes.vector.serviceAccount.customAnnotations."azure\.workload\.identity/client-id"="${SPACES_BILLING_APP_ID}" +--set controlPlanes.vector.pod.customLabels."azure\.workload\.identity/use"="true" +``` + +Create a federated credential to establish trust between the managed identity +and your AKS OIDC provider: + +```shell +az identity federated-credential create \ + --name billing-federated-identity \ + --identity-name billing-identity \ + --resource-group ${YOUR_RESOURCE_GROUP} \ + --issuer ${AKS_OIDC_ISSUER} \ + --subject system:serviceaccount:${YOUR_CONTROL_PLANE_NAMESPACE}:vector +``` + + + + + +Upbound supports workload-identity configurations in GCP with IAM principal +identifiers or service account impersonation. + +#### IAM principal identifiers + +IAM principal identifiers allow you to grant permissions directly to +Kubernetes service accounts without additional annotation. Upbound recommends +this approach for ease-of-use and flexibility. + +First, enable Workload Identity Federation on your GKE cluster: + +```shell +gcloud container clusters update ${YOUR_CLUSTER_NAME} \ + --workload-pool=${YOUR_PROJECT_ID}.svc.id.goog \ + --region=${YOUR_REGION} +``` + +Next, configure your Spaces installation with the Spaces Helm chart parameters: + +```shell +--set "billing.enabled=true" +--set "billing.storage.provider=gcp" +--set "billing.storage.gcp.bucket=${YOUR_BILLING_BUCKET}" +--set "billing.storage.secretRef.name=" +``` + +:::important +You **must** set the `billing.storage.secretRef.name` to an empty string to +enable workload identity for the billing component. 
+::: + +Grant the necessary permissions to your Kubernetes service account: + +```shell +gcloud projects add-iam-policy-binding ${YOUR_PROJECT_ID} \ + --member="principalSet://iam.googleapis.com/projects/${YOUR_PROJECT_NUMBER}/locations/global/workloadIdentityPools/${YOUR_PROJECT_ID}.svc.id.goog/attribute.kubernetes_namespace/${YOUR_CONTROL_PLANE_NAMESPACE}/attribute.kubernetes_service_account/vector" \ + --role="roles/storage.objectAdmin" +``` + +Enable uniform bucket-level access on your storage bucket: + +```shell +gcloud storage buckets update gs://${YOUR_BILLING_BUCKET} --uniform-bucket-level-access +``` + +#### Service account impersonation + +Service account impersonation allows you to link a Kubernetes service account to +a GCP service account. The Kubernetes service account assumes the permissions of +the GCP service account you specify. + +Enable workload id federation on your GKE cluster: + +```shell +gcloud container clusters update ${YOUR_CLUSTER_NAME} \ + --workload-pool=${YOUR_PROJECT_ID}.svc.id.goog \ + --region=${YOUR_REGION} +``` + +Next, create a dedicated service account for your billing operations: + +```shell +gcloud iam service-accounts create billing-sa \ + --project=${YOUR_PROJECT_ID} +``` + +Grant storage permissions to the service account you created: + +```shell +gcloud projects add-iam-policy-binding ${YOUR_PROJECT_ID} \ + --member="serviceAccount:billing-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com" \ + --role="roles/storage.objectAdmin" +``` + +Link the Kubernetes service account to the GCP service account: + +```shell +gcloud iam service-accounts add-iam-policy-binding \ + billing-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com \ + --role="roles/iam.workloadIdentityUser" \ + --member="serviceAccount:${YOUR_PROJECT_ID}.svc.id.goog[${YOUR_CONTROL_PLANE_NAMESPACE}/vector]" +``` + +In your control plane, pass the `--set` flag with the Spaces Helm chart +parameters for the billing component: + +```shell +--set "billing.enabled=true" +--set "billing.storage.provider=gcp" +--set "billing.storage.gcp.bucket=${YOUR_BILLING_BUCKET}" +--set "billing.storage.secretRef.name=" +--set controlPlanes.vector.serviceAccount.customAnnotations."iam\.gke\.io/gcp-service-account"="billing-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com" +``` + + + +## Verify your configuration + +After you apply the configuration use `kubectl` to verify the service account +has the correct annotation: + +```shell +kubectl get serviceaccount vector -n ${YOUR_CONTROL_PLANE_NAMESPACE} -o yaml +``` + +Verify the `vector` pod is running: + +```shell +kubectl get pods -n ${YOUR_CONTROL_PLANE_NAMESPACE} | grep vector +``` + +## Restart workload + + + +You must manually restart a workload's pod when you add the +`eks.amazonaws.com/role-arn key` annotation to the running pod's service +account. + +This restart enables the EKS pod identity webhook to inject the necessary +environment for using IRSA. + + + + + +You must manually restart a workload's pod when you add the workload identity annotations to the running pod's service account. + +This restart enables the workload identity webhook to inject the necessary +environment for using Azure workload identity. + + + + + +GCP workload identity doesn't require pod restarts after configuration changes. 
+If you do need to restart the workload, use the `kubectl` command to force the +component restart: + + + +```shell +kubectl rollout restart deployment vector +``` + + +## Use cases + + +Using workload identity authentication for billing eliminates the need for static +credentials in your cluster as well as the overhead of credential rotation. +These benefits are helpful in: + +* Resource usage tracking across teams/projects +* Cost allocation for multi-tenant environments +* Financial auditing requirements +* Capacity billing and resource optimization +* Automated billing workflows + +## Next steps + +Now that you have workload identity configured for the billing component, visit +the [Billing guide][billing-guide] for more information. + +Other workload identity guides are: +* [Backup and restore][backuprestore] +* [Shared Secrets][secrets] + +[billing-guide]: /spaces/howtos/self-hosted/billing +[backuprestore]: /spaces/howtos/self-hosted/workload-id/backup-restore-config +[secrets]: /spaces/howtos/self-hosted/workload-id/eso-config diff --git a/spaces_versioned_docs/version-v1.13/howtos/self-hosted/workload-id/eso-config.md b/spaces_versioned_docs/version-v1.13/howtos/self-hosted/workload-id/eso-config.md new file mode 100644 index 000000000..c1418c171 --- /dev/null +++ b/spaces_versioned_docs/version-v1.13/howtos/self-hosted/workload-id/eso-config.md @@ -0,0 +1,503 @@ +--- +title: Shared Secrets Workload ID +weight: 1 +description: Configure workload identity for Spaces Shared Secrets +--- +import GlobalLanguageSelector, { CodeBlock } from '@site/src/components/GlobalLanguageSelector'; + + + + + + + +Workload-identity authentication lets you use access policies to grant your +self-hosted Space cluster access to your cloud providers. Workload identity +authentication grants temporary AWS credentials to your Kubernetes pod based on +a service account. Assigning IAM roles and service accounts allows the pod to +assume the IAM role dynamically and much more securely than static credentials. + +This guide walks you through creating an IAM trust role policy and applying it to your EKS +cluster for secret sharing with Kubernetes. + + + + + +Workload-identity authentication lets you use access policies to grant your +self-hosted Space cluster access to your cloud providers. Workload identity +authentication grants temporary Azure credentials to your Kubernetes pod based on +a service account. Assigning managed identities and service accounts allows the pod to +authenticate with Azure resources dynamically and much more securely than static credentials. + +This guide walks you through creating a managed identity and federated credential for your AKS +cluster for shared secrets in your Space cluster. + + + + + +Workload-identity authentication lets you use access policies to grant your +self-hosted Space cluster access to your cloud providers. Workload identity +authentication grants temporary GCP credentials to your Kubernetes pod based on +a service account. Assigning IAM roles and service accounts allows the pod to +access cloud resources dynamically and much more securely than static +credentials. + +This guide walks you through configuring workload identity for your GKE +cluster's Shared Secrets component. 
+
+
+
+## Prerequisites
+
+
+To set up workload identity, you'll need:
+
+
+- A self-hosted Space cluster
+- Administrator access in your cloud provider
+- Helm and `kubectl`
+
+
+## About the Shared Secrets component
+
+
+
+
+The External Secrets Operator (ESO) runs in each control plane's host namespace as `external-secrets-controller`. It needs access to
+your external secrets management service, such as AWS Secrets Manager.
+
+To configure your shared secrets workflow controller, you must:
+
+* Annotate the Kubernetes service account to associate it with a cloud-side
+  principal (such as an IAM role, service account, or enterprise application). The workload must then
+  use this service account.
+* Label the workload (pod) to allow the injection of a temporary credential set,
+  enabling authentication.
+
+
+
+
+
+The External Secrets Operator (ESO) component runs in each control plane's host
+namespace as `external-secrets-controller`. It synchronizes secrets from
+external APIs into Kubernetes secrets. Shared secrets allow you to manage
+credentials outside your Kubernetes cluster while making them available to your
+applications.
+
+
+
+
+
+The External Secrets Operator (ESO) component runs in each control plane's host
+namespace as `external-secrets-controller`. It synchronizes secrets from
+external APIs into Kubernetes secrets. Shared secrets allow you to manage
+credentials outside your Kubernetes cluster while making them available to your
+applications.
+
+
+
+## Configuration
+
+
+
+Upbound supports workload-identity configurations in AWS with IAM Roles for
+Service Accounts or EKS pod identity association.
+
+#### IAM Roles for Service Accounts (IRSA)
+
+With IRSA, you can associate a Kubernetes service account in an EKS cluster with
+an AWS IAM role. Upbound authenticates workloads with that service account as
+the IAM role using temporary credentials instead of static role credentials.
+IRSA relies on the AWS STS `AssumeRoleWithWebIdentity` API to exchange OIDC ID
+tokens for the IAM role's temporary credentials. IRSA uses the
+`eks.amazonaws.com/role-arn` annotation to link the service account and the IAM
+role.
+
+**Create an IAM role and trust policy**
+
+First, create an IAM role with appropriate permissions to access AWS Secrets Manager:
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Action": [
+        "secretsmanager:GetSecretValue",
+        "secretsmanager:DescribeSecret",
+        "ssm:GetParameter"
+      ],
+      "Resource": [
+        "arn:aws:secretsmanager:${YOUR_REGION}:${YOUR_AWS_ACCOUNT_ID}:secret:${YOUR_SECRET_PREFIX}*",
+        "arn:aws:ssm:${YOUR_REGION}:${YOUR_AWS_ACCOUNT_ID}:parameter/${YOUR_PARAMETER_PREFIX}*"
+      ]
+    }
+  ]
+}
+```
+
+You must configure the IAM role trust policy with an exact match for each
+provisioned control plane. An example of a trust policy for a single control
+plane is below:
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Principal": {
+        "Federated": "arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:oidc-provider/${YOUR_OIDC_PROVIDER}"
+      },
+      "Action": "sts:AssumeRoleWithWebIdentity",
+      "Condition": {
+        "StringEquals": {
+          "${YOUR_OIDC_PROVIDER}:aud": "sts.amazonaws.com"
+        },
+        "StringLike": {
+          "${YOUR_OIDC_PROVIDER}:sub": "system:serviceaccount:*:external-secrets-controller"
+        }
+      }
+    }
+  ]
+}
+```
+
+**Configure the EKS OIDC provider**
+
+Next, ensure your EKS cluster has an OIDC identity provider:
+
+```shell
+eksctl utils associate-iam-oidc-provider --cluster ${YOUR_CLUSTER_NAME} --approve
+```
+
+**Apply the IAM role**
+
+In your control plane, pass the `--set` flag with the Spaces Helm chart
+parameters for the shared secrets component:
+
+```shell
+--set controlPlanes.sharedSecrets.serviceAccount.customAnnotations."eks\.amazonaws\.com/role-arn"="arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:role/${YOUR_ESO_ROLE_NAME}"
+```
+
+This command allows the shared secrets component to authenticate with your
+dedicated IAM role in your EKS cluster environment.
+
+#### EKS pod identities
+
+Upbound also supports EKS Pod Identity configuration. EKS Pod Identities allow
+you to create a pod identity association with your Kubernetes namespace, a
+service account, and an IAM role, which allows the EKS control plane to
+automatically handle the credential exchange.
+
+**Create an IAM role**
+
+First, create an IAM role with appropriate permissions to access AWS Secrets Manager:
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Action": [
+        "secretsmanager:GetSecretValue",
+        "secretsmanager:DescribeSecret",
+        "ssm:GetParameter"
+      ],
+      "Resource": [
+        "arn:aws:secretsmanager:${YOUR_AWS_REGION}:${YOUR_AWS_ACCOUNT_ID}:secret:${YOUR_SECRET_PREFIX}*",
+        "arn:aws:ssm:${YOUR_AWS_REGION}:${YOUR_AWS_ACCOUNT_ID}:parameter/${YOUR_PARAMETER_PREFIX}*"
+      ]
+    }
+  ]
+}
+```
+
+**Configure your Space with Helm**
+
+When you install or upgrade your Space with Helm, add the shared secrets value:
+
+```shell
+helm upgrade spaces spaces-helm-chart \
+  --set "sharedSecrets.enabled=true"
+```
+
+**Create a Pod Identity Association**
+
+After Upbound provisions your control plane, create a Pod Identity Association
+with the `aws` CLI:
+
+```shell
+aws eks create-pod-identity-association \
+  --cluster-name ${YOUR_CLUSTER_NAME} \
+  --namespace ${YOUR_CONTROL_PLANE_NAMESPACE} \
+  --service-account external-secrets-controller \
+  --role-arn arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:role/${YOUR_ROLE_NAME}
+```
+
+
+
+
+
+Upbound supports workload-identity configurations in Azure with Azure's built-in
+workload identity feature.
+
+First, enable the OIDC issuer and workload identity in your AKS cluster:
+
+```shell
+az aks update --resource-group ${YOUR_RESOURCE_GROUP} --name ${YOUR_AKS_CLUSTER_NAME} --enable-oidc-issuer --enable-workload-identity
+```
+
+Next, find and store the OIDC issuer URL as an environment variable:
+
+```shell
+export AKS_OIDC_ISSUER="$(az aks show --name ${YOUR_AKS_CLUSTER_NAME} --resource-group ${YOUR_RESOURCE_GROUP} --query "oidcIssuerProfile.issuerUrl" --output tsv)"
+```
+
+Create a new managed identity to associate with the shared secrets component:
+
+```shell
+az identity create --name secrets-identity --resource-group ${YOUR_RESOURCE_GROUP} --location ${YOUR_LOCATION}
+```
+
+Retrieve the client ID and store it as an environment variable:
+
+```shell
+export USER_ASSIGNED_CLIENT_ID="$(az identity show --name secrets-identity --resource-group ${YOUR_RESOURCE_GROUP} --query clientId -otsv)"
+```
+
+Grant the managed identity you created access to your Azure Key Vault:
+
+```shell
+az keyvault set-policy --name ${YOUR_KEY_VAULT_NAME} \
+  --resource-group ${YOUR_RESOURCE_GROUP} \
+  --object-id $(az identity show --name secrets-identity --resource-group ${YOUR_RESOURCE_GROUP} --query principalId -otsv) \
+  --secret-permissions get list
+```
+
+In your control plane, pass the `--set` flag with the Spaces Helm chart
+parameters for the shared secrets component:
+
+```shell
+--set controlPlanes.sharedSecrets.serviceAccount.customAnnotations."azure\.workload\.identity/client-id"="${USER_ASSIGNED_CLIENT_ID}"
+--set controlPlanes.sharedSecrets.pod.customLabels."azure\.workload\.identity/use"="true"
+```
+
+Next, create a federated credential to establish trust between the managed identity
+and your AKS OIDC provider:
+
+```shell
+az identity federated-credential create \
+  --name secrets-federated-identity \
+  --identity-name secrets-identity \
+  --resource-group ${YOUR_RESOURCE_GROUP} \
+  --issuer ${AKS_OIDC_ISSUER} \
+  --subject system:serviceaccount:${YOUR_CONTROL_PLANE_NAMESPACE}:external-secrets-controller
+```
+
+
+
+
+
+Upbound supports workload-identity configurations in GCP with IAM principal
+identifiers or service account impersonation.
+
+#### IAM principal identifiers
+
+IAM principal identifiers allow you to grant permissions directly to
+Kubernetes service accounts without additional annotation. Upbound recommends
+this approach for ease of use and flexibility.
+
+First, enable Workload Identity Federation on your GKE cluster:
+
+```shell
+gcloud container clusters update ${YOUR_CLUSTER_NAME} \
+  --workload-pool=${YOUR_PROJECT_ID}.svc.id.goog \
+  --region=${YOUR_REGION}
+```
+
+Next, grant the necessary permissions to your Kubernetes service account:
+
+```shell
+gcloud projects add-iam-policy-binding ${YOUR_PROJECT_ID} \
+  --member="principalSet://iam.googleapis.com/projects/${YOUR_PROJECT_NUMBER}/locations/global/workloadIdentityPools/${YOUR_PROJECT_ID}.svc.id.goog/attribute.kubernetes_namespace/${YOUR_CONTROL_PLANE_NAMESPACE}/attribute.kubernetes_service_account/external-secrets-controller" \
+  --role="roles/secretmanager.secretAccessor"
+```
+
+#### Service account impersonation
+
+Service account impersonation allows you to link a Kubernetes service account to
+a GCP service account. The Kubernetes service account assumes the permissions of
+the GCP service account you specify.
+ +Enable workload id federation on your GKE cluster: + +```shell +gcloud container clusters update ${YOUR_CLUSTER_NAME} \ + --workload-pool=${YOUR_PROJECT_ID}.svc.id.goog \ + --region=${YOUR_REGION} +``` + +Next, create a dedicated service account for your secrets operations: + +```shell +gcloud iam service-accounts create secrets-sa \ + --project=${YOUR_PROJECT_ID} +``` + +Grant secret access permissions to the service account you created: + +```shell +gcloud projects add-iam-policy-binding ${YOUR_PROJECT_ID} \ + --member="serviceAccount:secrets-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com" \ + --role="roles/secretmanager.secretAccessor" +``` + +Link the Kubernetes service account to the GCP service account: + +```shell +gcloud iam service-accounts add-iam-policy-binding \ + secrets-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com \ + --role="roles/iam.workloadIdentityUser" \ + --member="serviceAccount:${YOUR_PROJECT_ID}.svc.id.goog[${YOUR_CONTROL_PLANE_NAMESPACE}/external-secrets-controller]" +``` + +In your control plane, pass the `--set` flag with the Spaces Helm chart +parameters for the shared secrets component: + +```shell +--set controlPlanes.sharedSecrets.serviceAccount.customAnnotations."iam\.gke\.io/gcp-service-account"="secrets-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com" +``` + + + +## Verify your configuration + +After you apply the configuration use `kubectl` to verify the service account +has the correct annotation: + +```shell +kubectl get serviceaccount external-secrets-controller -n ${YOUR_CONTROL_PLANE_NAMESPACE} -o yaml +``` + + + +Verify the `external-secrets` pod is running correctly: + +```shell +kubectl get pods -n ${YOUR_CONTROL_PLANE_NAMESPACE} | grep external-secrets +``` + + + + + +Verify the External Secrets Operator pod is running correctly: + +```shell +kubectl get pods -n ${YOUR_CONTROL_PLANE_NAMESPACE} | grep external-secrets +``` + + + + + +Verify the `external-secrets` pod is running correctly: + +```shell +kubectl get pods -n ${YOUR_CONTROL_PLANE_NAMESPACE} | grep external-secrets +``` + + + +## Restart workload + + + +You must manually restart a workload's pod when you add the +`eks.amazonaws.com/role-arn key` annotation to the running pod's service +account. + +This restart enables the EKS pod identity webhook to inject the necessary +environment for using IRSA. + + + + + +You must manually restart a workload's pod when you add the workload identity annotations to the running pod's service account. + +This restart enables the workload identity webhook to inject the necessary +environment for using Azure workload identity. + + + + + +GCP workload identity doesn't require pod restarts after configuration changes. +If you do need to restart the workload, use the `kubectl` command to force the +component restart: + + + +```shell +kubectl rollout restart deployment external-secrets +``` + +## Use cases + + + + +Shared secrets with workload identity eliminates the need for static credentials +in your cluster. These benefits are particularly helpful in: + +* Secure application credentials management +* Database connection string storage +* API token management +* Compliance with secret rotation security standards +* Multi-environment configuration with centralized secret management + + + + + +Using workload identity authentication for shared secrets eliminates the need for static +credentials in your cluster as well as the overhead of credential rotation. 
+These benefits are particularly helpful in:
+
+* Secure application credentials management
+* Database connection string storage
+* API token management
+* Compliance with secret rotation security standards
+
+
+
+
+
+Configuring the external secrets operator with workload identity eliminates the need for
+static credentials in your cluster and the overhead of credential rotation.
+These benefits are particularly helpful in:
+
+* Secure application credentials management
+* Database connection string storage
+* API token management
+* Compliance with secret rotation security standards
+
+
+
+## Next steps
+
+Now that you have workload identity configured for the shared secrets component, visit
+the [Shared Secrets][eso-guide] guide for more information.
+
+Other workload identity guides are:
+* [Backup and restore][backuprestore]
+* [Billing][billing]
+
+[eso-guide]: /spaces/howtos/secrets-management
+[backuprestore]: /spaces/howtos/self-hosted/workload-id/backup-restore-config
+[billing]: /spaces/howtos/self-hosted/workload-id/billing-config
diff --git a/spaces_versioned_docs/version-v1.13/howtos/simulations.md b/spaces_versioned_docs/version-v1.13/howtos/simulations.md
new file mode 100644
index 000000000..26cb0e657
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.13/howtos/simulations.md
@@ -0,0 +1,110 @@
+---
+title: Simulate changes to your Control Plane Projects
+sidebar_position: 100
+description: Use the Up CLI to mock operations before deploying to your environments.
+---
+
+:::info API Version Information
+This guide covers Simulations, available in v1.10+ (GA since v1.13). For version-specific availability and features, see the .
+
+For API specifications on the Simulations CRD, see the [Spaces API Reference](../../../reference/apis/spaces-api/).
+:::
+
+:::important
+The Simulations feature is in private preview. For more information, [reach out to Upbound][reach-out-to-upbound].
+:::
+
+Control plane simulations allow you to preview changes to your resources before
+applying them to your control planes. Like a plan or dry-run operation,
+simulations expose the impact of updates to compositions or claims without
+changing your actual resources.
+
+A control plane simulation creates a temporary copy of your control plane and
+returns a preview of the desired changes. The simulation change plan helps you
+reduce the risk of unexpected behavior based on your changes.
+
+## Simulation benefits
+
+Control planes are dynamic systems that automatically reconcile resources to
+match your desired state. Simulations provide visibility into this
+reconciliation process by showing:
+
+
+* New resources to create
+* Existing resources to change
+* Existing resources to delete
+* How configuration changes propagate through the system
+
+These insights are crucial when planning complex changes or upgrading Crossplane
+packages.
+
+## Requirements
+
+Simulations are available to select customers on Upbound Cloud with Team
+Tier or higher. For more information, [reach out to Upbound][reach-out-to-upbound-1].
+
+## How to simulate your control planes
+
+Before you start a simulation, build your project and use the `up
+project run` command to run your control plane.
+
+Use the `up project simulate` command with your control plane name to start the
+simulation:
+
+```shell {copy-lines="all"}
+up project simulate --complete-after=60s --terminate-on-finish
+```
+
+The `complete-after` flag determines how long to run the simulation before it
+completes and calculates the results. Depending on the change, a simulation may
+not complete within your defined interval, leaving resources it hasn't yet
+processed marked as `unchanged`.
+
+The `terminate-on-finish` flag terminates the simulation after the time you set
+and then deletes the control plane that ran the simulation.
+
+At the end of your simulation, your CLI returns:
+* A summary of the resources created, modified, or deleted
+* Diffs for each resource affected
+
+## View your simulation in the Upbound Console
+You can also view your simulation results in the Upbound Console:
+
+1. Navigate to your base control plane in the Upbound Console
+2. Select the "Simulations" tab in the menu
+3. Select a simulation object to see a change list of all
+   affected resources.
+
+The Console provides visual indications of changes:
+
+- Created Resources: Marked with green
+- Modified Resources: Marked with yellow
+- Deleted Resources: Marked with red
+- Unchanged Resources: Displayed in gray
+
+![Upbound Console Simulation](/img/simulations.png)
+
+## Considerations
+
+Simulations are a **private preview** feature.
+
+Be aware of the following limitations:
+
+- Simulations can't predict the exact behavior of external systems due to the
+  complexity and non-deterministic reconciliation pattern in Crossplane.
+
+- The only completion criterion for a simulation is time. Your simulation may not
+  receive a conclusive result within that interval. Upbound recommends the
+  default `60s` value.
+
+- Providers don't run in simulations. Simulations can't compose resources that
+  rely on the status of Managed Resources.
+
+
+The Upbound team is working to address these limitations. Your feedback is always appreciated.
+
+## Next steps
+
+For more information, follow the [tutorial][tutorial] on Simulations.
+
+
+[tutorial]: /manuals/cli/howtos/simulations
+[reach-out-to-upbound]: https://www.upbound.io/contact-us
+[reach-out-to-upbound-1]: https://www.upbound.io/contact-us
diff --git a/spaces_versioned_docs/version-v1.13/overview/_category_.json b/spaces_versioned_docs/version-v1.13/overview/_category_.json
new file mode 100644
index 000000000..54bb16430
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.13/overview/_category_.json
@@ -0,0 +1,4 @@
+{
+  "label": "Overview",
+  "position": 0
+}
diff --git a/spaces_versioned_docs/version-v1.13/overview/index.md b/spaces_versioned_docs/version-v1.13/overview/index.md
new file mode 100644
index 000000000..7b79f6e44
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.13/overview/index.md
@@ -0,0 +1,14 @@
+---
+title: Spaces Overview
+sidebar_position: 0
+---
+
+# Upbound Spaces
+
+Welcome to the Upbound Spaces documentation. This section contains comprehensive documentation for the Spaces API and Spaces operations across all supported versions (v1.9 through v1.15).
+ +## Get Started + +- **[Concepts](/spaces/concepts/control-planes)** - Core concepts for Spaces +- **[How-To Guides](/spaces/howtos/auto-upgrade)** - Step-by-step guides for operating Spaces +- **[API Reference](/spaces/reference/)** - API specifications and resources diff --git a/spaces_versioned_docs/version-v1.13/reference/_category_.json b/spaces_versioned_docs/version-v1.13/reference/_category_.json new file mode 100644 index 000000000..4a6a139c4 --- /dev/null +++ b/spaces_versioned_docs/version-v1.13/reference/_category_.json @@ -0,0 +1,5 @@ +{ + "label": "Spaces API", + "position": 1, + "collapsed": true +} diff --git a/spaces_versioned_docs/version-v1.13/reference/index.md b/spaces_versioned_docs/version-v1.13/reference/index.md new file mode 100644 index 000000000..5e68b0768 --- /dev/null +++ b/spaces_versioned_docs/version-v1.13/reference/index.md @@ -0,0 +1,72 @@ +--- +title: Spaces API Reference +description: Documentation for the Spaces API resources (v1.15 - Latest) +sidebar_position: 1 +--- +import CrdDocViewer from '@site/src/components/CrdViewer'; + + +This page documents the Custom Resource Definitions (CRDs) for the Spaces API. + + +## Control Planes +### Control Planes + + +## Observability +### Shared Telemetry Configs + + +## `pkg` +### Controller Revisions + + +### Controller Runtime Configs + + +### Controllers + + +### Remote Configuration Revisions + + +### Remote Configurations + + +## Policy +### Shared Upbound Policies + + +## References +### Referenced Objects + + +## Scheduling +### Environments + + +## Secrets +### Shared External Secrets + + +### Shared Secret Stores + + +## Simulations + + +## Spaces Backups +### Backups + + +### Backup Schedules + + +### Shared Backup Configs + + +### Shared Backups + + +### Shared Backup Schedules + diff --git a/spaces_versioned_docs/version-v1.14/concepts/_category_.json b/spaces_versioned_docs/version-v1.14/concepts/_category_.json new file mode 100644 index 000000000..4b8667e29 --- /dev/null +++ b/spaces_versioned_docs/version-v1.14/concepts/_category_.json @@ -0,0 +1,7 @@ +{ + "label": "Concepts", + "position": 2, + "collapsed": true +} + + diff --git a/spaces_versioned_docs/version-v1.14/concepts/control-planes.md b/spaces_versioned_docs/version-v1.14/concepts/control-planes.md new file mode 100644 index 000000000..7066343de --- /dev/null +++ b/spaces_versioned_docs/version-v1.14/concepts/control-planes.md @@ -0,0 +1,227 @@ +--- +title: Control Planes +weight: 1 +description: An overview of control planes in Upbound +--- + + +Control planes in Upbound are fully isolated Crossplane control plane instances that Upbound manages for you. This means: + +- the underlying lifecycle of infrastructure (compute, memory, and storage) required to power your instance. +- scaling of the infrastructure. +- the maintenance of the core Crossplane components that make up a control plane. + +This lets users focus on building their APIs and operating their control planes, while Upbound handles the rest. Each control plane has its own dedicated API server connecting users to their control plane. + +:::info API Version Information +This guide applies to all supported versions (v1.9-v1.14+). + +For detailed information about Control Plane API specifications and CRD fields, see the [Spaces API Reference](../../../reference/apis/spaces-api/). version compatibility details, see the . 
+:::
+
+## Control plane architecture
+
+![Managed Control Plane Architecture](/img/mcp.png)
+
+Along with underlying infrastructure, Upbound manages the Crossplane system components. You don't need to manage the Crossplane API server or core resource controllers because Upbound manages your control plane lifecycle from creation to deletion.
+
+### Crossplane API
+
+Each control plane offers a unified endpoint. You interact with your control plane through Kubernetes and Crossplane API calls. Each control plane runs a Kubernetes API server to handle API requests. You can make API calls in the following ways:
+
+- Direct calls: HTTP/gRPC
+- Indirect calls: the up CLI, Kubernetes clients such as kubectl, or the Upbound Console.
+
+Like in Kubernetes, the API server is the hub for all communication for the control plane. All internal components such as system processes and provider controllers act as clients of the API server.
+
+Your API requests tell Crossplane your desired state for the resources your control plane manages. Crossplane constantly works to maintain that state. Crossplane lets you configure objects in the API either imperatively or declaratively.
+
+### Crossplane versions and features
+
+Upbound automatically upgrades Crossplane system components on control planes to new Crossplane versions for updated features and improvements in the open source project. With [automatic upgrades][automatic-upgrades], you choose the cadence at which Upbound automatically upgrades the system components in your control plane. You can also choose to manually upgrade your control plane to a different Crossplane version.

+For detailed information on versions and upgrades, refer to the [release notes][release-notes] and the automatic upgrade documentation. If you don't enroll a control plane in a release channel, Upbound doesn't apply automatic upgrades.
+
+Features considered "alpha" in Crossplane are by default not supported in a control plane unless otherwise specified.
+
+### Hosting environments
+
+Every control plane in Upbound belongs to a [control plane group][control-plane-group]. Control plane groups are a logical grouping of one or more control planes with shared objects (such as secrets or backup configuration). Every group resides in a [Space][space] in Upbound; Spaces are hosting environments for control planes.
+
+Think of a Space as being conceptually the same as an AWS, Azure, or GCP region. Regardless of the Space type you run a control plane in, the core experience is identical.
+
+## Management
+
+### Create a control plane
+
+You can create a new control plane from the Upbound Console, [up CLI][up-cli], or with Kubernetes clients such as `kubectl`.
+
+
+
+
+
+To use the CLI, run the following:
+
+```shell
+up ctp create
+```
+
+To learn more about control plane-related commands in `up`, go to the [CLI reference][cli-reference] documentation.
+
+
+
+You can create and manage control planes declaratively in Upbound. Before you
+begin, ensure you're logged into Upbound and set the correct context:
+
+```bash
+up login
+# Example: acmeco/upbound-gcp-us-west-1/default
+up ctx ${yourOrganization}/${yourSpace}/${yourGroup}
+```
+
+```yaml
+#controlplane-a.yaml
+apiVersion: spaces.upbound.io/v1beta1
+kind: ControlPlane
+metadata:
+  name: controlplane-a
+spec:
+  crossplane:
+    autoUpgrade:
+      channel: Rapid
+```
+
+```bash
+kubectl apply -f controlplane-a.yaml
+```
+
+
+
+
+
+### Connect directly to your control plane
+
+Each control plane offers a unified endpoint. You interact with your control plane through Kubernetes and Crossplane API calls. Each control plane runs a Kubernetes API server to handle API requests.
+
+You can connect to a control plane's API server directly via the up CLI. Use the [`up ctx`][up-ctx] command to set your kubeconfig's current context to a control plane:
+
+```shell
+# Example: acmeco/upbound-gcp-us-west-1/default/ctp1
+up ctx ${yourOrganization}/${yourSpace}/${yourGroup}/${yourControlPlane}
+```
+
+To disconnect from your control plane and revert your kubeconfig's current context to the previous entry, run the following:
+
+```shell
+up ctx ..
+```
+
+You can also generate a `kubeconfig` file for a control plane with [`up ctx -f`][up-ctx-f].
+
+```shell
+up ctx ${yourOrganization}/${yourSpace}/${yourGroup}/${yourControlPlane} -f - > ctp-kubeconfig.yaml
+```
+
+:::tip
+To learn more about how to use `up ctx` to navigate different contexts in Upbound, read the [CLI documentation][cli-documentation].
+:::
+
+## Configuration
+
+When you create a new control plane, Upbound provides you with a fully isolated instance of Crossplane. Configure your control plane by installing packages that extend its capabilities, such as creating and managing the lifecycle of new types of infrastructure resources.
+
+You're encouraged to install any available Crossplane package type (Providers, Configurations, Functions) from the [Upbound Marketplace][upbound-marketplace] on your control planes.
+
+### Install packages
+
+Below are a couple of ways to install Crossplane packages on your control plane.
+
+
+
+
+
+
+Use the `up` CLI to install Crossplane packages from the [Upbound Marketplace][upbound-marketplace-1] on your control planes. Connect directly to your control plane via `up ctx`. Then, to install a provider:
+
+```shell
+up ctp provider install xpkg.upbound.io/upbound/provider-family-aws
+```
+
+To install a Configuration:
+
+```shell
+up ctp configuration install xpkg.upbound.io/upbound/platform-ref-aws
+```
+
+To install a Function:
+
+```shell
+up ctp function install xpkg.upbound.io/crossplane-contrib/function-kcl
+```
+
+
+You can use kubectl to directly apply any Crossplane manifest. Below is an example for installing a Crossplane provider:
+
+```yaml
+cat <<EOF | kubectl apply -f -
+apiVersion: pkg.crossplane.io/v1
+kind: Provider
+metadata:
+  name: provider-family-aws
+spec:
+  package: xpkg.upbound.io/upbound/provider-family-aws
+EOF
+```
+
+
+
+For production-grade scenarios, it's recommended you configure your control plane declaratively via Git plus a Continuous Delivery (CD) engine such as Argo. For guidance on this topic, read [GitOps with control planes][gitops-with-control-planes].
+
+
+
+
+
+
+### Configure Crossplane ProviderConfigs
+
+#### ProviderConfigs with OpenID Connect
+
+Use OpenID Connect (`OIDC`) to authenticate to Upbound control planes without credentials. OIDC lets your control plane exchange short-lived tokens directly with your cloud provider. Read how to [connect control planes to external services][connect-control-planes-to-external-services] to learn more.
+
+#### Generic ProviderConfigs
+
+The Upbound Console doesn't allow direct editing of ProviderConfigs that don't support `Upbound` authentication. To edit these ProviderConfigs on your control plane, connect to the control plane directly by following the instructions in the previous section and using `kubectl`.
+
+### Configure secrets
+
+Upbound gives users the ability to configure the synchronization of secrets from external stores into control planes. Configure this capability at the group level, explained in the [Spaces documentation][spaces-documentation].
+
+### Configure backups
+
+Upbound gives users the ability to configure backup schedules, take impromptu backups, and conduct self-service restore operations. Configure this capability at the group level, explained in the [Spaces documentation][spaces-documentation-1].
+
+### Configure telemetry
+
+
+Upbound gives users the ability to configure the collection of telemetry (logs, metrics, and traces) in their control planes. Using Upbound's built-in [OTEL][otel] support, you can stream this data out to your preferred observability solution. Configure this capability at the group level, explained in the [Spaces documentation][spaces-documentation-2].
+
+
+
+[automatic-upgrades]: /spaces/howtos/auto-upgrade
+[release-notes]: https://github.com/upbound/universal-crossplane/releases
+[control-plane-group]: /spaces/concepts/groups
+[space]: /spaces/overview
+[up-cli]: /reference/cli-reference
+[cli-reference]: /reference/cli-reference
+[up-ctx]: /reference/cli-reference
+[up-ctx-f]: /reference/cli-reference
+[cli-documentation]: /manuals/cli/concepts/contexts
+[upbound-marketplace]: https://marketplace.upbound.io
+[upbound-marketplace-1]: https://marketplace.upbound.io
+[gitops-with-control-planes]: /spaces/howtos/cloud-spaces/gitops
+[connect-control-planes-to-external-services]: /manuals/platform/howtos/oidc
+[spaces-documentation]: /spaces/howtos/secrets-management
+[spaces-documentation-1]: /spaces/howtos/backup-and-restore
+[otel]: https://opentelemetry.io
+[spaces-documentation-2]: /spaces/howtos/observability
diff --git a/spaces_versioned_docs/version-v1.14/concepts/deployment-modes.md b/spaces_versioned_docs/version-v1.14/concepts/deployment-modes.md
new file mode 100644
index 000000000..f5e718f88
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.14/concepts/deployment-modes.md
@@ -0,0 +1,53 @@
+---
+title: Deployment Modes
+sidebar_position: 10
+description: An overview of deployment modes for Spaces
+---
+
+Upbound Spaces can be deployed and used in a variety of modes:
+
+- **Cloud Spaces:** Multi-tenant Upbound-hosted, Upbound-managed Space environment. Cloud Spaces provide a typical SaaS experience.
+- **[Dedicated Spaces][dedicated-spaces]:** Single-tenant Upbound-hosted, Upbound-managed Space environment. Dedicated Spaces provide a SaaS experience, with additional isolation guarantees that your workloads run in a fully isolated context.
+- **[Managed Spaces][managed-spaces]:** Single-tenant customer-hosted, Upbound-managed Space environment. Managed Spaces provide a SaaS-like experience, with additional guarantees of all hosting infrastructure being served from your own cloud account.
+- **[Self-Hosted Spaces][self-hosted-spaces]:** Single-tenant customer-hosted, customer-managed Space environment. This is a fully self-hosted, self-managed software experience for using Spaces. Upbound delivers the Spaces software and you run it yourself.
+
+The Upbound platform uses a federated model to connect each Space back to a
+central service called the [Upbound Console][console], which is deployed and
+managed by Upbound.
+
+By default, customers have access to a set of Cloud Spaces.
+
+## Supported clouds
+
+You can host Upbound Spaces on Amazon Web Services (AWS), Microsoft Azure,
+and Google Cloud Platform (GCP). Regardless of the hosting platform, you can use
+Spaces to deploy control planes that manage the lifecycle of your resources.
+
+## Supported regions
+
+This table lists the cloud service provider regions supported by Upbound.
+ +### GCP + +| Region | Location | +| --- | --- | +| `us-west-1` | Western US (Oregon) +| `us-central-1` | Central US (Iowa) +| `eu-west-3` | Eastern Europe (Frankfurt) + +### AWS + +| Region | Location | +| --- | --- | +| `us-east-1` | Eastern US (Northern Virginia) + +### Azure + +| Region | Location | +| --- | --- | +| `us-east-1` | Eastern US (Iowa) + +[dedicated-spaces]: /spaces/howtos/cloud-spaces/dedicated-spaces-deployment +[managed-spaces]: /spaces/howtos/self-hosted/managed-spaces-deployment +[self-hosted-spaces]: /spaces/howtos/self-hosted/self-hosted-spaces-deployment +[console]: /manuals/console/upbound-console/ diff --git a/spaces_versioned_docs/version-v1.14/concepts/groups.md b/spaces_versioned_docs/version-v1.14/concepts/groups.md new file mode 100644 index 000000000..d2ccacdb3 --- /dev/null +++ b/spaces_versioned_docs/version-v1.14/concepts/groups.md @@ -0,0 +1,115 @@ +--- +title: Control Plane Groups +sidebar_position: 2 +description: An introduction to the Control Plane Groups in Upbound +plan: "enterprise" +--- + + + +In Upbound, Control Plane Groups (or just, 'groups') are a logical grouping of one or more control planes with shared resources like [secrets][secrets] or [backups][backups]. It's a mechanism for isolating these groups of resources within a single [Space][space]. All role-based access control in Upbound happens at the control plane group-level. + +## When to use multiple groups + +You should use groups in environments where there's a need to have Crossplane manage infrastructure across multiple cloud accounts or projects. For users who only need to deploy and manage resources in a couple cloud accounts, you shouldn't need to think about groups at all. + +Groups are a way to divide access in Upbound between multiple teams. Think of a group as being analogous to a Kubernetes _namespace_. + +## The 'default' group + +Every Cloud Space in Upbound has a group named _default_ available. + +## Working with groups + +### View groups + +You can list groups in a Space using: + +```shell +up group list +``` + +If you're operating in a single-tenant Space and have access to the underlying cluster, you can list namespaces that have the group label: + +```shell +kubectl get namespaces -l spaces.upbound.io/group=true +``` + +### Set the group for a request + +Several commands in _up_ have a group context. To set the group for a request, use the `--group` flag: + +```shell +up ctp list --group=team1 +``` +```shell +up ctp create new-ctp --group=team2 +``` + +### Set the group preference + +The _up_ CLI operates upon a single [Upbound context][upbound-context]. Whatever context gets set is then used as the preference for other commands. An Upbound context is capable of pointing at a variety of altitudes: + +1. A Space in Upbound +2. A group within a Space +3. a control plane within a group + +To set the group preference, use `up ctx` to choose a group as your preferred Upbound context. 
For example: + +```shell +# This sets the context for the up CLI to the default group in an Upbound-managed Cloud Space (gcp-us-west-1) for an organization called 'acmeco' +up ctx acmeco/upbound-gcp-us-west-1/default/ +``` + +### Create a group + +To create a group, login to Upbound and set your context to your desired Space: + +```shell +up login +up ctx '/' +# Example: up ctx acmeco/upbound-gcp-us-west-1 +``` + + +Create a group: + +```shell +up group create my-new-group +``` + +### Delete a group + +To delete a group, login to Upbound and set your context to your desired Space: + +```shell +up login +up ctx '/' +# Example: up ctx acmeco/upbound-gcp-us-west-1 +``` + +Delete a group: + +```shell +up group delete my-new-group +``` + +### Protected groups + +Once a control plane gets created in a group, Upbound enforces a protection policy on the group. Upbound prevents accidental deletion of the group. To delete a group that has control planes in it, you should first delete all control planes in the group. + +## Groups in the context of single-tenant Spaces + +Upbound offers a variety of deployment models to use the product. If you deploy your own single-tenant Upbound Space (whether connected or disconnected), you're self-hosting Upbound software in a Kubernetes cluster. In these environments, a control plane group maps to a corresponding namespace in the cluster which hosts the Space. + +Most Kubernetes clusters come with some set of predefined namespaces. Because a group maps to a corresponding Kubernetes namespace, whenever a group gets created, there too must be a Kubernetes namespace accordingly. When the Spaces software is newly installed, no groups exist. You _can_ elevate a Kubernetes namespace to become a group by doing the following: + +1. Creating a group with the same name as a preexisting Kubernetes namespace +2. Creating a control plane in a preexisting Kubernetes namespace +3. Labeling a Kubernetes namespace with the label `spaces.upbound.io/group=true` + + +[secrets]: /spaces/howtos/secrets-management +[backups]: /spaces/howtos/self-hosted/workload-id/backup-restore-config/ +[space]: /spaces/overview +[upbound-context]: /manuals/cli/concepts/contexts diff --git a/spaces_versioned_docs/version-v1.14/howtos/_category_.json b/spaces_versioned_docs/version-v1.14/howtos/_category_.json new file mode 100644 index 000000000..d3a8547aa --- /dev/null +++ b/spaces_versioned_docs/version-v1.14/howtos/_category_.json @@ -0,0 +1,7 @@ +{ + "label": "How-tos", + "position": 3, + "collapsed": true +} + + diff --git a/spaces_versioned_docs/version-v1.14/howtos/api-connector.md b/spaces_versioned_docs/version-v1.14/howtos/api-connector.md new file mode 100644 index 000000000..a14468f52 --- /dev/null +++ b/spaces_versioned_docs/version-v1.14/howtos/api-connector.md @@ -0,0 +1,413 @@ +--- +title: API Connector +weight: 90 +description: Connect Kubernetes clusters to remote Crossplane control planes for resource synchronization +aliases: + - /api-connector + - /concepts/api-connector +--- +:::info API Version Information +This guide covers API Connector, a preview feature available in all supported versions (v1.9-v1.14+). + +For related API specifications and available resources, see the [Spaces API Reference](../../../reference/apis/spaces-api/). version compatibility details, see the . +::: + +:::warning +API Connector is currently in **Preview**. The feature is under active +development and subject to breaking changes. Use for testing and evaluation +purposes only. 
+:::
+
+API Connector enables seamless integration between Kubernetes application
+clusters consuming APIs and remote Crossplane control planes providing and
+reconciling those APIs.
+
+You can use the API Connector to decouple where Crossplane is running (for
+example, in an Upbound control plane) from where APIs are consumed (for
+example, in an existing Kubernetes cluster). This gives you flexibility and
+consistency in your control plane operations.
+
+
+
+Unlike the [Control Plane Connector](ctp-connector.md), which offers only
+coarse-grained connectivity between app clusters and a control plane, API
+Connector offers fine-grained configuration of which APIs get offered, along
+with multi-cluster connectivity.
+
+## Architecture overview
+
+![API Connector Architecture](/img/api-connector.png)
+
+API Connector uses a **provider-consumer** model:
+
+- **Provider control plane**: The Upbound control plane that provides APIs and manages infrastructure.
+- **Consumer cluster**: Any Kubernetes cluster whose users want to use APIs provided by the provider control plane, without having to run Crossplane. API Connector gets installed in the consumer cluster and bidirectionally syncs API objects to the provider.
+
+### Key components
+
+**Custom Resource Definitions (CRDs)**:
+
+
+- `ClusterConnection`: Establishes a connection from the consumer to the provider cluster. Pulls bindable CRD APIs from the provider into the consumer cluster for use.
+
+- `ClusterAPIBinding`: Instructs API Connector to sync all API objects cluster-wide with a given API group to a given provider cluster.
+- `APIBinding`: Namespaced version of `ClusterAPIBinding`. Instructs API Connector to sync API objects within a given namespace and with a given API group to a given provider cluster.
+
+
+## Prerequisites
+
+Before using API Connector, ensure:
+
+1. The **consumer cluster** has network access to the provider control plane
+1. You have a license to use API Connector. If you are unsure, [contact Upbound][contact] or your sales representative.
+
+This guide walks through how to automate connecting your cluster to an Upbound
+control plane. You can also manually configure the API Connector.
+
+## Publishing APIs in the provider cluster
+
+
+
+
+First, log in to your provider control plane and choose which CRD APIs you want
+to make accessible to the consumer cluster. API Connector only syncs
+these "bindable" CRDs.
+
+
+
+
+
+
+Use the `up` CLI to log in:
+
+```bash
+up login
+```
+
+Connect to your control plane:
+
+```bash
+up ctx <organization>/<space>/<group>/<controlplane>
+```
+
+Check what CRDs are available:
+
+```bash
+kubectl get crds
+```
+
+
+Label all CRDs you want to publish with the bindable label:
+
+
+```bash
+kubectl label crd <crd-name> 'connect.upbound.io/bindable'='true' --overwrite
+```
+
+
+
+
+Change context to the provider cluster:
+```bash
+kubectl config set-context <provider-context>
+```
+
+Check what CRDs are available:
+```bash
+kubectl get crds
+```
+
+
+Label all CRDs you want to publish with the bindable label:
+
+```bash
+kubectl label crd <crd-name> 'connect.upbound.io/bindable'='true' --overwrite
+```
+
+
+
+## Installation
+
+
+
+
+The up CLI provides the simplest installation method with automatic
+configuration.
+
+Make sure the current kubeconfig context is set to the **provider control plane**:
+```bash
+up ctx <organization>/<space>/<group>/<controlplane>
+
+up controlplane api-connector install --consumer-kubeconfig <path-to-kubeconfig> [OPTIONS]
+```
+
+The command:
+1. Creates a Robot account (named `<name>`) in the Upbound Cloud organization `<organization>`,
+1. 
Gives the created robot account `admin` permissions to the provider control plane `` +1. Generates a JWT token for the robot account, and stores it in a Kubernetes Secret in the consumer cluster. +1. Installs the API connector Helm chart in the consumer cluster. +1. Creates a `ClusterConnection` object in the consumer cluster, referring to the newly generated Secret, so that API connector can authenticate successfully to the provider control plane. +1. API connector pulls all published CRDs from the previous step into the consumer cluster. + +**Example**: +```bash +up controlplane api-connector install \ + --consumer-kubeconfig ~/.kube/config \ + --consumer-context my-cluster \ + --upbound-token +``` + +This command uses provided token to authenticate with the **Provider control plane** +and create a `ClusterConnection` resource in the **Consumer cluster** to connect to the +**Provider control plane**. + +**Key Options**: +- `--consumer-kubeconfig`: Path to consumer cluster kubeconfig (required) +- `--consumer-context`: Context name for consumer cluster (required) +- `--name`: Custom name for connection resources (optional) +- `--upbound-token`: API token for authentication (optional) +- `--upgrade`: Upgrade existing installation (optional) +- `--version`: Specific version to install (optional) + + + + +For manual installation or custom configurations: + +```bash +helm upgrade --install api-connector oci://xpkg.upbound.io/spaces-artifacts/api-connector \ + --namespace upbound-system \ + --create-namespace \ + --version \ + --set consumerClusterDisplayName= +``` + +### Authentication methods + +API Connector supports two authentication methods: + + + + +For Upbound Spaces integration: + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: spaces-secret + namespace: upbound-system +type: Opaque +stringData: + token: + organization: + spacesBaseURL: + controlPlaneGroupName: + controlPlaneName: +``` + + + +For direct cluster access: + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: provider-kubeconfig + namespace: upbound-system +type: Opaque +data: + kubeconfig: +``` + + + + +### Connection setup + +Create a `ClusterConnection` to establish connectivity: + + + + +```yaml +apiVersion: connect.upbound.io/v1alpha1 +kind: ClusterConnection +metadata: + name: spaces-connection + namespace: upbound-system +spec: + secretRef: + kind: UpboundRobotToken + name: spaces-secret + namespace: upbound-system + crdManagement: + pullBehavior: Pull +``` + + + + +```yaml +apiVersion: connect.upbound.io/v1alpha1 +kind: ClusterConnection +metadata: + name: provider-connection + namespace: upbound-system +spec: + secretRef: + kind: KubeConfig + name: provider-kubeconfig + namespace: upbound-system + crdManagement: + pullBehavior: Pull +``` + + + + + + + +### Configuration + +Bind APIs to make them available in your consumer cluster: + +```yaml +apiVersion: connect.upbound.io/v1alpha1 +kind: ClusterAPIBinding +metadata: + name: +spec: + connectionRef: + kind: ClusterConnection + name: # Or --name value +``` + + + + +The `ClusterAPIBinding` name must match the **Resource.Group** (name of the CustomResourceDefinition) of the CRD you want to bind. 
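+
+For example — a sketch assuming the provider publishes the hypothetical `nopresources.nop.example.org` CRD used in the usage example below, and that your connection is named `spaces-connection` — the binding looks like:
+
+```yaml
+apiVersion: connect.upbound.io/v1alpha1
+kind: ClusterAPIBinding
+metadata:
+  # Must equal the name of the CRD (Resource.Group) being bound
+  name: nopresources.nop.example.org
+spec:
+  connectionRef:
+    kind: ClusterConnection
+    name: spaces-connection
+```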
+
+
+
+## Usage example
+
+After configuration, you can create API objects (in the consumer cluster) that
+will be synchronized to the provider cluster:
+
+```yaml
+apiVersion: nop.example.org/v1alpha1
+kind: NopResource
+metadata:
+  name: my-resource
+  namespace: default
+spec:
+  coolField: "Synchronized resource"
+  compositeDeletePolicy: Foreground
+```
+
+Verify the resource status:
+
+```bash
+kubectl get nopresource my-resource -o yaml
+```
+
+When the `APIBound=True` condition is present, it means that the API object has
+been synced to the provider cluster, and is being reconciled there. Whenever the
+API object in the provider cluster gets status updates (for example
+`Ready=True`), that status is synced back to the consumer cluster.
+
+Switch contexts to the provider cluster to see the API object being created:
+
+```bash
+up ctx <organization>/<space>/<group>/<controlplane>
+# or kubectl config set-context <provider-context>
+```
+
+```bash
+kubectl get nopresource my-resource -o yaml
+```
+
+Note that in the provider cluster, the API object is labeled with information on
+where the API object originates from, and `connect.upbound.io/managed=true`.
+
+## Monitoring and troubleshooting
+
+### Check connection status
+
+```bash
+kubectl get clusterconnection
+```
+
+Expected output:
+```
+NAME                STATUS   MESSAGE
+spaces-connection   Ready    Provider controlplane is available
+```
+
+### View available APIs
+
+```bash
+kubectl get clusterconnection spaces-connection -o jsonpath='{.status.offeredAPIs[*].name}'
+```
+
+### Check API binding status
+
+```bash
+kubectl get clusterapibinding
+```
+
+### Debug resource synchronization
+
+```bash
+kubectl describe <resource> <name>
+```
+
+## Removal
+
+### Using the up CLI
+
+```bash
+up controlplane api-connector uninstall \
+  --consumer-kubeconfig ~/.kube/config \
+  --all
+```
+
+The `--all` flag removes all resources, including connections and secrets.
+Without the flag, only runtime-related resources are removed.
+
+:::note
+Uninstall doesn't remove any API objects in the provider control plane. If you
+want to clean up all API objects there, delete all API objects from the consumer
+cluster before API Connector uninstallation, and wait for the objects to get
+deleted.
+:::
+
+
+### Using Helm
+
+```bash
+helm uninstall api-connector -n upbound-system
+```
+
+## Limitations
+
+- **Preview feature**: Subject to breaking changes. Not yet production grade.
+- **CRD updates**: CRDs are pulled once but not automatically updated. If multiple Crossplane clusters offer the same CRD API, API changes must be synchronized out of band, for example using a [Crossplane Configuration](https://docs.crossplane.io/latest/packages/).
+- **Network requirements**: The consumer cluster must have direct network access to the provider cluster.
+- **Wide permissions needed in consumer cluster**: Because API Connector doesn't know up front the names of the APIs it needs to reconcile, it currently runs with full "root" privileges in the consumer cluster.
+
+- **Connector polling**: API Connector checks for drift between the consumer and provider cluster
+  periodically through polling. The poll interval can be changed with the `pollInterval` Helm value.
+
+
+## Advanced configuration
+
+### Multiple connections
+
+You can connect to multiple provider clusters simultaneously by creating multiple `ClusterConnection` resources with different names and configurations.
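+
+For illustration, a second connection is just another `ClusterConnection` — a sketch assuming a kubeconfig Secret named `provider-b-kubeconfig` already exists in `upbound-system`:
+
+```yaml
+apiVersion: connect.upbound.io/v1alpha1
+kind: ClusterConnection
+metadata:
+  name: provider-b-connection
+  namespace: upbound-system
+spec:
+  secretRef:
+    kind: KubeConfig
+    name: provider-b-kubeconfig
+    namespace: upbound-system
+  crdManagement:
+    pullBehavior: Pull
+```
+
+Each `ClusterAPIBinding` or `APIBinding` then selects its provider through `connectionRef`, so different API groups can route to different provider clusters.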
+
+[contact]: https://www.upbound.io/contact-us
diff --git a/spaces_versioned_docs/version-v1.14/howtos/auto-upgrade.md b/spaces_versioned_docs/version-v1.14/howtos/auto-upgrade.md
new file mode 100644
index 000000000..249056fb4
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.14/howtos/auto-upgrade.md
@@ -0,0 +1,131 @@
+---
+title: Automatically upgrade control planes
+sidebar_position: 50
+description: How to configure automatic upgrades of Crossplane in a control plane
+plan: "standard"
+---
+
+
+
+Upbound Spaces can automatically upgrade the version of Upbound Crossplane in managed control plane instances. You can edit the `spec.crossplane.autoUpgrade` field in your `ControlPlane` specification with the available release channels below.
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+). The `spec.crossplane.autoUpgrade` field has been available since v1.9.
+
+For ControlPlane API specifications and version compatibility details, see the [Spaces API Reference](../../../reference/apis/spaces-api/).
+:::
+
+| Channel    | Description | Example |
+|------------|-----------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------|
+| **None**   | Disables auto upgrades. | _Uses version specified in `spec.crossplane.version`._ |
+| **Patch**  | Upgrades to the latest supported patch release. | _Control plane version 1.12.2-up.2 auto upgrades to 1.12.3-up.1 upon release._ |
+| **Stable** | Default setting. Upgrades to the latest supported patch release on minor version _N-1_, where _N_ is the latest supported minor version. | _If the latest supported minor version is 1.14, auto upgrades to the latest 1.13 patch, such as 1.13.2-up.3._ |
+| **Rapid**  | Upgrades to the latest supported patch release on the latest supported minor version. | _If the latest supported minor version is 1.14, auto upgrades to the latest 1.14 patch, such as 1.14.5-up.1._ |
+
+
+:::warning
+
+The `Rapid` channel is only recommended for users willing to accept the risk of new features and potentially breaking changes.
+
+:::
+
+## Examples
+
+The specs below are examples of how to edit the `autoUpgrade` channel in your `ControlPlane` specification.
+
+To run a control plane with the `Rapid` auto upgrade channel, your spec should look like this:
+
+```yaml
+apiVersion: spaces.upbound.io/v1beta1
+kind: ControlPlane
+metadata:
+  name: example-ctp
+spec:
+  crossplane:
+    autoUpgrade:
+      channel: Rapid
+  writeConnectionSecretToRef:
+    name: kubeconfig-example-ctp
+```
+
+To run a control plane with a pinned version of Crossplane, specify it in the `version` field:
+
+```yaml
+apiVersion: spaces.upbound.io/v1beta1
+kind: ControlPlane
+metadata:
+  name: example-ctp
+spec:
+  crossplane:
+    version: 1.14.3-up.1
+    autoUpgrade:
+      channel: None
+  writeConnectionSecretToRef:
+    name: kubeconfig-example-ctp
+```
+
+## Supported Crossplane versions
+
+Spaces supports three minor versions of Crossplane: the latest supported minor version and its two [preceding minor versions][preceding-minor-versions]. For example, if the last supported minor version is `1.14`, minor versions `1.13` and `1.12` are also supported. Versions older than the three most recent minor versions aren't supported. Only supported Crossplane versions are valid specifications for new control planes.
+
+Current Crossplane version support by Spaces version:
+
+| Spaces Version | Crossplane Version Min | Crossplane Version Max |
+|:--------------:|:----------------------:|:----------------------:|
+| 1.2 | 1.13 | 1.15 |
+| 1.3 | 1.13 | 1.15 |
+| 1.4 | 1.14 | 1.16 |
+| 1.5 | 1.14 | 1.16 |
+| 1.6 | 1.14 | 1.16 |
+| 1.7 | 1.14 | 1.16 |
+| 1.8 | 1.15 | 1.17 |
+| 1.9 | 1.16 | 1.18 |
+| 1.10 | 1.16 | 1.18 |
+| 1.11 | 1.16 | 1.18 |
+| 1.12 | 1.17 | 1.19 |
+
+
+Upbound offers extended support for all installed Crossplane versions released within a 12-month window since the last Spaces release. Contact your Upbound sales representative for more information on version support.
+
+
+:::warning
+
+If the auto upgrade channel is `Stable` or `Rapid`, the Crossplane version always stays within the support window after auto upgrade. If set to `Patch` or `None`, the minor version may fall outside the support window. You are responsible for upgrading to a supported version.
+
+:::
+
+To view the support status of a control plane instance, use `kubectl get ctp`.
+
+```bash
+kubectl get ctp
+NAME          CROSSPLANE VERSION   SUPPORTED   READY   MESSAGE   AGE
+example-ctp   1.13.2-up.3          True        True              31m
+```
+
+Unsupported versions return `SUPPORTED: False`.

+```bash
+kubectl get ctp
+NAME          CROSSPLANE VERSION   SUPPORTED   READY   MESSAGE   AGE
+example-ctp   1.11.5-up.1          False       True              31m
+```
+
+To see the reason, use the `-o yaml` flag to return the full status.
+
+```bash
+kubectl get controlplanes.spaces.upbound.io example-ctp -o yaml
+status:
+  conditions:
+  ...
+  - lastTransitionTime: "2024-01-23T06:36:10Z"
+    message: Crossplane version 1.11.5-up.1 is outside of the support window.
+      Oldest supported minor version is 1.12.
+    reason: UnsupportedCrossplaneVersion
+    status: "False"
+    type: Supported
+```
+
+
+[preceding-minor-versions]: /reference/usage/lifecycle/#maintenance-and-updates
diff --git a/spaces_versioned_docs/version-v1.14/howtos/automation-and-gitops/_category_.json b/spaces_versioned_docs/version-v1.14/howtos/automation-and-gitops/_category_.json
new file mode 100644
index 000000000..b65481af6
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.14/howtos/automation-and-gitops/_category_.json
@@ -0,0 +1,8 @@
+{
+  "label": "Automation & GitOps",
+  "position": 11,
+  "collapsed": true,
+  "customProps": {
+    "plan": "business"
+  }
+}
diff --git a/spaces_versioned_docs/version-v1.14/howtos/automation-and-gitops/overview.md b/spaces_versioned_docs/version-v1.14/howtos/automation-and-gitops/overview.md
new file mode 100644
index 000000000..57eeb15fc
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.14/howtos/automation-and-gitops/overview.md
@@ -0,0 +1,138 @@
+---
+title: Automation and GitOps Overview
+sidebar_label: Overview
+sidebar_position: 1
+description: Guide to automating control plane deployments with GitOps and Argo CD
+plan: "business"
+---
+
+Automating control plane deployments with GitOps enables declarative, version-controlled infrastructure management. This section covers integrating GitOps workflows with Upbound control planes using Argo CD and related tools.
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+). GitOps patterns are compatible across all versions of Upbound Spaces.
+
+For details on deployment models and their differences, see the [Deployment Modes](../../concepts/deployment-modes.md) concept guide.
+:::
+
+## What is GitOps?
+
+GitOps is an approach for managing infrastructure by:
+- **Declaratively describing** desired system state in Git
+- **Using controllers** to continuously reconcile actual state with desired state
+- **Treating Git as the source of truth** for all configuration and deployments
+
+Upbound control planes are fully compatible with GitOps patterns, and we strongly recommend integrating GitOps in the platforms you build on Upbound.
+
+## Key Concepts
+
+### Argo CD
+[Argo CD](https://argo-cd.readthedocs.io/) is a popular Kubernetes-native GitOps controller. It continuously monitors Git repositories and automatically applies changes to your infrastructure when commits are detected.
+
+### Deployment Models
+
+The way you configure GitOps depends on your deployment model:
+
+| Aspect | Cloud Spaces | Self-Hosted Spaces |
+|--------|--------------|-------------------|
+| **Access Method** | Upbound API with tokens | Kubernetes native (secrets/kubeconfig) |
+| **Configuration** | Kubeconfig via `up` CLI | Control plane connection secrets |
+| **Setup Complexity** | More involved (API integration) | Simpler (native Kubernetes) |
+| **Typical Use Case** | Managing Upbound resources | Managing workloads on control planes |
+
+## Getting Started
+
+**Choose your path based on your deployment model:**
+
+### Cloud Spaces
+If you're using Upbound Cloud Spaces (Dedicated or Managed):
+1. Start with [GitOps with Upbound Control Planes](../cloud-spaces/gitops-on-upbound.md)
+2. Learn how to integrate Argo CD with Cloud Spaces
+3. Manage both control plane infrastructure and Upbound resources declaratively
+
+### Self-Hosted Spaces
+If you're running self-hosted Spaces:
+1. Start with [GitOps with ArgoCD in Self-Hosted Spaces](../self-hosted/gitops-with-argocd.md)
+2. Learn how to configure control plane connection secrets
+3. Manage workloads deployed to your control planes
+
+## Common Workflows
+
+### Workflow 1: Managing Control Planes with GitOps
+Create and manage control planes themselves declaratively using provider-kubernetes:
+
+```yaml
+apiVersion: kubernetes.crossplane.io/v1alpha2
+kind: Object
+metadata:
+  name: my-controlplane
+spec:
+  forProvider:
+    manifest:
+      apiVersion: spaces.upbound.io/v1beta1
+      kind: ControlPlane
+      # ... control plane configuration
+```
+
+### Workflow 2: Managing Workloads on Control Planes
+Deploy applications and resources to control planes using standard Kubernetes GitOps patterns:
+
+```yaml
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: my-app
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: my-app
+  namespace: my-app
+# ... deployment configuration
+```
+
+### Workflow 3: Managing Upbound Resources
+Use provider-upbound to manage Upbound IAM and repository resources:
+
+- Teams
+- Robots and their team memberships
+- Repositories and permissions
+
+## Advanced Topics
+
+### Argo CD Plugin for Upbound
+Learn more in the [ArgoCD Plugin guide](../self-hosted/use-argo.md) for enhanced integration with self-hosted Spaces.
+
+### Declarative Control Plane Creation
+See [Declaratively create control planes](../self-hosted/declarative-ctps.md) for advanced automation patterns.
+
+### Consuming Control Plane APIs
+Understand how to [consume control plane APIs in your app cluster](../mcp-connector-guide.md) with Argo CD.
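+
+To make the workflow examples above concrete, here's an illustrative Argo CD `Application` — the repository URL, path, and names are hypothetical placeholders — that syncs a Git directory to a control plane registered as an Argo CD cluster context, with pruning disabled to protect Crossplane-managed resources:
+
+```yaml
+apiVersion: argoproj.io/v1alpha1
+kind: Application
+metadata:
+  name: dev-controlplane-workloads
+  namespace: argocd
+spec:
+  project: default
+  source:
+    # Hypothetical Git repository holding your control plane manifests
+    repoURL: https://github.com/acmeco/platform-config.git
+    targetRevision: main
+    path: controlplanes/dev
+  destination:
+    # Name of the Argo CD cluster context for the target control plane
+    name: my-control-plane-context
+  syncPolicy:
+    automated:
+      prune: false # Avoid pruning Crossplane-managed resources
+      selfHeal: true
+```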
+
+## Prerequisites
+
+Before implementing GitOps with control planes, ensure you have:
+
+**For Cloud Spaces:**
+- Access to Upbound Cloud Spaces
+- `up` CLI installed and configured
+- API token with appropriate permissions
+- Argo CD or similar GitOps controller running
+- Familiarity with Kubernetes RBAC
+
+**For Self-Hosted Spaces:**
+- Self-hosted Spaces deployed and running
+- Argo CD deployed in your infrastructure
+- Kubectl access to the cluster hosting Spaces
+- Understanding of control plane architecture
+
+## Next Steps
+
+1. **Choose your deployment model** above
+2. **Review the relevant getting started guide**
+3. **Set up your GitOps controller** (Argo CD)
+4. **Deploy your first automated control plane**
+5. **Explore advanced topics** as needed
+
+:::tip
+Start with simple deployments to test your GitOps workflow before moving to production. Use [simulations](../simulations.md) to preview changes before applying them.
+:::
diff --git a/spaces_versioned_docs/version-v1.14/howtos/backup-and-restore.md b/spaces_versioned_docs/version-v1.14/howtos/backup-and-restore.md
new file mode 100644
index 000000000..3b8d026cb
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.14/howtos/backup-and-restore.md
@@ -0,0 +1,530 @@
+---
+title: Backup and restore
+sidebar_position: 13
+description: Configure and manage backups in your Upbound Space.
+plan: "enterprise"
+---
+
+
+
+Upbound's _Shared Backups_ is a built-in backup and restore feature. Shared Backups lets you configure automatic schedules for taking snapshots of your control planes. You can restore data from these backups by making new control planes. This guide explains how to use Shared Backups for disaster recovery or upgrade scenarios.
+
+:::info API Version Information & Available Versions
+This guide applies to **all supported versions** (v1.9-v1.15+).
+
+**Select your API version**:
+- [Spaces API v1.15 (Latest)](../../../next/reference/apis/spaces-api/)
+- [Spaces API v1.14](../../../1.14/reference/apis/spaces-api/)
+- [Spaces API v1.13](../../../1.13/reference/apis/spaces-api/)
+- [Spaces API v1.12](../../../1.12/reference/apis/spaces-api/)
+- [Spaces API v1.11](../../../1.11/reference/apis/spaces-api/)
+- [Spaces API v1.10](../../../1.10/reference/apis/spaces-api/)
+- [Spaces API v1.9](../../../1.9/reference/apis/spaces-api/)
+
+For version support policy and compatibility details, see the API references listed above.
+:::
+
+## Benefits
+
+The Shared Backups feature provides the following benefits:
+
+* Automatic backups for control planes without any operational overhead
+* Backup schedules for multiple control planes in a group
+* Shared Backups are available across all hosting environments of Upbound (Disconnected, Connected, or Cloud Spaces)
+
+
+## Configure a Shared Backup Config
+
+
+[SharedBackupConfig][sharedbackupconfig] is a [group-scoped][group-scoped] resource. Create it in a group containing one or more control planes. This resource configures the storage details and provider. Whenever a backup executes (either on schedule or manually initiated), it references a SharedBackupConfig to tell it where to store the snapshot.
+
+
+### Backup config provider
+
+
+The `spec.objectStorage.provider` and `spec.objectStorage.config` fields configure:
+
+* The object storage provider
+* The path to the provider
+* The credentials needed to communicate with the provider
+
+You can only set one provider. Upbound currently supports AWS, Azure, and GCP as providers.
+ + +`spec.objectStorage.config` is a freeform map of configuration options for the object storage provider. See [Thanos object storage][thanos-object-storage] for more information on the formats for each supported cloud provider. `spec.bucket` and `spec.provider` overrides the required values in the config. + + + +#### AWS as a storage provider + +:::important +For Cloud Spaces, static credentials are currently the only supported auth method. +::: + +This example demonstrates how to use AWS as a storage provider for your backups: + +```yaml +apiVersion: spaces.upbound.io/v1alpha1 +kind: SharedBackupConfig +metadata: + name: default + namespace: default +spec: + objectStorage: + provider: AWS + bucket: spaces-backup-bucket + config: + endpoint: s3.eu-west-2.amazonaws.com + region: eu-west-2 + credentials: + source: Secret + secretRef: + name: bucket-creds + key: creds +``` + + +This example assumes you've already created an S3 bucket called "spaces-backup-bucket" in AWS `eu-west-2` region. The account credentials to access the bucket should exist in a secret of the same namespace as the Shared Backup Config. + +#### Azure as a storage provider + +:::important +For Cloud Spaces, static credentials are currently the only supported auth method. +::: + +This example demonstrates how to use Azure as a storage provider for your backups: + +```yaml +apiVersion: spaces.upbound.io/v1alpha1 +kind: SharedBackupConfig +metadata: + name: default + namespace: default +spec: + objectStorage: + provider: Azure + bucket: upbound-backups + config: + storage_account: upbackupstore + container: upbound-backups + endpoint: blob.core.windows.net + credentials: + source: Secret + secretRef: + name: bucket-creds + key: creds +``` + + +This example assumes you've already created an Azure storage account called `upbackupstore` and blob `upbound-backups`. The storage account key to access the blob should exist in a secret of the same namespace as the Shared Backup Config. + + +#### GCP as a storage provider + +:::important +For Cloud Spaces, static credentials are currently the only supported auth method. +::: + +This example demonstrates how to use Google Cloud Storage as a storage provider for your backups: + +```yaml +apiVersion: spaces.upbound.io/v1alpha1 +kind: SharedBackupConfig +metadata: + name: default + namespace: default +spec: + objectStorage: + provider: GCP + bucket: spaces-backup-bucket + credentials: + source: Secret + secretRef: + name: bucket-creds + key: creds +``` + + +This example assumes you've already created a Cloud bucket called "spaces-backup-bucket" and a service account with access to this bucket. The key file should exist in a secret of the same namespace as the Shared Backup Config. + + +## Configure a Shared Backup Schedule + + +[SharedBackupSchedule][sharedbackupschedule] is a [group-scoped][group-scoped-1] resource. You should create them in a group containing one or more control planes. This resource defines a backup schedule for control planes within its corresponding group. 
+ +Below is an example of a Shared Backup Schedule that takes backups every day of all control planes having `environment: production` labels: + +```yaml +apiVersion: spaces.upbound.io/v1alpha1 +kind: SharedBackupSchedule +metadata: + name: daily-schedule + namespace: default +spec: + schedule: "@daily" + configRef: + kind: SharedBackupConfig + name: default + controlPlaneSelector: + labelSelectors: + - matchLabels: + environment: production +``` + +### Define a schedule + +The `spec.schedule` field is a [Cron-formatted][cron-formatted] string. Some common examples are below: + + +| Entry | Description | +| ----------------- | ------------------------------------------------------------------------------------------------- | +| `@hourly` | Run once an hour. | +| `@daily` | Run once a day. | +| `@weekly` | Run once a week. | +| `0 0/4 * * *` | Run every 4 hours. | +| `0/15 * * * 1-5` | Run every fifteenth minute on Monday through Friday. | +| `@every 1h30m10s` | Run every 1 hour, 30 minutes, and 10 seconds. Hour is the largest measurement of time for @every. | + + +### Exclude resources from the backup + +The `spec.excludedResources` field is an array of resource names to exclude from each backup. + +```yaml +apiVersion: spaces.upbound.io/v1alpha1 +kind: SharedBackupSchedule +metadata: + name: daily-schedule +spec: + excludedResources: + - "xclusters.aws.platformref.upbound.io" + - "xdatabase.aws.platformref.upbound.io" + - "xrolepolicyattachment.iam.aws.crossplane.io" +``` + +:::warning +You must specify resource names in lowercase "resource.group" format (for example, `xclusters.aws.platformref.upbound.io`). Using only the resource kind (for example, `XCluster`) isn't supported. +::: + +### Suspend a schedule + +Use `spec.suspend` field to suspend the schedule. It creates no new backups, but allows running backups to complete. + +```yaml +apiVersion: spaces.upbound.io/v1alpha1 +kind: SharedBackupSchedule +metadata: + name: daily-schedule +spec: + suspend: true +``` + +### Set the time to live + +Set the `spec.ttl` field to define the time to live for the backup. After this time, the backup is eligible for garbage collection. If this field isn't set, the backup isn't garbage collected. The time to live is a duration, for example, `168h` for 7 days. + +```yaml +apiVersion: spaces.upbound.io/v1alpha1 +kind: SharedBackupSchedule +metadata: + name: daily-schedule +spec: + ttl: 168h # Backup is garbage collected after 7 days +``` +:::tip +By default, this setting doesn't delete uploaded files. Review the next section to define +the deletion policy. +::: + +### Define the deletion policy + +Set the `spec.deletionPolicy` to define backup deletion actions, including the +deletion of the backup file from the bucket. The Deletion Policy value defaults +to `Orphan`. Set it to `Delete` to remove uploaded files in the bucket. more +information on the backup and restore process, review the [Spaces API +documentation][spaces-api-documentation]. + +```yaml +apiVersion: spaces.upbound.io/v1alpha1 +kind: SharedBackupSchedule +metadata: + name: daily-schedule +spec: + ttl: 168h # Backup is garbage collected after 7 days + deletionPolicy: Delete # Defaults to Orphan +``` + +### Garbage collect backups when the schedule gets deleted + +Set the `spec.useOwnerReferencesInBackup` to garbage collect associated backups when a shared schedule gets deleted. If set to true, backups are garbage collected when the schedule gets deleted. 
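+
+As a minimal sketch (the schedule fields repeat the earlier example; `useOwnerReferencesInBackup` is the new setting, assumed here to sit at the top level of `spec` as the field path above suggests):
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackupSchedule
+metadata:
+  name: daily-schedule
+spec:
+  schedule: "@daily"
+  configRef:
+    kind: SharedBackupConfig
+    name: default
+  # Garbage collect this schedule's backups when the schedule is deleted
+  useOwnerReferencesInBackup: true
+```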
+ +### Control plane selection + +To configure which control planes in a group you want to create a backup schedule for, use the `spec.controlPlaneSelector` field. You can either use `labelSelectors` or the `names` of a control plane directly. A control plane matches if any of the label selectors match. + +This example matches all control planes in the group that have `environment: production` as a label: + +```yaml +apiVersion: spaces.upbound.io/v1alpha1 +kind: SharedBackupSchedule +metadata: + name: my-backup-schedule +spec: + controlPlaneSelector: + labelSelectors: + - matchLabels: + environment: production +``` + +You can use the more complex `matchExpressions` to match labels based on an expression. This example matches control planes that have label `environment: production` or `environment: staging`: + +```yaml +apiVersion: spaces.upbound.io/v1alpha1 +kind: SharedBackupSchedule +metadata: + name: my-backup-schedule +spec: + controlPlaneSelector: + labelSelectors: + - matchExpressions: + - { key: environment, operator: In, values: [production,staging] } +``` + +You can also specify the names of control planes directly: + +```yaml +apiVersion: spaces.upbound.io/v1alpha1 +kind: SharedBackupSchedule +metadata: + name: my-backup-schedule +spec: + controlPlaneSelector: + names: + - controlplane-dev + - controlplane-staging + - controlplane-prod +``` + + +## Configure a Shared Backup + + + +[SharedBackup][sharedbackup] is a [group-scoped][group-scoped-2] resource. You should create them in a group containing one or more control planes. This resource causes a backups to occur for control planes within its corresponding group. + +Below is an example of a Shared Backup that takes a backup of all control planes having `environment: production` labels: + +```yaml +apiVersion: spaces.upbound.io/v1alpha1 +kind: SharedBackup +metadata: + name: my-backup + namespace: default +spec: + configRef: + kind: SharedBackupConfig + name: default + controlPlaneSelector: + labelSelectors: + - matchLabels: + environment: production +``` + +### Exclude resources from the backup + +The `spec.excludedResources` field is an array of resource names to exclude from each backup. + +```yaml +apiVersion: spaces.upbound.io/v1alpha1 +kind: SharedBackup +metadata: + name: my-backup +spec: + excludedResources: + - "xclusters.aws.platformref.upbound.io" + - "xdatabase.aws.platformref.upbound.io" + - "xrolepolicyattachment.iam.aws.crossplane.io" +``` + +:::warning +You must specify resource names in lowercase "resource.group" format (for example, `xclusters.aws.platformref.upbound.io`). Using only the resource kind (for example, `XCluster`) isn't supported. +::: + +### Set the time to live + +Set the `spec.ttl` field to define the time to live for the backup. After this time, the backup is eligible for garbage collection. If this field isn't set, the backup isn't garbage collected. The time to live is a duration, for example, `168h` for 7 days. + +```yaml +apiVersion: spaces.upbound.io/v1alpha1 +kind: SharedBackup +metadata: + name: my-backup +spec: + ttl: 168h # Backup is garbage collected after 7 days +``` + + +### Garbage collect backups on Shared Backup deletion + + + +Set the `spec.useOwnerReferencesInBackup` to define whether to garbage collect associated backups when a shared backup gets deleted. If set to true, backups are garbage collected when the shared backup gets deleted. 
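+
+A matching sketch for a shared backup (same assumption: `useOwnerReferencesInBackup` at the top level of `spec`, as described above):
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackup
+metadata:
+  name: my-backup
+spec:
+  configRef:
+    kind: SharedBackupConfig
+    name: default
+  # Garbage collect associated backups when this shared backup is deleted
+  useOwnerReferencesInBackup: true
+```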
+ +### Control plane selection + +To configure which control planes in a group you want to create a backup for, use the `spec.controlPlaneSelector` field. You can either use `labelSelectors` or the `names` of a control plane directly. A control plane matches if any of the label selectors match. + +This example matches all control planes in the group that have `environment: production` as a label: + +```yaml +apiVersion: spaces.upbound.io/v1alpha1 +kind: SharedBackup +metadata: + name: my-backup +spec: + controlPlaneSelector: + labelSelectors: + - matchLabels: + environment: production +``` + +You can use the more complex `matchExpressions` to match labels based on an expression. This example matches control planes that have label `environment: production` or `environment: staging`: + +```yaml +apiVersion: spaces.upbound.io/v1alpha1 +kind: SharedBackup +metadata: + name: my-backup +spec: + controlPlaneSelector: + labelSelectors: + - matchExpressions: + - { key: environment, operator: In, values: [production,staging] } +``` + +You can also specify the names of control planes directly: + +```yaml +apiVersion: spaces.upbound.io/v1alpha1 +kind: SharedBackup +metadata: + name: my-backup +spec: + controlPlaneSelector: + names: + - controlplane-dev + - controlplane-staging + - controlplane-prod +``` + +## Create a manual backup + +[Backup][backup] is a [group-scoped][group-scoped-3] resource that causes a single backup to occur for a control planes in its corresponding group. + +Below is an example of a manual Backup of a control plane: + +```yaml +apiVersion: spaces.upbound.io/v1alpha1 +kind: Backup +metadata: + name: my-backup + namespace: default +spec: + configRef: + kind: SharedBackupConfig + name: default + controlPlane: my-awesome-ctp + deletionPolicy: Delete +``` + +The backup specification `DeletionPolicy` defines backup deletion actions, +including the deletion of the backup file from the bucket. The `Deletion Policy` +value defaults to `Orphan`. Set it to `Delete` to remove uploaded files +in the bucket. +For more information on the backup and restore process, review the [Spaces API documentation][spaces-api-documentation-1]. + + +### Choose a control plane to backup + +The `spec.controlPlane` field defines which control plane to execute a backup against. + +```yaml +apiVersion: spaces.upbound.io/v1alpha1 +kind: Backup +metadata: + name: my-backup + namespace: default +spec: + controlPlane: my-awesome-ctp +``` + +If the control plane doesn't exist, the backup fails after multiple failed retry attempts. + +### Exclude resources from the backup + +The `spec.excludedResources` field is an array of resource names to exclude from the manual backup. + +```yaml +apiVersion: spaces.upbound.io/v1alpha1 +kind: Backup +metadata: + name: my-backup +spec: + excludedResources: + - "xclusters.aws.platformref.upbound.io" + - "xdatabase.aws.platformref.upbound.io" + - "xrolepolicyattachment.iam.aws.crossplane.io" +``` + +:::warning +You must specify resource names in lowercase "resource.group" format (for example, `xclusters.aws.platformref.upbound.io`). Using only the resource kind (for example, `XCluster`) isn't supported. +::: + +### Set the time to live + +Set the `spec.ttl` field to define the time to live for the backup. After this time, the backup is eligible for garbage collection. If this field isn't set, the backup isn't garbage collected. The time to live is a duration, for example, `168h` for 7 days. 
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: Backup
+metadata:
+  name: my-backup
+spec:
+  ttl: 168h # Backup is garbage collected after 7 days
+```
+
+## Restore a control plane from a backup
+
+You can restore a control plane's state from a backup. Below is an example of creating a new control plane from a previous backup called `restore-me`:
+
+
+```yaml
+apiVersion: spaces.upbound.io/v1beta1
+kind: ControlPlane
+metadata:
+  name: my-awesome-restored-ctp
+  namespace: default
+spec:
+  restore:
+    source:
+      kind: Backup
+      name: restore-me
+```
+
+
+[group-scoped]: /spaces/concepts/groups
+[group-scoped-1]: /spaces/concepts/groups
+[group-scoped-2]: /spaces/concepts/groups
+[group-scoped-3]: /spaces/concepts/groups
+[sharedbackupconfig]: /reference/apis/spaces-api/latest
+[thanos-object-storage]: https://thanos.io/tip/thanos/storage.md/
+[sharedbackupschedule]: /reference/apis/spaces-api/latest
+[cron-formatted]: https://en.wikipedia.org/wiki/Cron
+[spaces-api-documentation]: /reference/apis/spaces-api/v1_9
+[sharedbackup]: /reference/apis/spaces-api/latest
+[backup]: /reference/apis/spaces-api/latest
+[spaces-api-documentation-1]: /reference/apis/spaces-api/v1_9
+
+
+
diff --git a/spaces_versioned_docs/version-v1.14/howtos/cloud-spaces/_category_.json b/spaces_versioned_docs/version-v1.14/howtos/cloud-spaces/_category_.json
new file mode 100644
index 000000000..1e1869a38
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.14/howtos/cloud-spaces/_category_.json
@@ -0,0 +1,10 @@
+{
+  "label": "Cloud Spaces",
+  "position": 1,
+  "collapsed": true,
+  "customProps": {
+    "plan": "standard"
+  }
+}
+
+
diff --git a/spaces_versioned_docs/version-v1.14/howtos/cloud-spaces/dedicated-spaces-deployment.md b/spaces_versioned_docs/version-v1.14/howtos/cloud-spaces/dedicated-spaces-deployment.md
new file mode 100644
index 000000000..ebad9493e
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.14/howtos/cloud-spaces/dedicated-spaces-deployment.md
@@ -0,0 +1,33 @@
+---
+title: Dedicated Spaces
+sidebar_position: 4
+description: A guide to Upbound Dedicated Spaces
+plan: business
+---
+
+
+## Benefits
+
+Dedicated Spaces offer the following benefits:
+
+- **Single-tenancy.** A control plane space where Upbound guarantees you're the only tenant operating in the environment.
+- **Connectivity to your private network.** Establish secure network connections between your Dedicated Cloud Space running in Upbound and your own resources behind your private network.
+- **Reduced overhead.** Offload day-to-day operational burdens to Upbound while focusing on your job of building your platform.
+
+## Architecture
+
+A Dedicated Space is a deployment of the Upbound Spaces software inside an
+Upbound-controlled cloud account and network. The control planes you run in a
+Dedicated Space are dedicated to your organization alone.
+
+The diagram below illustrates the high-level architecture of Upbound Dedicated Spaces:
+
+![Upbound Dedicated Spaces arch](/img/managed-arch-gcp.png)
+
+## How to get access to Dedicated Spaces
+
+If you're interested in Upbound Dedicated Spaces, contact
+[Upbound][contact-us]. We can chat more about your
+requirements and see if Dedicated Spaces are a good fit for you.
+ +[contact-us]: https://www.upbound.io/contact-us +[managed-space]: /spaces/howtos/self-hosted/managed-spaces-deployment diff --git a/spaces_versioned_docs/version-v1.14/howtos/cloud-spaces/gitops-on-upbound.md b/spaces_versioned_docs/version-v1.14/howtos/cloud-spaces/gitops-on-upbound.md new file mode 100644 index 000000000..fa59a8dce --- /dev/null +++ b/spaces_versioned_docs/version-v1.14/howtos/cloud-spaces/gitops-on-upbound.md @@ -0,0 +1,318 @@ +--- +title: GitOps with Upbound Control Planes +sidebar_position: 80 +description: An introduction to doing GitOps with control planes on Upbound Cloud Spaces +tier: "business" +--- + +:::info Deployment Model +This guide applies to **Upbound Cloud Spaces** (Dedicated and Managed Spaces). For self-hosted Spaces deployments, see [GitOps with ArgoCD in Self-Hosted Spaces](/spaces/howtos/self-hosted/gitops-with-argocd/). +::: + +GitOps is an approach for managing a system by declaratively describing desired resources' configurations in Git and using controllers to realize the desired state. Upbound's control planes are compatible with this pattern and it's strongly recommended you integrate GitOps in the platforms you build on Upbound. + + +## Integrate with Argo CD + + +[Argo CD][argo-cd] is a project in the Kubernetes ecosystem commonly used for GitOps. You can use it in tandem with Upbound control planes to achieve GitOps flows. The sections below explain how to integrate these tools with Upbound. + +### Generate a kubeconfig for your control plane + +Use the up CLI to [generate a kubeconfig][generate-a-kubeconfig] for your control plane. + +```bash +up ctx /// -f - > context.yaml +``` + +### Create an API token + + +You need a personal access token (PAT). You create PATs on a per-user basis in the Upbound Console. Go to [My Account - API tokens][my-account-api-tokens] and select Create New Token. Give the token a name and save the secret value to somewhere safe. + + +### Add the up CLI init container to Argo + +Create a new file called `up-plugin-values.yaml` and paste the following YAML: + +```yaml +controller: + volumes: + - name: up-plugin + emptyDir: {} + - name: up-home + emptyDir: {} + + volumeMounts: + - name: up-plugin + mountPath: /usr/local/bin/up + subPath: up + - name: up-home + mountPath: /home/argocd/.up + + initContainers: + - name: up-plugin + image: xpkg.upbound.io/upbound/up-cli:v0.39.0 + command: ["cp"] + args: + - /usr/local/bin/up + - /plugin/up + volumeMounts: + - name: up-plugin + mountPath: /plugin + +server: + volumes: + - name: up-plugin + emptyDir: {} + - name: up-home + emptyDir: {} + + volumeMounts: + - name: up-plugin + mountPath: /usr/local/bin/up + subPath: up + - name: up-home + mountPath: /home/argocd/.up + + initContainers: + - name: up-plugin + image: xpkg.upbound.io/upbound/up-cli:v0.39.0 + command: ["cp"] + args: + - /usr/local/bin/up + - /plugin/up + volumeMounts: + - name: up-plugin + mountPath: /plugin +``` + +### Install or upgrade Argo using the values file + +Install or upgrade Argo via Helm, including the values from the `up-plugin-values.yaml` file: + +```bash +helm upgrade --install -n argocd -f up-plugin-values.yaml --reuse-values argocd argo/argo-cd +``` + + +### Configure Argo CD + + +To configure Argo CD for Annotation resource tracking, edit the Argo CD ConfigMap in the Argo CD namespace. +Add `application.resourceTrackingMethod: annotation` to the data section as below. +This configuration turns off Argo CD auto pruning, preventing the deletion of Crossplane resources. 
+
+Next, configure the [auto respect RBAC for the Argo CD controller][auto-respect-rbac-for-the-argo-cd-controller].
+By default, Argo CD attempts to discover some Kubernetes resource types that don't exist in a control plane.
+You must configure Argo CD to respect the cluster's RBAC rules so that Argo CD can sync.
+Add `resource.respectRBAC: normal` to the data section as below.
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: argocd-cm
+data:
+  ...
+  application.resourceTrackingMethod: annotation
+  resource.respectRBAC: normal
+```
+
+:::tip
+The `resource.respectRBAC` configuration above tells Argo to respect RBAC for _all_ cluster contexts. If you're using an Argo CD instance to manage more than only control planes, you should consider changing the `clusters` string match for the configuration to apply only to control planes. For example, if every control plane context name followed the convention of being named `controlplane-<name>`, you could set the string match to be `controlplane-*`.
+:::
+
+
+### Create a cluster context definition
+
+
+Replace the variables and run the following script to configure a new Argo cluster context definition.
+
+To configure Argo for a control plane in a Connected Space, replace `stringData.server` with the ingress URL of the control plane. This URL is what `up ctx` outputs.
+
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  name: my-control-plane
+  namespace: argocd
+  labels:
+    argocd.argoproj.io/secret-type: cluster
+type: Opaque
+stringData:
+  name: my-control-plane-context
+  server: https://<space-hostname>.spaces.upbound.io/apis/spaces.upbound.io/v1beta1/namespaces/<group>/controlplanes/<controlplane>/k8s
+  config: |
+    {
+      "execProviderConfig": {
+        "apiVersion": "client.authentication.k8s.io/v1",
+        "command": "up",
+        "args": [ "org", "token" ],
+        "env": {
+          "ORGANIZATION": "<organization>",
+          "UP_TOKEN": "<token>"
+        }
+      },
+      "tlsClientConfig": {
+        "insecure": false,
+        "caData": "<base64-encoded-ca-data>"
+      }
+    }
+```
+
+
+## GitOps for Upbound resources
+
+
+Like any other cloud service, you can drive the lifecycle of Upbound Cloud resources with Crossplane. This lets you establish GitOps flows to declaratively create and manage:
+
+- [control plane groups][control-plane-groups]
+- [control planes][control-planes]
+- [Upbound IAM resources][upbound-iam-resources]
+
+Use a control plane installed with [provider-upbound][provider-upbound] and [provider-kubernetes][provider-kubernetes] to achieve this.
+
+### Provider-upbound
+
+[Provider-upbound][provider-upbound-2] is a Crossplane provider built by Upbound to interact with Upbound resources. Use _provider-upbound_ to declaratively create and manage the lifecycle of IAM resources and repositories:
+
+- [Robots][robots] and their membership in teams
+- [Teams][teams]
+- [Repositories][repositories] and [permissions][permissions] on those repositories
+
+:::tip
+This provider defines managed resources for control planes, their auth, and permissions. These resources are only applicable for customers who run in Upbound's **Legacy Spaces** control plane hosting environments. Other customers should use provider-kubernetes, explained below, to manage the lifecycle of control planes with Crossplane.
+:::
+
+### Provider-kubernetes
+
+[Provider-kubernetes][provider-kubernetes-3] is a Crossplane provider that defines an [Object][object] resource. Use _Objects_ as general-purpose resources to wrap _any_ Kubernetes resource for Crossplane to manage.
+
+Upbound [Space APIs][space-apis] are Kube-like APIs and have implemented support for most Kubernetes-style API concepts.
You can use kubectl or any other Kubernetes-compatible tooling to interact with the API. This means you can use _provider-kubernetes_ to drive interactions with Space APIs. + +:::warning +When interacting with a Cloud Space's API, the Kubernetes [watch][watch] feature **isn't implemented.** Argo CD requires _watch_ support to function as expected, meaning you can't point Argo directly at a Cloud Space until it's implemented. +::: + +Use _provider-kubernetes_ to declaratively drive interactions with all [Space APIs][space-apis-1]. Wrap the desired API resource in an _Object_. See the example below for a control plane: + +```yaml +apiVersion: kubernetes.crossplane.io/v1alpha2 +kind: Object +metadata: + name: my-controlplane +spec: + forProvider: + manifest: + apiVersion: spaces.upbound.io/v1beta1 + kind: ControlPlane + metadata: + name: my-controlplane + namespace: default + spec: + crossplane: + autoUpgrade: + channel: Rapid +``` + +[Control plane groups][control-plane-groups-2] are a special case because they technically map to an underlying Kubernetes namespace. You should create a `kind: namespace` with the `spaces.upbound.io/group` label to create a control plane group in a Space. See the example below: + +```yaml +apiVersion: kubernetes.crossplane.io/v1alpha2 +kind: Object +metadata: + name: group1 +spec: + forProvider: + manifest: + apiVersion: v1 + kind: Namespace + metadata: + name: group1 + labels: + spaces.upbound.io/group: "true" + spec: {} +``` + +### Configure auth for provider-kubernetes + +Like any other Crossplane provider, _provider-kubernetes_ requires a valid [ProviderConfig][providerconfig] to authenticate with Upbound before interacting with its APIs. Follow the steps below to configure auth for a ProviderConfig on a control plane that you want to use to interact with Upbound resources. + +1. Define an environment variable for the name of your Upbound org account. Use `up org list` to retrieve this value. +```ini +export UPBOUND_ACCOUNT="" +``` + +2. Create a [personal access token][personal-access-token] and store it as an environment variable. +```shell +export UPBOUND_TOKEN="" +``` + +3. Log on to Upbound. +```shell +up login +``` + +4. Create a kubeconfig for the desired Cloud Space instance you want to interact with. +```shell +export CONTROLPLANE_CONFIG=/tmp/controlplane-kubeconfig +KUBECONFIG=$CONTROLPLANE_CONFIG up ctx $UPBOUND_ACCOUNT/upbound-gcp-us-west-1 # Replace this path with whichever Cloud Space you want to communicate with. +``` + +5. On the control plane you want to use to interact with Upbound resources, create a secret containing the credentials: +```shell +kubectl -n crossplane-system create secret generic cluster-config --from-file=kubeconfig=$CONTROLPLANE_CONFIG +kubectl -n crossplane-system create secret generic upbound-credentials --from-literal=token=$UPBOUND_TOKEN +``` + +6. Create a ProviderConfig that references the credentials created in the prior step. Create this resource in your control plane: +```yaml +apiVersion: kubernetes.crossplane.io/v1alpha1 +kind: ProviderConfig +metadata: + name: default +spec: + credentials: + source: Secret + secretRef: + namespace: crossplane-system + name: cluster-config + key: kubeconfig + identity: + type: UpboundTokens + source: Secret + secretRef: + name: upbound-credentials + namespace: crossplane-system + key: token +``` + +You can now create _Objects_ in the control plane which wrap Space APIs. 
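+
+As one more sketch — assuming the `default` ProviderConfig above is in place and your Space offers the Shared Backups APIs — any other Space API can be wrapped the same way, for example a `SharedBackupSchedule` (the names here are illustrative):
+
+```yaml
+apiVersion: kubernetes.crossplane.io/v1alpha2
+kind: Object
+metadata:
+  name: daily-backup-schedule
+spec:
+  providerConfigRef:
+    name: default
+  forProvider:
+    manifest:
+      apiVersion: spaces.upbound.io/v1alpha1
+      kind: SharedBackupSchedule
+      metadata:
+        name: daily-schedule
+        namespace: default
+      spec:
+        schedule: "@daily"
+        configRef:
+          kind: SharedBackupConfig
+          name: default
+        controlPlaneSelector:
+          names:
+            - my-controlplane
+```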
+
+[generate-a-kubeconfig]: /manuals/cli/concepts/contexts
+[control-plane-groups]: /spaces/concepts/groups
+[control-planes]: /spaces/concepts/control-planes
+[upbound-iam-resources]: /manuals/platform/concepts/identity-management
+[space-apis]: /reference/apis/spaces-api/v1_9
+[space-apis-1]: /reference/apis/spaces-api/v1_9
+[control-plane-groups-2]: /spaces/concepts/groups
+
+
+[argo-cd]: https://argo-cd.readthedocs.io/en/stable/
+[my-account-api-tokens]: https://accounts.upbound.io/settings/tokens
+[auto-respect-rbac-for-the-argo-cd-controller]: https://argo-cd.readthedocs.io/en/stable/operator-manual/declarative-setup/#auto-respect-rbac-for-controller
+[spec-writeconnectionsecrettoref]: /reference/apis/spaces-api/latest
+[auto-respect-rbac-for-the-argo-cd-controller-1]: https://argo-cd.readthedocs.io/en/stable/operator-manual/declarative-setup/#auto-respect-rbac-for-controller
+[provider-upbound]: https://marketplace.upbound.io/providers/upbound/provider-upbound
+[provider-kubernetes]: https://marketplace.upbound.io/providers/upbound/provider-kubernetes
+[provider-upbound-2]: https://marketplace.upbound.io/providers/upbound/provider-upbound
+[robots]: https://marketplace.upbound.io/providers/upbound/provider-upbound/v0.8.0/resources/iam.upbound.io/Robot/v1alpha1
+[teams]: https://marketplace.upbound.io/providers/upbound/provider-upbound/v0.8.0/resources/iam.upbound.io/Team/v1alpha1
+[repositories]: https://marketplace.upbound.io/providers/upbound/provider-upbound/v0.8.0/resources/repository.upbound.io/Repository/v1alpha1
+[permissions]: https://marketplace.upbound.io/providers/upbound/provider-upbound/v0.8.0/resources/repository.upbound.io/Permission/v1alpha1
+[provider-kubernetes-3]: https://marketplace.upbound.io/providers/upbound/provider-kubernetes
+[object]: https://marketplace.upbound.io/providers/upbound/provider-kubernetes/v0.17.0/resources/kubernetes.crossplane.io/Object/v1alpha2
+[watch]: https://kubernetes.io/docs/reference/using-api/api-concepts/#watch-bookmarks
+[providerconfig]: https://marketplace.upbound.io/providers/upbound/provider-kubernetes/v0.17.0/resources/kubernetes.crossplane.io/ProviderConfig/v1alpha1
+[personal-access-token]: https://accounts.upbound.io/settings/tokens
diff --git a/spaces_versioned_docs/version-v1.14/howtos/control-plane-topologies.md b/spaces_versioned_docs/version-v1.14/howtos/control-plane-topologies.md
new file mode 100644
index 000000000..9020e5a41
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.14/howtos/control-plane-topologies.md
@@ -0,0 +1,566 @@
+---
+title: Control Plane Topologies
+sidebar_position: 15
+description: Configure scheduling of composites to remote control planes
+---
+
+:::info API Version Information
+This guide is for the Control Plane Topology feature, which is in **private preview**. For interested customers with access to this feature, it applies to v1.12+.
+
+For related API specifications and details on feature availability, see the [Spaces API Reference](../../../reference/apis/spaces-api/).
+:::
+
+:::important
+This feature is in private preview for select customers in Upbound Spaces. If you're interested in this deployment mode, please [contact us](https://www.upbound.io/support/contact).
+:::
+
+Upbound's _Control Plane Topology_ feature lets you build and deploy a platform
+of multiple control planes. These control planes work together for a unified platform
+experience.
+ + +With the _Topology_ feature, you can install resource APIs that are +reconciled by other control planes and configure the routing that occurs between +control planes. You can also build compositions that reference other resources +running on your control plane or elsewhere in Upbound. + +This guide explains how to use Control Plane Topology APIs to install, configure +remote APIs, and build powerful compositions that reference other resources. + +## Benefits + +The Control Plane Topology feature provides the following benefits: + +* Decouple your platform architecture into independent offerings to improve your platform's software development lifecycle. +* Install composite APIs from Configurations as CRDs which are fulfilled and reconciled by other control planes. +* Route APIs to other control planes by configuring an _Environment_ resource, which define a set of routable dimensions. + +## How it works + + +Imagine the scenario where you want to let a user reference a subnet when creating a database instance. To your control plane, the `kind: database` and `kind: subnet` are independent resources. To you as the composition author, these resources have an important relationship. It may be that: + +- you don't want your user to ever be able to create a database without specifying a subnet. +- you want to let them create a subnet when they create the database, if it doesn't exist. +- you want to allow them to reuse a subnet that got created elsewhere or gets shared by another user. + +In each of these scenarios, you must resort to writing complex composition logic +to handle each case. The problem is compounded when the resource exists in a +context separate from the current control plane's context. Imagine a scenario +where one control plane manages Database resources and a second control plane +manages networking resources. With the _Topology_ feature, you can offload these +concerns to Upbound machinery. + + +![Control Plane Topology feature arch](/img/topology-arch.png) + +## Prerequisites + +Enable the Control Plane Topology feature in the Space you plan to run your control plane in: + +- Cloud Spaces: Not available yet +- Connected Spaces: Space administrator must enable this feature +- Disconnected Spaces: Space administrator must enable this feature + + + +## Compose resources with _ReferencedObjects_ + + + +_ReferencedObject_ is a resource type available in an Upbound control plane that lets you reference other Kubernetes resources in Upbound. + +:::tip +This feature is useful for composing resources that exist in a +remote context, like another control plane. You can also use +_ReferencedObjects_ to resolve references to any other Kubernetes object +in the current control plane context. This could be a secret, another Crossplane +resource, or more. +::: + +### Declare the resource reference in your XRD + +To compose a _ReferencedObject_, you should start by adding a resource reference +in your Composite Resource Definition (XRD). The convention for the resource +reference follows the shape shown below: + +```yaml +Ref: + type: object + properties: + apiVersion: + type: string + default: "" + enum: [ "" ] + kind: + type: string + default: "" + enum: [ "" ] + grants: + type: array + default: [ "Observe" ] + items: + type: string + enum: [ "Observe", "Create", "Update", "Delete", "*" ] + name: + type: string + namespace: + type: string + required: + - name +``` + +The `Ref` should be the kind of resource you want to reference. 
The `apiVersion` and `kind` should be the associated API version and kind of the resource you want to reference.
+
+The `name` and `namespace` strings are inputs that let your users specify the resource instance.
+
+#### Grants
+
+The `grants` field is a special array that lets you give users the power to influence the behavior of the referenced resource. You can configure which of the available grants you let your user select and which it defaults to. Similar in behavior to [Crossplane management policies][crossplane-management-policies], each grant value does the following:
+
+- **Observe:** The composite may observe the state of the referenced resource.
+- **Create:** The composite may create the referenced resource if it doesn't exist.
+- **Update:** The composite may update the referenced resource.
+- **Delete:** The composite may delete the referenced resource.
+- **\*:** The composite has full control over the referenced resource.
+
+Here are some examples that show how it looks in practice:
+
+<details>
+<summary>Show example for defining the reference to another composite resource</summary>
+
+```yaml
+apiVersion: apiextensions.crossplane.io/v1
+kind: CompositeResourceDefinition
+metadata:
+  name: xsqlinstances.database.platform.upbound.io
+spec:
+  type: object
+  properties:
+    parameters:
+      type: object
+      properties:
+        networkRef:
+          type: object
+          properties:
+            apiVersion:
+              type: string
+              default: "networking.platform.upbound.io"
+              enum: [ "networking.platform.upbound.io" ]
+            grants:
+              type: array
+              default: [ "Observe" ]
+              items:
+                type: string
+                enum: [ "Observe" ]
+            kind:
+              type: string
+              default: "Network"
+              enum: [ "Network" ]
+            name:
+              type: string
+            namespace:
+              type: string
+          required:
+            - name
+```
+
+</details>
+
+
+<details>
+<summary>Show example for defining the reference to a secret</summary>
+
+```yaml
+apiVersion: apiextensions.crossplane.io/v1
+kind: CompositeResourceDefinition
+metadata:
+  name: xsqlinstances.database.platform.upbound.io
+spec:
+  type: object
+  properties:
+    parameters:
+      type: object
+      properties:
+        secretRef:
+          type: object
+          properties:
+            apiVersion:
+              type: string
+              default: "v1"
+              enum: [ "v1" ]
+            grants:
+              type: array
+              default: [ "Observe" ]
+              items:
+                type: string
+                enum: [ "Observe", "Create", "Update", "Delete", "*" ]
+            kind:
+              type: string
+              default: "Secret"
+              enum: [ "Secret" ]
+            name:
+              type: string
+            namespace:
+              type: string
+          required:
+            - name
+```
+
+</details>
+ +### Manually add the jsonPath + +:::important +This step is a known limitation of the preview. We're working on tooling that +removes the need for authors to do this step. +::: + +During the preview timeframe of this feature, you must add an annotation by hand +to the XRD. In your XRD's `metadata.annotations`, set the +`references.upbound.io/schema` annotation. It should be a JSON string in the +following format: + +```json +{ + "apiVersion": "references.upbound.io/v1alpha1", + "kind": "ReferenceSchema", + "references": [ + { + "jsonPath": ".spec.parameters.secretRef", + "kinds": [ + { + "apiVersion": "v1", + "kind": "Secret" + } + ] + } + ] +} +``` + +Flatten this JSON into a string and set the annotation on your XRD. View the +example below for an illustration: + +
+<details>
+<summary>Show example setting the references.upbound.io/schema annotation</summary>
+
+```yaml
+apiVersion: apiextensions.crossplane.io/v1
+kind: CompositeResourceDefinition
+metadata:
+  name: xthings.networking.acme.com
+  annotations:
+    references.upbound.io/schema: '{"apiVersion":"references.upbound.io/v1alpha1","kind":"ReferenceSchema","references":[{"jsonPath":".spec.secretRef","kinds":[{"apiVersion":"v1","kind":"Secret"}]},{"jsonPath":".spec.configMapRef","kinds":[{"apiVersion":"v1","kind":"ConfigMap"}]}]}'
+```
+
+</details>
+
+<details>
+<summary>Show example for setting multiple references in the references.upbound.io/schema annotation</summary>
+
+```yaml
+apiVersion: apiextensions.crossplane.io/v1
+kind: CompositeResourceDefinition
+metadata:
+  name: xthings.networking.acme.com
+  annotations:
+    references.upbound.io/schema: '{"apiVersion":"references.upbound.io/v1alpha1","kind":"ReferenceSchema","references":[{"jsonPath":".spec.parameters.secretRef","kinds":[{"apiVersion":"v1","kind":"Secret"}]},{"jsonPath":".spec.parameters.configMapRef","kinds":[{"apiVersion":"v1","kind":"ConfigMap"}]}]}'
+```
+
+</details>
+
+
+You can use a VSCode extension like [vscode-pretty-json][vscode-pretty-json] to make this task easier.
+
+
+### Compose a _ReferencedObject_
+
+To pair with the resource reference declared in your XRD, you must compose the referenced resource. Use the _ReferencedObject_ resource type to bring the resource into your composition. _ReferencedObject_ has the following schema:
+
+```yaml
+apiVersion: references.upbound.io/v1alpha1
+kind: ReferencedObject
+spec:
+  managementPolicies:
+  - Observe
+  deletionPolicy: Orphan
+  composite:
+    apiVersion: <apiVersion>
+    kind: <kind>
+    name: <name>
+    jsonPath: .spec.parameters.secretRef
+```
+
+The `spec.composite.apiVersion` and `spec.composite.kind` should match the API version and kind of the `compositeTypeRef` declared in your composition. The `spec.composite.name` should be the name of the composite resource instance.
+
+The `spec.composite.jsonPath` should be the path to the root of the resource ref you declared in your XRD.
+<details>
+<summary>Show example for composing a resource reference to a secret</summary>
+
+```yaml
+apiVersion: apiextensions.crossplane.io/v1
+kind: Composition
+metadata:
+  name: demo-composition
+spec:
+  compositeTypeRef:
+    apiVersion: networking.acme.com/v1alpha1
+    kind: XThing
+  mode: Pipeline
+  pipeline:
+  - step: patch-and-transform
+    functionRef:
+      name: crossplane-contrib-function-patch-and-transform
+    input:
+      apiVersion: pt.fn.crossplane.io/v1beta1
+      kind: Resources
+      resources:
+      - name: secret-ref-object
+        base:
+          apiVersion: references.upbound.io/v1alpha1
+          kind: ReferencedObject
+          spec:
+            managementPolicies:
+            - Observe
+            deletionPolicy: Orphan
+            composite:
+              apiVersion: networking.acme.com/v1alpha1
+              kind: XThing
+              name: TO_BE_PATCHED
+              jsonPath: .spec.parameters.secretRef
+        patches:
+        - type: FromCompositeFieldPath
+          fromFieldPath: metadata.name
+          toFieldPath: spec.composite.name
+```
+
+</details>
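+
+With the XRD and composition in place, a user can set the reference on the resource they create. As a rough sketch, a hypothetical claim against the `XThing` API above could look like the following; the `Thing` claim kind, names, and secret are illustrative assumptions, not taken from the examples above:
+
+```yaml
+# Hypothetical claim; assumes the XRD above offers a claim kind "Thing".
+apiVersion: networking.acme.com/v1alpha1
+kind: Thing
+metadata:
+  name: my-thing
+  namespace: default
+spec:
+  parameters:
+    secretRef:
+      # Upbound resolves this reference through the composed ReferencedObject.
+      name: my-credentials
+      namespace: default
+      grants: [ "Observe" ]
+```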
+
+By declaring a resource reference in your XRD, Upbound handles resolution of the desired resource.
+
+## Deploy APIs
+
+To configure routing resource requests between control planes, you need to deploy APIs in at least two control planes.
+
+### Deploy into a service-level control plane
+
+Package the APIs you build into a Configuration package and deploy it on a
+control plane in an Upbound Space. In Upbound, it's common to refer to the
+control plane where the Configuration package is deployed as a **service-level
+control plane**. This control plane runs the controllers that process the API
+requests and provision underlying resources. In a later section, you learn how
+you can use _Topology_ features to [configure routing][configure-routing].
+
+### Deploy as Remote APIs on a platform control plane
+
+You should use the same package source as deployed in the **service-level
+control planes**, but this time deploy the Configuration in a separate control
+plane as a _RemoteConfiguration_. The _RemoteConfiguration_ installs Kubernetes
+CustomResourceDefinitions for the APIs defined in the Configuration package, but
+no controllers get deployed.
+
+### Install a _RemoteConfiguration_
+
+_RemoteConfiguration_ is a resource type available in Upbound managed control
+planes that acts like a Crossplane [Configuration][configuration]
+package. Unlike standard Crossplane Configurations, which install XRDs,
+compositions, and functions into a desired control plane, _RemoteConfigurations_
+install only the CRDs for claimable composite resource types.
+
+#### Install directly
+
+Install a _RemoteConfiguration_ by defining the following and applying it to
+your control plane:
+
+```yaml
+apiVersion: pkg.upbound.io/v1alpha1
+kind: RemoteConfiguration
+metadata:
+  name: <name>
+spec:
+  package: <package-source>
+```
+
+#### Declare as a project dependency
+
+You can declare _RemoteConfigurations_ as dependencies in your control plane's
+[project file][project-file]. Use the up CLI to add the dependency, providing
+the `--remote` flag:
+
+```bash
+up dep add <package> --remote
+```
+
+This command adds a declaration in the `spec.apiDependencies` stanza of your
+project's `upbound.yaml` as demonstrated below:
+
+```yaml
+apiVersion: meta.dev.upbound.io/v1alpha1
+kind: Project
+metadata:
+  name: service-controlplane
+spec:
+  apiDependencies:
+    - configuration: xpkg.upbound.io/upbound/remote-configuration
+      version: '>=v0.0.0'
+  dependsOn:
+    - provider: xpkg.upbound.io/upbound/provider-kubernetes
+      version: '>=v0.0.0'
+```
+
+Like a Configuration, a _RemoteConfigurationRevision_ gets created when the
+package gets installed on a control plane. Unlike Configurations, XRDs and
+compositions **don't** get installed by a _RemoteConfiguration_. Only the CRDs
+for claimable composite types get installed and Crossplane thereafter manages
+their lifecycle. You can tell when a CRD got installed by a
+_RemoteConfiguration_ because it has the `internal.scheduling.upbound.io/remote:
+true` label:
+
+```yaml
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  name: things.networking.acme.com
+  labels:
+    internal.scheduling.upbound.io/remote: "true"
+```
+
+## Use an _Environment_ to route resources
+
+_Environment_ is a resource type available in Upbound control planes that works
+in tandem with resources installed by _RemoteConfigurations_. _Environment_ is a
+namespace-scoped resource that lets you configure how to route remote resources
+to other control planes by a set of user-defined dimensions.
+
+### Define a routing dimension
+
+To establish a routing dimension between two control planes, you must do two
+things:
+
+1. Label the service control plane with the name and value of a dimension.
+2. Configure an environment on another control plane with a dimension matching the field and value of the service control plane.
+
+The example below demonstrates the creation of a service control plane with a
+`region` dimension:
+
+```yaml
+apiVersion: spaces.upbound.io/v1beta1
+kind: ControlPlane
+metadata:
+  labels:
+    dimension.scheduling.upbound.io/region: "us-east-1"
+  name: prod-1
+  namespace: default
+spec:
+```
+
+Upbound's Spaces controller keeps an inventory of all declared dimensions and
+listens for control planes to route to them.
+
+### Create an _Environment_
+
+Next, create an _Environment_ on a separate control plane, referencing the
+dimension from before. The example below demonstrates routing all remote
+resource requests in the `default` namespace of the control plane based on a
+single `region` dimension:
+
+```yaml
+apiVersion: scheduling.upbound.io/v1alpha1
+kind: Environment
+metadata:
+  name: default
+  namespace: default
+spec:
+  dimensions:
+    region: us-east-1
+```
+
+You can specify as many dimensions as you want. The example below demonstrates
+multiple dimensions:
+
+```yaml
+apiVersion: scheduling.upbound.io/v1alpha1
+kind: Environment
+metadata:
+  name: default
+  namespace: default
+spec:
+  dimensions:
+    region: us-east-1
+    env: prod
+    offering: databases
+```
+
+In order for the routing controller to match, _all_ dimensions must match for a
+given service control plane.
+
+You can specify dimension overrides on a per-resource group basis. This lets you
+configure default routing rules for a given _Environment_ and override routing
+on a per-offering basis.
+
+```yaml
+apiVersion: scheduling.upbound.io/v1alpha1
+kind: Environment
+metadata:
+  name: default
+  namespace: default
+spec:
+  dimensions:
+    region: us-east-1
+  resourceGroups:
+    - name: database.platform.upbound.io # database
+      dimensions:
+        region: "us-east-1"
+        env: "prod"
+        offering: "databases"
+    - name: networking.platform.upbound.io # networks
+      dimensions:
+        region: "us-east-1"
+        env: "prod"
+        offering: "networks"
+```
+
+### Confirm the configured route
+
+After you create an _Environment_ on a control plane, the routes selected get
+reported in the _Environment's_ `.status.resourceGroups`. This is illustrated
+below:
+
+```yaml
+apiVersion: scheduling.upbound.io/v1alpha1
+kind: Environment
+metadata:
+  name: default
+...
+status:
+  resourceGroups:
+  - name: database.platform.upbound.io # database
+    proposed:
+      controlPlane: ctp-1
+      group: default
+      space: upbound-gcp-us-central1
+      dimensions:
+        region: "us-east-1"
+        env: "prod"
+        offering: "databases"
+```
+
+If you don't see a response in the `.status.resourceGroups`, this indicates a
+match wasn't found or an error occurred establishing routing.
+
+:::tip
+There's no limit to the number of control planes you can route to. You can also
+stack routing and form your own topology of control planes, with multiple layers
+of routing.
+:::
+
+### Limitations
+
+
+Routing from one control plane to another is currently scoped to control planes
+that exist in a single Space. You can't route resource requests to control
+planes across a Space boundary.
+
+
+[project-file]: /manuals/cli/howtos/project
+[contact-us]: https://www.upbound.io/usage/support/contact
+[crossplane-management-policies]: https://docs.crossplane.io/latest/managed-resources/managed-resources/#managementpolicies
+[vscode-pretty-json]: https://marketplace.visualstudio.com/items?itemName=chrismeyers.vscode-pretty-json
+[configure-routing]: #use-an-environment-to-route-resources
+[configuration]: https://docs.crossplane.io/latest/packages/providers
diff --git a/spaces_versioned_docs/version-v1.14/howtos/ctp-connector.md b/spaces_versioned_docs/version-v1.14/howtos/ctp-connector.md
new file mode 100644
index 000000000..b2cc48c49
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.14/howtos/ctp-connector.md
@@ -0,0 +1,508 @@
+---
+title: Control Plane Connector
+weight: 80
+description: A guide for how to connect a Kubernetes app cluster to a control plane in Upbound using the Control Plane Connector feature
+plan: "standard"
+---
+
+
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+). Control Plane Connector functionality is consistent across versions.
+
+For related API specifications and resources, see the [Spaces API Reference](../../../reference/apis/spaces-api/).
+:::
+
+Control Plane Connector connects arbitrary Kubernetes application clusters outside of
+Upbound Spaces to your control planes running in Upbound Spaces.
+This lets you interact with your control plane's API from the app cluster. The claim APIs and the namespaced XR APIs
+you define via CompositeResourceDefinitions (XRDs) in the control plane are available in
+your app cluster alongside Kubernetes workload APIs like Pod. Control Plane Connector
+enables the same experience as a locally installed Crossplane.
+
+![control plane connector operations flow](/img/ConnectorFlow.png)
+
+### Control Plane Connector operations
+
+Control Plane Connector leverages the [Kubernetes API AggregationLayer][kubernetes-api-aggregationlayer]
+to create an extension API server and serve the claim APIs and the namespaced XR APIs in the control plane. It
+discovers the claim APIs and the namespaced XR APIs available in the control plane and registers corresponding
+APIService resources on the app cluster. Those APIService resources refer to the
+extension API server of Control Plane Connector.
+
+The claim APIs and the namespaced XR APIs are available in your Kubernetes cluster, just like all native
+Kubernetes APIs.
+
+The Control Plane Connector processes every request targeting the claim APIs and the namespaced XR APIs and makes the
+relevant requests to the connected control plane.
+
+Only the connected control plane stores and processes all claims and namespaced XRs created in the app
+cluster, eliminating any storage use at the application cluster. The Control Plane
+Connector provisions a target namespace at the control plane for the app cluster and stores
+all claims and namespaced XRs in this target namespace.
+
+For managing the claims and namespaced XRs, the Control Plane Connector creates a unique identifier for a
+resource by combining input parameters from claims, including:
+- `metadata.name`
+- `metadata.namespace`
+- your cluster name
+
+
+It employs SHA-256 hashing to generate a hash value and then extracts the first
+16 characters of that hash. This ensures the resulting identifier remains within
+the 64-character limit in Kubernetes.
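+
+As a rough sketch, you can reproduce this derivation with standard shell tooling. The separator format and the placeholder cluster ID are taken from the example that follows; they're shown here for illustration only:
+
+```bash
+# Derive the connector-side name for claim "my-bucket" in namespace "test"
+# on a cluster with ID 00000000-0000-0000-0000-000000000000.
+id=$(printf '%s' 'my-bucket-x-test-x-00000000-0000-0000-0000-000000000000' \
+  | sha256sum | cut -c1-16)
+echo "claim-${id}"  # claims get the "claim-" prefix; namespaced XRs use "nxr-"
+```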
+
+
+
+For instance, if a claim named `my-bucket` exists in the `test` namespace in
+`cluster-dev`, the system calculates the SHA-256 hash from
+`my-bucket-x-test-x-00000000-0000-0000-0000-000000000000` and takes the first 16
+characters. The control plane side then names the claim `claim-c603e518969b413e`.
+
+For namespaced XRs, the process is similar, only the prefix is different.
+The name becomes `nxr-c603e518969b413e`.
+
+
+### Installation
+
+
+
+
+
+Log in with the up CLI:
+
+```bash
+up login
+```
+
+Connect your app cluster to a namespace in an Upbound control plane with `up controlplane connector install <control-plane-name> <namespace>`. This command creates a user token and installs the Control Plane Connector to your cluster. It's recommended you create a values file called `connector-values.yaml` and provide the following below. Select the tab according to which environment your control plane is running in.
+
+
+
+
+
+
+```yaml
+upbound:
+  # This is your org account in Upbound e.g. the name displayed after executing `up org list`
+  account: <account>
+  # This is a personal access token generated in the Upbound Console
+  token: <token>
+
+spaces:
+  # The Spaces host your control plane runs in; for example, Upbound's GCP Cloud
+  # Space below, or upbound-aws-us-east-1.spaces.upbound.io for the AWS Cloud Space
+  host: "upbound-gcp-us-west-1.spaces.upbound.io"
+  insecureSkipTLSVerify: true
+  controlPlane:
+    # The name of the control plane you want the Connector to attach to
+    name: <control-plane-name>
+    # The control plane group the control plane resides in
+    group: <group>
+    # The namespace within the control plane to sync claims from the app cluster to. NOTE: This must be created before you install the connector.
+    claimNamespace: <namespace>
+```
+
+
+
+
+
+1. Create a [kubeconfig][kubeconfig] for the control plane. Update your Upbound context to the path for your desired control plane.
+```bash
+up login
+up ctx <org>/upbound-gcp-us-central-1/default/your-control-plane
+up ctx . -f - > context.yaml
+```
+
+2. Write it to a secret in the cluster where you plan to
+install the Control Plane Connector in.
+```bash
+kubectl create secret generic my-controlplane-kubeconfig --from-file=context.yaml
+```
+
+3. Reference this secret in the
+`spaces.controlPlane.kubeconfigSecret` field below.
+
+```yaml
+spaces:
+  controlPlane:
+    # The namespace within the control plane to sync claims from the app cluster to. NOTE: This must be created before you install the connector.
+    claimNamespace: <namespace>
+    kubeconfigSecret:
+      name: my-controlplane-kubeconfig
+      key: kubeconfig
+```
+
+
+
+
+
+
+Provide the values file above when you run the CLI command:
+
+
+```bash {copy-lines="3"}
+up controlplane connector install my-control-plane my-app-ns-1 --file=connector-values.yaml
+```
+
+The claim APIs and the namespaced XR APIs from your control plane are now visible in the cluster.
+You can verify this with `kubectl api-resources`.
+
+```bash
+kubectl api-resources
+```
+
+### Uninstall
+
+Disconnect an app cluster that you previously installed the Control Plane Connector on by
+running the following:
+
+```bash
+up ctp connector uninstall
+```
+
+This command uninstalls the helm chart for the Control Plane Connector from an app
+cluster. It moves any claims in the app cluster into the control plane
+at the specified namespace.
+
+:::tip
+Make sure your kubeconfig's current context is pointed at the app cluster where
+you want to uninstall Control Plane Connector from.
+:::
+
+
+
+
+It's recommended you create a values file called `connector-values.yaml` and
+provide the following below.
Select the tab according to which environment your
+control plane is running in.
+
+
+
+
+
+
+```yaml
+upbound:
+  # This is your org account in Upbound e.g. the name displayed after executing `up org list`
+  account: <account>
+  # This is a personal access token generated in the Upbound Console
+  token: <token>
+
+spaces:
+  # Upbound GCP US-West-1:    upbound-gcp-us-west-1.spaces.upbound.io
+  # Upbound AWS US-East-1:    upbound-aws-us-east-1.spaces.upbound.io
+  # Upbound GCP US-Central-1: upbound-gcp-us-central-1.spaces.upbound.io
+  host: ""
+  insecureSkipTLSVerify: true
+  controlPlane:
+    # The name of the control plane you want the Connector to attach to
+    name: <control-plane-name>
+    # The control plane group the control plane resides in
+    group: <group>
+    # The namespace within the control plane to sync claims from the app cluster to.
+    # NOTE: This must be created before you install the connector.
+    claimNamespace: <namespace>
+```
+
+
+
+
+Create a [kubeconfig][kubeconfig-1] for the
+control plane. Write it to a secret in the cluster where you plan to
+install the Control Plane Connector in. Reference this secret in the
+`spaces.controlPlane.kubeconfigSecret` field below.
+
+```yaml
+spaces:
+  controlPlane:
+    # The namespace within the control plane to sync claims from the app cluster to. NOTE: This must be created before you install the connector.
+    claimNamespace: <namespace>
+    kubeconfigSecret:
+      name: my-controlplane-kubeconfig
+      key: kubeconfig
+```
+
+
+
+
+
+
+Provide the values file above when you `helm install` the Control Plane Connector:
+
+
+```bash
+helm install --wait mcp-connector oci://xpkg.upbound.io/spaces-artifacts/mcp-connector -n kube-system -f connector-values.yaml
+```
+:::tip
+Create an API token from the Upbound user account settings page in the console by following [these instructions][these-instructions].
+:::
+
+### Uninstall
+
+You can uninstall Control Plane Connector with Helm by running the following:
+
+```bash
+helm uninstall mcp-connector
+```
+
+
+
+
+
+### Example usage
+
+This example creates a control plane using [Configuration
+EKS][configuration-eks]. `KubernetesCluster` is
+available as a claim API in your control plane. The following is [an
+example][an-example]
+object you can create in your control plane.
+
+```yaml
+apiVersion: k8s.starter.org/v1alpha1
+kind: KubernetesCluster
+metadata:
+  name: my-cluster
+  namespace: default
+spec:
+  id: my-cluster
+  parameters:
+    nodes:
+      count: 3
+      size: small
+    services:
+      operators:
+        prometheus:
+          version: "34.5.1"
+  writeConnectionSecretToRef:
+    name: my-cluster-kubeconfig
+```
+
+After connecting your Kubernetes app cluster to the control plane, you
+can create the `KubernetesCluster` object in your app cluster. Although your
+local cluster has an Object, the actual resource is in your managed control
+plane inside Upbound.
+
+```bash {copy-lines="3"}
+# Applying the claim YAML above.
+# kubectl is set up to talk with your Kubernetes cluster.
+kubectl apply -f claim.yaml
+
+
+kubectl get claim -A
+NAME         SYNCED   READY   CONNECTION-SECRET       AGE
+my-cluster   True     True    my-cluster-kubeconfig   2m
+```
+
+Once Kubernetes creates the object, view the console to see your object.
+
+![Claim by connector in console](/img/ClaimInConsole.png)
+
+You can interact with the object through your cluster just as if it
+lived in your cluster.
+
+### Migration to control planes
+
+This guide details the migration of a Crossplane installation to Upbound-managed
+control planes using the Control Plane Connector to manage claims on an application
+cluster.
+
+![migration flow application cluster to control plane](/img/ConnectorMigration.png)
+
+#### Export all resources
+
+Before proceeding, ensure that you have set the correct kubecontext for your application
+cluster.
+
+```bash
+up controlplane migration export --pause-before-export --output=my-export.tar.gz --yes
+```
+
+This command performs the following:
+- Pauses all claim, composite, and managed resources before export.
+- Scans the control plane for resource types.
+- Exports Crossplane and native resources.
+- Archives the exported state into `my-export.tar.gz`.
+
+Example output:
+```bash
+Exporting control plane state...
+  ✓ Pausing all claim resources before export... 1 resources paused! ⏸️
+  ✓ Pausing all composite resources before export... 7 resources paused! ⏸️
+  ✓ Pausing all managed resources before export... 34 resources paused! ⏸️
+  ✓ Scanning control plane for types to export... 231 types found! 👀
+  ✓ Exporting 231 Crossplane resources...125 resources exported! 📤
+  ✓ Exporting 3 native resources...19 resources exported! 📤
+  ✓ Archiving exported state... archived to "my-export.tar.gz"! 📦
+
+Successfully exported control plane state!
+```
+
+#### Import all resources
+
+Next, restore the exported resources into a target control plane, which serves
+as the destination for the Control Plane Connector.
+
+
+Log into Upbound and select the correct context:
+
+```bash
+up login
+up ctx
+up ctp create ctp-a
+```
+
+Output:
+```bash
+ctp-a created
+```
+
+Verify that the Crossplane version on both the application cluster and the new managed
+control plane matches the core Crossplane version.
+
+Use the following command to import the resources:
+```bash
+up controlplane migration import -i my-export.tar.gz \
+  --unpause-after-import \
+  --mcp-connector-cluster-id=my-appcluster \
+  --mcp-connector-claim-namespace=my-appcluster
+```
+
+This command:
+- Restores base resources
+- Waits for XRDs and packages to establish
+- Imports claim and XR resources
+- Finalizes the import and resumes managed resources
+
+Note that `--mcp-connector-cluster-id` needs to be unique per application cluster,
+and `--mcp-connector-claim-namespace` is the namespace the system creates
+during the import.
+
+Example output:
+```bash
+Importing control plane state...
+  ✓ Reading state from the archive... Done! 👀
+  ✓ Importing base resources... 56 resources imported! 📥
+  ✓ Waiting for XRDs... Established! ⏳
+  ✓ Waiting for Packages... Installed and Healthy! ⏳
+  ✓ Importing remaining resources... 88 resources imported! 📥
+  ✓ Finalizing import... Done! 🎉
+  ✓ Unpausing managed resources ... Done! ▶️
+
+Successfully imported control plane state!
+```
+
+#### Verify imported claims
+
+
+The Control Plane Connector renames all claims and adds additional labels to them.
+
+```bash
+kubectl get claim -A
+```
+
+Example output:
+```bash
+NAMESPACE       NAME                                                         SYNCED   READY   CONNECTION-SECRET             AGE
+my-appcluster   cluster.aws.platformref.upbound.io/claim-e708ff592b974f51   True     True    platform-ref-aws-kubeconfig   3m17s
+```
+
+Inspect the labels:
+```bash
+kubectl get -n my-appcluster cluster.aws.platformref.upbound.io/claim-e708ff592b974f51 -o yaml | yq .metadata.labels
+```
+
+Example output:
+```bash
+mcp-connector.upbound.io/app-cluster: my-appcluster
+mcp-connector.upbound.io/app-namespace: default
+mcp-connector.upbound.io/app-resource-name: example
+```
+
+#### Cleanup the app cluster
+
+Remove all Crossplane-related resources from the application cluster, including:
+
+- Managed Resources
+- Claims
+- Compositions
+- XRDs
+- Packages (Functions, Configurations, Providers)
+- Crossplane and all associated CRDs
+
+
+#### Install Control Plane Connector
+
+
+Follow the preceding installation guide and configure the `connector-values.yaml`:
+
+```yaml
+# NOTE: clusterID needs to match --mcp-connector-cluster-id used in the import on the managed control plane
+clusterID: my-appcluster
+upbound:
+  account: <account>
+  token: <token>
+
+spaces:
+  host: ""
+  insecureSkipTLSVerify: true
+  controlPlane:
+    name: <control-plane-name>
+    group: <group>
+    # NOTE: This is the --mcp-connector-claim-namespace used during the import to the control plane
+    claimNamespace: my-appcluster
+```
+Once the Control Plane Connector installs, verify that resources exist in the application
+cluster:
+
+```bash
+kubectl api-resources | grep platform
+```
+
+Example output:
+```bash
+awslbcontrollers   aws.platform.upbound.io/v1alpha1       true   AWSLBController
+podidentities      aws.platform.upbound.io/v1alpha1       true   PodIdentity
+sqlinstances       aws.platform.upbound.io/v1alpha1       true   SQLInstance
+clusters           aws.platformref.upbound.io/v1alpha1    true   Cluster
+osss               observe.platform.upbound.io/v1alpha1   true   Oss
+apps               platform.upbound.io/v1alpha1           true   App
+```
+
+Verify that the claims in the control plane now appear in the application cluster:
+
+```bash
+kubectl get claim -A
+```
+
+Example output:
+```bash
+NAMESPACE   NAME                                         SYNCED   READY   CONNECTION-SECRET             AGE
+default     cluster.aws.platformref.upbound.io/example   True     True    platform-ref-aws-kubeconfig   127m
+```
+
+With this guide, you migrated your Crossplane installation to
+Upbound managed control planes. This ensures seamless integration with your
+application cluster using the Control Plane Connector.
+
+### Connect multiple app clusters to a control plane
+
+Claims are stored in a unique namespace in the Upbound control plane.
+Each connected cluster gets its own namespace in the control plane.
+
+![Multi-cluster architecture with control plane connector](/img/ConnectorMulticlusterArch.png)
+
+There's no limit on the number of clusters connected to a single control plane.
+Control plane operators can see all their infrastructure in a central control
+plane.
+
+Without using control planes and Control Plane Connector, users have to install
+Crossplane and providers on each cluster. Each cluster requires configuration for
+providers with necessary credentials. With a single control plane where multiple
+clusters connect through Upbound tokens, you don't need to give out any cloud
+credentials to the clusters.
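+
+As a sketch, connecting a second app cluster only requires its own `clusterID` and claim namespace in the values file; the `cluster-b` names below are assumptions for illustration:
+
+```yaml
+# Hypothetical values for a second app cluster. Each cluster needs a unique
+# clusterID, and the connector stores its claims in a dedicated namespace.
+clusterID: cluster-b
+upbound:
+  account: <account>
+  token: <token>
+spaces:
+  host: "<spaces-host>"
+  controlPlane:
+    name: <control-plane-name>
+    group: <group>
+    claimNamespace: cluster-b
+```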
+
+
+[kubeconfig]: /manuals/cli/howtos/context-config/#generate-a-kubeconfig-for-a-control-plane-in-a-group
+[kubeconfig-1]: /spaces/concepts/control-planes/#connect-directly-to-your-control-plane
+[these-instructions]: /manuals/console/#create-a-personal-access-token
+[kubernetes-api-aggregationlayer]: https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation/
+[configuration-eks]: https://github.com/upbound/configuration-eks
+[an-example]: https://github.com/upbound/configuration-eks/blob/9f86b6d/.up/examples/cluster.yaml
diff --git a/spaces_versioned_docs/version-v1.14/howtos/debugging-a-ctp.md b/spaces_versioned_docs/version-v1.14/howtos/debugging-a-ctp.md
new file mode 100644
index 000000000..521271e40
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.14/howtos/debugging-a-ctp.md
@@ -0,0 +1,128 @@
+---
+title: Debugging issues on a control plane
+sidebar_position: 70
+description: A guide for how to debug resources on a control plane running in Upbound.
+---
+
+This guide provides troubleshooting guidance for how to identify and fix issues on a control plane.
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+). Debugging methods are consistent across versions.
+
+For related control plane specifications, see the [Spaces API Reference](../../../reference/apis/spaces-api/).
+:::
+
+## Start from Upbound Console
+
+
+The Upbound [Console][console] has a built-in control plane explorer experience
+that surfaces status and events for the resources on your control plane. The
+explorer is claim-based. Resources in this view exist only if they exist in the
+reference chain originating from a claim. This view is a helpful starting point
+if you are attempting to debug an issue originating from a claim.
+
+:::tip
+If you directly create Crossplane Managed Resources (`MR`s) or Composite
+Resources (`XR`s), they won't render in the explorer.
+:::
+
+### Example
+
+The example below uses the control plane explorer view to inspect why a claim for an EKS Cluster isn't healthy.
+
+#### Check the health status of claims
+
+From the API type card, two claims branch from it: one shows a healthy green icon, while the other shows an unhealthy red icon.
+
+![Use control plane explorer view to see status of claims](/img/debug-overview.png)
+
+Select `More details` on the unhealthy claim card and Upbound shows details for the claim.
+
+![Use control plane explorer view to see details of claims](/img/debug-claim-more-details.png)
+
+Looking at the three events for this claim:
+
+- **ConfigureCompositeResource**: this event indicates Upbound created the claimed Composite Resource (`XR`).
+
+- **BindCompositeResource**: this indicates the Composite Resource (`XR`) that's being "claimed" isn't ready yet. A claim doesn't show `HEALTHY` until the XR it references is ready.
+
+- **ConfigureCompositeResource**: the error `cannot apply composite resource...the object has been modified; please apply your changes to the latest version and try again` is a generic event from Crossplane resources. It's safe to ignore this error.
+
+Next, look at the `status` field of the rendered YAML for the resource.
+
+![Use control plane explorer view to see status details of claims](/img/debug-claim-status.png)
+
+The status reports a similar message to the event stream: this claim is waiting for a Composite Resource to be ready. Based on this, investigate the Composite Resource referenced by this claim next.
+#### Check the health status of the Composite Resource
+
+
+The control plane explorer only shows the claim cards by default. Selecting the claim card renders the rest of the Crossplane resource tree associated with the selected claim.
+
+
+The previous claim expands into this screenshot:
+
+![Use control plane explorer view to expand tree of claim](/img/debug-claim-expansion.png)
+
+This renders the XR referenced by the claim (along with all its references). You can see the XR is showing the same unhealthy status icon in its card. Notice the XR has itself two nested XRs. One of the nested XRs shows a healthy green icon on its card, while the other shows an unhealthy red icon. Like the claim, a Composite Resource doesn't show healthy until all referenced resources also show healthy.
+
+#### Inspecting Managed Resources
+
+Selecting `more details` on one of the unhealthy Managed Resources shows the following:
+
+![Use control plane explorer view to view events for an MR](/img/debug-mr-event.png)
+
+This event reveals it's unhealthy because it's waiting on a reference to another Managed Resource. Searching the rendered YAML of the MR for this resource shows the following:
+
+![Use control plane explorer view to view status for an MR](/img/debug-mr-status.png)
+
+The rendered YAML shows this MR is referencing a sibling MR that shares the same controller. The same parent XR created both of these managed resources. Inspect the sibling MR to see what its status is.
+
+![Use control plane explorer view to view status for a sibling MR](/img/debug-mr-dependency-status.png)
+
+The sibling MR event stream shows the Provider processed the resource create request. Ignore the `CannotInitializeManagedResource` event. EKS clusters can take 15 minutes or more to provision in AWS. The root cause is that everything is fine: all the resources are still provisioning. Waiting longer and then looking at the control plane explorer again shows all resources are healthy. For reference, below is an example status field for a resource that's healthy and provisioned.
+
+```yaml
+...
+status:
+  atProvider:
+    id: team-b-app-cluster-bhwfb-hwtgs-20230403135452772300000008
+  conditions:
+    - lastTransitionTime: '2023-04-03T13:56:35Z'
+      reason: Available
+      status: 'True'
+      type: Ready
+    - lastTransitionTime: '2023-04-03T13:54:02Z'
+      reason: ReconcileSuccess
+      status: 'True'
+      type: Synced
+    - lastTransitionTime: '2023-04-03T13:54:53Z'
+      reason: Success
+      status: 'True'
+      type: LastAsyncOperation
+    - lastTransitionTime: '2023-04-03T13:54:53Z'
+      reason: Finished
+      status: 'True'
+      type: AsyncOperation
+```
+
+### Control plane explorer limitations
+
+The control plane explorer view is currently designed around claims (`XRC`s). The control plane explorer doesn't inspect other Crossplane resources. To inspect other Crossplane resources, use the `up` CLI.
+
+Some examples of Crossplane resources that require the `up` CLI:
+
+- Managed Resources that aren't associated with a claim
+- Composite Resources that aren't associated with a claim
+- The status of _deleting_ resources
+- ProviderConfigs
+- Provider events
+
+## Use direct CLI access
+
+If your preference is to use a terminal instead of a GUI, Upbound supports direct access to the API server of the control plane. Use [`up ctx`][up-ctx] to connect directly to your control plane.
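+
+For example, once your kubeconfig points at the control plane, you can use Crossplane's standard kubectl categories to inspect the resources the explorer doesn't show. The control plane path below is a placeholder:
+
+```bash
+# Point kubectl at the control plane (placeholder path).
+up ctx <org>/<space>/<group>/<control-plane>
+
+# Inspect resources the explorer doesn't surface.
+kubectl get managed                         # all managed resources, claimed or not
+kubectl get composite                       # all composite resources
+kubectl get providers                       # installed providers and their health
+kubectl describe provider <provider-name>   # provider events
+```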
+
+
+[console]: /manuals/console/upbound-console
+[up-ctx]: /reference/cli-reference
diff --git a/spaces_versioned_docs/version-v1.14/howtos/managed-service.md b/spaces_versioned_docs/version-v1.14/howtos/managed-service.md
new file mode 100644
index 000000000..40b983a76
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.14/howtos/managed-service.md
@@ -0,0 +1,23 @@
+---
+title: Managed Upbound control planes
+description: "Learn about the managed service capabilities of a Space"
+sidebar_position: 10
+---
+
+Control planes in Upbound are fully isolated [Upbound Crossplane][uxp] instances
+that Upbound manages for you. This means Upbound handles:
+
+- the underlying lifecycle of infrastructure (compute, memory, and storage) required to power your instance.
+- scaling of the infrastructure.
+- the maintenance of the core Upbound Crossplane components that make up a control plane.
+
+This lets users focus on building their APIs and operating their control planes,
+while Upbound handles the rest. Each control plane has its own dedicated API
+server connecting users to their control plane.
+
+## Learn about Upbound control planes
+
+Read the [concept][ctp-concept] documentation to learn about Upbound control planes.
+
+[uxp]: /manuals/uxp/overview
+[ctp-concept]: /spaces/concepts/control-planes
\ No newline at end of file
diff --git a/spaces_versioned_docs/version-v1.14/howtos/mcp-connector-guide.md b/spaces_versioned_docs/version-v1.14/howtos/mcp-connector-guide.md
new file mode 100644
index 000000000..8a3866d07
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.14/howtos/mcp-connector-guide.md
@@ -0,0 +1,169 @@
+---
+title: Consume control plane APIs in an app cluster with control plane connector
+sidebar_position: 99
+description: A tutorial to connect a Kubernetes app cluster to a control plane and
+  consume its APIs with the control plane connector
+---
+
+In this tutorial, you learn how to configure a Kubernetes app cluster to communicate with a control plane in an Upbound self-hosted Space.
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Control plane connector functionality is consistent across versions.
+
+For related API specifications and resources, see the [Spaces API Reference](../../../../../reference/apis/spaces-api/).
+:::
+
+The [control plane connector][control-plane-connector] bridges your Kubernetes application clusters, running outside of Upbound, to your control planes running in Upbound. This allows you to interact with your control plane's API right from the app cluster. The claim APIs you define via `CompositeResourceDefinitions` are available alongside Kubernetes workload APIs like `Pod`. In effect, control plane connector provides the same experience as a locally installed Crossplane.
+
+## Prerequisites
+
+To complete this tutorial, you need the following:
+
+- Have already deployed an Upbound Space.
+- Have already deployed a Kubernetes cluster (referred to as `app cluster`).
+
+## Create a control plane
+
+Create a new control plane in your self-hosted Space. Run the following command in a terminal:
+
+```bash
+up ctp create my-control-plane
+```
+
+Once the control plane is ready, connect to it.
+
+```bash
+up ctp connect my-control-plane
+```
+
+For convenience, install an Upbound [platform reference Configuration][platform-reference-configuration] from the marketplace. For production scenarios, replace this with your own Crossplane Configurations or compositions.
+
+```bash
+up ctp configuration install xpkg.upbound.io/upbound/platform-ref-aws:v1.4.0
+```
+
+## Fetch the control plane's connection details
+
+Run the following command in a terminal:
+
+```shell
+kubectl get secret kubeconfig-my-control-plane -n default -o jsonpath='{.data.kubeconfig}' | base64 -d > kubeconfig-my-control-plane.yaml
+```
+
+This command saves the kubeconfig for the control plane to a file in your working directory.
+
+## Install control plane connector in your app cluster
+
+Switch contexts to your Kubernetes app cluster. To install the control plane connector in your app cluster, you must first provide a secret containing your control plane's kubeconfig at install-time. Run the following command in a terminal:
+
+:::important
+Make sure the following commands are executed against your **app cluster**, not your control plane.
+:::
+
+```bash
+kubectl create secret generic kubeconfig-my-control-plane -n kube-system --from-file=kubeconfig=./kubeconfig-my-control-plane.yaml
+```
+
+Set the environment variable below to configure which namespace _in your control plane_ you wish to sync the app cluster's claims to.
+
+```shell
+export CONNECTOR_CTP_NAMESPACE=app-cluster-1
+```
+
+Install the Control Plane Connector in the app cluster and point it to your control plane.
+
+```bash
+up ctp connector install my-control-plane $CONNECTOR_CTP_NAMESPACE --control-plane-secret=kubeconfig-my-control-plane
+```
+
+## Inspect your app cluster
+
+After you install Control Plane Connector in the app cluster, you can now see APIs which live on the control plane. You can confirm this is the case by running the following command on your app cluster:
+
+```bash {copy-lines="1"}
+kubectl api-resources | grep upbound
+
+# The output should look like this:
+sqlinstances   aws.platform.upbound.io/v1alpha1       true   SQLInstance
+clusters       aws.platformref.upbound.io/v1alpha1    true   Cluster
+osss           observe.platform.upbound.io/v1alpha1   true   Oss
+apps           platform.upbound.io/v1alpha1           true   App
+```
+
+## Claim a database instance on your app cluster
+
+Create a database claim against the `SQLInstance` API and observe resources get created by your control plane. Apply the following resources to your app cluster:
+
+```yaml
+cat < --output
+ ```
+
+ The command exports your existing Crossplane control plane configuration/state into an archive file.
+
+:::note
+By default, the export command doesn't make any changes to your existing Crossplane control plane state, leaving it intact. Use the `--pause-before-export` flag to pause the reconciliation on managed resources before exporting the archive file.
+
+This safety mechanism ensures the control plane you migrate state to doesn't assume ownership of resources before you're ready.
+:::
+
+2. Use the control plane [create command][create-command] to create a managed
+control plane in Upbound:
+
+   ```bash
+   up controlplane create my-controlplane
+   ```
+
+3. Use [`up ctx`][up-ctx] to connect to the control plane created in the previous step:
+
+   ```bash
+   up ctx "<org>/<space>/<group>/my-controlplane"
+   ```
+
+   The command configures your local `kubeconfig` to connect to the control plane.
+
+4. Run the following command to import the archive file into the control plane:
+
+   ```bash
+   up controlplane migration import --input <archive-file>
+   ```
+
+:::note
+By default, the import command leaves the control plane in an inactive state by pausing the reconciliation on managed
+resources. This pause gives you an opportunity to review the imported configuration/state before activating the control plane.
+Use the `--unpause-after-import` flag to change the default behavior and activate the control plane immediately after
+importing the archive file.
+:::
+
+
+
+5. Review and validate the imported configuration/state. When you are ready, activate your managed
+   control plane by running the following command:
+
+   ```bash
+   kubectl annotate managed --all crossplane.io/paused-
+   ```
+
+   At this point, you can delete the source Crossplane control plane.
+
+## CLI options
+
+### Filtering
+
+The migration tool captures the state of a control plane. The only filtering
+supported is Kubernetes namespace and Kubernetes resource type filtering.
+
+You can exclude namespaces using the `--exclude-namespaces` CLI option. This can prevent the CLI from including unwanted resources in the export.
+
+```bash
+--exclude-namespaces=kube-system,kube-public,kube-node-lease,local-path-storage,...
+
+# A list of specific namespaces to exclude from the export. Defaults to 'kube-system', 'kube-public','kube-node-lease', and 'local-path-storage'.
+```
+
+You can exclude Kubernetes resource types by using the `--exclude-resources` CLI option:
+
+```bash
+--exclude-resources=EXCLUDE-RESOURCES,...
+
+# A list of resource types to exclude from the export in "resource.group" format. No resources are excluded by default.
+```
+
+For example, here's how to exclude the CRDs installed by Crossplane functions (since they're not needed):
+
+```bash
+up controlplane migration export \
+  --exclude-resources=gotemplates.gotemplating.fn.crossplane.io,kclinputs.template.fn.crossplane.io
+```
+
+:::warning
+You must specify resource names in lowercase "resource.group" format (for example, `gotemplates.gotemplating.fn.crossplane.io`). Using only the resource kind (for example, `GoTemplate`) isn't supported.
+:::
+
+
+:::tip Function Input CRDs
+
+Exclude function input CRDs (`inputs.template.fn.crossplane.io`, `resources.pt.fn.crossplane.io`, `gotemplates.gotemplating.fn.crossplane.io`, `kclinputs.template.fn.crossplane.io`) from migration exports. Upbound automatically recreates these resources during import. Function input CRDs typically have owner references to function packages and may have restricted RBAC access. Upbound installs these CRDs during the import when function packages are restored.
+
+:::
+
+
+After export, users can also edit the archive file to only include necessary resources.
+
+### Export non-Crossplane resources
+
+Use the `--include-extra-resources=<resource.group>` CLI option to select other CRD types to include in the export.
+
+### Set the kubecontext
+
+Currently `--context` isn't supported in the migration CLI. You should be able to use the `--kubeconfig` CLI option to use a file that's set to the correct context. For example:
+
+```bash
+up controlplane migration export --kubeconfig <kubeconfig-file>
+```
+
+Use this in tandem with `up ctx` to export a control plane's kubeconfig:
+
+```bash
+up ctx --kubeconfig ~/.kube/config
+
+# To list the current context
+up ctx . --kubeconfig ~/.kube/config
+```
+
+## Export archive
+
+The migration CLI exports an archive upon successful completion. Below is an example export of a control plane that excludes several CRD types and skips the confirmation prompt. A file gets written to the working directory, unless you select another output file:
+
+<details>
+<summary>View the example export</summary>
+
+```bash
+$ up controlplane migration export --exclude-resources=gotemplates.gotemplating.fn.crossplane.io,kclinputs.template.fn.crossplane.io --yes
+Exporting control plane state...
+✓ Scanning control plane for types to export... 121 types found! 👀
+✓ Exporting 121 Crossplane resources...60 resources exported! 📤
+✓ Exporting 3 native resources...8 resources exported! 📤
+✓ Archiving exported state... archived to "xp-state.tar.gz"! 📦
+```
+
+</details>
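+
+If you want to peek at the archive layout described next without unpacking it, standard tar flags work (assuming the default `xp-state.tar.gz` name):
+
+```bash
+# List the first entries of the exported archive.
+tar -tzf xp-state.tar.gz | head
+```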
+
+When an export occurs, a file named `xp-state.tar.gz` by default gets created in the working directory. You can unzip the file; the contents of the export are all plain-text YAML files.
+
+- Each CRD (for example `vpcs.ec2.aws.upbound.io`) gets its own directory
+which contains:
+  - A `metadata.yaml` file that contains Kubernetes object metadata and a list
+of the Kubernetes categories the resource belongs to
+  - A `cluster` directory that contains YAML manifests for all resources provisioned
+using the CRD
+
+Sample contents for a cluster with a single `XNetwork` Composite from
+[configuration-aws-network][configuration-aws-network] are shown below:
+
+<details>
+<summary>View the example cluster content</summary>
+
+```bash
+├── compositionrevisions.apiextensions.crossplane.io
+│   ├── cluster
+│   │   ├── kcl.xnetworks.aws.platform.upbound.io-4ca6a8a.yaml
+│   │   └── xnetworks.aws.platform.upbound.io-9859a34.yaml
+│   └── metadata.yaml
+├── configurations.pkg.crossplane.io
+│   ├── cluster
+│   │   └── configuration-aws-network.yaml
+│   └── metadata.yaml
+├── deploymentruntimeconfigs.pkg.crossplane.io
+│   ├── cluster
+│   │   └── default.yaml
+│   └── metadata.yaml
+├── export.yaml
+├── functions.pkg.crossplane.io
+│   ├── cluster
+│   │   ├── crossplane-contrib-function-auto-ready.yaml
+│   │   ├── crossplane-contrib-function-go-templating.yaml
+│   │   └── crossplane-contrib-function-kcl.yaml
+│   └── metadata.yaml
+├── internetgateways.ec2.aws.upbound.io
+│   ├── cluster
+│   │   └── borrelli-backup-test-xgl4q.yaml
+│   └── metadata.yaml
+├── mainroutetableassociations.ec2.aws.upbound.io
+│   ├── cluster
+│   │   └── borrelli-backup-test-t2qh7.yaml
+│   └── metadata.yaml
+├── namespaces
+│   └── cluster
+│       ├── crossplane-system.yaml
+│       ├── default.yaml
+│       └── upbound-system.yaml
+├── providerconfigs.aws.upbound.io
+│   ├── cluster
+│   │   └── default.yaml
+│   └── metadata.yaml
+├── providerconfigusages.aws.upbound.io
+│   ├── cluster
+│   │   ├── 0a2a3ec6-ef13-45f9-9cf0-63af7f4a6b6b.yaml
+...redacted
+│   │   └── f7092b0f-3a78-4bfe-82c8-57e5085a9b11.yaml
+│   └── metadata.yaml
+├── providers.pkg.crossplane.io
+│   ├── cluster
+│   │   ├── upbound-provider-aws-ec2.yaml
+│   │   └── upbound-provider-family-aws.yaml
+│   └── metadata.yaml
+├── routes.ec2.aws.upbound.io
+│   ├── cluster
+│   │   └── borrelli-backup-test-dt9cj.yaml
+│   └── metadata.yaml
+├── routetableassociations.ec2.aws.upbound.io
+│   ├── cluster
+│   │   ├── borrelli-backup-test-mr2sd.yaml
+│   │   ├── borrelli-backup-test-ngq5h.yaml
+│   │   ├── borrelli-backup-test-nrkgg.yaml
+│   │   └── borrelli-backup-test-wq752.yaml
+│   └── metadata.yaml
+├── routetables.ec2.aws.upbound.io
+│   ├── cluster
+│   │   └── borrelli-backup-test-dv4mb.yaml
+│   └── metadata.yaml
+├── secrets
+│   └── namespaces
+│       ├── crossplane-system
+│       │   ├── cert-token-signing-gateway-pub.yaml
+│       │   ├── mxp-hostcluster-certs.yaml
+│       │   ├── package-pull-secret.yaml
+│       │   └── xgql-tls.yaml
+│       └── upbound-system
+│           └── aws-creds.yaml
+├── securitygrouprules.ec2.aws.upbound.io
+│   ├── cluster
+│   │   ├── borrelli-backup-test-472f4.yaml
+│   │   └── borrelli-backup-test-qftmw.yaml
+│   └── metadata.yaml
+├── securitygroups.ec2.aws.upbound.io
+│   ├── cluster
+│   │   └── borrelli-backup-test-w5jch.yaml
+│   └── metadata.yaml
+├── storeconfigs.secrets.crossplane.io
+│   ├── cluster
+│   │   └── default.yaml
+│   └── metadata.yaml
+├── subnets.ec2.aws.upbound.io
+│   ├── cluster
+│   │   ├── borrelli-backup-test-8btj6.yaml
+│   │   ├── borrelli-backup-test-gbmrm.yaml
+│   │   ├── borrelli-backup-test-m7kh7.yaml
+│   │   └── borrelli-backup-test-nttt5.yaml
+│   └── metadata.yaml
+├── vpcs.ec2.aws.upbound.io
+│   ├── cluster
+│   │   └── borrelli-backup-test-7hwgh.yaml
+│   └── metadata.yaml
+└── xnetworks.aws.platform.upbound.io
+    ├── cluster
+    │   └── borrelli-backup-test.yaml
+    └── metadata.yaml
+
+43 directories, 87 files
+```
+
+</details>
+ + +The `export.yaml` file contains metadata about the export, including the configuration of the export, Crossplane information, and what's included in the export bundle. + +
+
+<details>
+<summary>View the export</summary>
+
+```yaml
+version: v1alpha1
+exportedAt: 2025-01-06T17:39:53.173222Z
+options:
+  excludedNamespaces:
+    - kube-system
+    - kube-public
+    - kube-node-lease
+    - local-path-storage
+  includedResources:
+    - namespaces
+    - configmaps
+    - secrets
+  excludedResources:
+    - gotemplates.gotemplating.fn.crossplane.io
+    - kclinputs.template.fn.crossplane.io
+crossplane:
+  distribution: universal-crossplane
+  namespace: crossplane-system
+  version: 1.17.3-up.1
+  featureFlags:
+    - --enable-provider-identity
+    - --enable-environment-configs
+    - --enable-composition-functions
+    - --enable-usages
+stats:
+  total: 68
+  nativeResources:
+    configmaps: 0
+    namespaces: 3
+    secrets: 5
+  customResources:
+    amicopies.ec2.aws.upbound.io: 0
+    amilaunchpermissions.ec2.aws.upbound.io: 0
+    amis.ec2.aws.upbound.io: 0
+    availabilityzonegroups.ec2.aws.upbound.io: 0
+    capacityreservations.ec2.aws.upbound.io: 0
+    carriergateways.ec2.aws.upbound.io: 0
+    compositeresourcedefinitions.apiextensions.crossplane.io: 0
+    compositionrevisions.apiextensions.crossplane.io: 2
+    compositions.apiextensions.crossplane.io: 0
+    configurationrevisions.pkg.crossplane.io: 0
+    configurations.pkg.crossplane.io: 1
+...redacted
+```
+
+</details>
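+
+To check the export metadata without fully unpacking the archive, one option is tar's `--to-stdout` mode combined with `yq` (a sketch, assuming the default archive name):
+
+```bash
+# Read export.yaml straight out of the archive and query a couple of fields.
+tar -xzOf xp-state.tar.gz export.yaml | yq '.crossplane.version, .stats.total'
+```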
+
+### Skipped resources
+
+Along with the resources excluded via CLI options, the following resources aren't
+included in the backup:
+
+- The `kube-root-ca.crt` ConfigMap, since this is cluster-specific
+- Resources directly managed via Helm (resources from ArgoCD's Helm implementation, which templates
+Helm resources and then applies them, do get included in the backup). The migration creates the exclusion list by looking for:
+  - Any resource with the label `"app.kubernetes.io/managed-by" == "Helm"`
+  - Kubernetes Secrets with the label prefix `helm.sh/release`. For example, `helm.sh/release.v1`
+- Resources installed via a Crossplane package. These have an `ownerReference` with
+a prefix `pkg.crossplane.io`. The expectation is that during import, the Crossplane Package Manager bears responsibility for installing the resources.
+- Crossplane Locks: Any `Lock.pkg.crossplane.io` resource isn't included in the
+export.
+
+## Restore
+
+The following is an example of a successful import run. At the end of the import, all Managed Resources are in a paused state.
+
+<details>
+<summary>View the migration import</summary>
+
+```bash
+$ up controlplane migration import
+Importing control plane state...
+✓ Reading state from the archive... Done! 👀
+✓ Importing base resources... 18 resources imported! 📥
+✓ Waiting for XRDs... Established! ⏳
+✓ Waiting for Packages... Installed and Healthy! ⏳
+✓ Importing remaining resources... 50 resources imported! 📥
+✓ Finalizing import... Done! 🎉
+```
+
+</details>
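+
+One way to confirm which managed resources are still paused before un-pausing them is kubectl's custom columns (a sketch; adjust to taste):
+
+```bash
+# Show each managed resource and whether the paused annotation is set.
+kubectl get managed -o custom-columns='NAME:.metadata.name,PAUSED:.metadata.annotations.crossplane\.io/paused'
+```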
+
+Your scenario may involve migrating resources which already exist through other automation on the platform. When executing an import in these circumstances, the importer applies the new manifests to the cluster. If the resource already exists, the restore sets fields to what's in the backup.
+
+The importer restores all resources in the export archive. Managed Resources get imported with the `crossplane.io/paused: "true"` annotation set. Use the `--unpause-after-import` CLI argument to automatically un-pause resources that got
+paused during backup, or remove the annotation manually.
+
+### Restore order
+
+The importer restores based on Kubernetes types. The restore order doesn't include parent/child relationships.
+
+Because Crossplane Composites create new Managed Resources if not present on the cluster, all
+Claims, Composites and Managed Resources get imported in a paused state. You can un-pause them after the restore completes.
+
+The first step of import is installing Base Resources into the cluster. These resources (such as
+packages and XRDs) must be ready before proceeding with the import.
+Base Resources are:
+
+- Kubernetes Resources
+  - ConfigMaps
+  - Namespaces
+  - Secrets
+- Crossplane Resources
+  - ControllerConfigs: `controllerconfigs.pkg.crossplane.io`
+  - DeploymentRuntimeConfigs: `deploymentruntimeconfigs.pkg.crossplane.io`
+  - StoreConfigs: `storeconfigs.secrets.crossplane.io`
+- Crossplane Packages
+  - Providers: `providers.pkg.crossplane.io`
+  - Functions: `functions.pkg.crossplane.io`
+  - Configurations: `configurations.pkg.crossplane.io`
+
+Restore waits for the base resources to be `Ready` before moving on to the next step. Next, restore walks through the archive and restores all the manifests present.
+
+During import, the `crossplane.io/paused` annotation gets added to Managed Resources, Claims
+and Composites.
+
+To manually un-pause managed resources after an import, remove the annotation by running:
+
+```bash
+kubectl annotate managed --all crossplane.io/paused-
+```
+
+You can also run import again with the `--unpause-after-import` flag to remove the annotations.
+
+```bash
+up controlplane migration import --unpause-after-import
+```
+
+### Restoring resource status
+
+The importer applies the status of all resources during import. The importer determines if the CRD version has a status field defined based on the stored CRD version.
+
+
+[cli-command]: /reference/cli-reference
+[up-cli]: /reference/cli-reference
+[up-cli-1]: /manuals/cli/overview
+[create-command]: /reference/cli-reference
+[up-ctx]: /reference/cli-reference
+[configuration-aws-network]: https://marketplace.upbound.io/configurations/upbound/configuration-aws-network
diff --git a/spaces_versioned_docs/version-v1.14/howtos/observability.md b/spaces_versioned_docs/version-v1.14/howtos/observability.md
new file mode 100644
index 000000000..8fc5c3278
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.14/howtos/observability.md
@@ -0,0 +1,395 @@
+---
+title: Observability
+sidebar_position: 50
+description: A guide for how to use the integrated observability pipeline feature
+  in a Space.
+plan: "enterprise"
+---
+
+
+
+This guide explains how to configure observability in Upbound Spaces. Upbound
+provides integrated observability features built on
+[OpenTelemetry][opentelemetry] to collect, process, and export logs, metrics,
+and traces.
+
+Upbound Spaces offers two levels of observability:
+
+1. 
+2. **Control plane observability** - Observes workloads running within individual control planes
+
+
+:::info API Version Information & Version Selector
+This guide applies to all supported versions (v1.9-v1.15+). Observability features evolved:
+
+- **v1.9-v1.12**: SharedTelemetryConfig available (alpha until v1.11)
+- **v1.11+**: Observability promoted to stable with logs export support
+- **v1.14+**: Both space-level and control-plane observability GA
+
+**View API Reference for Your Version**:
+
+| Version | Status | Link |
+|---------|--------|------|
+| v1.15 | Latest | [Spaces API v1.15](../../../next/reference/apis/spaces-api/) |
+| v1.14 | Supported | [Spaces API v1.14](../../../1.14/reference/apis/spaces-api/) |
+| v1.13 | Supported | [Spaces API v1.13](../../../1.13/reference/apis/spaces-api/) |
+| v1.12 | Supported | [Spaces API v1.12](../../../1.12/reference/apis/spaces-api/) |
+| v1.11 | Supported | [Spaces API v1.11](../../../1.11/reference/apis/spaces-api/) |
+| v1.10 | Limited | [Spaces API v1.10](../../../1.10/reference/apis/spaces-api/) |
+| v1.9 | EOL | [Spaces API v1.9](../../../1.9/reference/apis/spaces-api/) |
+
+For the version support policy and feature availability, see the table above.
+:::
+
+:::important
+**Space-level observability** (available since v1.6.0, GA in v1.14.0):
+- Disabled by default
+- Requires manual enablement and configuration
+- Self-Hosted Spaces only
+
+**Control plane observability** (available since v1.13.0, GA in v1.14.0):
+- Enabled by default
+- No additional configuration required
+:::
+
+
+## Prerequisites
+
+
+**Control plane observability** is enabled by default. No additional setup is
+required.
+
+
+
+### Self-hosted Spaces
+
+1. **Enable the observability feature** when installing Spaces:
+   ```bash
+   up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
+     ...
+     --set "observability.enabled=true"
+   ```
+
+   Set `features.alpha.observability.enabled=true` instead if using a Spaces
+   version earlier than `v1.14.0`.
+
+2. **Install OpenTelemetry Operator** (required for Space-level observability):
+   ```bash
+   kubectl apply -f https://github.com/open-telemetry/opentelemetry-operator/releases/download/v0.116.0/opentelemetry-operator.yaml
+   ```
+
+   :::important
+   If running Spaces `v1.11` or later, use OpenTelemetry Operator `v0.110.0` or later due to breaking changes.
+   :::
+
+
+## Space-level observability
+
+Space-level observability is only available for self-hosted Spaces and allows
+administrators to observe the cluster infrastructure.
+
+### Configuration
+
+Configure Space-level observability using the `spacesCollector` value in your
+Spaces Helm chart:
+
+```yaml
+observability:
+  spacesCollector:
+    config:
+      exporters:
+        otlphttp:
+          endpoint: ""
+          headers:
+            api-key: YOUR_API_KEY
+      exportPipeline:
+        logs:
+          - otlphttp
+        metrics:
+          - otlphttp
+```
+
+This configuration exports metrics and logs from:
+
+- Crossplane installation
+- Spaces infrastructure (controller, API, router, etc.)
+
+### Router metrics
+
+The Spaces router uses Envoy as a reverse proxy and automatically exposes
+metrics when you enable Space-level observability.
+These metrics provide visibility into:
+
+- Traffic routing to control planes and services
+- Request status codes, timeouts, and retries
+- Circuit breaker state preventing cascading failures
+- Client connection patterns and request volume
+- Request latency (P50, P95, P99)
+
+For more information about available metrics, example queries, and how to enable
+this feature, see the [Space-level observability guide][space-level-o11y].
+
+## Control plane observability
+
+Control plane observability collects telemetry data from workloads running
+within individual control planes using `SharedTelemetryConfig` resources.
+
+The pipeline deploys [OpenTelemetry Collectors][opentelemetry-collectors] per
+control plane, defined by a `SharedTelemetryConfig` at the group level.
+Collectors pass data to external observability backends.
+
+:::important
+From Spaces `v1.13` and beyond, telemetry only includes user-facing control
+plane workloads (Crossplane, providers, functions).
+
+Self-hosted users can include system workloads (`api-server`, `etcd`) by setting
+`observability.collectors.includeSystemTelemetry=true` in Helm.
+:::
+
+:::important
+Spaces validates `SharedTelemetryConfig` resources before applying them by
+sending telemetry to configured exporters. For self-hosted Spaces, ensure that
+`spaces-controller` can reach the exporter endpoints.
+:::
+
+### `SharedTelemetryConfig`
+
+`SharedTelemetryConfig` is a group-scoped custom resource that defines telemetry
+configuration for control planes.
+
+#### New Relic example
+
+```yaml
+apiVersion: observability.spaces.upbound.io/v1alpha1
+kind: SharedTelemetryConfig
+metadata:
+  name: newrelic
+  namespace: default
+spec:
+  controlPlaneSelector:
+    labelSelectors:
+      - matchLabels:
+          org: foo
+  exporters:
+    otlphttp:
+      endpoint: https://otlp.nr-data.net
+      headers:
+        api-key: YOUR_API_KEY
+  exportPipeline:
+    metrics: [otlphttp]
+    traces: [otlphttp]
+    logs: [otlphttp]
+```
+
+#### Datadog example
+
+```yaml
+apiVersion: observability.spaces.upbound.io/v1alpha1
+kind: SharedTelemetryConfig
+metadata:
+  name: datadog
+  namespace: default
+spec:
+  controlPlaneSelector:
+    labelSelectors:
+      - matchLabels:
+          org: foo
+  exporters:
+    datadog:
+      api:
+        site: ${DATADOG_SITE}
+        key: ${DATADOG_API_KEY}
+  exportPipeline:
+    metrics: [datadog]
+    traces: [datadog]
+    logs: [datadog]
+```
+
+### Control plane selection
+
+Use `spec.controlPlaneSelector` to specify which control planes should use the
+telemetry configuration.
+
+#### Label-based selection
+
+```yaml
+spec:
+  controlPlaneSelector:
+    labelSelectors:
+      - matchLabels:
+          environment: production
+```
+
+#### Expression-based selection
+
+```yaml
+spec:
+  controlPlaneSelector:
+    labelSelectors:
+      - matchExpressions:
+          - { key: environment, operator: In, values: [production,staging] }
+```
+
+#### Name-based selection
+
+```yaml
+spec:
+  controlPlaneSelector:
+    names:
+      - controlplane-dev
+      - controlplane-staging
+      - controlplane-prod
+```
+
+### Manage sensitive data
+
+:::important
+Available from Spaces `v1.10`.
+:::
+
+Store sensitive data in Kubernetes secrets and reference them in your
+`SharedTelemetryConfig`:
+
+1. **Create the secret:**
+   ```bash
+   kubectl create secret generic sensitive -n  \
+     --from-literal=apiKey='YOUR_API_KEY'
+   ```
+
+2. **Reference the secret in SharedTelemetryConfig:**
+   ```yaml
+   apiVersion: observability.spaces.upbound.io/v1alpha1
+   kind: SharedTelemetryConfig
+   metadata:
+     name: newrelic
+   spec:
+     configPatchSecretRefs:
+       - name: sensitive
+         key: apiKey
+         path: exporters.otlphttp.headers.api-key
+     controlPlaneSelector:
+       labelSelectors:
+         - matchLabels:
+             org: foo
+     exporters:
+       otlphttp:
+         endpoint: https://otlp.nr-data.net
+         headers:
+           api-key: dummy # Replaced by secret value
+     exportPipeline:
+       metrics: [otlphttp]
+       traces: [otlphttp]
+       logs: [otlphttp]
+   ```
+
+### Telemetry processing
+
+:::important
+Available from Spaces `v1.11`.
+:::
+
+Configure processing pipelines to transform telemetry data using the [transform
+processor][transform-processor].
+
+#### Add labels to metrics
+
+```yaml
+spec:
+  processors:
+    transform:
+      error_mode: ignore
+      metric_statements:
+        - context: datapoint
+          statements:
+            - set(attributes["newLabel"], "someLabel")
+  processorPipeline:
+    metrics: [transform]
+```
+
+#### Remove labels
+
+From metrics:
+```yaml
+processors:
+  transform:
+    metric_statements:
+      - context: datapoint
+        statements:
+          - delete_key(attributes, "kubernetes_namespace")
+```
+
+From logs:
+```yaml
+processors:
+  transform:
+    log_statements:
+      - context: log
+        statements:
+          - delete_key(attributes, "log.file.name")
+```
+
+#### Modify log messages
+
+```yaml
+processors:
+  transform:
+    log_statements:
+      - context: log
+        statements:
+          - set(attributes["original"], body)
+          - set(body, Concat(["log message:", body], " "))
+```
+
+### Monitor status
+
+Check the status of your `SharedTelemetryConfig`:
+
+```bash
+kubectl get stc
+NAME      SELECTED   FAILED   PROVISIONED   AGE
+datadog   1          0        1             63s
+```
+
+- `SELECTED`: Number of control planes selected
+- `FAILED`: Number of control planes that failed provisioning
+- `PROVISIONED`: Number of successfully running collectors
+
+For detailed status information:
+
+```bash
+kubectl describe stc
+```
+
+## Supported exporters
+
+Both Space-level and control plane observability support:
+
+- `datadog` - Datadog integration
+- `otlphttp` - General-purpose exporter (used by New Relic, among others)
+- `debug` - Debug output for troubleshooting
+
+## Considerations
+
+- **Control plane conflicts**: Each control plane can only use one `SharedTelemetryConfig`. Multiple configs selecting the same control plane conflict.
+- **Custom collector image**: Both Space-level and control plane observability use the same custom OpenTelemetry Collector image with supported exporters.
+- **Resource scope**: `SharedTelemetryConfig` resources are group-scoped, allowing different telemetry configurations per group.
+
+For more advanced configuration options, review the [Helm chart
+reference][helm-chart-reference] and [OpenTelemetry Transformation Language
+documentation][opentelemetry-transformation-language].
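+
+Putting the pieces together, the sketch below combines control plane selection,
+a transform processor, and an export pipeline in a single
+`SharedTelemetryConfig`. It reuses only fields shown earlier in this guide; the
+name, endpoint, API key, and label values are placeholders rather than a
+recommended configuration:
+
+```yaml
+apiVersion: observability.spaces.upbound.io/v1alpha1
+kind: SharedTelemetryConfig
+metadata:
+  name: otlp-prod
+  namespace: default
+spec:
+  # Select production control planes by label
+  controlPlaneSelector:
+    labelSelectors:
+      - matchLabels:
+          environment: production
+  # Tag every metric datapoint with a team label
+  processors:
+    transform:
+      error_mode: ignore
+      metric_statements:
+        - context: datapoint
+          statements:
+            - set(attributes["team"], "platform")
+  processorPipeline:
+    metrics: [transform]
+  # Export all signals over OTLP/HTTP
+  exporters:
+    otlphttp:
+      endpoint: https://otlp.example.com
+      headers:
+        api-key: YOUR_API_KEY
+  exportPipeline:
+    metrics: [otlphttp]
+    traces: [otlphttp]
+    logs: [otlphttp]
+```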
+
+
+[opentelemetry]: https://opentelemetry.io/
+[opentelemetry-collectors]: https://opentelemetry.io/docs/collector/
+[opentelemetry-collector-configuration]: https://opentelemetry.io/docs/collector/configuration/#exporters
+[opentelemetry-operator]: https://opentelemetry.io/docs/kubernetes/operator/
+[transform-processor]: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/transformprocessor/README.md
+[opentelemetry-transformation-language]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl
+[space-level-o11y]: /spaces/howtos/self-hosted/space-observability
+[helm-chart-reference]: /reference/helm-reference
+[opentelemetry-transformation-language-functions]: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/ottl/ottlfuncs/README.md
+[opentelemetry-transformation-language-contexts]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl/contexts
+[guide-on-ottl]: https://betterstack.com/community/guides/observability/ottl/#a-brief-overview-of-the-ottl-grammar
diff --git a/spaces_versioned_docs/version-v1.14/howtos/query-api.md b/spaces_versioned_docs/version-v1.14/howtos/query-api.md
new file mode 100644
index 000000000..78163de2f
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.14/howtos/query-api.md
@@ -0,0 +1,320 @@
+---
+title: Query API
+sidebar_position: 40
+description: Use the `up` CLI to query objects and resources
+---
+
+
+
+
+Upbound's Query API allows users to inspect objects and resources within their control planes. The read-only `up alpha query` and `up alpha get` CLI commands let you gather information about your control planes quickly and efficiently. These commands follow the [`kubectl` conventions][kubectl-conventions] for filtering, sorting, and retrieving information from your Space.
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+). Query API is available in Cloud Spaces since v1.6 and Self-Hosted since v1.8.
+
+For detailed deployment procedures, see [Deploy Query API infrastructure - Self-Hosted](./self-hosted/query-api.md).
+:::
+
+
+
+## Using the Query API
+
+
+The Query API allows you to retrieve control plane information faster than traditional `kubectl` commands. This feature lets you debug your Crossplane resources with the CLI or within the Upbound Console's enhanced management views.
+
+### Query within a single control plane
+
+Use the `up alpha get` command to retrieve information about objects within the current control plane context. This command uses the **Query** endpoint and targets the current control plane.
+
+To switch between control plane groups, use the [`up ctx` command][up-ctx] and change to your desired context with an interactive prompt, or specify your control plane path directly:
+
+```shell
+up ctx ///
+```
+
+You can query within a single control plane with the [`up alpha get` command][up-alpha-get-command] to return more information about a given object within the current kubeconfig context.
+
+The `up alpha get` command can query resource types and aliases to return objects in your control plane.
+
+```shell
+up alpha get managed
+NAME                             READY   SYNCED   AGE
+custom-account1-5bv5j-sa         True    True     15m
+custom-cluster1-bq6dk-net        True    True     15m
+custom-account1-5bv5j-subnet     True    True     15m
+custom-cluster1-bq6dk-nodepool   True    True     15m
+custom-cluster1-bq6dk-cluster    True    True     15m
+custom-account1-5bv5j-net        True    True     15m
+custom-cluster1-bq6dk-subnet     True    True     15m
+custom-cluster1-bq6dk-sa         True    True     15m
+```
+
+The [`-A` flag][a-flag] queries for objects across all namespaces.
+
+```shell
+up alpha get configmaps -A
+NAMESPACE           NAME                                                   AGE
+crossplane-system   uxp-versions-config                                    18m
+crossplane-system   universal-crossplane-config                            18m
+crossplane-system   kube-root-ca.crt                                       18m
+upbound-system      kube-root-ca.crt                                       18m
+kube-system         kube-root-ca.crt                                       18m
+kube-system         coredns                                                18m
+default             kube-root-ca.crt                                       18m
+kube-node-lease     kube-root-ca.crt                                       18m
+kube-public         kube-root-ca.crt                                       18m
+kube-system         kube-apiserver-legacy-service-account-token-tracking   18m
+kube-system         extension-apiserver-authentication                     18m
+```
+
+To query for [multiple resource types][multiple-resource-types], add the name or alias for each resource as a comma-separated string.
+
+```shell
+up alpha get providers,providerrevisions
+
+NAME                                                                              HEALTHY   REVISION   IMAGE                                                     STATE    DEP-FOUND   DEP-INSTALLED   AGE
+providerrevision.pkg.crossplane.io/crossplane-contrib-provider-nop-ecc25c121431   True      1          xpkg.upbound.io/crossplane-contrib/provider-nop:v0.2.1   Active                               18m
+NAME                                                          INSTALLED   HEALTHY   PACKAGE                                                   AGE
+provider.pkg.crossplane.io/crossplane-contrib-provider-nop    True        True      xpkg.upbound.io/crossplane-contrib/provider-nop:v0.2.1   18m
+```
+
+### Query multiple control planes
+
+The [`up alpha query` command][up-alpha-query-command] returns a list of objects of any kind within all the control planes in your Space. This command uses either the **SpaceQuery** or **GroupQuery** endpoints depending on your query scope. The `-A` flag switches the query context from the group level to the entire Space.
+
+The `up alpha query` command accepts resources and aliases to return objects across your group or Space.
+
+```shell
+up alpha query crossplane
+
+NAME                                                                                         ESTABLISHED   OFFERED   AGE
+compositeresourcedefinition.apiextensions.crossplane.io/xnetworks.platform.acme.co           True          True      20m
+compositeresourcedefinition.apiextensions.crossplane.io/xaccountscaffolds.platform.acme.co   True          True      20m
+
+
+NAME                                                                          XR-KIND            XR-APIVERSION               AGE
+composition.apiextensions.crossplane.io/xaccountscaffolds.platform.acme.co   XAccountScaffold   platform.acme.co/v1alpha1   20m
+composition.apiextensions.crossplane.io/xnetworks.platform.acme.co           XNetwork           platform.acme.co/v1alpha1   20m
+
+
+NAME                                                                                          REVISION   XR-KIND            XR-APIVERSION               AGE
+compositionrevision.apiextensions.crossplane.io/xaccountscaffolds.platform.acme.co-5ae9da5    1          XAccountScaffold   platform.acme.co/v1alpha1   20m
+compositionrevision.apiextensions.crossplane.io/xnetworks.platform.acme.co-414ce80            1          XNetwork           platform.acme.co/v1alpha1   20m
+
+NAME                                                         READY   SYNCED   AGE
+nopresource.nop.crossplane.io/custom-cluster1-bq6dk-subnet   True    True     19m
+nopresource.nop.crossplane.io/custom-account1-5bv5j-net      True    True     19m
+
+## Output truncated...
+
+```
+
+
+The [`--sort-by` flag][sort-by-flag] lets you control the order of the returned results. You can express the sort key as a JSONPath string or integer.
+
+
+```shell
+up alpha query crossplane -A --sort-by="{.metadata.name}"
+
+CONTROLPLANE   NAME                                                AGE
+default/test   deploymentruntimeconfig.pkg.crossplane.io/default   10m
+
+CONTROLPLANE   NAME                                        AGE   TYPE         DEFAULT-SCOPE
+default/test   storeconfig.secrets.crossplane.io/default   10m   Kubernetes   crossplane-system
+```
+
+To query for multiple resource types, add the name or alias for each resource as a comma-separated string.
+
+```shell
+up alpha query namespaces,configmaps -A
+
+CONTROLPLANE   NAME                          AGE
+default/test   namespace/upbound-system      15m
+default/test   namespace/crossplane-system   15m
+default/test   namespace/kube-system         16m
+default/test   namespace/default             16m
+
+CONTROLPLANE   NAMESPACE           NAME                                    AGE
+default/test   crossplane-system   configmap/uxp-versions-config           15m
+default/test   crossplane-system   configmap/universal-crossplane-config   15m
+default/test   crossplane-system   configmap/kube-root-ca.crt              15m
+default/test   upbound-system      configmap/kube-root-ca.crt              15m
+default/test   kube-system         configmap/coredns                       16m
+default/test   default             configmap/kube-root-ca.crt              16m
+
+## Output truncated...
+
+```
+
+The Query API also allows you to return resource types with specific [label columns][label-columns].
+
+```shell
+up alpha query composite -A --label-columns=crossplane.io/claim-namespace
+
+CONTROLPLANE          NAME                                       SYNCED   READY   COMPOSITION                     AGE   CLAIM-NAMESPACE
+query-api-test/test   xeks.argo.discover.upbound.io/test-k7xbk   False            xeks.argo.discover.upbound.io   51d   default
+
+CONTROLPLANE                                                    NAME                                                                         EXTERNALDNS   SYNCED   READY   COMPOSITION                                    AGE   CLAIM-NAMESPACE
+spaces-clusters/controlplane-query-api-test-spaces-playground   xexternaldns.externaldns.platform.upbound.io/spaces-cluster-0-xd8v2-lhnl7   6.34.2        True     True    xexternaldns.externaldns.platform.upbound.io   19d   default
+default/query-api-test                                          xexternaldns.externaldns.platform.upbound.io/space-awg-kine-f7dxq-nkk2q     6.34.2        True     True    xexternaldns.externaldns.platform.upbound.io   55d   default
+
+## Output truncated...
+
+```
+
+### Query API request format
+
+The CLI can also return a version of your query request with the [`--debug` flag][debug-flag]. This flag returns the API spec request for your query.
+
+```shell
+up alpha query composite -A -d
+
+apiVersion: query.spaces.upbound.io/v1alpha1
+kind: SpaceQuery
+metadata:
+  creationTimestamp: null
+spec:
+  cursor: true
+  filter:
+    categories:
+      - composite
+    controlPlane: {}
+  limit: 500
+  objects:
+    controlPlane: true
+    table: {}
+  page: {}
+```
+
+For more complex queries, you can interact with the Query API like a Kubernetes-style API by creating a query and applying it with `kubectl`.
+
+The example below is a query for `claim` resources in every control plane, ordered from oldest to newest, that returns specific information about those claims.
+
+
+```yaml
+apiVersion: query.spaces.upbound.io/v1alpha1
+kind: SpaceQuery
+spec:
+  filter:
+    categories:
+      - claim
+  order:
+    - creationTimestamp: Asc
+  cursor: true
+  count: true
+  objects:
+    id: true
+    controlPlane: true
+    object:
+      kind: true
+      apiVersion: true
+      metadata:
+        name: true
+        uid: true
+      spec:
+        containers:
+          image: true
+```
+
+
+The Query API is served by the Spaces API endpoint. You can use `up ctx` to
+switch the kubectl context to the Spaces API ingress. After that, you can use
+`kubectl create` and receive the `response` for your query parameters.
+
+
+```shell
+kubectl create -f spaces-query.yaml -o yaml
+```
+
+Your `response` should look similar to this example:
+
+```yaml {copy-lines="none"}
+apiVersion: query.spaces.upbound.io/v1alpha1
+kind: SpaceQuery
+metadata:
+  creationTimestamp: "2024-08-08T14:41:46Z"
+  name: default
+response:
+  count: 3
+  cursor:
+    next: ""
+    page: 0
+    pageSize: 100
+    position: 0
+  objects:
+  - controlPlane:
+      name: query-api-test
+      namespace: default
+    id: default/query-api-test/823b2781-7e70-4d91-a6f0-ee8f455d67dc
+    object:
+      apiVersion: spaces.platform.upbound.io/v1alpha1
+      kind: Space
+      metadata:
+        name: space-awg-kine
+        resourceVersion: "803868"
+        uid: 823b2781-7e70-4d91-a6f0-ee8f455d67dc
+      spec: {}
+  - controlPlane:
+      name: test-1
+      namespace: test
+    id: test/test-1/08a573dd-851a-42cc-a600-b6f6ed37ee8d
+    object:
+      apiVersion: argo.discover.upbound.io/v1alpha1
+      kind: EKS
+      metadata:
+        name: test-1
+        resourceVersion: "4270320"
+        uid: 08a573dd-851a-42cc-a600-b6f6ed37ee8d
+      spec: {}
+  - controlPlane:
+      name: controlplane-query-api-test-spaces-playground
+      namespace: spaces-clusters
+    id: spaces-clusters/controlplane-query-api-test-spaces-playground/b5a6770f-1f85-4d09-8990-997c84bd4159
+    object:
+      apiVersion: spaces.platform.upbound.io/v1alpha1
+      kind: Space
+      metadata:
+        name: spaces-cluster-0
+        resourceVersion: "1408337"
+        uid: b5a6770f-1f85-4d09-8990-997c84bd4159
+      spec: {}
+```
+
+
+## Query API Explorer
+
+
+
+import CrdDocViewer from '@site/src/components/CrdViewer';
+
+### Query
+
+The Query resource allows you to query objects in a single control plane.
+
+
+
+### GroupQuery
+
+The GroupQuery resource allows you to query objects across a group of control planes.
+
+
+
+### SpaceQuery
+
+The SpaceQuery resource allows you to query objects across all control planes in a Space.
+
+
+
+
+
+
+[documentation]: /spaces/howtos/self-hosted/query-api
+[up-ctx]: /reference/cli-reference
+[up-alpha-get-command]: /reference/cli-reference
+[a-flag]: /reference/cli-reference
+[multiple-resource-types]: /reference/cli-reference
+[up-alpha-query-command]: /reference/cli-reference
+[sort-by-flag]: /reference/cli-reference
+[label-columns]: /reference/cli-reference
+[debug-flag]: /reference/cli-reference
+[kubectl-conventions]: https://kubernetes.io/docs/reference/kubectl/generated/kubectl_get/
diff --git a/spaces_versioned_docs/version-v1.14/howtos/secrets-management.md b/spaces_versioned_docs/version-v1.14/howtos/secrets-management.md
new file mode 100644
index 000000000..88e730ae5
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.14/howtos/secrets-management.md
@@ -0,0 +1,719 @@
+---
+title: Secrets Management
+sidebar_position: 20
+description: A guide for how to configure synchronizing external secrets into control
+  planes in a Space.
+---
+
+Upbound's _Shared Secrets_ is a built-in secrets management feature that
+provides an integrated way to manage secrets across your platform. It allows you
+to store sensitive data like passwords and certificates for your managed control
+planes as secrets in an external secret store.
+
+This feature is a wrapper around the [External Secrets Operator (ESO)][external-secrets-operator-eso] that pulls secrets from external vaults and distributes them across your platform.
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+). The SharedSecretStore and SharedExternalSecret CRDs have been stable since v1.9.
+
+For API specifications and configuration details, see the [Spaces API Reference](../../../reference/apis/spaces-api/).
+:::
+
+## Benefits
+
+The Shared Secrets feature allows you to:
+
+* Access secrets from a variety of external secret stores without operational overhead
+* Configure synchronization for multiple control planes in a group
+* Store and manage all your secrets centrally
+* Use Shared Secrets across all Upbound environments (Cloud and Disconnected Spaces)
+* Synchronize secrets across groups of control planes while maintaining clear security boundaries
+* Manage secrets at scale programmatically while ensuring proper isolation and access control
+
+## Understanding the architecture
+
+The Shared Secrets feature uses a hierarchical approach to centrally manage
+secrets and effectively control their distribution.
+
+![Shared Secrets workflow diagram](/img/shared-secrets-workflow.png)
+
+1. The flow begins at the group level, where you define your secret sources and distribution rules
+2. These rules automatically create corresponding resources in your control planes
+3. In each control plane, specific namespaces receive the secrets
+4. Changes at the group level automatically propagate through this chain
+
+## Component configuration
+
+Upbound Shared Secrets consists of two components:
+
+1. **SharedSecretStore**: Defines connections to external secret providers
+2. **SharedExternalSecret**: Specifies which secrets to synchronize and where
+
+
+### Connect to an external vault
+
+
+The `SharedSecretStore` component is the connection point to your external
+secret vaults. It provisions ClusterSecretStore resources into control planes
+within the group.
+
+
+#### AWS Secrets Manager
+
+
+
+In this example, you'll create a `SharedSecretStore` that connects to AWS
+Secrets Manager in `us-west-2`, grants access to all control planes labeled with
+`environment: production`, and makes these secrets available in the `default` and
+`crossplane-system` namespaces.
+
+
+You can configure access to AWS Secrets Manager using static credentials or
+workload identity.
+
+:::important
+While the underlying ESO API supports more auth methods, static credentials are currently the only supported auth method in Cloud Spaces.
+:::
+
+##### Static credentials
+
+1. Use the AWS CLI to create access credentials.
+
+
+2. Save your access credentials in a file:
+```ini
+# Create a text file with AWS credentials
+cat > aws-credentials.txt << EOF
+[default]
+aws_access_key_id = 
+aws_secret_access_key = 
+EOF
+```
+
+3. Next, store the access credentials in a secret in the namespace that needs access to the `SharedSecretStore`.
+```shell
+kubectl create secret \
+  generic aws-credentials \
+  -n default \
+  --from-file=creds=./aws-credentials.txt
+```
+
+4. Create a `SharedSecretStore` custom resource file called `secretstore.yaml`. Paste the following configuration:
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: aws-secrets
+spec:
+  # Define which control planes should receive this configuration
+  controlPlaneSelector:
+    labelSelectors:
+      - matchLabels:
+          environment: production
+
+  # Define which namespaces within those control planes can access secrets
+  namespaceSelector:
+    names:
+      - default
+      - crossplane-system
+
+  # Configure the connection to AWS Secrets Manager
+  provider:
+    aws:
+      service: SecretsManager
+      region: us-west-2
+      auth:
+        secretRef:
+          accessKeyIDSecretRef:
+            name: aws-credentials
+            key: access-key-id
+          secretAccessKeySecretRef:
+            name: aws-credentials
+            key: secret-access-key
+```
+
+
+
+##### Workload identity with IRSA
+
+
+
+You can also use AWS IAM Roles for Service Accounts (IRSA), depending on your
+organization's needs:
+
+1. Ensure you have deployed the Spaces software into an IRSA-enabled EKS cluster.
+2. Follow the AWS instructions to create an IAM OIDC provider with your EKS OIDC
+   provider URL.
+3. Determine the Spaces-generated `controlPlaneID` of your control plane:
+```shell
+kubectl get controlplane  -o jsonpath='{.status.controlPlaneID}'
+```
+
+4. Create an IAM trust policy in your AWS account to match the control plane.
+```json
+{
+    "Version": "2012-10-17",
+    "Statement": [
+        {
+            "Effect": "Allow",
+            "Principal": {
+                "Federated": "arn:aws:iam:::oidc-provider/"
+            },
+            "Action": "sts:AssumeRoleWithWebIdentity",
+            "Condition": {
+                "StringEquals": {
+                    ":aud": "sts.amazonaws.com",
+                    ":sub": [
+                        "system:serviceaccount:mxp--system:external-secrets-controller"
+                    ]
+                }
+            }
+        }
+    ]
+}
+```
+
+5. Update your Spaces deployment to annotate the SharedSecrets service account
+   with the role ARN.
+```shell
+up space upgrade ... \
+  --set controlPlanes.sharedSecrets.serviceAccount.customAnnotations."eks\.amazonaws\.com/role-arn"=""
+```
+
+6. Create a SharedSecretStore and reference the SharedSecrets service account:
+```yaml {copy-lines="all"}
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: aws-sm
+  namespace: default
+spec:
+  provider:
+    aws:
+      service: SecretsManager
+      region: 
+      auth:
+        jwt:
+          serviceAccountRef:
+            name: external-secrets-controller
+  controlPlaneSelector:
+    names:
+      - 
+  namespaceSelector:
+    names:
+      - default
+```
+
+When you create a `SharedSecretStore`, the underlying mechanism:
+
+1. Applies at the group level
+2. Determines which control planes should receive this configuration by the `controlPlaneSelector`
+3. Automatically creates a ClusterSecretStore inside each identified control plane
+4. Maintains a connection in each control plane with the ClusterSecretStore
+   credentials and configuration from the parent SharedSecretStore
+
+Upbound automatically generates a ClusterSecretStore in each matching control
+plane when you create a SharedSecretStore.
+
+```yaml {copy-lines="none"}
+# Automatically created in each matching control plane
+apiVersion: external-secrets.io/v1beta1
+kind: ClusterSecretStore
+metadata:
+  name: aws-secrets # Name matches the parent SharedSecretStore
+spec:
+  provider:
+    upboundspaces:
+      storeRef:
+        name: aws-secrets
+```
+
+When you create the SharedSecretStore, the controller replaces the provider with
+a special provider called `upboundspaces`. This provider references the
+SharedSecretStore object in the Spaces API. This avoids copying the actual cloud
+credentials from Spaces to each control plane.
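+
+To verify the projection, you can switch your context into one of the selected
+control planes and check for the generated store. A quick sanity check, using
+the store name from the example above; the `up ctx` path segments are
+placeholders for your own organization, Space, group, and control plane:
+
+```bash
+# Point kubectl at one of the selected control planes
+up ctx <org>/<space>/<group>/<controlplane>
+
+# The projected ClusterSecretStore should exist and report Ready
+kubectl get clustersecretstores aws-secrets
+```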
+
+This workflow lets you configure the store connection once at the group level
+and automatically propagate it to each control plane. Individual control planes
+can use the store without exposure to the group-level configuration, and updates
+to the parent resource propagate to all child ClusterSecretStores.
+
+
+#### Azure Key Vault
+
+
+:::important
+While the underlying ESO API supports more auth methods, static credentials are currently the only supported auth method in Cloud Spaces.
+:::
+
+##### Static credentials
+
+1. Use the Azure CLI to create a service principal and authentication file.
+2. Save the service principal credentials in a file called `azure-credentials.json`:
+```json
+{
+  "appId": "myAppId",
+  "displayName": "myServicePrincipalName",
+  "password": "myServicePrincipalPassword",
+  "tenant": "myTenantId"
+}
+```
+
+3. Store the credentials as a Kubernetes secret:
+```shell
+kubectl create secret \
+  generic azure-secret-sp \
+  -n default \
+  --from-file=creds=./azure-credentials.json
+```
+
+4. Create a SharedSecretStore referencing these credentials:
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: azure-kv
+spec:
+  provider:
+    azurekv:
+      tenantId: ""
+      vaultUrl: ""
+      authSecretRef:
+        clientId:
+          name: azure-secret-sp
+          key: ClientID
+        clientSecret:
+          name: azure-secret-sp
+          key: ClientSecret
+  controlPlaneSelector:
+    names:
+      - 
+  namespaceSelector:
+    names:
+      - default
+```
+
+##### Workload identity
+
+
+You can also use Entra Workload Identity Federation to access Azure Key Vault
+without needing to manage secrets.
+
+To use Entra Workload ID with AKS:
+
+
+1. Deploy the Spaces software into a [workload identity-enabled AKS cluster][workload-identity-enabled-aks-cluster].
+2. Retrieve the OIDC issuer URL of the AKS cluster:
+```ini
+az aks show --name "" \
+  --resource-group "" \
+  --query "oidcIssuerProfile.issuerUrl" \
+  --output tsv
+```
+
+3. Use the Azure CLI to make a managed identity:
+```ini
+az identity create \
+  --name "" \
+  --resource-group "" \
+  --location "" \
+  --subscription ""
+```
+
+4. Look up the managed identity's client ID:
+```ini
+az identity show \
+  --resource-group "" \
+  --name "" \
+  --query 'clientId' \
+  --output tsv
+```
+
+5. Update your Spaces deployment to annotate the SharedSecrets service account with the associated Entra application client ID from the previous step:
+```ini
+up space upgrade ... \
+  --set controlPlanes.sharedSecrets.serviceAccount.customAnnotations."azure\.workload\.identity/client-id"="" \
+  --set-string controlPlanes.sharedSecrets.pod.customLabels."azure\.workload\.identity/use"="true"
+```
+
+6. Determine the Spaces-generated `controlPlaneID` of your control plane. When you deploy a `kind: controlplane` in a Space, the Spaces software deploys a set of pods in a new namespace following the format `mxp--system`.
+```ini
+kubectl get controlplane  -o jsonpath='{.status.controlPlaneID}'
+```
+
+7. Create a federated identity credential.
+```ini
+FEDERATED_IDENTITY_CREDENTIAL_NAME=
+USER_ASSIGNED_IDENTITY_NAME=
+RESOURCE_GROUP=
+AKS_OIDC_ISSUER=
+CONTROLPLANE_ID=
+az identity federated-credential create --name ${FEDERATED_IDENTITY_CREDENTIAL_NAME} --identity-name "${USER_ASSIGNED_IDENTITY_NAME}" --resource-group "${RESOURCE_GROUP}" --issuer "${AKS_OIDC_ISSUER}" --subject system:serviceaccount:"mxp-${CONTROLPLANE_ID}-system:external-secrets-controller" --audience api://AzureADTokenExchange
+```
+
+8. Assign the `Key Vault Secrets User` role to the user-assigned managed identity that you created earlier.
+This step gives the managed identity permission to read secrets from the key vault:
+```ini
+az role assignment create \
+  --assignee-object-id "${IDENTITY_PRINCIPAL_ID}" \
+  --role "Key Vault Secrets User" \
+  --scope "${KEYVAULT_RESOURCE_ID}" \
+  --assignee-principal-type ServicePrincipal
+```
+
+:::important
+You must manually restart a workload's pod when you add the annotation to the running pod's service account. The Entra workload identity mutating admission webhook requires a restart to inject the necessary environment.
+:::
+
+9. Create a `SharedSecretStore`. Replace `vaultUrl` with the URL of your Azure Key Vault instance. Replace `identityId` with the client ID of the managed identity created earlier:
+```yaml {copy-lines="all"}
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: azure-kv
+spec:
+  provider:
+    azurekv:
+      authType: WorkloadIdentity
+      vaultUrl: ""
+  controlPlaneSelector:
+    names:
+      - 
+  namespaceSelector:
+    names:
+      - default
+```
+
+
+
+
+#### Google Cloud Secret Manager
+
+
+
+You can configure access to Google Cloud Secret Manager using static credentials or workload identity. Below are instructions for configuring either. See the [ESO provider API][eso-provider-api] for more information.
+
+:::important
+While the underlying ESO API supports more auth methods, static credentials are currently the only supported auth method in Cloud Spaces.
+:::
+
+##### Static credentials
+
+1. Use the [GCP CLI][gcp-cli] to create access credentials.
+2. Save the output in a file called `gcp-credentials.json`.
+3. Store the access credentials in a secret in the same namespace as the `SharedSecretStore`.
+   ```shell {label="kube-create-secret",copy-lines="all"}
+   kubectl create secret \
+     generic gcpsm-secret \
+     -n default \
+     --from-file=creds=./gcp-credentials.json
+   ```
+
+4. Create a `SharedSecretStore`, referencing the secret created earlier. Replace `projectID` with your GCP Project ID:
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: gcp-sm
+spec:
+  provider:
+    gcpsm:
+      auth:
+        secretRef:
+          secretAccessKeySecretRef:
+            name: gcpsm-secret
+            key: creds
+      projectID: 
+  controlPlaneSelector:
+    names:
+      - 
+  namespaceSelector:
+    names:
+      - default
+```
+
+:::tip
+The example above maps a Shared Secret Store into a single namespace of a single control plane. Read [control plane selection][control-plane-selection] and [namespace selection][namespace-selection] to learn how to map into one or more namespaces of one or more control planes.
+:::
+
+
+##### Workload identity with Service Accounts to IAM Roles
+
+
+To configure this, grant the `roles/iam.workloadIdentityUser` role to the
+Kubernetes service account in the control plane namespace so it can impersonate
+the IAM service account.
+
+1. Ensure you've deployed Spaces on a [Workload Identity Federation-enabled][workload-identity-federation-enabled] GKE cluster.
+2. Determine the Spaces-generated `controlPlaneID` of your control plane. When you deploy a `kind: controlplane` in a Space, the Spaces software deploys a set of pods in a new namespace following the format `mxp--system`.
+```ini
+kubectl get controlplane  -o jsonpath='{.status.controlPlaneID}'
+```
+
+3. Create a GCP IAM service account with the [GCP CLI][gcp-cli-1]:
+```ini
+gcloud iam service-accounts create  \
+  --project=
+```
+
+4. Grant the IAM service account the role to access GCP Secret Manager:
+```ini
+SA_NAME=
+IAM_SA_PROJECT_ID=
+gcloud projects add-iam-policy-binding IAM_SA_PROJECT_ID \
+  --member "serviceAccount:SA_NAME@IAM_SA_PROJECT_ID.iam.gserviceaccount.com" \
+  --role roles/secretmanager.secretAccessor
+```
+
+5. When you enable the Shared Secrets feature, a service account gets created in each control plane for the External Secrets Operator. Apply a [GCP IAM policy binding][gcp-iam-policy-binding] to associate this service account with the desired GCP IAM role.
+```ini
+PROJECT_ID=
+PROJECT_NUMBER=
+CONTROLPLANE_ID=
+gcloud projects add-iam-policy-binding projects/${PROJECT_ID} \
+  --role "roles/iam.workloadIdentityUser" \
+  --member=principal://iam.googleapis.com/projects/${PROJECT_NUMBER}/locations/global/workloadIdentityPools/${PROJECT_ID}.svc.id.goog/subject/ns/mxp-${CONTROLPLANE_ID}-system/sa/external-secrets-controller
+```
+
+6. Update your Spaces deployment to annotate the SharedSecrets service account with the GCP IAM service account's identifier:
+```ini
+up space upgrade ... \
+  --set controlPlanes.sharedSecrets.serviceAccount.customAnnotations."iam\.gke\.io/gcp-service-account"=""
+```
+
+7. Create a `SharedSecretStore`. Replace `projectID` with your GCP Project ID:
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: gcp-sm
+spec:
+  provider:
+    gcpsm:
+      projectID: 
+  controlPlaneSelector:
+    names:
+      - 
+  namespaceSelector:
+    names:
+      - default
+```
+
+:::tip
+The example above maps a Shared Secret Store into a single namespace of a single control plane. Read [control plane selection][control-plane-selection-1] and [namespace selection][namespace-selection-2] to learn how to map into one or more namespaces of one or more control planes.
+:::
+
+### Manage your secret distribution
+
+After you create your SharedSecretStore, you can define which secrets to
+distribute using SharedExternalSecret:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedExternalSecret
+metadata:
+  name: database-credentials
+  namespace: default
+spec:
+  # Select the same control planes as your SharedSecretStore
+  controlPlaneSelector:
+    labelSelectors:
+      - matchLabels:
+          environment: production
+
+  externalSecretSpec:
+    refreshInterval: 1h
+    secretStoreRef:
+      name: aws-secrets # References the SharedSecretStore name
+      kind: ClusterSecretStore
+    target:
+      name: db-credentials
+    data:
+      - secretKey: username
+        remoteRef:
+          key: prod/database/credentials
+          property: username
+      - secretKey: password
+        remoteRef:
+          key: prod/database/credentials
+          property: password
+```
+
+This configuration:
+
+* Pulls database credentials from your external secret provider
+* Creates secrets in all production control planes
+* Refreshes the secrets every hour
+* Creates a secret called `db-credentials` in each control plane
+
+When you create a SharedExternalSecret at the group level, Upbound's system
+creates a template for the corresponding ClusterExternalSecrets in each selected
+control plane.
+
+The example below illustrates the ClusterExternalSecret that Upbound creates:
+
+```yaml
+# Inside each matching control plane:
+apiVersion: external-secrets.io/v1beta1
+kind: ClusterExternalSecret
+metadata:
+  name: database-credentials
+spec:
+  refreshInterval: 1h
+  secretStoreRef:
+    name: aws-secrets
+    kind: ClusterSecretStore
+  data:
+    - secretKey: username
+      remoteRef:
+        key: prod/database/credentials
+        property: username
+```
+
+The hierarchy in this configuration is:
+
+1. SharedExternalSecret (group level) defines what secrets to distribute
+2. ClusterExternalSecret (control plane level) manages the distribution within
+   each control plane
+3. Kubernetes Secrets (namespace level) are created in specified namespaces
+
+
+#### Control plane selection
+
+To configure which control planes in a group you want to project a SecretStore into, use the `spec.controlPlaneSelector` field. You can either use `labelSelectors` or the `names` of a control plane directly. A control plane matches if any of the label selectors match.
+
+This example matches all control planes in the group that have `environment: production` as a label:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: my-secret-store
+spec:
+  controlPlaneSelector:
+    labelSelectors:
+      - matchLabels:
+          environment: production
+```
+
+You can use the more complex `matchExpressions` to match labels based on an expression. This example matches control planes that have label `environment: production` or `environment: staging`:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: my-secret-store
+spec:
+  controlPlaneSelector:
+    labelSelectors:
+      - matchExpressions:
+          - { key: environment, operator: In, values: [production,staging] }
+```
+
+You can also specify the names of control planes directly:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: my-secret-store
+spec:
+  controlPlaneSelector:
+    names:
+      - controlplane-dev
+      - controlplane-staging
+      - controlplane-prod
+```
+
+
+#### Namespace selection
+
+To configure which namespaces **within each matched control plane** to project the secret store into, use the `spec.namespaceSelector` field. The projected secret store only appears in the namespaces matching the provided selector. You can either use `labelSelectors` or the `names` of namespaces directly. A namespace matches if any of the label selectors match.
+
+**For all control planes matched by** `spec.controlPlaneSelector`, this example matches all namespaces in each selected control plane that have `team: team1` as a label:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: my-secret-store
+spec:
+  namespaceSelector:
+    labelSelectors:
+      - matchLabels:
+          team: team1
+```
+
+You can use the more complex `matchExpressions` to match labels based on an expression. This example matches namespaces that have label `team: team1` or `team: team2`:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: my-secret-store
+spec:
+  namespaceSelector:
+    labelSelectors:
+      - matchExpressions:
+          - { key: team, operator: In, values: [team1,team2] }
+```
+
+You can also specify the names of namespaces directly:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: my-secret-store
+spec:
+  namespaceSelector:
+    names:
+      - team1-namespace
+      - team2-namespace
+```
+
+## Configure secrets directly in a control plane
+
+
+The sections above explain how to use group-scoped resources to project secrets into multiple control planes. You can also use ESO API types directly in a control plane, as you would in standalone Crossplane or Kubernetes.
+
+
+See the [ESO documentation][eso-documentation] for a full guide on using the API types.
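+
+As a minimal sketch of the direct approach, assuming a `ClusterSecretStore`
+named `aws-secrets` is already projected into the control plane and the vault
+holds a `prod/database/credentials` entry, a namespaced ESO `ExternalSecret`
+looks like this:
+
+```yaml
+apiVersion: external-secrets.io/v1beta1
+kind: ExternalSecret
+metadata:
+  name: db-credentials
+  namespace: default
+spec:
+  refreshInterval: 1h
+  secretStoreRef:
+    name: aws-secrets
+    kind: ClusterSecretStore
+  target:
+    name: db-credentials # Kubernetes Secret created in this namespace
+  data:
+    - secretKey: password
+      remoteRef:
+        key: prod/database/credentials
+        property: password
+```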
+
+## Best practices
+
+When you configure secrets management in your Upbound environment, keep the
+following best practices in mind:
+
+**Use consistent labeling schemes** across your control planes for predictable
+and manageable secret distribution.
+
+**Organize your secrets** in your external provider using a hierarchical
+structure that mirrors your control plane organization.
+
+**Set appropriate refresh intervals** based on your security requirements and the
+nature of the secrets.
+
+**Use namespace selection sparingly** to limit secret distribution to only the
+namespaces that need them.
+
+**Use separate tokens for each environment.** Keep them in distinct
+SharedSecretStores. Users could otherwise bypass SharedExternalSecret selectors by
+creating ClusterExternalSecrets directly in control planes, which grants access to all
+secrets available to that token.
+
+**Document your secret management architecture**, including which control planes
+should receive which secrets.
+
+[control-plane-selection]: #control-plane-selection
+[namespace-selection]: #namespace-selection
+[control-plane-selection-1]: #control-plane-selection
+[namespace-selection-2]: #namespace-selection
+
+[external-secrets-operator-eso]: https://external-secrets.io
+[workload-identity-enabled-aks-cluster]: https://learn.microsoft.com/en-us/azure/aks/workload-identity-deploy-cluster
+[eso-provider-api]: https://external-secrets.io/latest/provider/google-secrets-manager/
+[gcp-cli]: https://cloud.google.com/iam/docs/creating-managing-service-account-keys
+[workload-identity-federation-enabled]: https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity#enable_on_clusters_and_node_pools
+[gcp-cli-1]: https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity#kubernetes-sa-to-iam
+[gcp-iam-policy-binding]: https://cloud.google.com/sdk/gcloud/reference/iam/service-accounts/add-iam-policy-binding
+[eso-documentation]: https://external-secrets.io/latest/introduction/getting-started/
diff --git a/spaces_versioned_docs/version-v1.14/howtos/self-hosted/_category_.json b/spaces_versioned_docs/version-v1.14/howtos/self-hosted/_category_.json
new file mode 100644
index 000000000..5bf23bb0a
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.14/howtos/self-hosted/_category_.json
@@ -0,0 +1,11 @@
+{
+  "label": "Self-Hosted Spaces",
+  "position": 2,
+  "collapsed": true,
+  "customProps": {
+    "plan": "business"
+  }
+
+}
+
+
diff --git a/spaces_versioned_docs/version-v1.14/howtos/self-hosted/administer-features.md b/spaces_versioned_docs/version-v1.14/howtos/self-hosted/administer-features.md
new file mode 100644
index 000000000..ce878014e
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.14/howtos/self-hosted/administer-features.md
@@ -0,0 +1,121 @@
+---
+title: Administer features
+sidebar_position: 12
+description: Enable and disable features in Spaces
+---
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Feature flags and their availability may vary by version.
+:::
+
+This guide shows how to enable or disable features in your self-hosted Space.
+
+## Shared secrets
+
+**Status:** Preview
+
+This feature is enabled by default in Cloud Spaces.
+
+To enable this feature in a self-hosted Space, set
+`features.alpha.sharedSecrets.enabled=true` when installing the Space:
+
+```bash
+up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
+  ... \
+  --set "features.alpha.sharedSecrets.enabled=true"
+```
+
+
+## Observability
+
+**Status:** GA
+
+**Available from:** Spaces v1.13+
+
+This feature is enabled by default in Cloud Spaces.
+
+
+
+To enable this feature in a self-hosted Space, set
+`observability.enabled=true` (`features.alpha.observability.enabled=true` before `v1.14.0`) when installing the Space:
+
+```bash
+up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
+  ... \
+  --set "observability.enabled=true"
+```
+
+The observability feature collects telemetry data from user-facing control
+plane workloads like:
+
+* Crossplane
+* Providers
+* Functions
+
+Self-hosted Spaces users can add control plane system workloads such as the
+`api-server` and `etcd` by setting the
+`observability.collectors.includeSystemTelemetry` Helm flag to true.
+
+### Sensitive data
+
+To avoid exposing sensitive data in the `SharedTelemetryConfig` resource, use
+Kubernetes secrets to store the sensitive data and reference the secret in the
+`SharedTelemetryConfig` resource.
+
+Create the secret in the same namespace/group as the `SharedTelemetryConfig`
+resource. The example below uses `kubectl create secret` to create a new secret:
+
+```bash
+kubectl create secret generic sensitive -n  \
+  --from-literal=apiKey='YOUR_API_KEY'
+```
+
+Next, reference the secret in the `SharedTelemetryConfig` resource:
+
+```yaml
+apiVersion: observability.spaces.upbound.io/v1alpha1
+kind: SharedTelemetryConfig
+metadata:
+  name: newrelic
+spec:
+  configPatchSecretRefs:
+    - name: sensitive
+      key: apiKey
+      path: exporters.otlphttp.headers.api-key
+  controlPlaneSelector:
+    labelSelectors:
+      - matchLabels:
+          org: foo
+  exporters:
+    otlphttp:
+      endpoint: https://otlp.nr-data.net
+      headers:
+        api-key: dummy # This value is replaced by the secret value, can be omitted
+  exportPipeline:
+    metrics: [otlphttp]
+    traces: [otlphttp]
+    logs: [otlphttp]
+```
+
+The `configPatchSecretRefs` field in the `spec` specifies the secret `name`,
+`key`, and `path` values to inject the secret value in the
+`SharedTelemetryConfig` resource.
+
+## Shared backups
+
+As of Spaces `v1.12.0`, this feature is enabled by default.
+
+To disable it in a self-hosted Space, pass `features.alpha.sharedBackup.enabled=false` as a Helm chart value:
+`--set "features.alpha.sharedBackup.enabled=false"`
+
+## Query API
+
+**Status:** Preview
+
+The Query API is available in the Cloud Spaces offering and enabled by default.
+
+The Query API is required for self-hosted deployments with connected Spaces. See the
+related [documentation][documentation]
+to enable this feature.
+
+[documentation]: /spaces/howtos/query-api/
diff --git a/spaces_versioned_docs/version-v1.14/howtos/self-hosted/attach-detach.md b/spaces_versioned_docs/version-v1.14/howtos/self-hosted/attach-detach.md
new file mode 100644
index 000000000..1465921cf
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.14/howtos/self-hosted/attach-detach.md
@@ -0,0 +1,198 @@
+---
+title: Connect or disconnect a Space
+sidebar_position: 12
+description: Enable and connect self-hosted Spaces to the Upbound console
+---
+:::info API Version Information
+This guide applies to **Spaces v1.8.0 and later** (Self-Hosted only). Connection to the Upbound console requires the Query API and RBAC to be enabled.
+
+For Query API setup details, see [Deploy Query API infrastructure](./query-api.md).
+:::
+
+:::important
+This feature is in preview.
+Starting with Spaces `v1.8.0`, you must
+deploy and [enable the Query API][enable-the-query-api] and [enable Upbound
+RBAC][enable-upbound-rbac] to connect a Space to Upbound.
+:::
+
+[Upbound][upbound] allows you to connect self-hosted Spaces and enables a streamlined operations and debugging experience in your Console.
+
+## Usage
+
+### Connect
+
+Before you begin, make sure you have:
+
+- An existing Upbound [organization][organization] in Upbound SaaS.
+- The `up` CLI installed and logged in to your organization.
+- `kubectl` installed with the kubecontext of your self-hosted Space cluster.
+- A `token.json` license, provided by your Upbound account representative.
+- The [Query API][query-api] enabled in the self-hosted Space.
+
+Export a name for your Space as `UPBOUND_SPACE_NAME`. If you don't set one, `up` automatically generates a name for you:
+
+```ini
+export UPBOUND_SPACE_NAME=your-self-hosted-space
+```
+
+#### With up CLI
+
+:::tip
+The command tries to connect the Space to the org account context pointed at by your `up` CLI profile. Make sure you've logged into Upbound SaaS with `up login -a ` before trying to connect the Space.
+:::
+
+Connect the Space to the Console:
+
+```bash
+up space connect "${UPBOUND_SPACE_NAME}"
+```
+
+This command installs a Connect agent in the `upbound-system` namespace of your Space, and creates a service account and permissions in your Upbound cloud organization.
+
+#### With Helm
+
+Export your Upbound org account name to an environment variable called `UPBOUND_ORG_NAME`. You can see this value by running `up org list` after logging on to Upbound.
+
+```ini
+export UPBOUND_ORG_NAME=your-org-name
+```
+
+Create a new robot token and export it to an environment variable called `UPBOUND_TOKEN`:
+
+```bash
+up robot create "${UPBOUND_SPACE_NAME}" --description="Robot used for authenticating Space '${UPBOUND_SPACE_NAME}' with Upbound Connect"
+export UPBOUND_TOKEN=$(up robot token create "$UPBOUND_SPACE_NAME" "$UPBOUND_SPACE_NAME" --file - | jq -r '.token')
+```
+
+:::note
+Follow the [`jq` installation guide][jq-install] if your machine doesn't include
+it by default.
+:::
+
+Create a secret containing the robot token:
+
+```bash
+kubectl create secret -n upbound-system generic connect-token --from-literal=token=${UPBOUND_TOKEN}
+```
+
+Specify your username and password for the Helm OCI registry:
+
+```bash
+jq -r .token $SPACES_TOKEN_PATH | helm registry login xpkg.upbound.io -u $(jq -r .accessId $SPACES_TOKEN_PATH) --password-stdin
+```
+
+In the same cluster where you installed the Spaces software, install the Upbound Connect agent with your token secret.
+
+```bash
+helm -n upbound-system upgrade --install agent \
+  oci://xpkg.upbound.io/spaces-artifacts/agent \
+  --version "0.0.0-441.g68777b9" \
+  --set "image.repository=xpkg.upbound.io/spaces-artifacts/agent" \
+  --set "registration.image.repository=xpkg.upbound.io/spaces-artifacts/register-init" \
+  --set "imagePullSecrets[0].name=upbound-pull-secret" \
+  --set "registration.enabled=true" \
+  --set "space=${UPBOUND_SPACE_NAME}" \
+  --set "organization=${UPBOUND_ORG_NAME}" \
+  --set "tokenSecret=connect-token" \
+  --wait
+```
+
+
+#### View your Space in the Console
+
+
+Go to the [Upbound Console][upbound-console], log in, and choose the newly connected Space from the Space selector dropdown.
+
+![A screenshot of the Upbound Console space selector dropdown](/img/attached-space.png)
+
+:::note
+You can only connect a self-hosted Space to a single organization at a time.
+:::
+
+### Disconnect
+
+#### With up CLI
+
+To disconnect a self-hosted Space, even one that no longer exists, run the following command:
+
+```bash
+up space disconnect "${UPBOUND_SPACE_NAME}"
+```
+
+If the Space still exists, this command uninstalls the Connect agent and deletes the associated service account and permissions.
+
+#### With Helm
+
+To disconnect a self-hosted Space, even one that no longer exists, run the following command:
+
+```bash
+helm delete -n upbound-system agent
+```
+
+Clean up the robot token you created for this self-hosted Space:
+
+```bash
+up robot delete "${UPBOUND_SPACE_NAME}" --force
+```
+
+## Security model
+
+### Architecture
+
+![An architectural diagram of a self-hosted Space attached to Upbound](/img/console-attach-architecture.jpg)
+
+:::note
+This diagram illustrates a self-hosted Space running in AWS connected to the global Upbound Console. The same model applies to a Space running in AKS, GKE, or other Kubernetes environments.
+:::
+
+### Data path
+
+Upbound uses a Pub/Sub model over TLS to communicate between Upbound's global
+console and your self-hosted Space. A self-hosted Space establishes a secure
+connection with `connect.upbound.io`, `api.upbound.io`, and `auth.upbound.io` and subscribes to an
+endpoint.
+
+:::important
+Add `connect.upbound.io`, `api.upbound.io`, and `auth.upbound.io` to your organization's list of
+allowed endpoints.
+:::
+
+The
+Upbound Console communicates with the Space through that endpoint. The data flow
+is:
+
+1. Users sign in to the Upbound Console, redirecting to authenticate with an organization's configured Identity Provider via SSO.
+2. Once authenticated, actions in the Console, like listing control planes or specific resource types from a control plane, generate requests. These requests post as messages to the Upbound Connect service.
+3. A user's self-hosted Space polls the Upbound Connect service periodically for new messages, verifies the authenticity of each message, and fulfills the request it contains.
+4. A user's self-hosted Space returns the results of the request to the Upbound Connect service, and the Console renders the results in the user's browser session.
+
+**Upbound never stores data originating from a self-hosted Space.** The data is transient and only exposed in the user's browser session. The Console needs this data to render your resources and control planes in the UI.
+
+### Data transmitted
+
+Users interact with the Upbound Console to generate request queries to the Upbound Connect service while exploring, managing, or debugging a self-hosted Space. These requests send data back to the user's browser session in the Console, including:
+
+* Metadata for the Space
+* Metadata for control planes in the Space
+* Configuration manifests for various resource types within your Space: Crossplane managed resources, composite resources, composite resource claims, Upbound shared secrets, Upbound shared backups, Crossplane providers, ProviderConfigs, Configurations, and Crossplane Composite Functions.
+
+:::important
+This data only concerns resource configuration. The data _inside_ the managed
+resources in your Space isn't visible at any point.
+:::
+
+**Upbound can't see your data.** Upbound doesn't have access to session-based data rendered for your users in the Upbound Console. Upbound has no information about your self-hosted Space, other than that you've connected a self-hosted Space.
### Threat vectors

Only users with editor or administrative permissions can make changes using the Console, like creating or deleting control planes or groups.

[enable-the-query-api]: /spaces/howtos/self-hosted/query-api
[enable-upbound-rbac]: /manuals/platform/concepts/authorization/upbound-rbac
[upbound]: /manuals/console/upbound-console
[organization]: /manuals/platform/concepts/identity-management/organizations
[query-api]: /spaces/howtos/self-hosted/query-api
[jq-install]: https://jqlang.org/download/

[upbound-console]: https://console.upbound.io
diff --git a/spaces_versioned_docs/version-v1.14/howtos/self-hosted/billing.md b/spaces_versioned_docs/version-v1.14/howtos/self-hosted/billing.md
new file mode 100644
index 000000000..145ff9f03
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.14/howtos/self-hosted/billing.md
@@ -0,0 +1,307 @@
---
title: Self-Hosted Space Billing
sidebar_position: 50
description: A guide for how billing works in an Upbound Space
---

:::info API Version Information
This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Billing models may evolve between versions; see Capacity Licensing for alternative models.

For version-specific features and capacity-based licensing details, see [Capacity Licensing](/spaces/howtos/self-hosted/capacity-licensing).
:::

Spaces are a self-hosting feature of Upbound's [flagship product][flagship-product] for platform teams to deploy control planes in their self-managed environments. You can install Spaces into any Kubernetes cluster in your own cloud account, on-premises data center, or on the edge. The pricing is usage-based and requires an Upbound account and subscription. The billing unit is a `Loop`.

:::info
This guide describes the traditional usage-based billing model using object storage. For disconnected or air-gapped environments, consider [Capacity Licensing](/spaces/howtos/self-hosted/capacity-licensing), which provides a simpler fixed-capacity model with local usage tracking.
:::

## Billing details

Spaces **aren't connected** to Upbound's global service. To enable proper billing, the Spaces software ships a controller whose responsibility is to collect billing data from your Spaces deployment. The collection and storage of your billing data happens entirely within your environment; no data is automatically emitted back to Upbound's global service. This data gets written to object storage of your choice. AWS, Azure, and GCP are currently supported. The Spaces software exports billing usage data every ~15 seconds.

Spaces customers must periodically provide the billing data to Upbound. Contact your Upbound sales representative to learn more.

## AWS S3

Configure billing to write to an S3 bucket by providing the following values at install-time. Create an S3 bucket if you don't already have one.

### IAM policy

You must create an IAM policy and attach it to the IAM user (for static credentials) or IAM role (for assumed roles).
The policy example below enables the necessary S3 permissions:

```json
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Sid": "EnableS3Permissions",
      "Effect": "Allow",
      "Action": [
        "s3:PutObject",
        "s3:GetObject",
        "s3:ListBucket",
        "s3:DeleteObject"
      ],
      "Resource": [
        "arn:aws:s3:::your-bucket-name/*",
        "arn:aws:s3:::your-bucket-name"
      ]
    },
    {
      "Sid": "ListBuckets",
      "Effect": "Allow",
      "Action": "s3:ListAllMyBuckets",
      "Resource": "*"
    }
  ]
}
```

### Authentication with static credentials

In your Spaces install cluster, create a secret in the `upbound-system` namespace. This secret must contain keys `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`.

```bash
kubectl create secret generic billing-credentials -n upbound-system \
  --from-literal=AWS_ACCESS_KEY_ID=<access-key-id> \
  --from-literal=AWS_SECRET_ACCESS_KEY=<secret-access-key>
```

Install the Space software, providing the billing details along with the other required values.

```bash {hl_lines="2-6"}
helm -n upbound-system upgrade --install spaces ... \
  --set "billing.enabled=true" \
  --set "billing.storage.provider=aws" \
  --set "billing.storage.aws.region=<region>" \
  --set "billing.storage.aws.bucket=<bucket-name>" \
  --set "billing.storage.secretRef.name=billing-credentials"
  ...
```

```bash {hl_lines="2-6"}
up space init ... \
  --set "billing.enabled=true" \
  --set "billing.storage.provider=aws" \
  --set "billing.storage.aws.region=<region>" \
  --set "billing.storage.aws.bucket=<bucket-name>" \
  --set "billing.storage.secretRef.name=billing-credentials"
  ...
```

### Authentication with an IAM role

To use short-lived credentials with an assumed IAM role, create an IAM role with established trust to the `vector` service account in all `mxp-*-system` namespaces.

```json
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Principal": {
        "Federated": "arn:aws:iam::12345678912:oidc-provider/oidc.eks.eu-west-2.amazonaws.com/id/YOUROIDCPROVIDERID"
      },
      "Action": "sts:AssumeRoleWithWebIdentity",
      "Condition": {
        "StringLike": {
          "oidc.eks.eu-west-2.amazonaws.com/id/YOUROIDCPROVIDERID:sub": "system:serviceaccount:mxp-*-system:vector"
        }
      }
    }
  ]
}
```

For more information about workload identities, review the [Workload-identity Configuration documentation][workload-identity-configuration-documentation].

```bash {hl_lines="2-7"}
helm -n upbound-system upgrade --install spaces ... \
  --set "billing.enabled=true" \
  --set "billing.storage.provider=aws" \
  --set "billing.storage.aws.region=<region>" \
  --set "billing.storage.aws.bucket=<bucket-name>" \
  --set "billing.storage.secretRef.name=" \
  --set "controlPlanes.vector.serviceAccount.customAnnotations[eks.amazonaws.com/role-arn]=<role-arn>"
  ...
```

```bash {hl_lines="2-7"}
up space init ... \
  --set "billing.enabled=true" \
  --set "billing.storage.provider=aws" \
  --set "billing.storage.aws.region=<region>" \
  --set "billing.storage.aws.bucket=<bucket-name>" \
  --set "billing.storage.secretRef.name=" \
  --set "controlPlanes.vector.serviceAccount.customAnnotations[eks.amazonaws.com/role-arn]=<role-arn>"
  ...
```

:::note
You must set `billing.storage.secretRef.name` to an empty string when using an assumed role.
:::
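For reference, creating the policy and attaching it to the IAM user whose keys go into the `billing-credentials` secret might look like the following AWS CLI sketch; the policy file, user name, and account ID are illustrative:

```bash
# Create the billing policy from the JSON above and attach it to the user.
aws iam create-policy \
  --policy-name spaces-billing-s3 \
  --policy-document file://billing-policy.json
aws iam attach-user-policy \
  --user-name spaces-billing \
  --policy-arn "arn:aws:iam::123456789012:policy/spaces-billing-s3"
```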
## Azure blob storage

Configure billing to write to a blob in Azure by providing the following values at install-time. Create a storage account and container if you don't already have one.

Then, on the cluster where you installed the Spaces software, create a secret in `upbound-system`. This secret must contain keys `AZURE_TENANT_ID`, `AZURE_CLIENT_ID`, and `AZURE_CLIENT_SECRET`. Make sure to replace the values with details generated from your Azure account.

```bash
kubectl create secret generic billing-credentials -n upbound-system \
  --from-literal=AZURE_TENANT_ID=<tenant-id> \
  --from-literal=AZURE_CLIENT_ID=<client-id> \
  --from-literal=AZURE_CLIENT_SECRET=<client-secret>
```

Install the Space software, providing the billing details along with the other required values.

```bash {hl_lines="2-6"}
helm -n upbound-system upgrade --install spaces ... \
  --set "billing.enabled=true" \
  --set "billing.storage.provider=azure" \
  --set "billing.storage.azure.storageAccount=<storage-account>" \
  --set "billing.storage.azure.container=<container-name>" \
  --set "billing.storage.secretRef.name=billing-credentials"
  ...
```

```bash {hl_lines="2-6"}
up space init ... \
  --set "billing.enabled=true" \
  --set "billing.storage.provider=azure" \
  --set "billing.storage.azure.storageAccount=<storage-account>" \
  --set "billing.storage.azure.container=<container-name>" \
  --set "billing.storage.secretRef.name=billing-credentials"
  ...
```

## GCP Cloud Storage Buckets

Configure billing to write to a Cloud Storage bucket in GCP by providing the following values at install-time. Create a bucket if you don't already have one.

Then, on the cluster where you installed the Spaces software, create a secret in `upbound-system`. This secret must contain the key `google_application_credentials`. Make sure to replace the value with a GCP service account key JSON generated from your GCP account.

```bash
kubectl create secret generic billing-credentials -n upbound-system \
  --from-literal=google_application_credentials=<service-account-key-json>
```

Install the Space software, providing the billing details along with the other required values.

```bash {hl_lines="2-5"}
helm -n upbound-system upgrade --install spaces ... \
  --set "billing.enabled=true" \
  --set "billing.storage.provider=gcp" \
  --set "billing.storage.gcp.bucket=<bucket-name>" \
  --set "billing.storage.secretRef.name=billing-credentials"
  ...
```

```bash {hl_lines="2-5"}
up space init ... \
  --set "billing.enabled=true" \
  --set "billing.storage.provider=gcp" \
  --set "billing.storage.gcp.bucket=<bucket-name>" \
  --set "billing.storage.secretRef.name=billing-credentials"
  ...
```

## Export billing data to send to Upbound

To prepare the billing data to send to Upbound, do the following:

Ensure the current context of your kubeconfig points at the Spaces cluster. Then, run the [export][export] command.

:::important
The credentials your CLI uses must have read access to the bucket to run this command.
:::

The example below exports billing data stored in AWS:

```bash
up space billing export --provider=aws \
  --bucket=spaces-billing-bucket \
  --account=your-upbound-org \
  --billing-month=2024-07 \
  --force-incomplete
```

The command creates a billing report that's zipped up in your current working directory. Send the output to your Upbound sales representative.

You can find full instructions and command options in the up [CLI reference][cli-reference] docs.
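To confirm the billing controller has been writing usage data for the month you plan to export, you can list the bucket contents first. This sketch assumes the AWS CLI with read access to the example bucket above:

```bash
# List the most recently written billing objects in the bucket.
aws s3 ls s3://spaces-billing-bucket --recursive | tail -n 20
```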
+ + +[export]: /reference/cli-reference +[cli-reference]: /reference/cli-reference +[flagship-product]: https://www.upbound.io/platform +[workload-identity-configuration-documentation]: https://docs.upbound.io/operate/accounts/authentication/oidc-configuration diff --git a/spaces_versioned_docs/version-v1.14/howtos/self-hosted/capacity-licensing.md b/spaces_versioned_docs/version-v1.14/howtos/self-hosted/capacity-licensing.md new file mode 100644 index 000000000..a1dc6c101 --- /dev/null +++ b/spaces_versioned_docs/version-v1.14/howtos/self-hosted/capacity-licensing.md @@ -0,0 +1,591 @@ +--- +title: Capacity Licensing +sidebar_position: 60 +description: A guide for capacity-based licensing in self-hosted Spaces +plan: "enterprise" +--- + + + + + +This guide explains how to configure and monitor capacity-based licensing in +self-hosted Upbound Spaces. Capacity licensing provides a simplified billing +model for disconnected or air-gapped environments where automated usage +reporting isn't possible. + +:::info +Spaces `v1.15` and later support Capacity Licensing as an +alternative to the traditional usage-based billing model described in the +[Self-Hosted Space Billing][space-billing] guide. +::: + +## Overview + +Capacity licensing allows organizations to purchase a fixed capacity of +resources upfront. The Spaces software tracks usage locally and provides +visibility into consumption against your purchased capacity, all without +requiring external connectivity to Upbound's services. + +### Key concepts + +- **Resource Hours**: The primary billing unit representing all resources + managed by Crossplane over time. This includes managed resources, + composites (XRs), claims (XRCs), and all composed resources - essentially + everything Crossplane manages. The system aggregates resource counts over each + hour using trapezoidal integration to accurately account for changes in + resource count throughout the hour. +- **Operations**: The number of Operations invoked by Crossplane. +- **License Capacity**: The total amount of resource hours and operations included in your license. +- **Usage Tracking**: Continuous monitoring of consumption with real-time utilization percentages. + +### How it works + +1. Upbound provides you with a license file containing your purchased capacity +2. You configure a `SpaceLicense` in your Spaces cluster +3. The metering system automatically: + - Collects measurements from all control planes every minute + - Aggregates usage data into hourly intervals + - Stores usage data in a local PostgreSQL database + - Updates the `SpaceLicense` status with current consumption + +## Prerequisites + +### PostgreSQL database + +Capacity licensing requires a PostgreSQL database to store usage measurements. You can use: + +- An existing PostgreSQL instance +- A managed PostgreSQL service (AWS RDS, Azure Database, Google Cloud SQL) +- A PostgreSQL instance deployed in your cluster + +The database must be: + +- Accessible from the Spaces cluster +- Configured with a dedicated database and credentials + +#### Example: Deploy PostgreSQL with CloudNativePG + +If you don't have an existing PostgreSQL instance, you can deploy one in your +cluster using [CloudNativePG] (CNPG). CNPG is a Kubernetes operator that +manages PostgreSQL clusters. + +1. Install the CloudNativePG operator: + +```bash +kubectl apply -f https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/release-1.24/releases/cnpg-1.24.1.yaml +``` + +2. 
Create a PostgreSQL cluster for metering: + +```yaml +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: metering-postgres + namespace: upbound-system +spec: + instances: 1 + imageName: ghcr.io/cloudnative-pg/postgresql:16 + bootstrap: + initdb: + database: metering + owner: metering + postInitApplicationSQL: + - ALTER ROLE "metering" CREATEROLE; + storage: + size: 5Gi + # Optional: Configure resources for production use + # resources: + # requests: + # memory: "512Mi" + # cpu: "500m" + # limits: + # memory: "1Gi" + # cpu: "1000m" +--- +apiVersion: v1 +kind: Secret +metadata: + name: metering-postgres-app + namespace: upbound-system + labels: + cnpg.io/reload: "true" +stringData: + username: metering + password: "your-secure-password-here" +type: kubernetes.io/basic-auth +``` + +```bash +kubectl apply -f metering-postgres.yaml +``` + +3. Wait for the cluster to be ready: + +```bash +kubectl wait --for=condition=ready cluster/metering-postgres -n upbound-system --timeout=5m +``` + +4. You can access the PostgreSQL cluster at `metering-postgres-rw.upbound-system.svc.cluster.local:5432`. + +:::tip +For production deployments, consider: +- Increasing `instances` to 3 for high availability +- Configuring [backups] to object storage +- Setting appropriate resource requests and limits +- Using a dedicated storage class with good I/O performance +::: + +### License file + +Contact your Upbound sales representative to obtain a license file for your organization. The license file contains: +- Your unique license ID +- Purchased capacity (resource hours and operations) +- License validity period +- Any usage restrictions (such as cluster UUID pinning) + +## Configuration + +### Step 1: Create database credentials secret + +Create a Kubernetes secret containing your PostgreSQL password using the pgpass format: + +```bash +# Create a pgpass file with format: hostname:port:database:username:password +# Note: The database name and username must be 'metering' +# For CNPG clusters, use the read-write service endpoint: -rw..svc.cluster.local +echo "metering-postgres-rw.upbound-system.svc.cluster.local:5432:metering:metering:your-secure-password-here" > pgpass + +# Create the secret +kubectl create secret generic metering-postgres-credentials \ + -n upbound-system \ + --from-file=pgpass=pgpass + +# Clean up the pgpass file +rm pgpass +``` + +The secret must contain a single key: +- **`pgpass`**: PostgreSQL password file in the format `hostname:port:metering:metering:password` + +:::note +The database name and username are fixed as `metering`. Ensure your PostgreSQL instance has a database named `metering` with a user `metering` that has appropriate permissions. + +If you deployed PostgreSQL using CNPG as shown in the example above, the password should match what you set in the `metering-postgres-app` secret. +::: + +:::tip +For production environments, consider using external secret management solutions: +- [External Secrets Operator][eso] +- Cloud-specific secret managers (AWS Secrets Manager, Azure Key Vault, GCP Secret Manager) +::: + +### Step 2: Enable metering in Spaces + +Enable the metering feature when installing or upgrading Spaces: + + + + + +```bash {hl_lines="2-7"} +helm -n upbound-system upgrade --install spaces ... 
  --set "metering.enabled=true" \
  --set "metering.storage.postgres.connection.url=metering-postgres-rw.upbound-system.svc.cluster.local:5432" \
  --set "metering.storage.postgres.connection.credentials.secret.name=metering-postgres-credentials" \
  --set "metering.interval=1m" \
  --set "metering.workerCount=10" \
  --set "metering.aggregationInterval=1h" \
  --set "metering.measurementRetentionDays=30"
  ...
```

```bash {hl_lines="2-7"}
up space init ... \
  --set "metering.enabled=true" \
  --set "metering.storage.postgres.connection.url=metering-postgres-rw.upbound-system.svc.cluster.local:5432" \
  --set "metering.storage.postgres.connection.credentials.secret.name=metering-postgres-credentials" \
  --set "metering.interval=1m" \
  --set "metering.workerCount=10" \
  --set "metering.aggregationInterval=1h" \
  --set "metering.measurementRetentionDays=30"
  ...
```

#### Configuration options

| Option | Default | Description |
|--------|---------|-------------|
| `metering.enabled` | `false` | Enable the metering feature |
| `metering.storage.postgres.connection.url` | - | PostgreSQL host and port (format: `host:port`, required) |
| `metering.storage.postgres.connection.credentials.secret.name` | - | Name of the secret containing PostgreSQL credentials (required) |
| `metering.storage.postgres.connection.sslmode` | `require` | SSL mode for PostgreSQL connection (`disable`, `allow`, `prefer`, `require`, `verify-ca`, `verify-full`) |
| `metering.storage.postgres.connection.ca.name` | - | Name of the secret containing CA certificate for TLS connections (optional) |
| `metering.interval` | `1m` | How often to collect measurements from control planes |
| `metering.workerCount` | `10` | Number of parallel workers for measurement collection |
| `metering.aggregationInterval` | `1h` | How often to aggregate measurements into hourly usage data |
| `metering.measurementRetentionDays` | `30` | Days to retain raw measurements (0 = indefinite) |

#### Database sizing and retention

The metering system uses two PostgreSQL tables to track usage:

**Raw measurements table** (`measurements`):
- Stores point-in-time snapshots collected every measurement interval (default: 1 minute)
- One row per control plane per interval
- Affected by the `measurementRetentionDays` setting
- Used for detailed auditing and troubleshooting

**Aggregated usage table** (`hourly_usage`):
- Stores hourly aggregated resource hours and operations per license
- One row per hour per license
- Never deleted (required for accurate license tracking)
- Grows much slower than raw measurements

##### Storage sizing guidelines

Estimate your PostgreSQL storage needs based on these factors:

| Deployment Size | Control Planes | Measurement Interval | Retention Days | Raw Measurements | Indexes & Overhead | Total Storage |
|----------------|----------------|---------------------|----------------|------------------|-------------------|---------------|
| Small | 10 | 1m | 30 | ~85 MB | ~40 MB | **~125 MB** |
| Medium | 50 | 1m | 30 | ~430 MB | ~215 MB | **~645 MB** |
| Large | 200 | 1m | 30 | ~1.7 GB | ~850 MB | **~2.5 GB** |
| Large (90-day retention) | 200 | 1m | 90 | ~5.2 GB | ~2.6 GB | **~7.8 GB** |

The aggregated hourly usage table adds minimal overhead (~50 KB per year per license).

**Formula for custom calculations**:
```
Daily measurements per control plane = (24 * 60) / interval_minutes
Total rows = control_planes × daily_measurements × retention_days
Storage (MB) ≈ (total_rows × 200 bytes) / 1,048,576 × 1.5 (with indexes)
```
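As a quick sanity check, this shell sketch applies the formula above to the medium deployment row from the sizing table:

```bash
# Estimate raw-measurement storage: rows x ~200 bytes x 1.5 index overhead.
control_planes=50 interval_min=1 retention_days=30
rows=$(( control_planes * (24 * 60 / interval_min) * retention_days ))
awk -v rows="$rows" 'BEGIN { printf "%.0f MB\n", rows * 200 / 1048576 * 1.5 }'
# Prints ~618 MB, in the same ballpark as the table's ~645 MB estimate.
```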
##### Retention behavior

The `measurementRetentionDays` setting controls retention of raw measurement data:

- **Default: 30 days** - Balances audit capabilities with storage efficiency
- **Set to 0**: Disables cleanup, retains all raw measurements indefinitely
- **Cleanup runs**: Every aggregation interval (default: hourly)
- **What's kept forever**: Aggregated hourly usage data (needed for license tracking)
- **What's cleaned up**: Raw point-in-time measurements older than retention period

**Recommendations**:
- **30 days**: For most troubleshooting and short-term auditing
- **60 to 90 days**: For environments requiring extended audit trails
- **Unlimited (0)**: Only for environments with ample storage or specific compliance requirements

:::note
Increasing the retention period linearly increases storage requirements for raw measurements. The aggregated hourly data is always retained regardless of this setting.
:::

### Step 3: Apply your license

Use the `up` CLI to apply your license file:

```bash
up space license apply /path/to/license.json
```

This command automatically:
- Creates a secret containing your license file in the `upbound-system` namespace
- Creates the `SpaceLicense` resource configured to use that secret

:::tip
You can specify a different namespace for the license secret using the `--namespace` flag:
```bash
up space license apply /path/to/license.json --namespace my-namespace
```
:::
<details>
<summary>Alternative: Manual kubectl approach</summary>

If you prefer not to use the `up` CLI, you can manually create the resources:

1. Create the license secret:

```bash
kubectl create secret generic space-license \
  -n upbound-system \
  --from-file=license.json=/path/to/license.json
```

2. Create the SpaceLicense resource:

```yaml
apiVersion: admin.spaces.upbound.io/v1alpha1
kind: SpaceLicense
metadata:
  name: space
spec:
  secretRef:
    name: space-license
    namespace: upbound-system
    key: license.json
```

```bash
kubectl apply -f spacelicense.yaml
```

:::important
You **must** name the `SpaceLicense` resource `space`. This resource is a singleton and only one can exist in the cluster.
:::

</details>
+ +## Monitoring usage + +### Check license status + +Use the `up` CLI to view your license details and current usage: + +```bash +up space license show +``` + +Example output: + +``` +Spaces License Status: Valid (License is valid) + +Created: 2024-01-01T00:00:00Z +Expires: 2025-01-01T00:00:00Z + +Plan: enterprise + +Resource Hour Limit: 1000000 +Operation Limit: 500000 + +Enabled Features: +- spaces +- query-api +- backup-restore +``` + +The output shows: +- License validity status and any validation messages +- Creation and expiration dates +- Your commercial plan tier +- Capacity limits for resource hours and operations +- Enabled features in your license +- Any restrictions (such as cluster UUID pinning) + +
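If you script capacity checks, the same numbers are available on the `SpaceLicense` status; the field paths below follow the detailed status example later in this section:

```bash
# Print current utilization percentages from the SpaceLicense status.
kubectl get spacelicense space -o jsonpath='resource hours: {.status.usage.resourceHoursUtilization}{"\n"}operations: {.status.usage.operationsUtilization}{"\n"}'
```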
<details>
<summary>Alternative: View detailed status with kubectl</summary>

For detailed information including usage statistics, use kubectl:

```bash
kubectl get spacelicense space -o yaml
```

Example output showing usage data:

```yaml
apiVersion: admin.spaces.upbound.io/v1alpha1
kind: SpaceLicense
metadata:
  name: space
spec:
  secretRef:
    name: space-license
    namespace: upbound-system
status:
  conditions:
  - type: LicenseValid
    status: "True"
    reason: Valid
    message: "License is valid"
  id: "lic_abc123xyz"
  plan: "enterprise"
  capacity:
    resourceHours: 1000000
    operations: 500000
  usage:
    resourceHours: 245680
    operations: 12543
    resourceHoursUtilization: "24.57%"
    operationsUtilization: "2.51%"
    firstMeasurement: "2024-01-15T10:00:00Z"
    lastMeasurement: "2024-02-10T14:30:00Z"
  createdAt: "2024-01-01T00:00:00Z"
  expiresAt: "2025-01-01T00:00:00Z"
  enabledFeatures:
  - "spaces"
  - "query-api"
  - "backup-restore"
```

</details>
### Understanding the status fields

| Field | Description |
|-------|-------------|
| `status.id` | Unique license identifier |
| `status.plan` | Your commercial plan (community, standard, enterprise) |
| `status.capacity` | Total capacity included in your license |
| `status.usage.resourceHours` | Total resource hours consumed |
| `status.usage.operations` | Total operations performed |
| `status.usage.resourceHoursUtilization` | Percentage of resource hours capacity used |
| `status.usage.operationsUtilization` | Percentage of operations capacity used |
| `status.usage.firstMeasurement` | When usage tracking began |
| `status.usage.lastMeasurement` | Most recent usage update |
| `status.expiresAt` | License expiration date |

### Monitor with kubectl

Watch your license utilization in real-time:

```bash
kubectl get spacelicense space -w
```

Short output format:

```
NAME    PLAN         VALID   REASON   AGE
space   enterprise   True    Valid    45d
```

## Managing licenses

### Updating your license

To update your license with a new license file (for example, when renewing or upgrading capacity), apply the new license:

```bash
up space license apply /path/to/new-license.json
```

This command replaces the existing license secret and updates the SpaceLicense resource.

### Removing a license

To remove a license:

```bash
up space license remove
```

This command:
- Prompts for confirmation before proceeding
- Removes the license secret

To skip the confirmation prompt, use the `--force` flag:

```bash
up space license remove --force
```

## Troubleshooting

### License not updating

If the license status doesn't update with usage data:

1. **Check metering controller logs**:
   ```bash
   kubectl logs -n upbound-system deployment/spaces-controller -c metering
   ```

2. **Check if the system captures your measurements**:
   ```bash
   # Connect to PostgreSQL and query the measurements table
   kubectl exec -it <postgres-pod> -- psql -U metering -d metering \
     -c "SELECT COUNT(*) FROM measurements WHERE timestamp > NOW() - INTERVAL '1 hour';"
   ```

### High utilization warnings

If you're approaching your capacity limits:

1. **Review resource usage** by control plane to identify high consumers
2. **Contact your Upbound sales representative** to discuss capacity expansion
3. **Optimize managed resources** by cleaning up unused resources

### License validation failures

If your license shows as invalid:

1. **Check expiration date**: `kubectl get spacelicense space -o jsonpath='{.status.expiresAt}'`
2. **Verify license file integrity**: Ensure the secret contains valid JSON
3. **Check for cluster UUID restrictions**: Upbound pins some licenses to specific clusters
4. **Review controller logs** for detailed error messages

## Differences from traditional billing

### Capacity licensing

- ✅ Works in disconnected environments
- ✅ Provides real-time usage visibility
- ✅ No manual data export required
- ✅ Requires PostgreSQL database
- ✅ Fixed capacity model

### Traditional billing (object storage)

- ❌ Requires periodic manual export
- ❌ Delayed visibility into usage
- ✅ Works with S3/Azure Blob/GCS
- ❌ Requires cloud storage access
- ✅ Pay-as-you-go model

## Best practices

### Database management

1. **Regular backups**: Back up your metering database regularly to preserve usage history; see the sketch after this list
2. **Monitor database size**: Set appropriate retention periods to manage storage growth
3. **Use managed databases**: Consider managed PostgreSQL services for production
4. **Connection pooling**: Use connection pooling for better performance at scale
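As a concrete starting point for the backup item above, a dump of the metering database from the CNPG example earlier might look like this; the pod name and authentication details depend on your PostgreSQL setup:

```bash
# Dump the metering database from the CNPG primary to a local SQL file.
kubectl exec -n upbound-system metering-postgres-1 -- \
  pg_dump -U metering -d metering > metering-backup-$(date +%F).sql
```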
### License management

1. **Monitor utilization**: Set up alerts before reaching 80% capacity
2. **Plan renewals early**: Start renewal discussions 60 days before expiration
3. **Track grace periods**: Note the `gracePeriodEndsAt` date for planning
4. **Secure license files**: Treat license files as sensitive credentials

### Operational monitoring

1. **Set up dashboards**: Create Grafana dashboards for usage trends
2. **Enable alerting**: Configure alerts for high utilization and expiration
3. **Regular audits**: Periodically review usage patterns across control planes
4. **Capacity planning**: Use historical data to predict future capacity needs

## Next steps

- Learn about [Observability] to monitor your Spaces deployment
- Explore [Backup and Restore][backup-restore] to protect your control plane data
- Review [Self-Hosted Space Billing][space-billing] for the traditional billing model
- Contact [Upbound Sales][sales] to discuss capacity licensing options

[space-billing]: /spaces/howtos/self-hosted/billing
[CloudNativePG]: https://cloudnative-pg.io/
[backups]: https://cloudnative-pg.io/documentation/current/backup_recovery/
[backup-restore]: /spaces/howtos/backup-and-restore
[sales]: https://www.upbound.io/contact
[eso]: https://external-secrets.io/
[Observability]: /spaces/howtos/observability

diff --git a/spaces_versioned_docs/version-v1.14/howtos/self-hosted/certs.md b/spaces_versioned_docs/version-v1.14/howtos/self-hosted/certs.md
new file mode 100644
index 000000000..e517c250e
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.14/howtos/self-hosted/certs.md
@@ -0,0 +1,274 @@
---
title: Istio Ingress Gateway With Custom Certificates
sidebar_position: 20
description: Install self-hosted spaces using istio ingress gateway in a Kind cluster
---

:::important
Prerequisites

- Spaces Token available in a file
- `docker login xpkg.upbound.io -u <access-id> -p <token>`
- [`istioctl`][istioctl] installation
- `jq` installation
:::

This document describes the installation of a self-hosted Space on an example `kind` cluster along with an Istio Ingress Gateway and certificates. The service mesh and certificate installation is transferable to self-hosted Spaces in arbitrary clouds.

## Create a kind cluster

```shell
# Standard kind ingress setup: a single control-plane node labeled
# ingress-ready, with host ports 80/443 mapped for the Istio gateway.
cat <<EOF | kind create cluster --config=-
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
  kubeadmConfigPatches:
  - |
    kind: InitConfiguration
    nodeRegistration:
      kubeletExtraArgs:
        node-labels: "ingress-ready=true"
  extraPortMappings:
  - containerPort: 80
    hostPort: 80
    protocol: TCP
  - containerPort: 443
    hostPort: 443
    protocol: TCP
EOF
```

## Install Istio

:::important
This is an example and not recommended for use in production.
:::

1. Create the `istio-values.yaml` file

```shell
cat > istio-values.yaml << 'EOF'
apiVersion: install.istio.io/v1alpha1
kind: IstioOperator
spec:
  hub: gcr.io/istio-release
  components:
    ingressGateways:
    - enabled: true
      name: istio-ingressgateway
      k8s:
        nodeSelector:
          ingress-ready: "true"
        overlays:
        - apiVersion: apps/v1
          kind: Deployment
          name: istio-ingressgateway
          patches:
          - path: spec.template.spec.containers.[name:istio-proxy].ports
            value:
            - containerPort: 8080
              hostPort: 80
            - containerPort: 8443
              hostPort: 443
EOF
```

2. Install istio via `istioctl`

```shell
istioctl install -f istio-values.yaml
```
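Before continuing, you can confirm the ingress gateway scheduled onto the `ingress-ready` node and is running; the `istio=ingressgateway` label is Istio's default for the gateway pods:

```shell
# The ingress gateway pod should be Running on the kind control-plane node.
kubectl -n istio-system get pods -l istio=ingressgateway -o wide
```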
## Create a self-signed Certificate via cert-manager

:::important
This Certificate manifest creates a self-signed certificate for a proof of concept environment and isn't recommended for production use cases.
:::

1. Create the upbound-system namespace

```shell
kubectl create namespace upbound-system
```

2. Create a self-signed certificate

```shell
# Example self-signed Issuer and Certificate: the secret name and host must
# match the values referenced by the Spaces install below.
cat <<EOF | kubectl apply -f -
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
  name: selfsigned-issuer
  namespace: upbound-system
spec:
  selfSigned: {}
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: example-tls
  namespace: upbound-system
spec:
  secretName: example-tls-secret
  isCA: true
  commonName: proxy.upbound-127.0.0.1.nip.io
  dnsNames:
  - proxy.upbound-127.0.0.1.nip.io
  issuerRef:
    name: selfsigned-issuer
    kind: Issuer
EOF
```

## Create an Istio Gateway and VirtualService

Configure an Istio Gateway and VirtualService to use TLS passthrough.

```shell
# Example Gateway and VirtualService: TLS passthrough on port 443, routed to
# the spaces-router service. Adjust the destination port if your installation
# differs.
cat <<EOF | kubectl apply -f -
apiVersion: networking.istio.io/v1beta1
kind: Gateway
metadata:
  name: spaces-gateway
  namespace: istio-system
spec:
  selector:
    istio: ingressgateway
  servers:
  - port:
      number: 443
      name: tls
      protocol: TLS
    tls:
      mode: PASSTHROUGH
    hosts:
    - "proxy.upbound-127.0.0.1.nip.io"
---
apiVersion: networking.istio.io/v1beta1
kind: VirtualService
metadata:
  name: spaces-router
  namespace: istio-system
spec:
  hosts:
  - "proxy.upbound-127.0.0.1.nip.io"
  gateways:
  - spaces-gateway
  tls:
  - match:
    - port: 443
      sniHosts:
      - "proxy.upbound-127.0.0.1.nip.io"
    route:
    - destination:
        host: spaces-router.upbound-system.svc.cluster.local
        port:
          number: 8443
EOF
```

## Install the Spaces software

1. Create the `spaces-values.yaml` file

```shell
cat > spaces-values.yaml << 'EOF'
# Configure spaces-router to use the TLS secret created by cert-manager.
externalTLS:
  tlsSecret:
    name: example-tls-secret
  caBundleSecret:
    name: example-tls-secret
    key: ca.crt
ingress:
  provision: false
  # Allow Istio Ingress Gateway to communicate to the spaces-router
  namespaceLabels:
    kubernetes.io/metadata.name: istio-system
  podLabels:
    app: istio-ingressgateway
    istio: ingressgateway
EOF
```

2. Set the required environment variables

```shell
# Update these according to your account/token file
export SPACES_TOKEN_PATH=<path-to-token.json>
export UPBOUND_ACCOUNT=<account-name>
# Replace SPACES_ROUTER_HOST with your Spaces ingress hostname
export SPACES_ROUTER_HOST="proxy.upbound-127.0.0.1.nip.io"
export SPACES_VERSION="1.14.1"
```

3. Create an image pull secret for Spaces

```shell
kubectl -n upbound-system create secret docker-registry upbound-pull-secret \
  --docker-server=https://xpkg.upbound.io \
  --docker-username="$(jq -r .accessId $SPACES_TOKEN_PATH)" \
  --docker-password="$(jq -r .token $SPACES_TOKEN_PATH)"
```

4. Install the Spaces helm chart

```shell
# Login to xpkg.upbound.io
jq -r .token $SPACES_TOKEN_PATH | helm registry login xpkg.upbound.io -u $(jq -r .accessId $SPACES_TOKEN_PATH) --password-stdin

# Install spaces helm chart
helm -n upbound-system upgrade --install spaces \
  oci://xpkg.upbound.io/spaces-artifacts/spaces \
  --version "${SPACES_VERSION}" \
  --set "ingress.host=${SPACES_ROUTER_HOST}" \
  --set "account=${UPBOUND_ACCOUNT}" \
  --set "authentication.hubIdentities=true" \
  --set "authorization.hubRBAC=true" \
  --wait -f spaces-values.yaml
```

## Validate the installation

If the `up` CLI can successfully interact with your self-hosted Space, the certificate installation works:

- `up ctx .`

You can also issue control plane creation, list, and deletion commands:

- `up ctp create cert-test`
- `up ctp list`
- `up ctx disconnected/kind-kind/default/cert-test && kubectl get namespace`
- `up ctp delete cert-test`

:::note
If `up` can't connect to your control plane, follow [this guide to create a new profile][up-profile].
:::

## Troubleshooting

Examine your certificate with `openssl`:

```shell
openssl s_client -connect proxy.upbound-127.0.0.1.nip.io:443 -showcerts
```

[istioctl]: https://istio.io/latest/docs/ops/diagnostic-tools/istioctl/
[up-profile]: /manuals/cli/howtos/profile-config/

diff --git a/spaces_versioned_docs/version-v1.14/howtos/self-hosted/configure-ha.md b/spaces_versioned_docs/version-v1.14/howtos/self-hosted/configure-ha.md
new file mode 100644
index 000000000..ddf36c55e
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.14/howtos/self-hosted/configure-ha.md
@@ -0,0 +1,450 @@
---
title: Production Scaling and High Availability
description: Configure your Self-Hosted Space for production
sidebar_position: 5
---

This guide explains how to configure an existing Upbound Space deployment for production operation at scale.

Use this guide when you're ready to deploy production scaling, high availability, and monitoring in your Space.
:::info API Version Information
This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments.

For API specifications on ControlPlane resources and configurations, as well as version compatibility details, see the [Spaces API Reference](../../../../../reference/apis/spaces-api/).
:::

## Prerequisites

Before you begin scaling your Spaces deployment, make sure you have:

* A working Space deployment
* Cluster administrator access
* An understanding of load patterns and growth in your organization
* Familiarity with node affinity, tainting, and Horizontal Pod Autoscaling (HPA)

## Production scaling strategy

In this guide, you will:

* Create dedicated node pools for different component types
* Configure high availability to ensure there are no single points of failure
* Set dynamic scaling for variable workloads
* Optimize your storage and component operations
* Monitor your deployment health and performance

## Spaces architecture

The basic Spaces workflow follows the pattern below:

![Spaces workflow][spaces-workflow]

## Node architecture

You can mitigate resource contention and improve reliability by separating system components into dedicated node pools.

### `etcd` dedicated nodes

`etcd` performance directly impacts your entire Space, so isolate it for consistent performance.

1. Create a dedicated `etcd` node pool

   **Requirements:**
   - **Minimum**: 3 nodes for HA
   - **Instance type**: General purpose with high network throughput/low latency
   - **Storage**: High performance storage (`etcd` is I/O sensitive)

2. Taint `etcd` nodes to reserve them

   ```bash
   kubectl taint nodes <node-name> target=etcd:NoSchedule
   ```

3. Configure `etcd` storage

   `etcd` is sensitive to storage I/O performance. Review the [`etcd` scaling documentation][scaling] for specific storage guidance.

### API server dedicated nodes

API servers handle all control plane requests and should run on dedicated infrastructure.

1. Create dedicated API server nodes

   **Requirements:**
   - **Minimum**: 2 nodes for HA
   - **Instance type**: Compute-optimized, memory-optimized, or general-purpose
   - **Scaling**: Scale vertically based on API server load patterns

2. Taint API server nodes

   ```bash
   kubectl taint nodes <node-name> target=apiserver:NoSchedule
   ```

### Configure cluster autoscaling

Enable cluster autoscaling for all node pools.

For AWS EKS clusters, Upbound recommends using [`Karpenter`][karpenter] for improved bin-packing and instance type selection.

For GCP GKE clusters, follow the [GKE autoscaling][gke-autoscaling] guide.

For Azure AKS clusters, follow the [AKS autoscaling][aks-autoscaling] guide.

## Configure high availability

Ensure control plane components can survive node and zone failures.

### Enable high availability mode

1. Configure control planes for high availability

   ```yaml
   controlPlanes:
     ha:
       enabled: true
   ```

   This configures control plane pods to run with multiple replicas and associated pod disruption budgets.
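The YAML snippets in this guide are Helm values for the Spaces chart. One way to roll them out, assuming you collect them in a hypothetical `production-values.yaml` file and use the chart reference shown elsewhere in these docs:

```bash
# Apply production values to an existing Spaces installation.
helm -n upbound-system upgrade spaces \
  oci://xpkg.upbound.io/spaces-artifacts/spaces \
  --version "${SPACES_VERSION}" \
  --reuse-values -f production-values.yaml
```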
### Configure component distribution

1. Set up API server pod distribution

   ```yaml
   controlPlanes:
     vcluster:
       affinity:
         nodeAffinity:
           requiredDuringSchedulingIgnoredDuringExecution:
             nodeSelectorTerms:
             - matchExpressions:
               - key: target
                 operator: In
                 values:
                 - apiserver
         podAntiAffinity:
           requiredDuringSchedulingIgnoredDuringExecution:
           - labelSelector:
               matchExpressions:
               - key: app
                 operator: In
                 values:
                 - vcluster
             topologyKey: "kubernetes.io/hostname"
           preferredDuringSchedulingIgnoredDuringExecution:
           - podAffinityTerm:
               labelSelector:
                 matchExpressions:
                 - key: app
                   operator: In
                   values:
                   - vcluster
               topologyKey: topology.kubernetes.io/zone
             weight: 100
   ```

2. Configure `etcd` pod distribution

   ```yaml
   controlPlanes:
     etcd:
       affinity:
         nodeAffinity:
           requiredDuringSchedulingIgnoredDuringExecution:
             nodeSelectorTerms:
             - matchExpressions:
               - key: target
                 operator: In
                 values:
                 - etcd
         podAntiAffinity:
           requiredDuringSchedulingIgnoredDuringExecution:
           - labelSelector:
               matchExpressions:
               - key: app
                 operator: In
                 values:
                 - vcluster-etcd
             topologyKey: "kubernetes.io/hostname"
           preferredDuringSchedulingIgnoredDuringExecution:
           - podAffinityTerm:
               labelSelector:
                 matchExpressions:
                 - key: app
                   operator: In
                   values:
                   - vcluster-etcd
               topologyKey: topology.kubernetes.io/zone
             weight: 100
   ```

### Configure tolerations

Allow control plane pods to schedule on the tainted dedicated nodes (available in Spaces v1.14+).

1. Add tolerations for `etcd` pods

   ```yaml
   controlPlanes:
     etcd:
       tolerations:
       - key: "target"
         operator: "Equal"
         value: "etcd"
         effect: "NoSchedule"
   ```

2. Add tolerations for API server pods

   ```yaml
   controlPlanes:
     vcluster:
       tolerations:
       - key: "target"
         operator: "Equal"
         value: "apiserver"
         effect: "NoSchedule"
   ```

## Configure autoscaling for Spaces components

Set up the Spaces system components to handle variable load automatically.

### Scale API and `apollo` services

1. Configure minimum replicas for availability

   ```yaml
   api:
     replicaCount: 2

   features:
     alpha:
       apollo:
         enabled: true
         replicaCount: 2
   ```

   Both services support horizontal and vertical scaling based on load patterns.

### Configure router autoscaling

The `spaces-router` is the entry point for all traffic and needs intelligent scaling.

1. Enable Horizontal Pod Autoscaler

   ```yaml
   router:
     hpa:
       enabled: true
       minReplicas: 2
       maxReplicas: 8
       targetCPUUtilizationPercentage: 80
       targetMemoryUtilizationPercentage: 80
   ```

2. Monitor scaling factors

   **Router scaling behavior:**
   - **Vertical scaling**: Scales based on number of control planes
   - **Horizontal scaling**: Scales based on request volume
   - **Resource monitoring**: Monitor CPU and memory usage

### Configure controller scaling

The `spaces-controller` manages Space-level resources and requires vertical scaling.

1. Configure adequate resources with headroom

   ```yaml
   controller:
     resources:
       requests:
         cpu: "500m"
         memory: "1Gi"
       limits:
         cpu: "2000m"
         memory: "4Gi"
   ```

   **Important**: The controller can spike when reconciling large numbers of control planes, so provide adequate headroom for resource spikes.
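After rolling out the router and controller settings above, you can watch the autoscaler react to load; the deployment name below assumes the `spaces-router` component described in this guide:

```bash
# Observe router autoscaling decisions and current replica counts.
kubectl -n upbound-system get hpa
kubectl -n upbound-system get deploy spaces-router
```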
## Set up production storage

### Configure Query API database

1. Use a managed PostgreSQL database

   **Recommended services:**
   - [AWS RDS][rds]
   - [Google Cloud SQL][gke-sql]
   - [Azure Database for PostgreSQL][aks-sql]

   **Requirements:**
   - Minimum 400 IOPS performance

## Monitoring

Monitor key metrics to ensure healthy scaling and identify issues quickly.

### Control plane health

Track these `spaces-controller` metrics:

1. **Total control planes**

   ```
   spaces_control_plane_exists
   ```

   Tracks the total number of control planes in the system.

2. **Degraded control planes**

   ```
   spaces_control_plane_degraded
   ```

   Returns control planes that don't have a `Synced`, `Ready`, and `Healthy` state.

3. **Stuck control planes**

   ```
   spaces_control_plane_stuck
   ```

   Control planes stuck in a provisioning state.

4. **Deletion issues**

   ```
   spaces_control_plane_deletion_stuck
   ```

   Control planes stuck during deletion.

### Alerting

Configure alerts for critical scaling and health metrics:

- **High error rates**: Alert when 4xx/5xx response rates exceed thresholds
- **Control plane health**: Alert when degraded or stuck control planes exceed acceptable counts

## Architecture overview

**Spaces System Components:**

- **`spaces-router`**: Entry point for all endpoints, dynamically builds routes to control plane API servers
- **`spaces-controller`**: Reconciles Space-level resources, serves webhooks, works with `mxp-controller` for provisioning
- **`spaces-api`**: API for managing groups, control planes, shared secrets, and telemetry objects (accessed only through spaces-router)
- **`spaces-apollo`**: Hosts the Query API, connects to PostgreSQL database populated by `apollo-syncer` pods

**Control Plane Components (per control plane):**

- **`mxp-controller`**: Handles provisioning tasks, serves webhooks, installs UXP and `XGQL`
- **`XGQL`**: GraphQL API powering console views
- **`kube-state-metrics`**: Collects usage metrics for billing (updated by `mxp-controller` when CRDs change)
- **`vector`**: Works with `kube-state-metrics` to send usage data to external storage for billing
- **`apollo syncer`**: Syncs `etcd` data into PostgreSQL for the Query API

### `up ctx` workflow

![up ctx workflow diagram][up-ctx-workflow]

### Access a control plane API server via kubectl

![kubectl workflow diagram][kubectl]

### Query API/Apollo

![query API workflow diagram][query-api]

## See also

* [Upbound Spaces deployment requirements][deployment]
* [Upbound `etcd` scaling resources][scaling]

[up-ctx-workflow]: /img/up-ctx-workflow.png
[kubectl]: /img/kubectl-workflow.png
[query-api]: /img/query-api-workflow.png
[spaces-workflow]: /img/up-basic-flow.png
[rds]: https://aws.amazon.com/rds/postgresql/
[gke-sql]: https://cloud.google.com/kubernetes-engine/docs/tutorials/stateful-workloads/postgresql
[aks-sql]: https://learn.microsoft.com/en-us/azure/aks/deploy-postgresql-ha?tabs=azuredisk
[deployment]: https://docs.upbound.io/spaces/howtos/self-hosted/deployment-reqs/
[karpenter]: https://docs.aws.amazon.com/eks/latest/best-practices/karpenter.html
[gke-autoscaling]: https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-autoscaler
[aks-autoscaling]: https://learn.microsoft.com/en-us/azure/aks/cluster-autoscaler-overview
[scaling]: https://docs.upbound.io/deploy/self-hosted-spaces/scaling-resources#scaling-etcd-storage
diff --git a/spaces_versioned_docs/version-v1.14/howtos/self-hosted/controllers.md b/spaces_versioned_docs/version-v1.14/howtos/self-hosted/controllers.md
new file mode 100644
index 000000000..692740638
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.14/howtos/self-hosted/controllers.md
@@ -0,0 +1,389 @@
---
title: Controllers
weight: 250
description: A guide to how to wrap and deploy an Upbound controller into control planes on Upbound.
---

:::important
This feature is in private preview for select customers in Upbound Spaces. If you're interested in this feature, please [contact us](https://www.upbound.io/contact-us).
:::

Upbound's _Controllers_ feature lets you build and deploy control plane software from the Kubernetes ecosystem. With the _Controllers_ feature, you're not limited to managing only resource types defined by Crossplane. You can also create resources from _CustomResourceDefinitions_ defined by other Kubernetes ecosystem tooling.

This guide explains how to bundle and deploy control plane software from the Kubernetes ecosystem on a control plane in Upbound.

## Benefits

The Controllers feature provides the following benefits:

* Deploy control plane software from the Kubernetes ecosystem.
* Use your control plane's package manager to handle the lifecycle of the control plane software and define dependencies between packages.
* Build powerful compositions that combine both Crossplane and Kubernetes _CustomResources_.

## How it works

A _Controller_ is a package type that bundles control plane software from the Kubernetes ecosystem. Examples of such software include:

- Kubernetes policy engines
- CI/CD tooling
- Your own private custom controllers defined by your organization

You build a _Controller_ package by wrapping a Helm chart along with its requisite _CustomResourceDefinitions_. Your _Controller_ package gets pushed to an OCI registry, and from there you can apply it to a control plane like you would any other Crossplane package. Your control plane's package manager is responsible for managing the lifecycle of the software once applied.

## Prerequisites

Enable the Controllers feature in the Space you plan to run your control plane in:

- Cloud Spaces: Not available yet
- Connected Spaces: Space administrator must enable this feature
- Disconnected Spaces: Space administrator must enable this feature

Packaging a _Controller_ requires [up CLI][cli] `v0.39.0` or later.

## Build a _Controller_ package

_Controllers_ are a package type that get administered by your control plane's package manager.

### Prepare the package

To define a _Controller_, you need a Helm chart. This guide assumes the control plane software you want to build into a _Controller_ already has a Helm chart available.

Start by making a working directory to assemble the necessary parts:

```ini
mkdir controller-package
cd controller-package
```

Inside the working directory, pull the Helm chart:

```shell
export CHART_REPOSITORY=<chart-repository-url>
export CHART_NAME=<chart-name>
export CHART_VERSION=<chart-version>

helm pull $CHART_NAME --repo $CHART_REPOSITORY --version $CHART_VERSION
```

Be sure to update the Helm chart repository, name, and version with your own.
Move the Helm chart into its own folder:

```ini
mkdir helm
mv $CHART_NAME-$CHART_VERSION.tgz helm/chart.tgz
```

Unpack the CRDs from the Helm chart into their own directory:

```shell
export RELEASE_NAME=<release-name>
export RELEASE_NAMESPACE=<release-namespace>

mkdir crds
helm template $RELEASE_NAME helm/chart.tgz -n $RELEASE_NAMESPACE --include-crds | \
  yq e 'select(.kind == "CustomResourceDefinition")' - | \
  yq -s '("crds/" + .metadata.name + ".yaml")' -
```

Be sure to update the Helm release name and namespace with your own.

:::info
The instructions above assume your CRDs get deployed as part of your Helm chart. If they're deployed another way, you need to manually copy your CRDs instead.
:::

Create a `crossplane.yaml` with your controller metadata:

```yaml
cat <<EOF > crossplane.yaml
apiVersion: meta.pkg.upbound.io/v1alpha1
kind: Controller
metadata:
  annotations:
    friendly-name.meta.crossplane.io: Controller <name>
    meta.crossplane.io/description: |
      A brief description of what the controller does.
    meta.crossplane.io/license: Apache-2.0
    meta.crossplane.io/maintainer: <maintainer>
    meta.crossplane.io/readme: |
      An explanation of your controller.
    meta.crossplane.io/source: <source-repository>
  name: <controller-name>
spec:
  packagingType: Helm
  helm:
    releaseName: <release-name>
    releaseNamespace: <release-namespace>
    # Value overrides for the helm release can be provided below.
    # values:
    #   foo: bar
EOF
```

Your controller's file structure should look like this:

```ini
.
├── crds
│   ├── your-crd.yaml
│   ├── second-crd.yaml
│   └── another-crd.yaml
├── crossplane.yaml
└── helm
    └── chart.tgz
```

### Package and push the _Controller_

At the root of your controller's working directory, build the contents into an xpkg:

```ini
up xpkg build
```

This causes an xpkg to get saved to your current directory with a name like `controller-f7091386b4c0.xpkg`.

Push the package to your desired OCI registry:

```shell
export UPBOUND_ACCOUNT=<your-account>
export CONTROLLER_NAME=<controller-name>
export CONTROLLER_VERSION=<version>
export XPKG_FILENAME=<xpkg-file>

up xpkg push xpkg.upbound.io/$UPBOUND_ACCOUNT/$CONTROLLER_NAME:$CONTROLLER_VERSION -f $XPKG_FILENAME
```

## Deploy a _Controller_ package

:::important
_Controllers_ are only installable on control planes running Crossplane `v1.19.0` or later.
:::

Set your kubecontext to the desired control plane in Upbound. Change the package path to the OCI registry you pushed it to. Then, deploy the _Controller_ directly:

```shell
export CONTROLLER_NAME=<controller-name>
export CONTROLLER_VERSION=<version>

# Illustrative install manifest: confirm the runtime Controller package API
# group and version for your Spaces release before applying.
cat <<EOF | kubectl apply -f -
apiVersion: pkg.upbound.io/v1alpha1
kind: Controller
metadata:
  name: $CONTROLLER_NAME
spec:
  package: xpkg.upbound.io/$UPBOUND_ACCOUNT/$CONTROLLER_NAME:$CONTROLLER_VERSION
EOF
```

## Example: Argo CD as a _Controller_

The following example applies the steps above to Argo CD. After pulling the Argo CD Helm chart and unpacking its CRDs as described in the previous sections, create the controller metadata:

```yaml
cat <<EOF > crossplane.yaml
apiVersion: meta.pkg.upbound.io/v1alpha1
kind: Controller
metadata:
  annotations:
    friendly-name.meta.crossplane.io: Controller ArgoCD
    meta.crossplane.io/description: |
      The ArgoCD Controller enables continuous delivery and declarative configuration
      management for Kubernetes applications using GitOps principles.
    meta.crossplane.io/license: Apache-2.0
    meta.crossplane.io/maintainer: Upbound Maintainers
    meta.crossplane.io/readme: |
      ArgoCD is a declarative GitOps continuous delivery tool for Kubernetes that
      follows the GitOps methodology to manage infrastructure and application
      configurations.
    meta.crossplane.io/source: https://github.com/argoproj/argo-cd
  name: argocd
spec:
  packagingType: Helm
  helm:
    releaseName: argo-cd
    releaseNamespace: argo-system
    # values:
    #   foo: bar
EOF
```
Your controller's file structure should look like this:

```ini
.
├── crds
│   ├── applications.argoproj.io.yaml
│   ├── applicationsets.argoproj.io.yaml
│   └── appprojects.argoproj.io.yaml
├── crossplane.yaml
└── helm
    └── chart.tgz
```

### Package and push controller-argocd

At the root of your controller's working directory, build the contents into an xpkg:

```ini
up xpkg build
```

This causes an xpkg to get saved to your current directory with a name like `argocd-f7091386b4c0.xpkg`.

Push the package to your desired OCI registry:

```shell
export UPBOUND_ACCOUNT=<your-account>
export CONTROLLER_NAME=controller-argocd
export CONTROLLER_VERSION=v7.8.8
export XPKG_FILENAME=<xpkg-file>

up xpkg push --create xpkg.upbound.io/$UPBOUND_ACCOUNT/$CONTROLLER_NAME:$CONTROLLER_VERSION -f $XPKG_FILENAME
```

### Deploy controller-argocd to a control plane

Set your kubecontext to the desired control plane in Upbound. Change the package path to the OCI registry you pushed it to. Then, deploy the _Controller_ directly:

```ini
# Illustrative install manifest, mirroring the generic deploy step above.
cat <<EOF | kubectl apply -f -
apiVersion: pkg.upbound.io/v1alpha1
kind: Controller
metadata:
  name: argocd
spec:
  package: xpkg.upbound.io/$UPBOUND_ACCOUNT/controller-argocd:v7.8.8
EOF
```

## Frequently asked questions

<details>
<summary>Can I package any software or are there any prerequisites to be a Controller?</summary>

We define a *Controller* as software that has at least one Custom Resource Definition (CRD) and a Kubernetes controller for that CRD. This is the minimum requirement to be a *Controller*. We have checks to enforce this at packaging time.

</details>

<details>
<summary>How can I package my software as a Controller?</summary>

Currently, we support Helm charts as the underlying package format for *Controllers*. As long as you have a Helm chart, you can package it as a *Controller*.

If you don't have a Helm chart, you can't package the software as a *Controller*. We may extend this to support other packaging formats, like Kustomize, in the future.

</details>

<details>
<summary>Can I package Crossplane XRDs/Compositions as a Helm chart to deploy as a Controller?</summary>

This isn't recommended. For packaging Crossplane XRDs and Compositions, we recommend using the `Configuration` package format. A Helm chart containing only Crossplane XRDs and Compositions doesn't qualify as a *Controller*.

</details>

<details>
<summary>How can I override the Helm values when deploying a Controller?</summary>

Overriding the Helm values is possible at two levels:

- During packaging time, in the package manifest file.
- At runtime, using a `ControllerRuntimeConfig` resource (similar to Crossplane's `DeploymentRuntimeConfig`).

</details>

<details>
<summary>How can I configure the Helm release name and namespace for the controller?</summary>

Right now, it isn't possible to configure these at runtime. The package author sets the release name and namespace during packaging, so they're hardcoded inside the package. Unlike a regular application deployed by a Helm chart, a *Controller* can only be deployed once in a given control plane, so relying on predefined release names and namespaces should be acceptable. We may consider exposing these in `ControllerRuntimeConfig` later, but we'd like to keep this opinionated unless there are strong reasons to change it.

</details>

<details>
<summary>Can I deploy more than one instance of a Controller package?</summary>

No, this isn't possible. Remember, a *Controller* package introduces CRDs, which are cluster-scoped objects. Just as you can't deploy more than one instance of the same Crossplane Provider package today, it isn't possible to deploy more than one instance of a *Controller*.

</details>

<details>
<summary>Do I need a specific Crossplane version to run Controllers?</summary>

Yes, you need to use Crossplane v1.19.0 or later to use *Controllers*. This is because of the changes in the Crossplane codebase to support third-party package formats in dependencies.

Spaces `v1.12.0` supports Crossplane `v1.19` in the *Rapid* release channel.

</details>

<details>
<summary>Can I deploy Controllers outside of an Upbound control plane? With UXP?</summary>

No, *Controllers* are a proprietary package format and are only available for control planes running in Spaces hosting environments in Upbound.

</details>

[cli]: /manuals/uxp/overview

diff --git a/spaces_versioned_docs/version-v1.14/howtos/self-hosted/ctp-audit-logs.md b/spaces_versioned_docs/version-v1.14/howtos/self-hosted/ctp-audit-logs.md
new file mode 100644
index 000000000..52f52c776
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.14/howtos/self-hosted/ctp-audit-logs.md
@@ -0,0 +1,549 @@
---
title: Control plane audit logging
---

This guide explains how to enable and configure audit logging for control planes in Self-Hosted Upbound Spaces.

Starting in Spaces `v1.14.0`, each control plane contains an API server that supports audit log collection. You can use audit logging to track creation, updates, and deletions of Crossplane resources. Control plane audit logs use observability features to collect audit logs with `SharedTelemetryConfig` and send logs to an OpenTelemetry (`OTEL`) collector.

:::info API Version Information
This guide applies to **Spaces v1.14.0 and later** (Self-Hosted only). Audit logging is not available in earlier versions.

For API specifications on observability resources and details on how observability evolved across versions, see the [Spaces API Reference - v1.14](../../../../../1.14/reference/apis/spaces-api/).
:::

## Prerequisites

Before you begin, make sure you have:

* Spaces `v1.14.0` or greater
* Admin access to your Spaces host cluster
* `kubectl` configured to access the host cluster
* `helm` installed
* `yq` installed
* `up` CLI installed and logged in to your organization

## Enable observability

Observability graduated to generally available (GA) in `v1.14.0` but is disabled by default.

### Before `v1.14`

To enable the GA Observability feature, upgrade your Spaces installation to `v1.14.0` or later and update your installation setting to the new flag:

```diff
helm upgrade spaces upbound/spaces -n upbound-system \
-  --set "features.alpha.observability.enabled=true"
+  --set "observability.enabled=true"
```

### After `v1.14`

To enable the GA Observability feature for `v1.14.0` and later, pass the feature flag:

```sh
helm upgrade spaces upbound/spaces -n upbound-system \
  --set "observability.enabled=true"
```

To confirm Observability is enabled, run the `helm get values` command:

```shell
helm get values --namespace upbound-system spaces | yq .observability
```

Your output should return:

```shell-noCopy
enabled: true
```

## Install an observability backend

:::note
If you already have an observability backend in your environment, skip to the next section.
:::

For this guide, you'll use Grafana's `docker-otel-lgtm` bundle to validate audit log generation. For production environments, configure a dedicated observability backend like Datadog, Splunk, or an enterprise-grade Grafana stack.

First, make sure your `kubectl` context points to your Spaces host cluster:

```shell
kubectl config current-context
```

The output should return your cluster name.

Next, install `docker-otel-lgtm` as a deployment using port-forwarding to connect to Grafana.
Create a manifest file and paste the
+following configuration:
+
+```yaml title="otel-lgtm.yaml"
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: observability
+---
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    app: otel-lgtm
+  name: otel-lgtm
+  namespace: observability
+spec:
+  ports:
+  - name: grpc
+    port: 4317
+    protocol: TCP
+    targetPort: 4317
+  - name: http
+    port: 4318
+    protocol: TCP
+    targetPort: 4318
+  - name: grafana
+    port: 3000
+    protocol: TCP
+    targetPort: 3000
+  selector:
+    app: otel-lgtm
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: otel-lgtm
+  labels:
+    app: otel-lgtm
+  namespace: observability
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: otel-lgtm
+  template:
+    metadata:
+      labels:
+        app: otel-lgtm
+    spec:
+      containers:
+      - name: otel-lgtm
+        image: grafana/otel-lgtm
+        ports:
+        - containerPort: 4317
+        - containerPort: 4318
+        - containerPort: 3000
+```
+
+Next, apply the manifest:
+
+```shell
+kubectl apply --filename otel-lgtm.yaml
+```
+
+Your output should return the resources:
+
+```shell
+namespace/observability created
+service/otel-lgtm created
+deployment.apps/otel-lgtm created
+```
+
+To verify your resources deployed, use `kubectl get` (for example,
+`kubectl get deployments --namespace observability`) and confirm the resources
+report an `ACTIVE` or `READY` status.
+
+Next, forward the Grafana port:
+
+```shell
+kubectl port-forward svc/otel-lgtm --namespace observability 3000:3000
+```
+
+Now you can access the Grafana UI at http://localhost:3000.
+
+
+## Create an audit-enabled control plane
+
+To enable audit logging for a control plane, you need to label it so the
+`SharedTelemetryConfig` can identify it and apply audit settings. This section
+creates a new control plane with the `audit-enabled: "true"` label. The
+`SharedTelemetryConfig` (created in the next section) finds control planes with
+this label and enables audit logging on them.
+
+Create a new manifest file and paste the configuration below:
+
+```yaml title="ctp-audit.yaml" +apiVersion: v1 +kind: Namespace +metadata: + name: audit-test +--- +apiVersion: spaces.upbound.io/v1beta1 +kind: ControlPlane +metadata: + labels: + audit-enabled: "true" + name: ctp1 + namespace: audit-test +spec: + writeConnectionSecretToRef: + name: kubeconfig-ctp1 + namespace: audit-test +``` +
+
+The `metadata.labels` section contains the `audit-enabled` setting.
+
+Apply the manifest:
+
+```shell
+kubectl apply --filename ctp-audit.yaml
+```
+
+Confirm your control plane reaches the `READY` status:
+
+```shell
+kubectl get --filename ctp-audit.yaml
+```
+
+## Create a `SharedTelemetryConfig`
+
+The `SharedTelemetryConfig` applies to control plane objects in a namespace,
+enabling audit logging and routing logs to your `OTEL` endpoint.
+
+Create a `SharedTelemetryConfig` manifest file and paste the configuration
+below:
+
+```yaml title="sharedtelemetryconfig.yaml" +apiVersion: observability.spaces.upbound.io/v1alpha1 +kind: SharedTelemetryConfig +metadata: + name: apiserver-audit + namespace: audit-test +spec: + apiServer: + audit: + enabled: true + exporters: + otlphttp: + endpoint: http://otel-lgtm.observability:4318 + exportPipeline: + logs: [otlphttp] + controlPlaneSelector: + labelSelectors: + - matchLabels: + audit-enabled: "true" +``` +
+
+This configuration:
+
+* Sets `apiServer.audit.enabled` to `true`
+* Configures the `otlphttp` exporter to point to the `docker-otel-lgtm` service
+* Uses `controlPlaneSelector` to match any control plane in the namespace with the `audit-enabled` label set to `true`
+
+:::note
+You can configure the `SharedTelemetryConfig` to select control planes in
+several ways. For more information on control plane selection, see the [control
+plane selection][ctp-selection] documentation.
+:::
+
+Apply the `SharedTelemetryConfig`:
+
+```shell
+kubectl apply --filename sharedtelemetryconfig.yaml
+```
+
+Confirm the configuration selected the control plane:
+
+```shell
+kubectl get --filename sharedtelemetryconfig.yaml
+```
+
+The output should return `SELECTED` as `1` and `VALIDATED` as `TRUE`.
+
+For more detailed status information, use `kubectl get`:
+
+```shell
+kubectl get --filename sharedtelemetryconfig.yaml --output yaml | yq .status
+```
+
+## Generate and monitor audit events
+
+You enabled telemetry on your new control plane and can now generate events to
+test the audit logging. This guide uses the `nop-provider` to simulate resource
+operations.
+
+Switch your `up` context to the new control plane:
+
+```shell
+up ctx <organization>/<space>/<group>/<control-plane>
+```
+
+Create a new Provider manifest:
+
+```yaml title="provider-nop.yaml"
+apiVersion: pkg.crossplane.io/v1
+kind: Provider
+metadata:
+  name: crossplane-contrib-provider-nop
+spec:
+  package: xpkg.upbound.io/crossplane-contrib/provider-nop:v0.4.0
+```
+
+Apply the provider manifest:
+
+```shell
+kubectl apply --filename provider-nop.yaml
+```
+
+Verify the provider installed and returns `HEALTHY` status as `TRUE`.
+
+Apply an example resource to kick off event generation:
+
+
+```shell
+kubectl apply --filename https://raw.githubusercontent.com/crossplane-contrib/provider-nop/refs/heads/main/examples/nopresource.yaml
+```
+
+In your Grafana dashboard, navigate to **Drilldown** > **Logs** under the
+Grafana menu.
+
+
+Filter for `controlplane-audit` log messages.
+
+Create a query to find `create` events on `nopresources` by filtering:
+
+* The `verb` field for `create` events
+* The `objectRef_resource` field to match the Kind `nopresources`
+
+Review the audit log results. The log stream displays:
+
+* The client applying the create operation
+* The resource kind
+* Client details
+* The response code
+
+Expand the example below for an audit log entry:
+
+ Audit log entry + +```json +{ + "level": "Metadata", + "auditID": "51bbe609-14ad-4874-be78-1289c10d506a", + "stage": "ResponseComplete", + "requestURI": "/apis/nop.crossplane.io/v1alpha1/nopresources?fieldManager=kubectl-client-side-apply&fieldValidation=Strict", + "verb": "create", + "user": { + "username": "kubernetes-admin", + "groups": ["system:masters", "system:authenticated"] + }, + "impersonatedUser": { + "username": "upbound:spaces:host:masterclient", + "groups": [ + "system:authenticated", + "upbound:controlplane:admin", + "upbound:spaces:host:system:masters" + ] + }, + "sourceIPs": ["10.244.0.135", "127.0.0.1"], + "userAgent": "kubectl/v1.32.2 (darwin/arm64) kubernetes/67a30c0", + "objectRef": { + "resource": "nopresources", + "name": "example", + "apiGroup": "nop.crossplane.io", + "apiVersion": "v1alpha1" + }, + "responseStatus": { "metadata": {}, "code": 201 }, + "requestReceivedTimestamp": "2025-09-19T23:03:24.540067Z", + "stageTimestamp": "2025-09-19T23:03:24.557583Z", + "annotations": { + "authorization.k8s.io/decision": "allow", + "authorization.k8s.io/reason": "RBAC: allowed by ClusterRoleBinding \"controlplane-admin\" of ClusterRole \"controlplane-admin\" to Group \"upbound:controlplane:admin\"" + } + } +``` +
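+
+If no `create` events show up in Grafana, a quick sanity check is to inspect the
+LGTM bundle's own output. This is a minimal sketch that assumes the `otel-lgtm`
+Deployment created earlier in this guide; the exact log format can differ
+between image versions:
+
+```shell
+# Confirm the collector pod is running
+kubectl get pods --namespace observability --selector app=otel-lgtm
+
+# Tail the bundle's recent output to confirm it receives OTLP traffic
+kubectl logs deployment/otel-lgtm --namespace observability --tail=50
+```
+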
+
+## Customize the audit policy
+
+Spaces `v1.14.0` includes a default audit policy. You can customize this policy
+by creating a configuration file and passing the values to
+`observability.controlPlanes.apiServer.auditPolicy` in the helm values file.
+
+An example custom audit policy:
+
+```yaml
+observability:
+  controlPlanes:
+    apiServer:
+      auditPolicy: |
+        apiVersion: audit.k8s.io/v1
+        kind: Policy
+        rules:
+          # ============================================================================
+          # RULE 1: Exclude health check and version endpoints
+          # ============================================================================
+          - level: None
+            nonResourceURLs:
+              - '/healthz*'
+              - '/readyz*'
+              - /version
+          # ============================================================================
+          # RULE 2: ConfigMaps - Write operations only
+          # ============================================================================
+          - level: Metadata
+            resources:
+              - group: ""
+                resources:
+                  - configmaps
+            verbs:
+              - create
+              - update
+              - patch
+              - delete
+            omitStages:
+              - RequestReceived
+              - ResponseStarted
+          # ============================================================================
+          # RULE 3: Secrets - ALL operations
+          # ============================================================================
+          - level: Metadata
+            resources:
+              - group: ""
+                resources:
+                  - secrets
+            verbs:
+              - get
+              - list
+              - watch
+              - create
+              - update
+              - patch
+              - delete
+            omitStages:
+              - RequestReceived
+              - ResponseStarted
+          # ============================================================================
+          # RULE 4: Global exclusion of read-only operations
+          # ============================================================================
+          - level: None
+            verbs:
+              - get
+              - list
+              - watch
+          # ==========================================================================
+          # RULE 5: Exclude standard Kubernetes resources from write operation logging
+          # ==========================================================================
+          - level: None
+            resources:
+              - group: ""
+              - group: "apps"
+              - group: "networking.k8s.io"
+              - group: "policy"
+              - group: "rbac.authorization.k8s.io"
+              - group: "storage.k8s.io"
+              - group: "batch"
+              - group: "autoscaling"
+              - group: "metrics.k8s.io"
+              - group: "node.k8s.io"
+              - group: "scheduling.k8s.io"
+              - group: "coordination.k8s.io"
+              - group: "discovery.k8s.io"
+              - group: "events.k8s.io"
+              - group: "flowcontrol.apiserver.k8s.io"
+              - group: "internal.apiserver.k8s.io"
+              - group: "authentication.k8s.io"
+              - group: "authorization.k8s.io"
+              - group: "admissionregistration.k8s.io"
+            verbs:
+              - create
+              - update
+              - patch
+              - delete
+          # ============================================================================
+          # RULE 6: Catch-all for ALL custom resources and any missed resources
+          # ============================================================================
+          - level: Metadata
+            verbs:
+              - create
+              - update
+              - patch
+              - delete
+            omitStages:
+              - RequestReceived
+              - ResponseStarted
+          # ============================================================================
+          # RULE 7: Final catch-all - exclude everything else
+          # ============================================================================
+          - level: None
+            omitStages:
+              - RequestReceived
+              - ResponseStarted
+```
+
+You can apply this policy during Spaces installation or upgrade using the helm values file.
+
+Audit policies use rules evaluated in order from top to bottom where the first
+matching rule applies.
Control plane audit policies follow Kubernetes conventions and use the
+following logging levels:
+
+* **None** - Don't log events matching this rule
+* **Metadata** - Log request metadata (user, timestamp, resource, verb) but not request or response bodies
+* **Request** - Log metadata and request body but not response body
+* **RequestResponse** - Log metadata, request body, and response body
+
+For more information, review the Kubernetes [Auditing] documentation.
+
+## Disable audit logging
+
+You can disable audit logging on a control plane by removing it from the
+`SharedTelemetryConfig` selector or by deleting the `SharedTelemetryConfig`.
+
+### Disable for specific control planes
+
+Remove the `audit-enabled` label from control planes that should stop sending audit logs:
+
+```bash
+kubectl label controlplane <controlplane-name> --namespace <namespace> audit-enabled-
+```
+
+The `SharedTelemetryConfig` no longer selects this control plane, and audit log collection stops.
+
+### Disable for all control planes
+
+Delete the `SharedTelemetryConfig` to stop audit logging for all control planes it manages:
+
+```bash
+kubectl delete sharedtelemetryconfig <name> --namespace <namespace>
+```
+
+[ctp-selection]: /spaces/howtos/observability/#control-plane-selection
+[Auditing]: https://kubernetes.io/docs/tasks/debug/debug-cluster/audit/
diff --git a/spaces_versioned_docs/version-v1.14/howtos/self-hosted/declarative-ctps.md b/spaces_versioned_docs/version-v1.14/howtos/self-hosted/declarative-ctps.md
new file mode 100644
index 000000000..2c3e5331b
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.14/howtos/self-hosted/declarative-ctps.md
@@ -0,0 +1,110 @@
+---
+title: Declaratively create control planes
+sidebar_position: 99
+description: A tutorial to configure a Space with Argo to declaratively create and
+  manage control planes
+---
+
+In this tutorial, you learn how to configure [Argo CD][argo-cd] to communicate with a self-hosted Space. This flow allows you to declaratively create and manage control planes from Git. Argo CD is a continuous delivery tool for Kubernetes that you can use to drive GitOps flows for your control plane infrastructure.
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments.
+
+For API specifications on ControlPlane resources and their declarative creation, see the [Spaces API Reference](../../../../../reference/apis/spaces-api/). For version compatibility details, see the .
+:::
+
+## Prerequisites
+
+To complete this tutorial, you need the following:
+
+- An Upbound Space that you've already deployed.
+- An instance of Argo CD deployed on a Kubernetes cluster.
+
+## Connect your Space to Argo CD
+
+Fetch the kubeconfig for the Space cluster (the Kubernetes cluster where you installed the Upbound Spaces software). You must add the Space cluster as a context to Argo.
+
+```shell
+export SPACES_CLUSTER_SERVER="https://url"
+export SPACES_CLUSTER_NAME="cluster"
+```
+
+Switch contexts to the Kubernetes cluster where you've installed Argo. Create a secret on the Argo cluster whose data contains the connection details of the Space cluster.
+
+:::important
+Make sure the following commands are executed against your **Argo** cluster, not your Space cluster.
+:::
+
+Run the following command in a terminal:
+
+```yaml
+cat <
+When you install a Crossplane provider on a control plane, memory gets consumed
+according to the number of custom resources it defines.
Upbound [Official Provider families][official-provider-families] give platform teams
+finer-grained control to install providers for only the resources they need,
+reducing the bloat of needlessly installing unused custom resources. Still, you
+must factor provider memory usage into your calculations to ensure you've
+rightsized the memory available in your Spaces cluster.
+
+
+:::important
+Be careful not to conflate `managed resource` with `custom resource definition`.
+The former is an "instance" of an external resource in Crossplane, while the
+latter defines the API schema of that resource.
+:::
+
+It's estimated that each custom resource definition consumes ~3 MB of memory.
+The calculation is:
+
+```bash
+number_of_managed_resources_defined_in_provider x 3 MB = memory_required
+```
+
+For example, if you plan to use [provider-aws-ec2][provider-aws-ec2], [provider-aws-s3][provider-aws-s3], and [provider-aws-iam][provider-aws-iam], the resulting calculation is:

+```bash
+provider-aws-ec2: 98 x 3 MB = 294 MB
+provider-aws-s3: 23 x 3 MB = 69 MB
+provider-aws-iam: 22 x 3 MB = 66 MB
+---
+total memory: 429 MB
+```
+
+In this scenario, you should budget ~430 MB of memory for provider usage on this control plane.
+
+:::tip
+Do this calculation for each provider you plan to install on your control plane.
+Then do this calculation for each control plane you plan to run in your Space.
+:::
+
+
+#### Total memory usage
+
+Add the memory usage from the previous sections. Given the preceding examples,
+they result in a recommendation to budget ~1 GB memory for each control plane
+you plan to run in the Space.
+
+:::important
+
+The 1 GB recommendation is an example.
+You should input your own provider requirements to arrive at a final number for
+your own deployment.
+
+:::
+
+### CPU considerations
+
+#### Managed resource CPU usage
+
+The number of managed resources under management by a control plane is the largest contributing factor for CPU usage in a Space. CPU usage scales linearly with the number of managed resources under management by your control plane. In Upbound's testing, CPU usage _does_ vary from provider to provider. Using the Upbound Official Provider families as a baseline:
+
+
+| Provider | MR create operation (CPU core seconds) | MR update or reconciliation operation (CPU core seconds) |
+| ---- | ---- | ---- |
+| provider-family-aws | 10 | 2 to 3 |
+| provider-family-gcp | 7 | 1.5 |
+| provider-family-azure | 7 to 10 | 1.5 to 3 |
+
+
+When resources are in a non-ready state, Crossplane providers reconcile often (as fast as every 15 seconds). Once a resource reaches `READY`, each Crossplane provider defaults to a 10 minute poll interval. Given this, a 16-core machine has `16 x 10 x 60 = 9600` CPU core seconds available per poll window. Interpreting this table:
+
+- A single control plane that needs to create 100 AWS MRs concurrently would consume 1000 CPU core seconds, or about 1.5 cores sustained over that window.
+- A single control plane that continuously reconciles 100 AWS MRs once they've reached a `READY` state would consume 300 CPU core seconds, or about half a core.
+
+Since `provider-family-aws` has the highest recorded numbers for CPU time required, you can use that as an upper limit in your calculations.
+
+Using these calculations and extrapolating values, given a 16 core machine, it's recommended you don't exceed a single control plane managing 1000 MRs. Suppose you plan to run 10 control planes, each managing 1000 MRs.
You want to make sure your node pool has capacity for 160 cores. If you are using a machine type that has 16 cores per machine, that would mean having a node pool of size 10. If you are using a machine type that has 32 cores per machine, that would mean having a node pool of size 5.
+
+#### Cloud API latency
+
+Crossplane providers typically talk to external cloud APIs. Those external cloud APIs often have global API rate limits (examples: [Azure limits][azure-limits], [AWS EC2 limits][aws-ec2-limits]).
+
+Crossplane providers built on [Upjet][upjet] (such as Upbound Official Provider families) use Terraform under the covers. They expose some knobs (such as `--max-reconcile-rate`) you can use to tweak reconciliation rates.
+
+### Resource buffers
+
+The guidance in the preceding sections explains how to calculate CPU and memory usage requirements for:
+
+- a set of control planes in a Space
+- the number of providers you plan to use
+- the number of managed resource instances your control planes manage
+
+Upbound recommends adding an extra 20% buffer to your resource capacity calculations. The numbers shared in the preceding sections are based on average measurements and don't account for peaks or surges; the buffer exists to absorb them.
+
+## Deploying more than one Space
+
+You are welcome to deploy more than one Space. You just need to make sure you have a 1:1 mapping of Spaces to Kubernetes clusters. Each Space is by its nature constrained to a single Kubernetes cluster, and clusters are regional entities. If you want to offer control planes in multiple regions or across multiple public clouds, those are justifications for deploying more than one Space.
+
+## Cert-manager
+
+A Spaces deployment uses the [Certificate Custom Resource] from cert-manager to
+provision certificates within the Space. This establishes a clean API boundary
+between what your platform may need and the Certificate requirements of a
+Space.
+
+
+If you would like more control over the issuing Certificate Authority
+for your deployment or the deployment of cert-manager itself, this guide is for
+you.
+
+
+### Deploying
+
+An Upbound Space deployment doesn't have any special requirements for the
+cert-manager deployment itself. The only expectation is that cert-manager and
+the corresponding Custom Resources exist in the cluster.
+
+You should be free to install cert-manager in the cluster in any way that makes
+sense for your organization. You can find some [installation ideas] in the
+cert-manager docs.
+
+### Issuers
+
+A default Upbound Space install includes a [ClusterIssuer]. This `ClusterIssuer`
+is a `selfSigned` issuer that other certificates are minted from. You have a
+couple of options available to you for changing the default deployment of the
+Issuer:
+
+1. Changing the issuer name.
+2. Providing your own ClusterIssuer.
+
+
+#### Changing the issuer name
+
+The `ClusterIssuer` name is controlled by the `certificates.space.clusterIssuer`
+Helm property. You can adjust this during installation by providing the
+following parameter (assuming your new name is 'SpaceClusterIssuer'):
+
+```shell
+--set "certificates.space.clusterIssuer=SpaceClusterIssuer"
+```
+
+
+
+#### Providing your own ClusterIssuer
+
+To provide your own `ClusterIssuer`, you need to first set up your own
+`ClusterIssuer` in the cluster.
The cert-manager docs have a variety of options +for providing your own. See the [Issuer Configuration] docs for more details. + +Once you have your own `ClusterIssuer` set up in the cluster, you need to turn +off the deployment of the `ClusterIssuer` included in the Spaces deployment. +To do that, provide the following parameter during installation: +```shell +--set "certificates.provision=false" +``` + +###### Considerations +If your `ClusterIssuer` has a name that's different from the default name that +the Spaces installation expects ('spaces-selfsigned'), you need to also specify +your `ClusterIssuer` name during install using: +```shell +--set "certificates.space.clusterIssuer=" +``` + +## Ingress + +To route requests from an external client (kubectl, ArgoCD, etc) to a +control plane, a Spaces deployment includes a default [Ingress] manifest. In +order to ease getting started scenarios, the current `Ingress` includes +configurations (properties and annotations) that assume that you installed the +commonly used [ingress-nginx ingress controller] in the cluster. This section +walks you through using a different `Ingress`, if that's something that your +organization needs. + +### Default manifest + +An example of what the current `Ingress` manifest included in a Spaces install +is below: + +```yaml +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: mxe-router-ingress + namespace: upbound-system + annotations: + nginx.ingress.kubernetes.io/use-regex: "true" + nginx.ingress.kubernetes.io/ssl-redirect: "false" + nginx.ingress.kubernetes.io/proxy-send-timeout: "3600" + nginx.ingress.kubernetes.io/proxy-read-timeout: "3600" + nginx.ingress.kubernetes.io/proxy-request-buffering: "off" + nginx.ingress.kubernetes.io/proxy-body-size: "0" + nginx.ingress.kubernetes.io/proxy-http-version: "1.1" + nginx.ingress.kubernetes.io/backend-protocol: "HTTPS" + nginx.ingress.kubernetes.io/proxy-ssl-verify: "on" + nginx.ingress.kubernetes.io/proxy-ssl-secret: "upbound-system/mxp-hostcluster-certs" + nginx.ingress.kubernetes.io/proxy-ssl-name: spaces-router + nginx.ingress.kubernetes.io/configuration-snippet: | + more_set_headers "X-Request-Id: $req_id"; + more_set_headers "Request-Id: $req_id"; + more_set_headers "Audit-Id: $req_id"; +spec: + ingressClassName: nginx + tls: + - hosts: + - {{ .Values.ingress.host }} + secretName: mxe-router-tls + rules: + - host: {{ .Values.ingress.host }} + http: + paths: + - path: "/v1/controlPlanes" + pathType: Prefix + backend: + service: + name: spaces-router + port: + name: http +``` + +The notable pieces are: +1. Namespace + + + +This property represents the namespace that the spaces-router is deployed to. +In most cases this is `upbound-system`. + + + +2. proxy-ssl-* annotations + +The spaces-router pod terminates TLS using certificates located in the +mxp-hostcluster-certs `Secret` located in the `upbound-system` `Namespace`. + +3. proxy-* annotations + +Requests coming into the ingress-controller can be variable depending on what +the client is requesting. For example, `kubectl get crds` has different +requirements for the connection compared to a 'watch', for example +`kubectl get pods -w`. The ingress-controller is configured to be able to +account for either scenario. + + +4. configuration-snippets + +These commands add headers to the incoming requests that help with telemetry +and diagnosing problems within the system. + +5. 
Rules
+
+Requests coming into the control planes use a `/v1/controlPlanes` prefix and
+need to be routed to the spaces-router.
+
+
+### Using a different ingress manifest
+
+Operators can choose to use an `Ingress` manifest and ingress controller that
+makes the most sense for their organization. To turn off deploying the default
+`Ingress` manifest, provide the following parameter during installation:
+
+```shell
+--set "ingress.provision=false"
+```
+
+#### Considerations
+
+
+
+
+
+Operators need to take the following considerations into account when
+disabling the default `Ingress` deployment.
+
+1. Ensure the custom `Ingress` manifest is placed in the same namespace as the
+`spaces-router` pod.
+2. Ensure that the ingress is configured to use the `spaces-router` as a secure
+backend and that the secret used is the `mxp-hostcluster-certs` secret.
+3. Ensure that the ingress is configured to handle long-lived connections.
+4. Ensure that the routing rule sends requests prefixed with
+`/v1/controlPlanes` to the `spaces-router` using the `http` port.
+
+
+
+
+
+
+[cert-manager]: https://cert-manager.io/
+[Certificate Custom Resource]: https://cert-manager.io/docs/usage/certificate/
+[ClusterIssuer]: https://cert-manager.io/docs/concepts/issuer/
+[ingress-nginx ingress controller]: https://kubernetes.github.io/ingress-nginx/deploy/
+[installation ideas]: https://cert-manager.io/docs/installation/
+[Ingress]: https://kubernetes.io/docs/concepts/services-networking/ingress/
+[Issuer Configuration]: https://cert-manager.io/docs/configuration/
+[official-provider-families]: /manuals/packages/providers/provider-families
+[aws-eks]: https://aws.amazon.com/eks/
+[google-cloud-gke]: https://cloud.google.com/kubernetes-engine
+[microsoft-aks]: https://azure.microsoft.com/en-us/products/kubernetes-service
+[upbound-account]: https://www.upbound.io/register/?utm_source=docs&utm_medium=cta&utm_campaign=docs_spaces
+[provider-aws-ec2]: https://marketplace.upbound.io/providers/upbound/provider-aws-ec2
+[provider-aws-s3]: https://marketplace.upbound.io/providers/upbound/provider-aws-s3
+[provider-aws-iam]: https://marketplace.upbound.io/providers/upbound/provider-aws-iam
+[azure-limits]: https://learn.microsoft.com/en-us/azure/azure-resource-manager/management/request-limits-and-throttling
+[aws-ec2-limits]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/throttling.html#throttling-limits-rate-based
+[upjet]: https://github.com/upbound/upjet
diff --git a/spaces_versioned_docs/version-v1.14/howtos/self-hosted/dr.md b/spaces_versioned_docs/version-v1.14/howtos/self-hosted/dr.md
new file mode 100644
index 000000000..67ecbfecf
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.14/howtos/self-hosted/dr.md
@@ -0,0 +1,412 @@
+---
+title: Disaster Recovery
+sidebar_position: 13
+description: Configure Space-wide backups for disaster recovery.
+---
+
+:::info API Version Information
+This guide applies to **Spaces v1.9.0 and later** (Self-Hosted only). Space Backups is enabled by default starting in v1.14.0.
+
+- **v1.9.0-v1.13.x**: Alpha feature (requires manual enablement)
+- **v1.14.0+**: GA (enabled by default)
+
+For version-specific features and backup resources, see the . For control-plane backups, see [Backup and Restore](../backup-and-restore.md).
+:::
+
+:::important
+For Connected and Disconnected Spaces, this feature requires Spaces `v1.9.0` and, starting with `v1.14.0`, Spaces enables it by default.
+
+To enable it on versions earlier than `v1.14.0`, set `features.alpha.spaceBackup.enabled=true` when you install Spaces:
+
+```bash
+up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
+  ...
+  --set "features.alpha.spaceBackup.enabled=true"
+```
+:::
+
+Upbound's _Space Backups_ is a built-in Space-wide backup and restore feature. This guide explains how to configure Space Backups and how to restore from one of them in case of disaster recovery.
+
+This feature is meant for Space administrators. Group or Control Plane users can leverage [Shared Backups][shared-backups] to back up and restore their ControlPlanes.
+
+## Benefits
+
+The Space Backups feature provides the following benefits:
+
+* Automatic backups for all resources in a Space and all resources in control planes, without any operational overhead.
+* Backup schedules.
+* Selectors to specify resources to back up.
+
+## Prerequisites
+
+The Space Backups feature must be enabled in the Space:
+
+- Cloud Spaces: Not accessible to users.
+- Connected Spaces: Space administrator must enable this feature.
+- Disconnected Spaces: Space administrator must enable this feature.
+
+## Configure a Space Backup Config
+
+[SpaceBackupConfig][spacebackupconfig] is a cluster-scoped resource. This resource configures the storage details and provider. Whenever a backup executes (either by schedule or manually initiated), it references a SpaceBackupConfig to tell it where to store the snapshot.
+
+
+### Backup config provider
+
+
+The `spec.objectStorage.provider` and `spec.objectStorage.config` fields configure:
+
+* The object storage provider
+* The path to the provider
+* The credentials needed to communicate with the provider
+
+You can only set one provider. Upbound currently supports AWS, Azure, and GCP as providers.
+
+
+`spec.objectStorage.config` is a freeform map of configuration options for the object storage provider. See [Thanos object storage][thanos-object-storage] for more information on the formats for each supported cloud provider. `spec.objectStorage.bucket` and `spec.objectStorage.provider` override the required values in the config.
+
+
+#### AWS as a storage provider
+
+This example demonstrates how to use AWS as a storage provider for your backups:
+
+```yaml
+apiVersion: admin.spaces.upbound.io/v1alpha1
+kind: SpaceBackupConfig
+metadata:
+  name: default
+spec:
+  objectStorage:
+    provider: AWS
+    bucket: spaces-backup-bucket
+    config:
+      endpoint: s3.eu-west-2.amazonaws.com
+      region: eu-west-2
+    credentials:
+      source: Secret
+      secretRef:
+        name: bucket-creds
+        namespace: upbound-system
+        key: creds
+```
+
+This example assumes you've already created an S3 bucket called
+`spaces-backup-bucket` in the `eu-west-2` AWS region. To access the bucket,
+define the account credentials as a Secret in the specified Namespace
+(`upbound-system` in this example).
+
+#### Azure as a storage provider
+
+This example demonstrates how to use Azure as a storage provider for your backups:
+
+```yaml
+apiVersion: admin.spaces.upbound.io/v1alpha1
+kind: SpaceBackupConfig
+metadata:
+  name: default
+spec:
+  objectStorage:
+    provider: Azure
+    bucket: upbound-backups
+    config:
+      storage_account: upbackupstore
+      container: upbound-backups
+      endpoint: blob.core.windows.net
+    credentials:
+      source: Secret
+      secretRef:
+        name: bucket-creds
+        namespace: upbound-system
+        key: creds
+```
+
+
+This example assumes you've already created an Azure storage account called
+`upbackupstore` and blob `upbound-backups`.
To access the blob,
+define the account credentials as a Secret in the specified Namespace
+(`upbound-system` in this example).
+
+
+#### GCP as a storage provider
+
+This example demonstrates how to use Google Cloud Storage as a storage provider for your backups:
+
+```yaml
+apiVersion: admin.spaces.upbound.io/v1alpha1
+kind: SpaceBackupConfig
+metadata:
+  name: default
+spec:
+  objectStorage:
+    provider: GCP
+    bucket: spaces-backup-bucket
+    credentials:
+      source: Secret
+      secretRef:
+        name: bucket-creds
+        namespace: upbound-system
+        key: creds
+```
+
+
+This example assumes you've already created a Cloud bucket called
+`spaces-backup-bucket` and a service account with access to this bucket. Define the key file as a Secret in the specified Namespace
+(`upbound-system` in this example).
+
+
+## Configure a Space Backup Schedule
+
+
+[SpaceBackupSchedule][spacebackupschedule] is a cluster-scoped resource. This resource defines a backup schedule for the whole Space.
+
+Below is an example of a Space Backup Schedule running every day. It backs up all groups having `environment: production` labels and all control planes in those groups having `backup: please` labels.
+
+```yaml
+apiVersion: admin.spaces.upbound.io/v1alpha1
+kind: SpaceBackupSchedule
+metadata:
+  name: daily-schedule
+spec:
+  schedule: "@daily"
+  configRef:
+    kind: SpaceBackupConfig
+    name: default
+  match:
+    groups:
+      labelSelectors:
+      - matchLabels:
+          environment: production
+    controlPlanes:
+      labelSelectors:
+      - matchLabels:
+          backup: please
+```
+
+### Define a schedule
+
+The `spec.schedule` field is a [Cron-formatted][cron-formatted] string. Some common examples are below:
+
+| Entry | Description |
+| ----------------- | ------------------------------------------------------------------------------------------------- |
+| `@hourly` | Run once an hour. |
+| `@daily` | Run once a day. |
+| `@weekly` | Run once a week. |
+| `0 0/4 * * *` | Run every 4 hours. |
+| `0/15 * * * 1-5` | Run every 15 minutes, Monday through Friday. |
+| `@every 1h30m10s` | Run every 1 hour, 30 minutes, and 10 seconds. Hour is the largest measurement of time for @every. |
+
+### Suspend a schedule
+
+Use the `spec.suspend` field to suspend the schedule. A suspended schedule creates no new backups, but allows running backups to complete.
+
+```yaml
+apiVersion: admin.spaces.upbound.io/v1alpha1
+kind: SpaceBackupSchedule
+metadata:
+  name: daily-schedule
+spec:
+  suspend: true
+...
+```
+
+### Garbage collect backups when the schedule gets deleted
+
+Set `spec.useOwnerReferencesInBackup` to `true` to garbage collect the associated `SpaceBackup` resources when the `SpaceBackupSchedule` gets deleted.
+
+### Set the time to live
+
+Set the `spec.ttl` field to define the time to live for the backup. After this time, the backup is eligible for garbage collection. If this field isn't set, the backup isn't garbage collected.
+
+The time to live is a duration, for example, `168h` for 7 days.
+
+```yaml
+apiVersion: admin.spaces.upbound.io/v1alpha1
+kind: SpaceBackupSchedule
+metadata:
+  name: daily-schedule
+spec:
+  ttl: 168h # Backup is garbage collected after 7 days
+...
+```
+
+## Selecting space resources to back up
+
+By default, a SpaceBackup selects all groups and, for each of them, all control planes, secrets, and any other group-scoped resources.
+
+By setting `spec.match`, you can include only specific groups, control planes, secrets, or other Space resources in the backup.
+
+By setting `spec.exclude`, you can filter out some matched Space API resources from the backup.
+
+### Including space resources in a backup
+
+Different fields are available to include resources based on labels or names:
+
+- `spec.match.groups` to include only some groups in the backup.
+- `spec.match.controlPlanes` to include only some control planes in the backup.
+- `spec.match.secrets` to include only some secrets in the backup.
+- `spec.match.extras` to include only some extra resources in the backup.
+
+```yaml
+apiVersion: admin.spaces.upbound.io/v1alpha1
+kind: SpaceBackup
+metadata:
+  name: my-backup
+spec:
+  configRef:
+    kind: SpaceBackupConfig
+    name: default
+  match:
+    groups:
+      labelSelectors:
+      - matchLabels:
+          environment: production
+    controlPlanes:
+      labelSelectors:
+      - matchLabels:
+          backup: please
+    secrets:
+      names:
+      - my-secret
+    extras:
+    - apiGroup: "spaces.upbound.io"
+      kind: "SharedBackupConfig"
+      names:
+      - my-shared-backup
+```
+
+### Excluding Space resources from the backup
+
+Use the `spec.exclude` field to exclude matched Space API resources from the backup.
+
+Different fields are available to exclude resources based on labels or names:
+
+- `spec.exclude.groups` to exclude some groups from the backup.
+- `spec.exclude.controlPlanes` to exclude some control planes from the backup.
+- `spec.exclude.secrets` to exclude some secrets from the backup.
+- `spec.exclude.extras` to exclude some extra resources from the backup.
+
+```yaml
+apiVersion: admin.spaces.upbound.io/v1alpha1
+kind: SpaceBackup
+metadata:
+  name: my-backup
+spec:
+  ttl: 168h # Backup is garbage collected after 7 days
+  configRef:
+    kind: SpaceBackupConfig
+    name: default
+  match:
+    groups:
+      labelSelectors:
+      - matchLabels:
+          environment: production
+  exclude:
+    groups:
+      names:
+      - not-this-one-please
+```
+
+### Exclude resources in control planes' backups
+
+By default, a backup includes all resources in a selected control plane.
+
+Use the `spec.controlPlaneBackups.excludedResources` field to exclude resources from control planes' backups.
+
+```yaml
+apiVersion: admin.spaces.upbound.io/v1alpha1
+kind: SpaceBackup
+metadata:
+  name: my-backup
+spec:
+  ttl: 168h # Backup is garbage collected after 7 days
+  configRef:
+    kind: SpaceBackupConfig
+    name: default
+  controlPlaneBackups:
+    excludedResources:
+    - secrets
+    - buckets.s3.aws.upbound.io
+```
+
+## Create a manual backup
+
+[SpaceBackup][spacebackup] is a cluster-scoped resource that causes a single backup to occur for the whole Space.
+
+Below is an example of a manual SpaceBackup:
+
+```yaml
+apiVersion: admin.spaces.upbound.io/v1alpha1
+kind: SpaceBackup
+metadata:
+  name: my-backup
+spec:
+  configRef:
+    kind: SpaceBackupConfig
+    name: default
+  deletionPolicy: Delete
+```
+
+
+The `spec.deletionPolicy` field defines what happens when you delete the backup,
+including whether to delete the backup file from the bucket. The `deletionPolicy`
+value defaults to `Orphan`. Set it to `Delete` to remove uploaded files
+from the bucket.
+
+For more information on the backup and restore process, review the [Spaces API documentation][spaces-api-documentation].
+
+### Set the time to live
+
+Set the `spec.ttl` field to define the time to live for the backup. After this time, the backup is eligible for garbage collection. If this field isn't set, the backup isn't garbage collected. The time to live is a duration, for example, `168h` for 7 days.
+
+```yaml
+apiVersion: admin.spaces.upbound.io/v1alpha1
+kind: SpaceBackup
+metadata:
+  name: my-backup
+spec:
+  ttl: 168h # Backup is garbage collected after 7 days
+...
+```
+
+## Restore from a space backup
+
+Space Backup and Restore focuses only on disaster recovery. The restore procedure assumes a new Space installation with no existing resources. The restore procedure is idempotent, so you can run it multiple times without any side effects in case of failures.
+
+To restore a Space from an existing Space Backup, follow these steps:
+
+1. Install Spaces from scratch as needed.
+2. Create a `SpaceBackupConfig` as needed to access the SpaceBackup from the object storage, for example named `my-backup-config`.
+3. Select the backup you want to restore from, for example `my-backup`.
+4. Run the following command to restore the Space:
+
+```shell
+export SPACE_BACKUP_CONFIG=my-backup-config
+export SPACE_BACKUP=my-backup
+kubectl exec -ti -n upbound-system deployments/spaces-controller -c spaces -- hyperspace restore $SPACE_BACKUP $SPACE_BACKUP_CONFIG
+```
+
+### Restore specific control planes
+
+:::important
+This feature is available from Spaces v1.11.
+:::
+
+Instead of restoring the whole Space, you can choose to restore specific control planes
+from a backup using the `--controlplanes` flag. You can also use
+the `--skip-space-restore` flag to skip restoring Space objects.
+This allows Spaces admins to restore individual control planes without
+needing to restore the entire Space.
+
+```shell
+export SPACE_BACKUP_CONFIG=my-backup-config
+export SPACE_BACKUP=my-backup
+kubectl exec -ti -n upbound-system deployments/spaces-controller -c spaces \
+  -- hyperspace restore $SPACE_BACKUP $SPACE_BACKUP_CONFIG --controlplanes default/ctp1,default/ctp2 --skip-space-restore
+```
+
+
+[shared-backups]: /spaces/howtos/self-hosted/workload-id/backup-restore-config/
+[spacebackupconfig]: /reference/apis/spaces-api/v1_9
+[thanos-object-storage]: https://thanos.io/tip/thanos/storage.md/
+[spacebackupschedule]: /reference/apis/spaces-api/v1_9
+[cron-formatted]: https://en.wikipedia.org/wiki/Cron
+[spacebackup]: /reference/apis/spaces-api/v1_9
+[spaces-api-documentation]: /reference/apis/spaces-api/v1_9
+
diff --git a/spaces_versioned_docs/version-v1.14/howtos/self-hosted/gitops-with-argocd.md b/spaces_versioned_docs/version-v1.14/howtos/self-hosted/gitops-with-argocd.md
new file mode 100644
index 000000000..004247a10
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.14/howtos/self-hosted/gitops-with-argocd.md
@@ -0,0 +1,142 @@
+---
+title: GitOps with ArgoCD in Self-Hosted Spaces
+sidebar_position: 80
+description: Set up GitOps workflows with Argo CD in self-hosted Spaces
+plan: "business"
+---
+
+:::info Deployment Model
+This guide applies to **self-hosted Spaces** deployments. For Upbound Cloud Spaces, see [GitOps with Upbound Control Planes](/spaces/howtos/cloud-spaces/gitops-on-upbound/).
+:::
+
+GitOps is an approach for managing a system by declaratively describing desired resources' configurations in Git and using controllers to realize the desired state. Upbound's control planes are compatible with this pattern, and Upbound strongly recommends integrating GitOps into the platforms you build on Upbound.
+
+
+## Integrate with Argo CD
+
+
+[Argo CD][argo-cd] is a project in the Kubernetes ecosystem commonly used for
+GitOps. You can use it in tandem with Upbound control planes to achieve GitOps
+flows. The sections below explain how to integrate these tools with Upbound.
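+
+For orientation, the following is a sketch of the end state: a hypothetical Argo
+CD `Application` that syncs a Git path of control plane manifests to a control
+plane registered as a cluster named `ctp1`. The repository URL and path are
+illustrative placeholders, not values this guide prescribes:
+
+```yaml
+apiVersion: argoproj.io/v1alpha1
+kind: Application
+metadata:
+  name: ctp1-claims
+  namespace: argocd
+spec:
+  project: default
+  source:
+    # Illustrative repository holding your Crossplane manifests
+    repoURL: https://github.com/example-org/control-plane-configs.git
+    targetRevision: main
+    path: claims/
+  destination:
+    # Name of the cluster context you register later in this guide
+    name: ctp1
+  syncPolicy:
+    automated:
+      prune: true
+      selfHeal: true
+```
+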
+
+### Configure connection secrets for control planes
+
+You can configure control planes to write their connection details to a secret.
+Do this by setting the
+[`spec.writeConnectionSecretToRef`][spec-writeconnectionsecrettoref] field in a
+control plane manifest. For example:
+
+```yaml
+apiVersion: spaces.upbound.io/v1beta1
+kind: ControlPlane
+metadata:
+  name: ctp1
+  namespace: default
+spec:
+  writeConnectionSecretToRef:
+    name: kubeconfig-ctp1
+    namespace: default
+```
+
+
+### Configure Argo CD
+
+
+To configure Argo CD for Annotation resource tracking, edit the Argo CD
+ConfigMap in the Argo CD namespace. Add `application.resourceTrackingMethod:
+annotation` to the data section as below.
+
+Next, configure the [auto respect RBAC for the Argo CD
+controller][auto-respect-rbac-for-the-argo-cd-controller-1]. By default, Argo CD
+attempts to discover some Kubernetes resource types that don't exist in a
+control plane. You must configure Argo CD to respect the cluster's RBAC rules so
+that Argo CD can sync. Add `resource.respectRBAC: normal` to the data section as
+below.
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: argocd-cm
+data:
+  ...
+  application.resourceTrackingMethod: annotation
+  resource.respectRBAC: normal
+```
+
+:::tip
+The `resource.respectRBAC` configuration above tells Argo to respect RBAC for
+_all_ cluster contexts. If you're using an Argo CD instance to manage more than
+only control planes, you should consider changing the `clusters` string match
+for the configuration to apply only to control planes. For example, if every
+control plane context name followed the convention of being named
+`controlplane-`, you could set the string match to be `controlplane-*`
+:::
+
+
+### Create a cluster context definition
+
+
+Once the control plane is ready, extract the following values from the secret
+containing the kubeconfig:
+
+```bash
+kubeconfig_content=$(kubectl get secrets kubeconfig-ctp1 -n default -o jsonpath='{.data.kubeconfig}' | base64 -d)
+server=$(echo "$kubeconfig_content" | grep 'server:' | awk '{print $2}')
+bearer_token=$(echo "$kubeconfig_content" | grep 'token:' | awk '{print $2}')
+ca_data=$(echo "$kubeconfig_content" | grep 'certificate-authority-data:' | awk '{print $2}')
+```
+
+Generate a new secret in the cluster where you installed Argo, using the prior
+values extracted:
+
+```yaml
+cat <
+
+import GlobalLanguageSelector, { CodeBlock } from '@site/src/components/GlobalLanguageSelector';
+
+
+
+
+:::important
+This feature is only available for select Business Critical customers. You can't
+set up your own Managed Space without the assistance of Upbound. If you're
+interested in this deployment mode, please [contact us][contact].
+:::
+
+
+
+A Managed Space deployed on AWS is a single-tenant deployment of a control plane
+space in your AWS organization in an isolated sub-account. With Managed Spaces,
+you can use the same API, CLI, and Console that Upbound offers, with the benefit
+of running entirely in a cloud account that you own and Upbound manages for you.
+
+The following guide walks you through setting up a Managed Space in your AWS
+organization. If you have any questions while working through this guide,
+contact your Upbound Account Representative for help.
+
+
+
+
+
+A Managed Space deployed on GCP is a single-tenant deployment of a control plane
+space in your GCP organization in an isolated project.
With Managed Spaces, you
+can use the same API, CLI, and Console that Upbound offers, with the benefit of
+running entirely in a cloud account that you own and Upbound manages for you.
+
+The following guide walks you through setting up a Managed Space in your GCP
+organization. If you have any questions while working through this guide,
+contact your Upbound Account Representative for help.
+
+
+
+
+## Managed Space on your cloud architecture
+
+
+
+A Managed Space is a deployment of the Upbound Spaces software inside an
+Upbound-controlled sub-account in your AWS cloud environment. The Spaces
+software runs in this sub-account, orchestrated by Kubernetes. Backups and
+billing data get stored inside bucket or blob storage in the same sub-account.
+The control planes deployed and controlled by the Spaces software run on the
+Kubernetes cluster deployed into the sub-account.
+
+The diagram below illustrates the high-level architecture of Upbound Managed Spaces:
+
+![Upbound Managed Spaces arch](/img/managed-arch-aws.png)
+
+The Spaces software gets deployed on an EKS Cluster in the region of your
+choice. This EKS cluster is where your control planes are ultimately run.
+Upbound also deploys two buckets: one for collecting billing data and one for
+control plane backups.
+
+Upbound doesn't have access to other sub-accounts or to your organization-level
+settings in your cloud environment. Outside of your cloud organization, Upbound
+runs the Upbound Console, which includes the Upbound API and web application,
+including the dashboard you see at `console.upbound.io`. By default, all
+connections are encrypted, but public. You also have the option to
+use private network connectivity through [AWS PrivateLink][aws-privatelink].
+
+
+
+
+
+
+A Managed Space is a deployment of the Upbound Spaces software inside an
+Upbound-controlled project in your GCP cloud environment. The Spaces software
+runs in this project, orchestrated by Kubernetes. Backups and billing data get
+stored inside bucket or blob storage in the same project. The control planes
+deployed and controlled by the Spaces software run on the Kubernetes cluster
+deployed into the project.
+
+The diagram below illustrates the high-level architecture of Upbound Managed Spaces:
+
+![Upbound Managed Spaces arch](/img/managed-arch-gcp.png)
+
+The Spaces software gets deployed on a GKE Cluster in the region of your choice.
+This GKE cluster is where your control planes are ultimately run. Upbound also
+deploys two Cloud Storage buckets: one for collecting billing data and one for
+control plane backups.
+
+Upbound doesn't have access to other projects or to your organization-level
+settings in your cloud environment. Outside of your cloud organization, Upbound
+runs the Upbound Console, which includes the Upbound API and web application,
+including the dashboard you see at `console.upbound.io`. By default, all
+connections are encrypted, but public. You also have the option to
+use private network connectivity through [GCP Private Service
+Connect][gcp-private-service-connect].
+
+
+
+## Prerequisites
+
+- An organization created on Upbound
+
+
+
+- You should have a preexisting AWS organization to complete this guide.
+- You must create a new AWS sub-account. Read the [AWS documentation][aws-documentation] to learn how to create a new sub-account in an existing organization on AWS, or see the CLI sketch after this list.
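+
+If you prefer to script this step, the following is a minimal sketch using the AWS CLI. The email address and account name are illustrative placeholders, not values Upbound requires:
+
+```shell
+# Create a new member account in your AWS organization
+aws organizations create-account \
+  --email spaces-admin@example.com \
+  --account-name "upbound-managed-space"
+
+# Check the request status until the new account reports SUCCEEDED
+aws organizations list-create-account-status --states IN_PROGRESS SUCCEEDED
+```
+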
+ +After the sub-account information gets provided to Upbound, **don't change it +any further.** Any changes made to the sub-account or the resources created by +Upbound for the purposes of the Managed Space deployments voids the SLA you have +with Upbound. If you want to make configuration changes, contact your Upbound +Solutions Architect. + + + + + +- You should have a preexisting GCP organization with an active Cloud Billing account to complete this guide. +- You must create a new GCP project. Read the [GCP documentation][gcp-documentation] to learn how to create a new project in an existing organization on GCP. + +After the project information gets provided to Upbound, **don't change it any +further.** Any changes made to the project or the resources created by Upbound +for the purposes of the Managed Space deployments voids the SLA you have with +Upbound. If you want to make configuration changes, contact your Upbound +Solutions Architect. + + + + + +## Set up cross-account management + +Upbound supports using AWS Key Management Service with cross-account IAM +permissions. This enables the isolation of keys so the infrastructure operated +by Upbound has limited access to symmetric keys. + +In the KMS key's account, apply the baseline key policy: + +```json +{ + "Sid": "Allow Upbound to use this key", + "Effect": "Allow", + "Principal": { + "AWS": ["[Managed Space sub-account ID]"] + }, + "Action": ["kms:Encrypt", "kms:Decrypt", "kms:ReEncrypt*", "kms:GenerateDataKey*", "kms:DescribeKey"], + "Resource": "*" +} +``` + +You need another key policy to let the sub-account create persistent resources +with the KMS key: + +```json +{ + "Sid": "Allow attachment of persistent resources for an Upbound Managed Space", + "Effect": "Allow", + "Principal": { + "AWS": "[Managed Space sub-account ID]" + }, + "Action": ["kms:CreateGrant", "kms:ListGrants", "kms:RevokeGrant"], + "Resource": "*", + "Condition": { + "Bool": { + "kms:GrantIsForAWSResource": "true" + } + } +} +``` + +### Configure PrivateLink + +By default, all connections to the Upbound Console are encrypted, but public. +AWS PrivateLink is a feature that allows VPC peering whereby your traffic +doesn't traverse the public internet. To have this configured, contact your +Upbound Account Representative. + + + + + +## Enable APIs + +Enable the following APIs in the new project: + +- Kubernetes Engine API +- Cloud Resource Manager API +- Compute Engine API +- Cloud DNS API + +:::tip +Read how to enable APIs in a GCP project [here][here]. +::: + +## Create a service account + +Create a service account in the new project. Name the service account, +upbound-sa. Give the service account the following roles: + +- Compute Admin +- Project IAM Admin +- Service Account Admin +- DNS Administrator +- Editor + +Select the service account you just created. Select keys. Add a new key and +select JSON. The key gets downloaded to your machine. Save this for later. + +## Create a DNS Zone + +Create a DNS Zone, set the **Zone type** to `Public`. + +### Configure Private Service Connect + +By default, all connections to the Upbound Console are encrypted, but public. +GCP Private Service Connect is a feature that allows VPC peering whereby your +traffic doesn't traverse the public internet. To have this configured, contact +your Upbound Account Representative. + + + +## Provide information to Upbound + +Once these policies get attached to the key, tell your Upbound Account +Representative, providing them the following: + + + +- the full ARN of the KMS key. 
+- The name of the organization that you created in Upbound. Use the up CLI command, `up org list`, to see this information.
+- Confirmation of which region in AWS you want the deployment to target.
+
+
+
+
+
+- The service account JSON key
+- The NS records associated with the DNS name created in the last step.
+- The name of the organization that you created in Upbound. Use the up CLI command, `up org list`, to see this information.
+- Confirmation of which region in GCP you want the deployment to target.
+
+
+
+Once Upbound has this information, the request gets processed within one business day.
+
+## Use your Managed Space
+
+Once the Managed Space gets deployed, you can see it in the Space selector when browsing your environment on [`console.upbound.io`][console-upbound-io].
+
+
+
+
+[contact]: https://www.upbound.io/contact-us
+[aws-privatelink]: #configure-privatelink
+[aws-documentation]: https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_create.html#orgs_manage_accounts_create-new
+[gcp-private-service-connect]: #configure-private-service-connect
+[gcp-documentation]: https://cloud.google.com/resource-manager/docs/creating-managing-organization
+[here]: https://cloud.google.com/apis/docs/getting-started#enabling_apis
+[console-upbound-io]: https://console.upbound.io/
diff --git a/spaces_versioned_docs/version-v1.14/howtos/self-hosted/oidc-configuration.md b/spaces_versioned_docs/version-v1.14/howtos/self-hosted/oidc-configuration.md
new file mode 100644
index 000000000..cbef4dc42
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.14/howtos/self-hosted/oidc-configuration.md
@@ -0,0 +1,289 @@
+---
+title: Configure OIDC
+sidebar_position: 20
+description: Configure OIDC in your Space
+---
+:::important
+This guide is only applicable for administrators who've deployed self-hosted Spaces. For general RBAC in Upbound, read [Upbound RBAC][upbound-rbac].
+:::
+
+Upbound uses the Kubernetes [Structured Authentication Configuration][structured-auth-config] to validate OIDC tokens sent to the API. Upbound stores this
+configuration as a `ConfigMap` and passes it to the Upbound router
+component during installation with Helm.
+
+This guide walks you through how to create and apply an authentication
+configuration to validate Upbound with an external identity provider. Each
+section focuses on a specific part of the configuration file.
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments.
+
+For details on authentication and access control across versions, see the . For related platform authentication features, see the [Platform manual](../../../../platform/).
+:::
+
+## Creating the `AuthenticationConfiguration` file
+
+First, create a file called `config.yaml` with an `AuthenticationConfiguration`
+kind. The `AuthenticationConfiguration` is the initial authentication structure
+necessary for Upbound to communicate with your chosen identity provider.
+
+```yaml
+apiVersion: apiserver.config.k8s.io/v1beta1
+kind: AuthenticationConfiguration
+jwt:
+- issuer:
+    url: oidc-issuer-url
+    audiences:
+    - oidc-client-id
+  claimMappings: # optional
+    username:
+      claim: oidc-username-claim
+      prefix: oidc-username-prefix
+    groups:
+      claim: oidc-groups-claim
+      prefix: oidc-groups-prefix
+```
+
+
+For detailed configuration options, including the CEL-based token validation,
+review the feature [documentation][structured-auth-config].
+ + +The `AuthenticationConfiguration` allows you to configure multiple JWT +authenticators as separate issuers. + +### Configure an issuer + +The `jwt` array requires an `issuer` specification and typically contains: + +- A `username` claim mapping +- A `groups` claim mapping +Optionally, the configuration may also include: +- A set of claim validation rules +- A set of user validation rules + +The `issuer` URL must be unique across all configured authenticators. + +```yaml +issuer: + url: https://example.com + discoveryUrl: https://discovery.example.com/.well-known/openid-configuration + certificateAuthority: |- + + audiences: + - client-id-a + - client-id-b + audienceMatchPolicy: MatchAny +``` + +By default, the authenticator assumes the OIDC Discovery URL is +`{issuer.url}/.well-known/openid-configuration`. Most identity providers follow +this structure, and you can omit the `discoveryUrl` field. To use a separate +discovery service, specify the full path to the discovery endpoint in this +field. + +If the CA for the Issuer isn't public, provide the PEM encoded CA for the Discovery URL. + +At least one of the `audiences` entries must match the `aud` claim in the JWT. +For OIDC tokens, this is the Client ID of the application attempting to access +the Upbound API. Having multiple values set allows the same configuration to +apply to multiple client applications, for example the `kubectl` CLI and an +Internal Developer Portal. + +If you specify multiple `audiences` , `audienceMatchPolicy` must equal `MatchAny`. + +### Configure `claimMappings` + +#### Username claim mapping + +By default, the authenticator uses the `sub` claim as the user name. To override this, either: + +- specify *both* `claim` and `prefix`. `prefix` may be explicitly set to the empty string. +or + +- specify a CEL `expression` to calculate the user name. + +```yaml +claimMappings: + username: + claim: "sub" + prefix: "keycloak" + # + expression: 'claims.username + ":external-user"' +``` + + +#### Groups claim mapping + +By default, this configuration doesn't map groups, unless you either: + +- specify both `claim` and `prefix`. `prefix` may be explicitly set to the empty string. +or + +- specify a CEL `expression` that returns a string or list of strings. + + +```yaml +claimMappings: + groups: + claim: "groups" + prefix: "" + # + expression: 'claims.roles.split(",")' +``` + + +### Validation rules + + +Validation rules are outside the scope of this document. Review the +[documentation][structured-auth-config] for more information. Examples include +using CEL expressions to validate authentication such as: + + +- Validating that a token claim has a specific value +- Validating that a token has a limited lifetime +- Ensuring usernames and groups don't contain reserved prefixes + +## Required claims + +To interact with Space and ControlPlane APIs, users must have the `upbound.io/aud` claim set to one of the following: + +| Upbound.io Audience | Notes | +| -------------------------------------------------------- | -------------------------------------------------------------------- | +| `[]` | No Access to Space-level or ControlPlane APIs | +| `['upbound:spaces:api']` | This Identity is only for Space-level APIs | +| `['upbound:spaces:controlplanes']` | This Identity is only for ControlPlane APIs | +| `['upbound:spaces:api', 'upbound:spaces:controlplanes']` | This Identity is for both Space-level and ControlPlane APIs | + + +You can set this claim in two ways: + +- In the identity provider mapped in the ID token. 
+- In the authenticator, injected with the `jwt.claimMappings.extra` array.
+
+For example:
+
+```yaml
+apiVersion: apiserver.config.k8s.io/v1beta1
+kind: AuthenticationConfiguration
+jwt:
+- issuer:
+    url: https://keycloak:8443/realms/master
+    certificateAuthority: |-
+      <PEM-encoded CA bundle>
+    audiences:
+    - master-realm
+    audienceMatchPolicy: MatchAny
+  claimMappings:
+    username:
+      claim: "preferred_username"
+      prefix: "keycloak:"
+    groups:
+      claim: "groups"
+      prefix: ""
+    extra:
+    - key: 'upbound.io/aud'
+      valueExpression: "['upbound:spaces:controlplanes', 'upbound:spaces:api']"
+```
+
+## Install the `AuthenticationConfiguration`
+
+Once you create an `AuthenticationConfiguration` file, specify this file as a
+`ConfigMap` in the host cluster for the Upbound Space.
+
+```sh
+kubectl create configmap <configmap-name> -n upbound-system --from-file=config.yaml=./path/to/config.yaml
+```
+
+To enable OIDC authentication and disable Upbound IAM when installing the Space,
+reference the configuration and pass an empty value to the Upbound IAM issuer
+parameter:
+
+```sh
+up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
+  ...
+  --set "authentication.structuredConfig=<configmap-name>" \
+  --set "router.controlPlane.extraArgs[0]=--upbound-iam-issuer-url="
+```
+
+## Configure RBAC
+
+In this scenario, the external identity provider handles authentication, but
+permissions for Spaces and ControlPlane APIs use standard RBAC objects.
+
+### Spaces APIs
+
+The Spaces APIs include:
+
+```yaml
+- apiGroups:
+  - spaces.upbound.io
+  resources:
+  - controlplanes
+  - sharedexternalsecrets
+  - sharedsecretstores
+  - backups
+  - backupschedules
+  - sharedbackups
+  - sharedbackupconfigs
+  - sharedbackupschedules
+- apiGroups:
+  - observability.spaces.upbound.io
+  resources:
+  - sharedtelemetryconfigs
+```
+
+### ControlPlane APIs
+
+Crossplane specifies three [roles][crossplane-managed-clusterroles] for a
+ControlPlane: admin, editor, and viewer. These map to the verbs `admin`, `edit`,
+and `view` on the `controlplanes/k8s` resource in the `spaces.upbound.io` API
+group.
+
+### Control access
+
+The `groups` claim in the `AuthenticationConfiguration` allows you to control
+resource access when you create a `ClusterRoleBinding`. A `ClusterRole` defines
+the role parameters, and a `ClusterRoleBinding` grants the role to a set of subjects.
+
+The example below allows `admin` permissions for all ControlPlanes to members of
+the `ctp-admins` group:
+
+```yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: allow-ctp-admin
+rules:
+- apiGroups:
+  - spaces.upbound.io
+  resources:
+  - controlplanes/k8s
+  verbs:
+  - admin
+```
+
+The following `ClusterRoleBinding` grants the role to the `ctp-admins` group:
+
+```yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: allow-ctp-admin
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: allow-ctp-admin
+subjects:
+- apiGroup: rbac.authorization.k8s.io
+  kind: Group
+  name: ctp-admins
+```
+
+[structured-auth-config]: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#using-authentication-configuration
+[crossplane-managed-clusterroles]: https://github.com/crossplane/crossplane/blob/master/design/design-doc-rbac-manager.md#managed-rbac-clusterroles
+[upbound-rbac]: /manuals/platform/concepts/authorization/upbound-rbac
diff --git a/spaces_versioned_docs/version-v1.14/howtos/self-hosted/proxies-config.md b/spaces_versioned_docs/version-v1.14/howtos/self-hosted/proxies-config.md
new file mode 100644
index 000000000..3802e4cb0
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.14/howtos/self-hosted/proxies-config.md
@@ -0,0 +1,31 @@
+---
+title: Proxied configuration
+sidebar_position: 20
+description: Configure Upbound within a proxied environment
+---
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments in proxied environments. Proxy configuration procedures are consistent across versions.
+
+For version-specific deployment considerations, see the [Spaces release notes](/reference/release-notes/spaces).
+:::
+
+When you install Upbound with Helm in a proxied environment, update the registry settings shown below to point to your internal registry.
+
+```bash
+helm -n upbound-system upgrade --install spaces \
+  oci://xpkg.upbound.io/spaces-artifacts/spaces \
+  --version "${SPACES_VERSION}" \
+  --set "ingress.host=${SPACES_ROUTER_HOST}" \
+  --set "account=${UPBOUND_ACCOUNT}" \
+  --set "authentication.hubIdentities=true" \
+  --set "authorization.hubRBAC=true" \
+  --set "registry=registry.company.corp/spaces" \
+  --set "controlPlanes.uxp.registryOverride=registry.company.corp/xpkg.upbound.io" \
+  --set "controlPlanes.uxp.repository=registry.company.corp/spaces" \
+  --wait
+```
diff --git a/spaces_versioned_docs/version-v1.14/howtos/self-hosted/query-api.md b/spaces_versioned_docs/version-v1.14/howtos/self-hosted/query-api.md
new file mode 100644
index 000000000..c112e9001
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.14/howtos/self-hosted/query-api.md
@@ -0,0 +1,396 @@
+---
+title: Deploy Query API infrastructure
+weight: 130
+description: Query API
+aliases:
+  - /all-spaces/self-hosted-spaces/query-api
+  - /self-hosted-spaces/query-api
+  - all-spaces/self-hosted-spaces/query-api
+---
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+). Query API has evolved across versions:
+
+- **Cloud Spaces**: Available since v1.6 (enabled by default)
+- **Self-Hosted**: Available since v1.8 (requires manual enablement)
+
+For details on Query API availability across versions, see the [Spaces release notes](/reference/release-notes/spaces).
+:::
+
+:::important
+
+This feature is in preview. The Query API is available in the Cloud Space offering in `v1.6` and enabled by default.
+
+Since `v1.8.0`, this feature is a requirement for connecting a Space. It's off by default in self-hosted Spaces; see below to enable it.
+
+:::
+
+Upbound's Query API allows users to inspect objects and resources within their control planes. The read-only `up alpha query` and `up alpha get` CLI commands allow you to gather information on your control planes in a fast and efficient package. These commands follow the [`kubectl` conventions][kubectl-conventions] for filtering, sorting, and retrieving information from your Space.
+
+The Query API requires a PostgreSQL database to store the data. You can use the default PostgreSQL instance provided by Upbound or bring your own PostgreSQL instance.
+
+## Managed setup
+
+:::tip
+If you don't have specific requirements for your setup, Upbound recommends following this approach.
+:::
+
+To enable this feature, set `features.alpha.apollo.enabled=true` and `apollo.apollo.storage.postgres.create=true` when installing Spaces.
+
+You also need to install CloudNativePG (`CNPG`) to provide the PostgreSQL instance. You can let the `up` CLI do this for you, or install it manually.
+
+For more customization, see the [Helm chart reference][helm-chart-reference]. You can modify the number
+of PostgreSQL instances, pooling instances, storage size, and more.
+
+If you have specific requirements not addressed in the Helm chart, see below for more information on how to bring your own [PostgreSQL setup][postgresql-setup].
+
+### Using the up CLI
+
+Before you begin, make sure you have the most recent version of the [`up` CLI installed][up-cli-installed].
+
+To enable this feature, set `features.alpha.apollo.enabled=true` and `apollo.apollo.storage.postgres.create=true` when installing Spaces:
+
+```bash
+up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
+  ...
+  --set "features.alpha.apollo.enabled=true" \
+  --set "apollo.apollo.storage.postgres.create=true"
+```
+
+`up space init` and `up space upgrade` install CloudNativePG automatically, if needed.
+
+### Helm chart
+
+If you are installing the Helm chart in some other way, you can manually install CloudNativePG in one of the [supported ways][supported-ways], for example:
+
+```shell
+kubectl apply --server-side -f \
+  https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.24.1/cnpg-1.24.1.yaml
+kubectl rollout status -n cnpg-system deployment cnpg-controller-manager -w --timeout 120s
+```
+
+Next, install the Spaces Helm chart with the necessary values, for example:
+
+```shell
+helm -n upbound-system upgrade --install spaces \
+  oci://xpkg.upbound.io/spaces-artifacts/spaces \
+  --version "${SPACES_VERSION}" \
+  ...
+  --set "features.alpha.apollo.enabled=true" \
+  --set "apollo.apollo.storage.postgres.create=true" \
+  --wait
+```
+
+## Self-hosted PostgreSQL configuration
+
+If your workflow requires more customization, you can provide your own
+PostgreSQL instance and configure credentials manually.
+
+Using your own PostgreSQL instance requires careful architecture consideration.
+Review the architecture and requirements guidelines.
+
+### Architecture
+
+In addition to a PostgreSQL database, the Query API architecture uses the following components:
+
+* **Apollo Syncers**: Watch `etcd` for changes and sync them to PostgreSQL. One or more per control plane.
+* **Apollo Server**: Serves the Query API from the data in PostgreSQL. One or more per Space.
+
+The default setup also uses the `PgBouncer` connection pooler to manage connections from the syncers.
+```mermaid
+graph LR
+    User[User]
+
+    subgraph Cluster["Cluster (Spaces)"]
+        direction TB
+        Apollo[apollo]
+
+        subgraph ControlPlanes["Control Planes"]
+            APIServer[API Server]
+            Syncer[apollo-syncer]
+        end
+    end
+
+    PostgreSQL[(PostgreSQL)]
+
+    User -->|requests| Apollo
+
+    Apollo -->|connects| PostgreSQL
+    Apollo -->|creates schemas & users| PostgreSQL
+
+    Syncer -->|watches| APIServer
+    Syncer -->|writes| PostgreSQL
+
+    PostgreSQL -->|data| Apollo
+
+    style PostgreSQL fill:#e1f5ff,stroke:#333,stroke-width:2px,color:#000
+    style Apollo fill:#ffe1e1,stroke:#333,stroke-width:2px,color:#000
+    style Cluster fill:#f0f0f0,stroke:#333,stroke-width:2px,color:#000
+    style ControlPlanes fill:#fff,stroke:#666,stroke-width:1px,stroke-dasharray: 5 5,color:#000
+```
+
+Each component needs to connect to the PostgreSQL database.
+
+In the event of database issues, you can provide a new database and the syncers
+automatically repopulate the data.
+
+### Requirements
+
+* A PostgreSQL 16 instance or cluster.
+* A database, for example named `upbound`.
+* **Optional**: A dedicated user for the Apollo Syncers (for example, `syncer`); otherwise the Spaces Controller generates a dedicated set of credentials per syncer with the necessary permissions.
+* A dedicated **superuser or admin account** for the Apollo Server.
+* **Optional**: A connection pooler, like PgBouncer, to manage connections from the Apollo Syncers. If you didn't provide the optional users, you might have to configure the pooler to allow users to connect using the same credentials as PostgreSQL.
+* **Optional**: A read replica for the Apollo Syncers to connect to, to reduce load on the primary database; this might cause a slight delay in the data being available through the Query API.
+
+Below are example setups to get you started; you can mix and match them to suit your needs.
+
+### In-cluster setup
+
+:::tip
+
+If you don't have strong opinions on your setup, but still want full control over
+the resources created for some unsupported customizations, Upbound recommends
+the in-cluster setup.
+
+:::
+
+For more customization than the managed setup, you can use CloudNativePG for
+PostgreSQL in the same cluster.
+
+For the in-cluster setup, manually deploy the operator in one of the [supported ways][supported-ways-1], for example:
+
+```shell
+kubectl apply --server-side -f \
+  https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.24.1/cnpg-1.24.1.yaml
+kubectl rollout status -n cnpg-system deployment cnpg-controller-manager -w --timeout 120s
+```
+
+Then create a `Cluster` and `Pooler` in the `upbound-system` namespace (see the
+[CloudNativePG documentation][cloudnativepg-documentation] for the manifest schema), for example:
+
+```shell
+kubectl create ns upbound-system
+
+kubectl apply -f - <<EOF
+<your CNPG Cluster and Pooler manifests>
+EOF
+```
+
+### External setup
+
+:::tip
+
+If you want to run your PostgreSQL instance outside the cluster, but are fine with credentials being managed by the `apollo` user, this is the suggested way to proceed.
+
+:::
+
+When using this setup, you must manually create the required Secrets in the
+`upbound-system` namespace. The `apollo` user must have permissions to create
+schemas and users.
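+
+For example, a user with those permissions might be provisioned like this. This
+is a hedged sketch: the host, role name, and password are placeholders to adapt
+to your environment, and the `upbound` database name follows the requirements
+above:
+
+```shell
+# CREATEROLE lets the apollo user manage per-syncer users; CREATE on the
+# database lets it manage schemas.
+psql "host=your-postgres-host user=postgres" <<'SQL'
+CREATE ROLE apollo WITH LOGIN CREATEROLE PASSWORD 'supersecret';
+GRANT CREATE ON DATABASE upbound TO apollo;
+SQL
+```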
+
+```shell
+kubectl create ns upbound-system
+
+# A Secret containing the necessary credentials to connect to the PostgreSQL instance
+kubectl create secret generic spaces-apollo-pg-app -n upbound-system \
+  --from-literal=password=supersecret
+
+# A Secret containing the necessary CA certificate to verify the connection to the PostgreSQL instance
+kubectl create secret generic spaces-apollo-pg-ca -n upbound-system \
+  --from-file=ca.crt=/path/to/ca.crt
+```
+
+Next, install Spaces with the necessary settings:
+
+```shell
+export PG_URL=your-postgres-host:5432
+export PG_POOLED_URL=your-pgbouncer-host:5432 # this could be the same as above
+
+helm upgrade --install ... \
+  --set "features.alpha.apollo.enabled=true" \
+  --set "apollo.apollo.storage.postgres.create=false" \
+  --set "apollo.apollo.storage.postgres.connection.url=$PG_URL" \
+  --set "apollo.apollo.storage.postgres.connection.credentials.secret.name=spaces-apollo-pg-app" \
+  --set "apollo.apollo.storage.postgres.connection.credentials.format=basicauth" \
+  --set "apollo.apollo.storage.postgres.connection.ca.name=spaces-apollo-pg-ca" \
+  --set "apollo.apollo.storage.postgres.connection.syncer.url=$PG_POOLED_URL"
+```
+
+### External setup with all custom credentials
+
+For custom credentials for the Apollo Syncers or Server, create additional secrets in the
+`upbound-system` namespace:
+
+```shell
+export APOLLO_SYNCER_USER=syncer
+export APOLLO_SERVER_USER=apollo
+
+kubectl create ns upbound-system
+
+# A Secret containing the necessary credentials to connect to the PostgreSQL instance
+kubectl create secret generic spaces-apollo-pg-app -n upbound-system \
+  --from-literal=password=supersecret
+
+# A Secret containing the necessary CA certificate to verify the connection to the PostgreSQL instance
+kubectl create secret generic spaces-apollo-pg-ca -n upbound-system \
+  --from-file=ca.crt=/path/to/ca.crt
+
+# A Secret containing the necessary credentials for the Apollo Syncers to connect to the PostgreSQL instance.
+# These will be used by all Syncers in the Space.
+kubectl create secret generic spaces-apollo-pg-syncer -n upbound-system \
+  --from-literal=username=$APOLLO_SYNCER_USER \
+  --from-literal=password=supersecret
+
+# A Secret containing the necessary credentials for the Apollo Server to connect to the PostgreSQL instance.
+kubectl create secret generic spaces-apollo-pg-apollo -n upbound-system \
+  --from-literal=username=$APOLLO_SERVER_USER \
+  --from-literal=password=supersecret
+```
+
+Next, install Spaces with the necessary settings:
+
+```shell
+export PG_URL=your-postgres-host:5432
+export PG_POOLED_URL=your-pgbouncer-host:5432 # this could be the same as above
+
+# The shared connection settings come first, followed by the credentials for
+# the syncers and then the server.
+helm ... \
+  --set "features.alpha.apollo.enabled=true" \
+  --set "apollo.apollo.storage.postgres.create=false" \
+  --set "apollo.apollo.storage.postgres.connection.url=$PG_URL" \
+  --set "apollo.apollo.storage.postgres.connection.credentials.secret.name=spaces-apollo-pg-app" \
+  --set "apollo.apollo.storage.postgres.connection.credentials.format=basicauth" \
+  --set "apollo.apollo.storage.postgres.connection.ca.name=spaces-apollo-pg-ca" \
+  --set "apollo.apollo.storage.postgres.connection.syncer.url=$PG_POOLED_URL" \
+  --set "apollo.apollo.storage.postgres.connection.syncer.credentials.format=basicauth" \
+  --set "apollo.apollo.storage.postgres.connection.syncer.credentials.user=$APOLLO_SYNCER_USER" \
+  --set "apollo.apollo.storage.postgres.connection.syncer.credentials.secret.name=spaces-apollo-pg-syncer" \
+  --set "apollo.apollo.storage.postgres.connection.apollo.credentials.format=basicauth" \
+  --set "apollo.apollo.storage.postgres.connection.apollo.credentials.user=$APOLLO_SERVER_USER" \
+  --set "apollo.apollo.storage.postgres.connection.apollo.credentials.secret.name=spaces-apollo-pg-apollo" \
+  --set "apollo.apollo.storage.postgres.connection.apollo.url=$PG_POOLED_URL"
+```
+
+## Using the Query API
+
+See the [Query API documentation][query-api-documentation] for more information on how to use the Query API.
+
+[postgresql-setup]: #self-hosted-postgresql-configuration
+[up-cli-installed]: /manuals/cli/overview
+[query-api-documentation]: /spaces/howtos/query-api
+[helm-chart-reference]: /reference/helm-reference
+[kubectl-conventions]: https://kubernetes.io/docs/reference/kubectl/generated/kubectl_get/
+[supported-ways]: https://cloudnative-pg.io/documentation/current/installation_upgrade/
+[supported-ways-1]: https://cloudnative-pg.io/documentation/current/installation_upgrade/
+[cloudnativepg-documentation]: https://cloudnative-pg.io/documentation/1.24/storage/#configuration-via-a-pvc-template
+[postgresql-cluster]: https://cloudnative-pg.io/documentation/1.24/resource_management/
+[pooler]: https://cloudnative-pg.io/documentation/1.24/connection_pooling/#pod-templates
+[postgresql-cluster-2]: https://cloudnative-pg.io/documentation/1.24/replication/
+[pooler-3]: https://cloudnative-pg.io/documentation/1.24/connection_pooling/#high-availability-ha
+[postgresql-cluster-4]: https://cloudnative-pg.io/documentation/1.24/operator_capability_levels/#override-of-operand-images-through-the-crd
+[pooler-5]: https://cloudnative-pg.io/documentation/1.24/connection_pooling/#pod-templates
+[cloudnativepg-documentation-6]: https://cloudnative-pg.io/documentation/1.24/postgresql_conf/
diff --git a/spaces_versioned_docs/version-v1.14/howtos/self-hosted/scaling-resources.md b/spaces_versioned_docs/version-v1.14/howtos/self-hosted/scaling-resources.md
new file mode 100644
index 000000000..7bb04d2c2
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.14/howtos/self-hosted/scaling-resources.md
@@ -0,0 +1,184 @@
+---
+title: Scaling vCluster and etcd Resources
+weight: 950
+description: A guide for scaling vCluster and etcd resources in self-hosted Spaces
+aliases:
+  - /all-spaces/self-hosted-spaces/scaling-resources
+  - /spaces/scaling-resources
+---
+
+With large workloads or during control plane migration, you may encounter
+performance-impacting resource constraints. This guide explains how to scale vCluster and `etcd`
+resources for optimal performance in your self-hosted Space.
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Scaling procedures are consistent across versions.
+
+For version-specific resource requirements and capacity planning, see the [Spaces release notes](/reference/release-notes/spaces).
+:::
+
+## Signs of resource constraints
+
+You may need to scale your vCluster or `etcd` resources if you observe:
+
+- API server timeout errors such as `http: Handler timeout`
+- Error messages about `too many requests` and requests to `try again later`
+- Operations like provider installation failing with errors like `cannot apply provider package secret`
+- vCluster pods experiencing continuous restarts
+- API performance degrading with high resource volume
+
+## Scaling vCluster resources
+
+The vCluster component handles Kubernetes API requests for your control planes.
+Deployments with multiple control planes or providers may exceed default resource allocations.
+
+```yaml
+# Default settings
+controlPlanes.vcluster.resources.limits.cpu: "3000m"
+controlPlanes.vcluster.resources.limits.memory: "3960Mi"
+controlPlanes.vcluster.resources.requests.cpu: "170m"
+controlPlanes.vcluster.resources.requests.memory: "1320Mi"
+```
+
+For larger workloads, like migrating from an existing control plane with several
+providers, increase these resource limits in your Spaces `values.yaml` file.
+
+```yaml
+controlPlanes:
+  vcluster:
+    resources:
+      limits:
+        cpu: "4000m"   # Increase to 4 cores
+        memory: "6Gi"  # Increase to 6GB memory
+      requests:
+        cpu: "500m"    # Increase baseline CPU request
+        memory: "2Gi"  # Increase baseline memory request
+```
+
+## Scaling `etcd` storage
+
+Kubernetes performance depends heavily on `etcd`, which can run into IOPS (input/output
+operations per second) bottlenecks. Upbound allocates `50Gi` volumes for `etcd`
+in cloud environments to ensure adequate IOPS performance.
+
+```yaml
+# Default setting
+controlPlanes.etcd.persistence.size: "5Gi"
+```
+
+For production environments or when migrating large control planes, increase
+the `etcd` volume size and specify an appropriate storage class:
+
+```yaml
+controlPlanes:
+  etcd:
+    persistence:
+      size: "50Gi"                 # Recommended for production
+      storageClassName: "fast-ssd" # Use a high-performance storage class
+```
+
+### Storage class considerations
+
+For AWS:
+
+- Use GP3 volumes with adequate IOPS
+- For AWS GP3 volumes, IOPS scale with volume size (3,000 IOPS baseline)
+- For optimal performance, provision at least 32Gi to support up to 16,000 IOPS
+
+For GCP and Azure:
+
+- Use SSD-based persistent disk types for optimal performance
+- Consider premium storage options for high-throughput workloads
+
+## Scaling Crossplane resources
+
+Crossplane manages provider resources in your control planes. You may need to increase provider resources for larger deployments:
+
+```yaml
+# Default settings
+controlPlanes.uxp.resourcesCrossplane.requests.cpu: "370m"
+controlPlanes.uxp.resourcesCrossplane.requests.memory: "400Mi"
+```
+
+For environments with many providers or managed resources:
+
+```yaml
+controlPlanes:
+  uxp:
+    resourcesCrossplane:
+      limits:
+        cpu: "1000m"    # Add CPU limit
+        memory: "1Gi"   # Add memory limit
+      requests:
+        cpu: "500m"     # Increase CPU request
+        memory: "512Mi" # Increase memory request
+```
+
+## High availability configuration
+
+For production environments, enable High Availability mode to ensure resilience:
+
+```yaml
+controlPlanes:
+  ha:
+    enabled: true
+```
+
+## Best practices for migration scenarios
+
+When migrating from existing control planes into a self-hosted Space:
+
+1. **Pre-scale resources**: Scale up resources before performing the migration
+2. **Monitor resource usage**: Watch resource consumption during and after migration with `kubectl top pods`
+3. **Scale incrementally**: If issues persist, increase resources incrementally until performance stabilizes
+4. **Consider storage performance**: `etcd` is sensitive to storage I/O performance
+
+## Helm values configuration
+
+Apply these settings through your Spaces Helm values file:
+
+```yaml
+controlPlanes:
+  vcluster:
+    resources:
+      limits:
+        cpu: "4000m"
+        memory: "6Gi"
+      requests:
+        cpu: "500m"
+        memory: "2Gi"
+  etcd:
+    persistence:
+      size: "50Gi"
+      storageClassName: "gp3" # Use your cloud provider's fast storage class
+  uxp:
+    resourcesCrossplane:
+      limits:
+        cpu: "1000m"
+        memory: "1Gi"
+      requests:
+        cpu: "500m"
+        memory: "512Mi"
+  ha:
+    enabled: true # Enable for production environments
+```
+
+Apply the configuration using Helm:
+
+```bash
+helm upgrade --install spaces oci://xpkg.upbound.io/spaces-artifacts/spaces \
+  -f values.yaml \
+  -n upbound-system
+```
+
+## Considerations
+
+- **Provider count**: Each provider adds resource overhead; consider using provider families to optimize resource usage
+- **Managed resources**: The number of managed resources impacts CPU usage more than memory
+- **Vertical pod autoscaling**: Consider using vertical pod autoscaling in Kubernetes to automatically adjust resources based on usage
+- **Storage performance**: Storage performance is as important as capacity for `etcd`
+- **Network latency**: Low-latency connections between components improve performance
diff --git a/spaces_versioned_docs/version-v1.14/howtos/self-hosted/self-hosted-spaces-deployment.md b/spaces_versioned_docs/version-v1.14/howtos/self-hosted/self-hosted-spaces-deployment.md
new file mode 100644
index 000000000..e549e3939
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.14/howtos/self-hosted/self-hosted-spaces-deployment.md
@@ -0,0 +1,461 @@
+---
+title: Deployment Workflow
+sidebar_position: 3
+description: A quickstart guide for Upbound Spaces
+tier: "business"
+---
+import GlobalLanguageSelector, { CodeBlock } from '@site/src/components/GlobalLanguageSelector';
+
+This guide deploys a self-hosted Upbound Space in AWS.
+
+This guide deploys a self-hosted Upbound Space in Azure.
+
+This guide deploys a self-hosted Upbound Space in GCP.
+
+Disconnected Spaces allow you to host control planes in your preferred environment.
+
+## Prerequisites
+
+To get started deploying your own Disconnected Space, you need:
+
+- An Upbound organization account string, provided by your Upbound account representative
+- A `token.json` license, provided by your Upbound account representative
+
+- An AWS account and the AWS CLI
+
+- An Azure account and the Azure CLI
+
+- A GCP account and the gcloud CLI
+
+:::important
+Disconnected Spaces are a business-critical feature of Upbound and require a license token to successfully complete the installation. [Contact Upbound][contact-upbound] if you want to try out Upbound with Disconnected Spaces.
+:::
+
+## Provision the hosting environment
+
+### Create a cluster
+
+Configure the name and target region you want the EKS cluster deployed to.
+
+```ini
+export SPACES_CLUSTER_NAME=upbound-space-quickstart
+export SPACES_REGION=us-east-1
+```
+
+Provision a 3-node cluster using eksctl.
+
+```bash
+cat <<EOF | eksctl create cluster -f -
+<your eksctl ClusterConfig for a 3-node cluster>
+EOF
+```
+
+Configure the name and target region you want the AKS cluster deployed to.
+
+```ini
+export SPACES_RESOURCE_GROUP_NAME=upbound-space-quickstart
+export SPACES_CLUSTER_NAME=upbound-space-quickstart
+export SPACES_LOCATION=westus
+```
+
+Provision a new Azure resource group.
+
+```bash
+az group create --name ${SPACES_RESOURCE_GROUP_NAME} --location ${SPACES_LOCATION}
+```
+
+Provision a 3-node cluster.
+
+```bash
+az aks create -g ${SPACES_RESOURCE_GROUP_NAME} -n ${SPACES_CLUSTER_NAME} \
+  --enable-managed-identity \
+  --node-count 3 \
+  --node-vm-size Standard_D4s_v4 \
+  --enable-addons monitoring \
+  --enable-msi-auth-for-monitoring \
+  --generate-ssh-keys \
+  --network-plugin kubenet \
+  --network-policy calico
+```
+
+Get the kubeconfig of your AKS cluster.
+
+```bash
+az aks get-credentials --resource-group ${SPACES_RESOURCE_GROUP_NAME} --name ${SPACES_CLUSTER_NAME}
+```
+
+Configure the name and target region you want the GKE cluster deployed to.
+
+```ini
+export SPACES_PROJECT_NAME=upbound-spaces-project
+export SPACES_CLUSTER_NAME=upbound-spaces-quickstart
+export SPACES_LOCATION=us-west1-a
+```
+
+Create a new project and set it as the current project.
+
+```bash
+gcloud projects create ${SPACES_PROJECT_NAME}
+gcloud config set project ${SPACES_PROJECT_NAME}
+```
+
+Provision a 3-node cluster.
+
+```bash
+gcloud container clusters create ${SPACES_CLUSTER_NAME} \
+  --enable-network-policy \
+  --num-nodes=3 \
+  --zone=${SPACES_LOCATION} \
+  --machine-type=e2-standard-4
+```
+
+Get the kubeconfig of your GKE cluster.
+
+```bash
+gcloud container clusters get-credentials ${SPACES_CLUSTER_NAME} --zone=${SPACES_LOCATION}
+```
+
+## Configure the pre-install
+
+### Set your Upbound organization account details
+
+Set your Upbound organization account string as an environment variable for use in future steps.
+
+```ini
+export UPBOUND_ACCOUNT=<your-upbound-org>
+```
+
+### Set up pre-install configurations
+
+Export the path of the license token JSON file provided by your Upbound account representative.
+
+```ini {copy-lines="2"}
+# Change the path to where you saved the token.
+export SPACES_TOKEN_PATH="/path/to/token.json"
+```
+
+Set the version of Spaces software you want to install.
+
+```ini
+export SPACES_VERSION=<spaces-version>
+```
+
+Set the router host. The `SPACES_ROUTER_HOST` is the domain name that's used to access the control plane instances. It's used by the ingress controller to route requests.
+
+```ini
+export SPACES_ROUTER_HOST="proxy.upbound-127.0.0.1.nip.io"
+```
+
+:::important
+Make sure to replace the placeholder text in `SPACES_ROUTER_HOST` and provide a real domain that you own.
+:::
+
+## Install the Spaces software
+
+### Install cert-manager
+
+Install cert-manager.
+
+```bash
+kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.11.0/cert-manager.yaml
+kubectl wait deployment -n cert-manager cert-manager-webhook --for condition=Available=True --timeout=360s
+```
+
+### Install the AWS Load Balancer Controller
+
+```bash
+helm install aws-load-balancer-controller aws-load-balancer-controller --namespace kube-system \
+  --repo https://aws.github.io/eks-charts \
+  --set clusterName=${SPACES_CLUSTER_NAME} \
+  --set serviceAccount.create=false \
+  --set serviceAccount.name=aws-load-balancer-controller \
+  --wait
+```
+
+### Install ingress-nginx
+
+Starting with Spaces v1.10.0, you need to configure the ingress-nginx
+controller to allow SSL-passthrough mode. You can do so by passing the
+`--enable-ssl-passthrough=true` command-line option to the controller.
+The following Helm install command enables this with the `controller.extraArgs`
+parameter:
+
+```bash
+helm upgrade --install ingress-nginx ingress-nginx \
+  --create-namespace --namespace ingress-nginx \
+  --repo https://kubernetes.github.io/ingress-nginx \
+  --version 4.12.1 \
+  --set 'controller.service.type=LoadBalancer' \
+  --set 'controller.extraArgs.enable-ssl-passthrough=true' \
+  --set 'controller.service.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-type=external' \
+  --set 'controller.service.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-scheme=internet-facing' \
+  --set 'controller.service.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-nlb-target-type=ip' \
+  --set 'controller.service.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-healthcheck-protocol=http' \
+  --set 'controller.service.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-healthcheck-path=/healthz' \
+  --set 'controller.service.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-healthcheck-port=10254' \
+  --wait
+```
+
+```bash
+helm upgrade --install ingress-nginx ingress-nginx \
+  --create-namespace --namespace ingress-nginx \
+  --repo https://kubernetes.github.io/ingress-nginx \
+  --version 4.12.1 \
+  --set 'controller.service.type=LoadBalancer' \
+  --set 'controller.extraArgs.enable-ssl-passthrough=true' \
+  --set 'controller.service.annotations.service\.beta\.kubernetes\.io/azure-load-balancer-health-probe-request-path=/healthz' \
+  --wait
+```
+
+```bash
+helm upgrade --install ingress-nginx ingress-nginx \
+  --create-namespace --namespace ingress-nginx \
+  --repo https://kubernetes.github.io/ingress-nginx \
+  --version 4.12.1 \
+  --set 'controller.service.type=LoadBalancer' \
+  --set 'controller.extraArgs.enable-ssl-passthrough=true' \
+  --wait
+```
+
+### Install Upbound Spaces software
+
+Create an image pull secret so that the cluster can pull Upbound Spaces images.
+
+```bash
+kubectl create ns upbound-system
+kubectl -n upbound-system create secret docker-registry upbound-pull-secret \
+  --docker-server=https://xpkg.upbound.io \
+  --docker-username="$(jq -r .accessId $SPACES_TOKEN_PATH)" \
+  --docker-password="$(jq -r .token $SPACES_TOKEN_PATH)"
+```
+
+Log in with Helm to be able to pull chart images for the installation commands.
+
+```bash
+jq -r .token $SPACES_TOKEN_PATH | helm registry login xpkg.upbound.io -u $(jq -r .accessId $SPACES_TOKEN_PATH) --password-stdin
+```
+
+Install the Spaces software.
+
+```bash
+helm -n upbound-system upgrade --install spaces \
+  oci://xpkg.upbound.io/spaces-artifacts/spaces \
+  --version "${SPACES_VERSION}" \
+  --set "ingress.host=${SPACES_ROUTER_HOST}" \
+  --set "account=${UPBOUND_ACCOUNT}" \
+  --set "authentication.hubIdentities=true" \
+  --set "authorization.hubRBAC=true" \
+  --wait
+```
+
+### Create a DNS record
+
+:::important
+If you chose to create a public ingress, you also need to create a DNS record for the load balancer of the public-facing ingress. Do this before you create your first control plane.
+:::
+
+Create a DNS record for the load balancer of the public-facing ingress.
To get the address for the Ingress, run the following:
+
+```bash
+kubectl get ingress \
+  -n upbound-system mxe-router-ingress \
+  -o jsonpath='{.status.loadBalancer.ingress[0].hostname}'
+```
+
+```bash
+kubectl get ingress \
+  -n upbound-system mxe-router-ingress \
+  -o jsonpath='{.status.loadBalancer.ingress[0].ip}'
+```
+
+```bash
+kubectl get ingress \
+  -n upbound-system mxe-router-ingress \
+  -o jsonpath='{.status.loadBalancer.ingress[0].ip}'
+```
+
+If the preceding command doesn't return a load balancer address, your provider may not have allocated it yet. Once it's available, add a DNS record for the `SPACES_ROUTER_HOST` to point to the given load balancer address. If it's an IPv4 address, add an A record. If it's a domain name, add a CNAME record.
+
+## Configure the up CLI
+
+With your kubeconfig pointed at the Kubernetes cluster where you installed
+Upbound Spaces, create a new profile in the `up` CLI. This profile interacts
+with your Space:
+
+```bash
+up profile create --use ${SPACES_CLUSTER_NAME} --type=disconnected --organization ${UPBOUND_ACCOUNT}
+```
+
+Optionally, log in to your Upbound account using the new profile so you can use the Upbound Marketplace with this profile as well:
+
+```bash
+up login
+```
+
+## Connect to your Space
+
+Use `up ctx` to create a kubeconfig context pointed at your new Space:
+
+```bash
+up ctx disconnected/$(kubectl config current-context)
+```
+
+## Create your first control plane
+
+You can now create a control plane with the `up` CLI:
+
+```bash
+up ctp create ctp1
+```
+
+You can also create a control plane with kubectl:
+
+```yaml
+cat <<EOF | kubectl apply -f -
+apiVersion: spaces.upbound.io/v1beta1
+kind: ControlPlane
+metadata:
+  name: ctp1
+  namespace: default
+EOF
+```
+```yaml
+observability:
+  spacesCollector:
+    env:
+    - name: API_KEY
+      valueFrom:
+        secretKeyRef:
+          name: my-secret
+          key: api-key
+    config:
+      exporters:
+        otlphttp:
+          endpoint: ""
+          headers:
+            api-key: ${env:API_KEY}
+      exportPipeline:
+        logs:
+        - otlphttp
+        metrics:
+        - otlphttp
+        traces:
+        - otlphttp
+```
+
+You can export metrics, logs, and traces from your Crossplane installation, Spaces
+infrastructure (controller, API, router, etc.), provider-helm, and
+provider-kubernetes.
+
+### Router metrics
+
+The Spaces router component uses Envoy as a reverse proxy and exposes detailed
+metrics about request handling, circuit breakers, and connection pooling.
+Upbound collects these metrics in your Space after you enable Space-level
+observability.
+
+Envoy metrics in Upbound include:
+
+- **Upstream cluster metrics** - Request status codes, timeouts, retries, and latency for traffic to control planes and services
+- **Circuit breaker metrics** - Connection and request circuit breaker state for both `DEFAULT` and `HIGH` priority levels
+- **Downstream listener metrics** - Client connections and requests received
+- **HTTP connection manager metrics** - End-to-end HTTP request processing and latency
+
+For a complete list of available router metrics and example PromQL queries, see the [Router metrics reference][router-ref].
+
+### Router tracing
+
+The Spaces router generates distributed traces through OpenTelemetry integration,
+providing end-to-end visibility into request flow across the system. Use these
+traces to debug latency issues, understand request paths, and correlate errors
+across services.
+
+The router uses:
+
+- **Protocol**: OTLP (OpenTelemetry Protocol) over gRPC
+- **Service name**: `spaces-router`
+- **Transport**: TLS-encrypted connection to telemetry collector
+
+#### Trace configuration
+
+Enable tracing and configure the sampling rate with the following Helm values:
+
+```yaml
+observability:
+  enabled: true
+  tracing:
+    enabled: true
+    sampling:
+      rate: 0.1 # Sample 10% of new traces (0.0-1.0)
+```
+
+The sampling behavior depends on whether a parent trace context exists:
+
+- **With parent context**: If a `traceparent` header is present, the parent's
+  sampling decision is respected, enabling proper distributed tracing across services.
+- **Root spans**: For new traces without a parent, Envoy samples based on
+  `x-request-id` hashing. The default sampling rate is 10%.
+
+#### TLS configuration for external collectors
+
+To send traces to an external OTLP collector, configure the endpoint and TLS settings:
+
+```yaml
+observability:
+  enabled: true
+  tracing:
+    enabled: true
+    endpoint: "otlp-gateway.example.com"
+    port: 443
+    tls:
+      caBundleSecretRef: "custom-ca-secret"
+```
+
+If `caBundleSecretRef` is set, the router uses the CA bundle from the referenced
+Kubernetes secret. The secret must contain a key named `ca.crt` with the
+PEM-encoded CA bundle. If not set, the router uses the Spaces CA for the
+in-cluster collector.
+
+#### Custom trace tags
+
+The router adds custom tags to every span to enable filtering and grouping by
+control plane:
+
+| Tag | Source | Description |
+|-----|--------|-------------|
+| `controlplane.id` | `x-upbound-mxp-id` header | Control plane UUID |
+| `controlplane.name` | `x-upbound-mxp-host` header | Internal vcluster hostname |
+| `hostcluster.id` | `x-upbound-hostcluster-id` header | Host cluster identifier |
+
+These tags enable queries like "show all slow requests to control plane X" or
+"find errors for control planes in host cluster Y."
+
+#### Example trace
+
+The following example shows the attributes from a successful GET request:
+
+```text
+Span: ingress
+├─ Service: spaces-router
+├─ Duration: 8.025ms
+├─ Attributes:
+│  ├─ http.method: GET
+│  ├─ http.status_code: 200
+│  ├─ upstream_cluster: ctp-b2b37aaa-ee55-492c-ba0c-4d561a6325fa-api-cluster
+│  ├─ controlplane.id: b2b37aaa-ee55-492c-ba0c-4d561a6325fa
+│  ├─ controlplane.name: vcluster.mxp-b2b37aaa-ee55-492c-ba0c-4d561a6325fa-system
+│  └─ response_size: 1827
+```
+
+## Available metrics
+
+Space-level observability collects metrics from multiple infrastructure components:
+
+### Infrastructure component metrics
+
+- Crossplane controller metrics
+- Spaces controller, API, and router metrics
+- Provider metrics (provider-helm, provider-kubernetes)
+
+### Router metrics
+
+The router component exposes Envoy proxy metrics for monitoring traffic flow and
+service health.
Key metric categories include:
+
+- `envoy_cluster_upstream_rq_*` - Upstream request metrics (status codes, timeouts, retries, latency)
+- `envoy_cluster_circuit_breakers_*` - Circuit breaker state and capacity
+- `envoy_listener_downstream_*` - Client connection and request metrics
+- `envoy_http_downstream_*` - HTTP request processing metrics
+
+Example query to monitor total request rate:
+
+```promql
+sum(rate(envoy_cluster_upstream_rq_total{job="spaces-router-envoy"}[5m]))
+```
+
+Example query for P95 latency:
+
+```promql
+histogram_quantile(
+  0.95,
+  sum by (le) (
+    rate(envoy_cluster_upstream_rq_time_bucket{job="spaces-router-envoy"}[5m])
+  )
+)
+```
+
+For detailed router metrics documentation and more query examples, see the [Router metrics reference][router-ref].
+
+## OpenTelemetryCollector image
+
+Control plane (`SharedTelemetry`) and Space observability deploy the same custom
+OpenTelemetry Collector image. The OpenTelemetry Collector image supports
+`otlphttp`, `datadog`, and `debug` exporters.
+
+For more information on observability configuration, review the [Helm chart reference][helm-chart-reference].
+
+## Observability in control planes
+
+Read the [observability documentation][observability-documentation] to learn
+about the features Upbound offers for collecting telemetry from control planes.
+
+## Router metrics reference {#router-ref}
+
+To avoid overwhelming observability tools with hundreds of Envoy metrics, an
+allow-list filters metrics to only the following metric families.
+
+### Upstream cluster metrics
+
+Metrics tracking requests sent from Envoy to configured upstream clusters.
+Individual control planes, spaces-api, and other services are each considered
+an upstream cluster. Use these metrics to monitor service health, identify
+upstream errors, and measure backend latency.
+
+| Metric | Description |
+|--------|-------------|
+| `envoy_cluster_upstream_rq_xx_total` | HTTP status codes (2xx, 3xx, 4xx, 5xx) with label `envoy_response_code_class` |
+| `envoy_cluster_upstream_rq_timeout_total` | Requests that timed out waiting for upstream |
+| `envoy_cluster_upstream_rq_retry_limit_exceeded_total` | Requests that exhausted retry attempts |
+| `envoy_cluster_upstream_rq_total` | Total upstream requests |
+| `envoy_cluster_upstream_rq_time_bucket` | Latency histogram (for P50/P95/P99 calculations) |
+| `envoy_cluster_upstream_rq_time_sum` | Sum of request durations |
+| `envoy_cluster_upstream_rq_time_count` | Count of requests |
+
+### Circuit breaker metrics
+
+Metrics tracking circuit breaker state and remaining capacity. Circuit breakers
+prevent cascading failures by limiting connections and concurrent requests to
+unhealthy upstreams. Two priority levels exist: `DEFAULT` for watch requests and
+`HIGH` for API requests.
+
+| Name | Description |
+|--------|-------------|
+| `envoy_cluster_circuit_breakers_default_cx_open` | `DEFAULT` priority connection circuit breaker open (gauge) |
+| `envoy_cluster_circuit_breakers_default_rq_open` | `DEFAULT` priority request circuit breaker open (gauge) |
+| `envoy_cluster_circuit_breakers_default_remaining_cx` | Available `DEFAULT` priority connections (gauge) |
+| `envoy_cluster_circuit_breakers_default_remaining_rq` | Available `DEFAULT` priority request slots (gauge) |
+| `envoy_cluster_circuit_breakers_high_cx_open` | `HIGH` priority connection circuit breaker open (gauge) |
+| `envoy_cluster_circuit_breakers_high_rq_open` | `HIGH` priority request circuit breaker open (gauge) |
+| `envoy_cluster_circuit_breakers_high_remaining_cx` | Available `HIGH` priority connections (gauge) |
+| `envoy_cluster_circuit_breakers_high_remaining_rq` | Available `HIGH` priority request slots (gauge) |
+
+### Downstream listener metrics
+
+Metrics tracking requests received from clients such as kubectl and API consumers.
+Use these metrics to monitor client connection patterns, overall request volume,
+and responses sent to external users.
+
+| Name | Description |
+|--------|-------------|
+| `envoy_listener_downstream_rq_xx_total` | HTTP status codes for responses sent to clients |
+| `envoy_listener_downstream_rq_total` | Total requests received from clients |
+| `envoy_listener_downstream_cx_total` | Total connections from clients |
+| `envoy_listener_downstream_cx_active` | Currently active client connections (gauge) |
+
+### HTTP connection manager metrics
+
+Metrics from Envoy's HTTP connection manager tracking end-to-end request
+processing. These metrics provide a comprehensive view of the HTTP request
+lifecycle including status codes and client-perceived latency.
+
+| Name | Description |
+|--------|-------------|
+| `envoy_http_downstream_rq_xx` | HTTP status codes (note: no `_total` suffix for this metric family) |
+| `envoy_http_downstream_rq_total` | Total HTTP requests received |
+| `envoy_http_downstream_rq_time_bucket` | Downstream request latency histogram |
+| `envoy_http_downstream_rq_time_sum` | Sum of downstream request durations |
+| `envoy_http_downstream_rq_time_count` | Count of downstream requests |
+
+[router-ref]: #router-ref
+[observability-documentation]: /spaces/howtos/observability
+[opentelemetry-collector]: https://opentelemetry.io/docs/collector/
+[opentelemetry-operator]: https://opentelemetry.io/docs/kubernetes/operator/
+[helm-chart-reference]: /reference/helm-reference
diff --git a/spaces_versioned_docs/version-v1.14/howtos/self-hosted/spaces-management.md b/spaces_versioned_docs/version-v1.14/howtos/self-hosted/spaces-management.md
new file mode 100644
index 000000000..3df61c306
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.14/howtos/self-hosted/spaces-management.md
@@ -0,0 +1,219 @@
+---
+title: Interacting with Disconnected Spaces
+sidebar_position: 10
+description: Common operations in Spaces
+---
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Spaces management procedures are consistent across versions.
+
+For version compatibility details, see the [Spaces Release Notes][spaces-release-notes].
+:::
+
+## Spaces management
+
+### Create a Space
+
+To install an Upbound Space into a cluster, it's recommended that you dedicate an entire Kubernetes cluster to the Space. You can use [up space init][up-space-init] to install an Upbound Space.
Below is an example:
+
+```bash
+up space init "v1.9.0"
+```
+
+:::tip
+For a full guide to get started with Spaces, read the [quickstart][quickstart] guide.
+:::
+
+You can also install the helm chart for Spaces directly. In order for a Spaces install to succeed, you must install and configure some prerequisites first. This includes:
+
+- UXP
+- provider-helm and provider-kubernetes
+- cert-manager
+
+Furthermore, the Spaces chart requires a pull secret, which Upbound must provide to you.
+
+```bash
+helm -n upbound-system upgrade --install spaces \
+  oci://xpkg.upbound.io/spaces-artifacts/spaces \
+  --version "v1.9.0" \
+  --set "ingress.host=your-host.com" \
+  --set "clusterType=eks" \
+  --set "account=your-upbound-account" \
+  --wait
+```
+
+For a complete tutorial of the helm install, read one of the deployment guides for [AWS][aws], [Azure][azure], or [GCP][gcp], which cover the step-by-step process.
+
+### Upgrade a Space
+
+To upgrade a Space from one version to the next, use [up space upgrade][up-space-upgrade]. Spaces supports upgrading from version `x.N.*` to version `x.N+1.*`.
+
+```bash
+up space upgrade "v1.9.0"
+```
+
+You can also upgrade a Space by manually bumping the Helm chart version. Before
+upgrading, review the release notes for any breaking changes or
+special requirements:
+
+1. Review the release notes for the target version in the [Spaces Release Notes][spaces-release-notes]
+2. Upgrade the Space by updating the helm chart version:
+
+```bash
+helm -n upbound-system upgrade spaces \
+  oci://xpkg.upbound.io/spaces-artifacts/spaces \
+  --version "v1.9.0" \
+  --reuse-values \
+  --wait
+```
+
+For major version upgrades or configuration changes, extract your current values
+and adjust:
+
+```bash
+# Extract current values to a file
+helm -n upbound-system get values spaces > spaces-values.yaml
+
+# Upgrade with modified values
+helm -n upbound-system upgrade spaces \
+  oci://xpkg.upbound.io/spaces-artifacts/spaces \
+  --version "v1.9.0" \
+  -f spaces-values.yaml \
+  --wait
+```
+
+### Downgrade a Space
+
+To roll back a Space from one version to the previous one, use [up space upgrade][up-space-upgrade-1]. Spaces supports downgrading from version `x.N.*` to version `x.N-1.*`.
+
+```bash
+up space upgrade --rollback
+```
+
+You can also downgrade a Space manually using Helm by specifying an earlier version:
+
+```bash
+helm -n upbound-system upgrade spaces \
+  oci://xpkg.upbound.io/spaces-artifacts/spaces \
+  --version "v1.8.0" \
+  --reuse-values \
+  --wait
+```
+
+When downgrading, make sure to:
+
+1. Check the [release notes][release-notes] for specific downgrade instructions
+2. Verify compatibility between the downgraded Space and any control planes
+3. Back up any critical data before proceeding
+
+### Uninstall a Space
+
+To uninstall a Space from a Kubernetes cluster, use [up space destroy][up-space-destroy]. A destroy operation uninstalls core components and orphans control planes and their associated resources.
+
+```bash
+up space destroy
+```
+
+## Control plane management
+
+You can manage control planes in a Space via the [up CLI][up-cli] or the Spaces-local Kubernetes API. When you install a Space, it defines a new API type, `kind: ControlPlane`, that you can use to create and manage control planes in the Space.
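+
+If you want to confirm the API is available, you can query the Space cluster
+directly; for example, a quick check (assuming your kubeconfig points at the
+Space cluster):
+
+```bash
+# The controlplanes resource should appear in the spaces.upbound.io group.
+kubectl api-resources --api-group=spaces.upbound.io
+```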
+
+### Create a control plane
+
+To create a control plane in a Space using `up`, run the following:
+
+```bash
+up ctp create ctp1
+```
+
+You can also declare a new control plane like the example below and apply it to your Spaces cluster:
+
+```yaml
+apiVersion: spaces.upbound.io/v1beta1
+kind: ControlPlane
+metadata:
+  name: ctp1
+  namespace: default
+spec:
+  writeConnectionSecretToRef:
+    name: kubeconfig-ctp1
+    namespace: default
+```
+
+This manifest:
+
+- Creates a new control plane in the space called `ctp1`.
+- Publishes the kubeconfig to connect to the control plane to a secret in the Spaces cluster, called `kubeconfig-ctp1`
+
+### Connect to a control plane
+
+To connect to a control plane in a Space using `up`, run the following:
+
+```bash
+up ctp connect new-control-plane
+```
+
+The command changes your kubeconfig's current context to the control plane you specify. If you want to change your kubeconfig back to a previous context, run:
+
+```bash
+up ctp disconnect
+```
+
+If you configured your control plane to publish connection details, you can also access it this way. Once the control plane is ready, use the secret (containing connection details) to connect to the API server of your control plane. For example, with the `kubeconfig-ctp1` secret created above:
+
+```bash
+kubectl get secret kubeconfig-ctp1 -n default -o jsonpath='{.data.kubeconfig}' | base64 -d > /tmp/ctp1.yaml
+```
+
+Reference the kubeconfig whenever you want to interact directly with the API server of the control plane (vs the Space's API server):
+
+```bash
+kubectl get providers --kubeconfig=/tmp/ctp1.yaml
+```
+
+### Configure a control plane
+
+Spaces offers a built-in feature that allows you to connect a control plane to a Git source. This experience mirrors running a control plane in [Upbound's SaaS environment][upbound-s-saas-environment]. Upbound recommends using the built-in Git integration to drive configuration of your control planes in a Space.
+
+Learn more in the [Spaces Git integration][spaces-git-integration] documentation.
+
+### List control planes
+
+To list all control planes in a Space using `up`, run the following:
+
+```bash
+up ctp list
+```
+
+Or you can use Kubernetes-style semantics to list the control planes:
+
+```bash
+kubectl get controlplanes
+```
+
+### Delete a control plane
+
+To delete a control plane in a Space using `up`, run the following:
+
+```bash
+up ctp delete ctp1
+```
+
+Or you can use Kubernetes-style semantics to delete the control plane:
+
+```bash
+kubectl delete controlplane ctp1
+```
+
+[up-space-init]: /reference/cli-reference
+[quickstart]: /
+[aws]: /spaces/howtos/self-hosted/self-hosted-spaces-deployment
+[azure]: /spaces/howtos/self-hosted/self-hosted-spaces-deployment
+[gcp]: /spaces/howtos/self-hosted/self-hosted-spaces-deployment
+[up-space-upgrade]: /reference/cli-reference
+[spaces-release-notes]: /reference/release-notes/spaces
+[up-space-upgrade-1]: /reference/cli-reference
+[release-notes]: /reference/release-notes/spaces
+[up-space-destroy]: /reference/cli-reference
+[up-cli]: /reference/cli-reference
+[upbound-s-saas-environment]: /spaces/howtos/self-hosted/spaces-management
+[spaces-git-integration]: /spaces/howtos/self-hosted/gitops
diff --git a/spaces_versioned_docs/version-v1.14/howtos/self-hosted/troubleshooting.md b/spaces_versioned_docs/version-v1.14/howtos/self-hosted/troubleshooting.md
new file mode 100644
index 000000000..8d1ca6517
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.14/howtos/self-hosted/troubleshooting.md
@@ -0,0 +1,132 @@
+---
+title: Troubleshooting
+sidebar_position: 100
+description: A guide for troubleshooting an issue that occurs in a Space
+---
+
+Use the guidance below to find solutions for issues you encounter when deploying and using an Upbound Space. Use these tips as a supplement to the observability metrics discussed on the [Observability][observability] page.
+
+## General tips
+
+Most issues fall into two general categories:
+
+1. issues with the Spaces management plane
+2. issues on a control plane
+
+If your control plane doesn't reach a `Ready` state, it's indicative of the former. If your control plane is in a created and running state, but resources aren't reconciling, it's indicative of the latter.
+
+### Spaces component layout
+
+Run `kubectl get pods -A` against the cluster hosting a Space. You should see a variety of pods across several namespaces.
It should look something like this:
+
+```bash
+NAMESPACE NAME READY STATUS RESTARTS AGE
+cert-manager cert-manager-6d6769565c-mc5df 1/1 Running 0 25m
+cert-manager cert-manager-cainjector-744bb89575-nw4fg 1/1 Running 0 25m
+cert-manager cert-manager-webhook-759d6dcbf7-ps4mq 1/1 Running 0 25m
+ingress-nginx ingress-nginx-controller-7f8ccfccc6-6szlp 1/1 Running 0 25m
+kube-system coredns-5d78c9869d-4p477 1/1 Running 0 26m
+kube-system coredns-5d78c9869d-pdxt6 1/1 Running 0 26m
+kube-system etcd-kind-control-plane 1/1 Running 0 26m
+kube-system kindnet-8s7pq 1/1 Running 0 26m
+kube-system kube-apiserver-kind-control-plane 1/1 Running 0 26m
+kube-system kube-controller-manager-kind-control-plane 1/1 Running 0 26m
+kube-system kube-proxy-l68r8 1/1 Running 0 26m
+kube-system kube-scheduler-kind-control-plane 1/1 Running 0 26m
+local-path-storage local-path-provisioner-6bc4bddd6b-qsdjt 1/1 Running 0 26m
+mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system coredns-5dc69d6447-f56rh-x-kube-system-x-vcluster 1/1 Running 0 21m
+mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system crossplane-6b6d67bc66-6b8nx-x-upbound-system-x-vcluster 1/1 Running 0 20m
+mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system crossplane-rbac-manager-78f6fc7cb4-pjkhc-x-upbound-s-12253c3c4e 1/1 Running 0 20m
+mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system kube-state-metrics-7f8f4dcc5b-8p8c4 1/1 Running 0 22m
+mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system mxp-gateway-68f546b9c8-xnz5j-x-upbound-system-x-vcluster 1/1 Running 0 20m
+mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system mxp-ksm-config-54655667bb-hv9br 1/1 Running 0 22m
+mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system mxp-readyz-5f7f97d967-b98bw 1/1 Running 0 22m
+mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system otlp-collector-56d7d46c8d-g5sh5-x-upbound-system-x-vcluster 1/1 Running 0 20m
+mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system vcluster-67c9fb8959-ppb2m 1/1 Running 0 22m
+mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system vcluster-api-6bfbccc49d-ffgpj 1/1 Running 0 22m
+mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system vcluster-controller-7cc6855656-8c46b 1/1 Running 0 22m
+mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system vcluster-etcd-0 1/1 Running 0 22m
+mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system vector-754b494b84-wljw4 1/1 Running 0 22m
+mxp-system mxp-charts-chartmuseum-7587f77558-8tltb 1/1 Running 0 23m
+upbound-system crossplane-b4dc7b4c9-6hjh5 1/1 Running 0 25m
+upbound-system crossplane-contrib-provider-helm-ce18dd03e6e4-7945d8985-4gcwr 1/1 Running 0 24m
+upbound-system crossplane-contrib-provider-kubernetes-1f1e32c1957d-577756gs2x4 1/1 Running 0 24m
+upbound-system crossplane-rbac-manager-d8cb49cbc-gbvvf 1/1 Running 0 25m
+upbound-system spaces-controller-6647677cf9-5zl5q 1/1 Running 0 24m
+upbound-system spaces-router-bc78c96d7-kzts2 2/2 Running 0 24m
+```
+
+What you are seeing is:
+
+- Pods in the `upbound-system` namespace are components required to run the management plane of the Space. This includes the `spaces-controller`, `spaces-router`, and an install of UXP.
+- Pods in the `mxp-{GUID}-system` namespace are components that collectively power a control plane. Notable callouts include pod names that look like `vcluster-api-{GUID}` and `vcluster-controller-{GUID}`, which are integral components of a control plane.
+- Pods in other notable namespaces, including `cert-manager` and `ingress-nginx`, are prerequisite components that support a Space's successful operation.
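+
+Because each control plane gets its own `mxp-{GUID}-system` namespace, a quick
+way to enumerate the control planes backing a Space is to filter namespaces (a
+convenience sketch, not an official command):
+
+```bash
+# List the namespaces that back individual control planes.
+kubectl get namespaces | grep '^mxp-'
+```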
+
+### Troubleshooting tips for the Spaces management plane
+
+Start by getting the status of all the pods in a Space:
+
+1. Make sure the current context of your kubeconfig points at the Kubernetes cluster hosting your Space
+2. Get the status of all the pods in the Space:
+```bash
+kubectl get pods -A
+```
+3. Scan the `Status` column to see if any of the pods report a status besides `Running`.
+4. Scan the `Restarts` column to see if any of the pods have restarted.
+5. If you notice a status other than `Running` or see pods that restarted, investigate their events by running:
+```bash
+kubectl describe pod <pod-name> -n <namespace>
+```
+
+Next, inspect the status of objects and releases:
+
+1. Make sure the current context of your kubeconfig points at the Kubernetes cluster hosting your Space
+2. Inspect the objects in your Space. If any are unhealthy, describe those objects to get the events:
+```bash
+kubectl get objects
+```
+3. Inspect the releases in your Space. If any are unhealthy, describe those releases to get the events:
+```bash
+kubectl get releases
+```
+
+### Troubleshooting tips for control planes in a Space
+
+General troubleshooting in a control plane starts by fetching the events of the control plane:
+
+1. Make sure the current context of your kubeconfig points at the Kubernetes cluster hosting your Space
+2. Run the following to fetch your control planes:
+```bash
+kubectl get ctp
+```
+3. Describe the control plane by providing its name, found in the preceding instruction:
+```bash
+kubectl describe controlplanes.spaces.upbound.io <control-plane-name>
+```
+
+## Issues
+
+### Your control plane is stuck in a 'creating' state
+
+#### Error: unknown field "ports" in io.k8s.api.networking.v1.NetworkPolicySpec
+
+This error is emitted when the Spaces software attempts to install a Helm release named `control-plane-host-policies`. The full error is:
+
+_CannotCreateExternalResource failed to install release: unable to build kubernetes objects from release manifest: error validating "": error validating data: ValidationError(NetworkPolicy.spec): unknown field "ports" in io.k8s.api.networking.v1.NetworkPolicySpec_
+
+This error may be caused by running a Space on an earlier version of Kubernetes than is supported (`v1.26` or later). To resolve this issue, upgrade the host Kubernetes cluster to `v1.26` or later.
+
+### Your Spaces install fails
+
+#### Error: You tried to install a Space on a previous Crossplane installation
+
+If you try to install a Space on an existing cluster that previously had Crossplane or UXP on it, you may encounter errors. Due to how the Spaces installer tests for the presence of UXP, it may detect orphaned CRDs that weren't cleaned up by the previous uninstall of Crossplane. You may need to manually [remove old Crossplane CRDs][remove-old-crossplane-crds] for the installer to properly detect the UXP prerequisite.
+
+[observability]: /spaces/howtos/observability
+[remove-old-crossplane-crds]: https://docs.crossplane.io/latest/guides/uninstall-crossplane/
diff --git a/spaces_versioned_docs/version-v1.14/howtos/self-hosted/use-argo.md b/spaces_versioned_docs/version-v1.14/howtos/self-hosted/use-argo.md
new file mode 100644
index 000000000..d58f7db44
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.14/howtos/self-hosted/use-argo.md
@@ -0,0 +1,228 @@
+---
+title: Use ArgoCD Plugin
+sidebar_position: 15
+description: A guide for integrating Argo with control planes in a Space.
+
+aliases:
+  - /all-spaces/self-hosted-spaces/use-argo
+  - /deploy/disconnected-spaces/use-argo-flux
+  - /all-spaces/self-hosted-spaces/use-argo-flux
+  - /connect/use-argo
+---
+
+
+:::info API Version Information
+This guide covers the ArgoCD Plugin, a preview feature available in all supported versions (v1.9-v1.14+) for self-hosted deployments.
+
+For details on GitOps patterns and related features across versions, see [Automation and GitOps Overview](/spaces/howtos/automation-and-gitops/overview/).
+:::
+
+:::important
+This feature is in preview and is off by default. To enable it, set `features.alpha.argocdPlugin.enabled=true` when installing Spaces:
+
+```bash
+up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
+  ...
+  --set "features.alpha.argocdPlugin.enabled=true"
+```
+:::
+
+Spaces provides an optional plugin to assist with integrating a control plane in a Space with Argo CD. You must enable the plugin for the entire Space at Spaces install or upgrade time. The plugin's job is to propagate the connection details of each control plane in a Space to Argo CD. By default, Upbound stores these connection details in a Kubernetes secret named after the control plane. To run Argo CD across multiple namespaces, Upbound recommends enabling the `features.alpha.argocdPlugin.useUIDFormatForCTPSecrets` flag to use a UID-based format for secret names to avoid conflicts.
+
+:::tip
+For general guidance on integrating Upbound with GitOps flows, see [GitOps with Control Planes][gitops-with-control-planes].
+:::
+
+## On cluster Argo CD
+
+If you are running Argo CD on the same cluster as the Space, run the following to enable the plugin:
+
+
+
+
+
+```bash {hl_lines="3-4"}
+up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
+  --set "account=${UPBOUND_ACCOUNT}" \
+  --set "features.alpha.argocdPlugin.enabled=true" \
+  --set "features.alpha.argocdPlugin.useUIDFormatForCTPSecrets=true" \
+  --set "features.alpha.argocdPlugin.target.secretNamespace=argocd"
+```
+
+
+
+
+```bash {hl_lines="7-8"}
+helm -n upbound-system upgrade --install spaces \
+  oci://xpkg.upbound.io/spaces-artifacts/spaces \
+  --version "${SPACES_VERSION}" \
+  --set "ingress.host=${SPACES_ROUTER_HOST}" \
+  --set "account=${UPBOUND_ACCOUNT}" \
+  --set "features.alpha.argocdPlugin.enabled=true" \
+  --set "features.alpha.argocdPlugin.useUIDFormatForCTPSecrets=true" \
+  --set "features.alpha.argocdPlugin.target.secretNamespace=argocd" \
+  --wait
+```
+
+
+
+
+
+The important flags are:
+
+- `features.alpha.argocdPlugin.enabled=true`
+- `features.alpha.argocdPlugin.useUIDFormatForCTPSecrets=true`
+- `features.alpha.argocdPlugin.target.secretNamespace=argocd`
+
+The first flag enables the feature, the second uses a UID-based format for the propagated connection secret names, and the third indicates the namespace on the cluster where you installed Argo CD.
+
+Be sure to [configure Argo][configure-argo] after it's installed.
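+
+Under the hood, Argo CD discovers external clusters through secrets labeled `argocd.argoproj.io/secret-type: cluster`, which is the shape the propagated connection details take. A rough sketch of such a secret (field values are illustrative, not the plugin's exact output):
+
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  # With useUIDFormatForCTPSecrets=true, the name is UID-based instead
+  name: my-control-plane
+  namespace: argocd
+  labels:
+    argocd.argoproj.io/secret-type: cluster
+type: Opaque
+stringData:
+  name: my-control-plane
+  server: https://<control-plane-api-endpoint>
+  config: |
+    {
+      "tlsClientConfig": {
+        "...": "..."
+      }
+    }
+```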
+
+## External cluster Argo CD
+
+If you are running Argo CD on a different cluster from the one where you installed your Space, you need to provide some extra flags:
+
+
+
+
+
+```bash {hl_lines="3-7"}
+up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
+  --set "account=${UPBOUND_ACCOUNT}" \
+  --set "features.alpha.argocdPlugin.enabled=true" \
+  --set "features.alpha.argocdPlugin.useUIDFormatForCTPSecrets=true" \
+  --set "features.alpha.argocdPlugin.target.secretNamespace=argocd" \
+  --set "features.alpha.argocdPlugin.target.externalCluster.enabled=true" \
+  --set "features.alpha.argocdPlugin.target.externalCluster.secret.name=my-argo-cluster" \
+  --set "features.alpha.argocdPlugin.target.externalCluster.secret.key=kubeconfig"
+```
+
+
+
+
+```bash {hl_lines="7-11"}
+helm -n upbound-system upgrade --install spaces \
+  oci://xpkg.upbound.io/spaces-artifacts/spaces \
+  --version "${SPACES_VERSION}" \
+  --set "ingress.host=${SPACES_ROUTER_HOST}" \
+  --set "account=${UPBOUND_ACCOUNT}" \
+  --set "features.alpha.argocdPlugin.enabled=true" \
+  --set "features.alpha.argocdPlugin.useUIDFormatForCTPSecrets=true" \
+  --set "features.alpha.argocdPlugin.target.secretNamespace=argocd" \
+  --set "features.alpha.argocdPlugin.target.externalCluster.enabled=true" \
+  --set "features.alpha.argocdPlugin.target.externalCluster.secret.name=my-argo-cluster" \
+  --set "features.alpha.argocdPlugin.target.externalCluster.secret.key=kubeconfig" \
+  --wait
+```
+
+
+
+
+The extra flags are:
+
+- `features.alpha.argocdPlugin.target.externalCluster.enabled=true`
+- `features.alpha.argocdPlugin.target.externalCluster.secret.name=my-argo-cluster`
+- `features.alpha.argocdPlugin.target.externalCluster.secret.key=kubeconfig`
+
+These flags tell the plugin (running in Spaces) where your Argo CD instance is. After you've done this at install time, you also need to create a `Secret` on the Spaces cluster. This secret must contain a kubeconfig pointing to your Argo CD instance. The secret needs to be in the same namespace as the `spaces-controller`, which is `upbound-system`.
+
+Once you enable the plugin and configure it, the plugin automatically propagates connection details for your control planes to your Argo CD instance. You can then target the control plane and use Argo to sync Crossplane-related objects to it.
+
+Be sure to [configure Argo][configure-argo-1] after it's installed.
+
+## Configure Argo
+
+Argo's default configuration causes it to try to query for resource kinds that don't exist in control planes. You should configure Argo's [general configmap][general-configmap] to include the resource group/kinds which make sense in the context of control planes. For example, the concept of `nodes` isn't exposed in control planes.
+ +To configure Argo CD, connect to the cluster where you've installed it and edit the configmap: + +```bash +kubectl edit configmap argocd-cm -n argocd +``` + +Adjust the resource inclusions and exclusions under the `data` field of the configmap: + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: argocd-cm + namespace: argocd +data: + resource.exclusions: | + - apiGroups: + - "*" + kinds: + - "*" + clusters: + - "*" + resource.inclusions: | + - apiGroups: + - "*" + kinds: + - Provider + - Configuration + clusters: + - "*" +``` + +The preceding configuration causes Argo to exclude syncing **all** resource group/kinds--except Crossplane `providers` and `configurations`--for **all** control planes. You're encouraged to adjust the `resource.inclusions` to include the types that make sense for your control plane, such as an `XRD` you've built with Crossplane. You're also encouraged to customize the `clusters` pattern to selectively apply these exclusions/inclusions to control planes (for example, `control-plane-prod-*`). + +## Control plane connection secrets + +To deploy control planes through Argo CD, you need to configure the `writeConnectionSecretToRef` field in your control plane spec. This field specifies where to store the control plane's `kubeconfig` and makes connection details available to Argo CD. + +### Basic Configuration + +In your control plane manifest, include the `writeConnectionSecretToRef` field: + +```yaml +apiVersion: spaces.upbound.io/v1beta1 +kind: ControlPlane +metadata: + name: my-control-plane + namespace: my-control-plane-group +spec: + writeConnectionSecretToRef: + name: kubeconfig-my-control-plane + namespace: my-control-plane-group + # ... other control plane configuration +``` + +### Parameters + +The `writeConnectionSecretToRef` field requires two parameters: + +- `name`: A unique name for the secret containing the kubeconfig (`kubeconfig-my-control-plane`) +- `namespace`: The Kubernetes namespace where you store the secret, which must match the metadata namespace. The system copies it into the `argocd` namespace when you set the `features.alpha.argocdPlugin.target.secretNamespace=argocd` configuration parameter. + +Control plane labels automatically propagate to the connection secret, which allows you to use label selectors in Argo CD for automated discovery and management. + +This configuration enables Argo CD to automatically discover and manage resources on your control planes. 
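+
+With the inclusions in place, you can point an Argo CD `Application` at a registered control plane. A minimal sketch; the repository URL, path, and destination name below are illustrative and depend on your own setup:
+
+```yaml
+apiVersion: argoproj.io/v1alpha1
+kind: Application
+metadata:
+  name: my-control-plane-resources
+  namespace: argocd
+spec:
+  project: default
+  source:
+    # Illustrative Git repository holding your Crossplane manifests
+    repoURL: https://github.com/example-org/platform-claims.git
+    targetRevision: main
+    path: control-planes/my-control-plane
+  destination:
+    # The cluster name Argo CD learned from the propagated connection secret
+    name: my-control-plane
+  syncPolicy:
+    automated:
+      prune: true
+      selfHeal: true
+```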
+ + +[gitops-with-control-planes]: /spaces/howtos/cloud-spaces/gitops +[configure-argo]: #configure-argo +[configure-argo-1]: #configure-argo +[general-configmap]: https://argo-cd.readthedocs.io/en/stable/operator-manual/argocd-cm-yaml/ diff --git a/spaces_versioned_docs/version-v1.14/howtos/self-hosted/workload-id/_category_.json b/spaces_versioned_docs/version-v1.14/howtos/self-hosted/workload-id/_category_.json new file mode 100644 index 000000000..c5ecc93f6 --- /dev/null +++ b/spaces_versioned_docs/version-v1.14/howtos/self-hosted/workload-id/_category_.json @@ -0,0 +1,11 @@ +{ + "label": "Workload Identity Configuration", + "position": 2, + "collapsed": true, + "customProps": { + "plan": "business" + } + +} + + diff --git a/spaces_versioned_docs/version-v1.14/howtos/self-hosted/workload-id/backup-restore-config.md b/spaces_versioned_docs/version-v1.14/howtos/self-hosted/workload-id/backup-restore-config.md new file mode 100644 index 000000000..935ca69ec --- /dev/null +++ b/spaces_versioned_docs/version-v1.14/howtos/self-hosted/workload-id/backup-restore-config.md @@ -0,0 +1,384 @@ +--- +title: Backup and Restore Workload ID +weight: 1 +description: Configure workload identity for Spaces Backup and Restore +--- +import GlobalLanguageSelector, { CodeBlock } from '@site/src/components/GlobalLanguageSelector'; + + + + + + + +Workload-identity authentication lets you use access policies to grant temporary +AWS credentials to your Kubernetes pod with a service account. Assigning IAM roles and service accounts allows the pod to assume the IAM role dynamically and much more securely than static credentials. + +This guide walks you through creating an IAM trust role policy and applying it +to your EKS cluster to handle backup and restore storage. + + + + + +Workload-identity authentication lets you use access policies to grant your +self-hosted Space cluster access to your cloud providers. Workload identity +authentication grants temporary Azure credentials to your Kubernetes pod based on +a service account. Assigning managed identities and service accounts allows the pod to +authenticate with Azure resources dynamically and much more securely than static credentials. + +This guide walks you through creating a managed identity and federated credential for your AKS +cluster to handle backup and restore storage. + + + + + +Workload-identity authentication lets you use access policies to grant your +self-hosted Space cluster access to your cloud providers. Workload identity +authentication grants temporary GCP credentials to your Kubernetes pod based on +a service account. Assigning IAM roles and service accounts allows the pod to +access cloud resources dynamically and much more securely than static credentials. + +This guide walks you through configuring workload identity for your GKE +cluster to handle backup and restore storage. + + + +## Prerequisites + + +To set up a workload-identity, you'll need: + + +- A self-hosted Space cluster +- Administrator access in your cloud provider +- Helm and `kubectl` + +## About the backup and restore component + +The `mxp-controller` component handles backup and restore workloads. It needs to +access your cloud storage to store and retrieve backups. By default, this +component runs in each control plane's host namespace. + +## Configuration + + + +Upbound supports workload-identity configurations in AWS with IAM Roles for +Service Accounts and EKS pod identity association. 
+
+#### IAM Roles for Service Accounts (IRSA)
+
+With IRSA, you can associate a Kubernetes service account in an EKS cluster with
+an AWS IAM role. Upbound authenticates workloads with that service account as
+the IAM role using temporary credentials instead of static role credentials.
+IRSA relies on AWS `AssumeRoleWithWebIdentity` `STS` to exchange OIDC ID tokens with
+the IAM role's temporary credentials. IRSA uses the `eks.amazonaws.com/role-arn`
+annotation to link the service account and the IAM role.
+
+First, create an IAM role with appropriate permissions to access your S3 bucket:
+
+```json
+{
+    "Version": "2012-10-17",
+    "Statement": [
+        {
+            "Effect": "Allow",
+            "Action": [
+                "s3:GetObject",
+                "s3:PutObject",
+                "s3:ListBucket",
+                "s3:DeleteObject"
+            ],
+            "Resource": [
+                "arn:aws:s3:::${YOUR_BACKUP_BUCKET}",
+                "arn:aws:s3:::${YOUR_BACKUP_BUCKET}/*"
+            ]
+        }
+    ]
+}
+```
+
+Next, ensure your EKS cluster has an OIDC identity provider:
+
+```shell
+eksctl utils associate-iam-oidc-provider --cluster ${YOUR_CLUSTER_NAME} --approve
+```
+
+Configure the IAM role trust policy with the namespace for each
+provisioned control plane.
+
+```json
+{
+    "Version": "2012-10-17",
+    "Statement": [
+        {
+            "Effect": "Allow",
+            "Principal": {
+                "Federated": "arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:oidc-provider/${YOUR_OIDC_PROVIDER}"
+            },
+            "Action": "sts:AssumeRoleWithWebIdentity",
+            "Condition": {
+                "StringEquals": {
+                    "${YOUR_OIDC_PROVIDER}:aud": "sts.amazonaws.com",
+                    "${YOUR_OIDC_PROVIDER}:sub": "system:serviceaccount:${YOUR_NAMESPACE}:mxp-controller"
+                }
+            }
+        }
+    ]
+}
```
+
+In your control plane, pass the `--set` flag with the Spaces Helm chart
+parameters for the Backup and Restore component:
+
+```shell
+--set controlPlanes.mxpController.serviceAccount.annotations."eks\.amazonaws\.com/role-arn"="${SPACES_BR_IAM_ROLE_ARN}"
+```
+
+This command allows the backup and restore component to authenticate with your
+dedicated IAM role in your EKS cluster environment.
+
+#### EKS pod identities
+
+Upbound also supports EKS Pod Identity configuration. EKS Pod Identities allow
+you to create a pod identity association with your Kubernetes namespace, a
+service account, and an IAM role, which allows the EKS control plane to
+automatically handle the credential exchange.
+
+First, create an IAM role with appropriate permissions to access your S3 bucket:
+
+```json
+{
+    "Version": "2012-10-17",
+    "Statement": [
+        {
+            "Effect": "Allow",
+            "Action": [
+                "s3:GetObject",
+                "s3:PutObject",
+                "s3:ListBucket",
+                "s3:DeleteObject"
+            ],
+            "Resource": [
+                "arn:aws:s3:::${YOUR_BACKUP_BUCKET}",
+                "arn:aws:s3:::${YOUR_BACKUP_BUCKET}/*"
+            ]
+        }
+    ]
+}
+```
+
+When you install or upgrade your Space with Helm, add the backup/restore values:
+
+```shell
+helm upgrade spaces spaces-helm-chart \
+  --set "billing.enabled=true" \
+  --set "backup.enabled=true" \
+  --set "backup.storage.provider=aws" \
+  --set "backup.storage.aws.region=${YOUR_AWS_REGION}" \
+  --set "backup.storage.aws.bucket=${YOUR_BACKUP_BUCKET}"
+```
+
+After Upbound provisions your control plane, create a Pod Identity Association
+with the `aws` CLI:
+
+```shell
+aws eks create-pod-identity-association \
+  --cluster-name ${YOUR_CLUSTER_NAME} \
+  --namespace ${YOUR_CONTROL_PLANE_NAMESPACE} \
+  --service-account mxp-controller \
+  --role-arn arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:role/backup-restore-role
+```
+
+
+
+
+
+Upbound supports workload-identity configurations in Azure with Azure's built-in
+workload identity feature.
+ +#### Prepare your cluster + +First, enable the OIDC issuer and workload identity in your AKS cluster: + +```shell +az aks update --resource-group ${YOUR_RESOURCE_GROUP} --name ${YOUR_AKS_CLUSTER_NAME} --enable-oidc-issuer --enable-workload-identity +``` + +Next, find and store the OIDC issuer URL as an environment variable: + +```shell +export AKS_OIDC_ISSUER="$(az aks show --name ${YOUR_AKS_CLUSTER_NAME} --resource-group ${YOUR_RESOURCE_GROUP} --query "oidcIssuerProfile.issuerUrl" --output tsv)" +``` + +#### Create a User-Assigned Managed Identity + +Create a new managed identity to associate with the backup and restore component: + +```shell +az identity create --name backup-restore-identity --resource-group ${YOUR_RESOURCE_GROUP} --location ${YOUR_LOCATION} +``` + +Retrieve the client ID and store it as an environment variable: + +```shell +export USER_ASSIGNED_CLIENT_ID="$(az identity show --name backup-restore-identity --resource-group ${YOUR_RESOURCE_GROUP} --query clientId -otsv)" +``` + +Grant the managed identity you created to access your Azure Storage account: + +```shell +az role assignment create \ + --role "Storage Blob Data Contributor" \ + --assignee ${USER_ASSIGNED_CLIENT_ID} \ + --scope /subscriptions/${YOUR_SUBSCRIPTION_ID}/resourceGroups/${YOUR_RESOURCE_GROUP}/providers/Microsoft.Storage/storageAccounts/${YOUR_STORAGE_ACCOUNT} +``` + +#### Apply the managed identity role + +In your control plane, pass the `--set` flag with the Spaces Helm chart +parameters for the backup and restore component: + +```shell +--set controlPlanes.mxpController.serviceAccount.annotations."azure\.workload\.identity/client-id"="${YOUR_USER_ASSIGNED_CLIENT_ID}" +--set controlPlanes.mxpController.pod.customLabels."azure\.workload\.identity/use"="true" +``` + +#### Create a Federated Identity credential + +```shell +az identity federated-credential create \ + --name backup-restore-federated-identity \ + --identity-name backup-restore-identity \ + --resource-group ${YOUR_RESOURCE_GROUP} \ + --issuer ${AKS_OIDC_ISSUER} \ + --subject system:serviceaccount:${YOUR_CONTROL_PLANE_NAMESPACE}:mxp-controller +``` + + + + + +Upbound supports workload-identity configurations in GCP with IAM principal +identifiers and service account impersonation. 
+ +#### Prepare your cluster + +First, enable Workload Identity Federation on your GKE cluster: + +```shell +gcloud container clusters update ${YOUR_CLUSTER_NAME} \ + --workload-pool=${YOUR_PROJECT_ID}.svc.id.goog \ + --region=${YOUR_REGION} +``` + +#### Create a Google Service Account + +Create a service account for the backup and restore component: + +```shell +gcloud iam service-accounts create backup-restore-sa \ + --display-name "Backup Restore Service Account" \ + --project ${YOUR_PROJECT_ID} +``` + +Grant the service account access to your Google Cloud Storage bucket: + +```shell +gcloud projects add-iam-policy-binding ${YOUR_PROJECT_ID} \ + --member "serviceAccount:backup-restore-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com" \ + --role "roles/storage.objectAdmin" +``` + +#### Configure Workload Identity + +Create an IAM binding to grant the Kubernetes service account access to the Google service account: + +```shell +gcloud iam service-accounts add-iam-policy-binding \ + backup-restore-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com \ + --role roles/iam.workloadIdentityUser \ + --member "serviceAccount:${YOUR_PROJECT_ID}.svc.id.goog[${YOUR_CONTROL_PLANE_NAMESPACE}/mxp-controller]" +``` + +#### Apply the service account configuration + +In your control plane, pass the `--set` flag with the Spaces Helm chart +parameters for the backup and restore component: + +```shell +--set controlPlanes.mxpController.serviceAccount.annotations."iam\.gke\.io/gcp-service-account"="backup-restore-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com" +``` + + + +## Verify your configuration + +After you apply the configuration use `kubectl` to verify the service account +has the correct annotation: + +```shell +kubectl get serviceaccount mxp-controller -n ${YOUR_CONTROL_PLANE_NAMESPACE} -o yaml +``` + +Verify the `mxp-controller` pod is running: + +```shell +kubectl get pods -n ${YOUR_CONTROL_PLANE_NAMESPACE} | grep mxp-controller +``` + +## Restart workload + +You must manually restart a workload's pod when you add the workload identity annotations to the running pod's service account. + + + +This restart enables the EKS pod identity webhook to inject the necessary +environment for using IRSA. + + + + + +This restart enables the workload identity webhook to inject the necessary +environment for using Azure workload identity. + + + + + +This restart enables the workload identity webhook to inject the necessary +environment for using GCP workload identity. + + + +```shell +kubectl rollout restart deployment mxp-controller -n ${YOUR_CONTROL_PLANE_NAMESPACE} +``` + +## Use cases + + +Configuring backup and restore with workload identity eliminates the need for +static credentials in your cluster and the overhead of credential rotation. +These benefits are helpful in: + +* Disaster recovery scenarios +* Control plane migration +* Compliance requirements +* Rollbacks after unsuccessful upgrades + +## Next steps + +Now that you have a workload identity configured for the backup and restore +component, visit the [Backup Configuration][backup-restore-guide] documentation. 
+ +Other workload identity guides are: +* [Billing][billing] +* [Shared Secrets][secrets] + +[backup-restore-guide]: /spaces/howtos/backup-and-restore +[billing]: /spaces/howtos/self-hosted/workload-id/billing-config +[secrets]: /spaces/howtos/self-hosted/workload-id/eso-config diff --git a/spaces_versioned_docs/version-v1.14/howtos/self-hosted/workload-id/billing-config.md b/spaces_versioned_docs/version-v1.14/howtos/self-hosted/workload-id/billing-config.md new file mode 100644 index 000000000..323a6122f --- /dev/null +++ b/spaces_versioned_docs/version-v1.14/howtos/self-hosted/workload-id/billing-config.md @@ -0,0 +1,454 @@ +--- +title: Billing Workload ID +weight: 1 +description: Configure workload identity for Spaces Billing +--- +import GlobalLanguageSelector, { CodeBlock } from '@site/src/components/GlobalLanguageSelector'; + + + + + + + +Workload-identity authentication lets you use access policies to grant your +self-hosted Space cluster access to your cloud providers. Workload identity +authentication grants temporary AWS credentials to your Kubernetes pod based on +a service account. Assigning IAM roles and service accounts allows the pod to +assume the IAM role dynamically and much more securely than static credentials. + +This guide walks you through creating an IAM trust role policy and applying it to your EKS +cluster for billing in your Space cluster. + + + + + +Workload-identity authentication lets you use access policies to grant your +self-hosted Space cluster access to your cloud providers. Workload identity +authentication grants temporary Azure credentials to your Kubernetes pod based on +a service account. Assigning managed identities and service accounts allows the pod to +authenticate with Azure resources dynamically and much more securely than static credentials. + +This guide walks you through creating a managed identity and federated credential for your AKS +cluster for billing in your Space cluster. + + + + + +Workload-identity authentication lets you use access policies to grant your +self-hosted Space cluster access to your cloud providers. Workload identity +authentication grants temporary GCP credentials to your Kubernetes pod based on +a service account. Assigning IAM roles and service accounts allows the pod to +access cloud resources dynamically and much more securely than static +credentials. + +This guide walks you through configuring workload identity for your GKE +cluster's billing component. + + + +## Prerequisites + + +To set up a workload-identity, you'll need: + + +- A self-hosted Space cluster +- Administrator access in your cloud provider +- Helm and `kubectl` + +## About the billing component + +The `vector.dev` component handles billing metrics collection in spaces. It +stores account data in your cloud storage. By default, this component runs in +each control plane's host namespace. + +## Configuration + + + +Upbound supports workload-identity configurations in AWS with IAM Roles for +Service Accounts and EKS pod identity association. + +#### IAM Roles for Service Accounts (IRSA) + +With IRSA, you can associate a Kubernetes service account in an EKS cluster with +an AWS IAM role. Upbound authenticates workloads with that service account as +the IAM role using temporary credentials instead of static role credentials. +IRSA relies on AWS `AssumeRoleWithWebIdentity` `STS` to exchange OIDC ID tokens with +the IAM role's temporary credentials. IRSA uses the `eks.amazon.aws/role-arn` +annotation to link the service account and the IAM role. 
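+
+As a concrete sketch of where this linkage ends up, the `vector` service account in a control plane's host namespace carries the standard `eks.amazonaws.com/role-arn` IRSA annotation once configured (the namespace and ARN below are illustrative):
+
+```yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: vector
+  namespace: mxp-example-system   # the control plane's host namespace
+  annotations:
+    eks.amazonaws.com/role-arn: arn:aws:iam::123456789012:role/billing-role
+```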
+
+**Create an IAM role and trust policy**
+
+First, create an IAM role with appropriate permissions to access your S3 bucket:
+
+```json
+{
+    "Version": "2012-10-17",
+    "Statement": [
+        {
+            "Effect": "Allow",
+            "Action": [
+                "s3:GetObject",
+                "s3:PutObject",
+                "s3:ListBucket",
+                "s3:DeleteObject"
+            ],
+            "Resource": [
+                "arn:aws:s3:::${YOUR_BILLING_BUCKET}",
+                "arn:aws:s3:::${YOUR_BILLING_BUCKET}/*"
+            ]
+        }
+    ]
+}
+```
+
+You must configure the IAM role trust policy with the exact match for each
+provisioned control plane. An example of a trust policy for a single control
+plane is below:
+
+```json
+{
+    "Version": "2012-10-17",
+    "Statement": [
+        {
+            "Effect": "Allow",
+            "Principal": {
+                "Federated": "arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:oidc-provider/${YOUR_OIDC_PROVIDER}"
+            },
+            "Action": "sts:AssumeRoleWithWebIdentity",
+            "Condition": {
+                "StringEquals": {
+                    "${YOUR_OIDC_PROVIDER}:aud": "sts.amazonaws.com",
+                    "${YOUR_OIDC_PROVIDER}:sub": "system:serviceaccount:${YOUR_NAMESPACE}:vector"
+                }
+            }
+        }
+    ]
+}
+```
+
+**Configure the EKS OIDC provider**
+
+Next, ensure your EKS cluster has an OIDC identity provider:
+
+```shell
+eksctl utils associate-iam-oidc-provider --cluster ${YOUR_CLUSTER_NAME} --approve
+```
+
+**Apply the IAM role**
+
+In your control plane, pass the `--set` flag with the Spaces Helm chart
+parameters for the Billing component:
+
+```shell
+--set "billing.enabled=true"
+--set "billing.storage.provider=aws"
+--set "billing.storage.aws.region=${YOUR_AWS_REGION}"
+--set "billing.storage.aws.bucket=${YOUR_BILLING_BUCKET}"
+--set "billing.storage.secretRef.name="
+--set controlPlanes.vector.serviceAccount.customAnnotations."eks\.amazonaws\.com/role-arn"="arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:role/${YOUR_BILLING_ROLE_NAME}"
+```
+
+:::important
+You **must** set the `billing.storage.secretRef.name` to an empty string to
+enable workload identity for the billing component.
+:::
+
+#### EKS pod identities
+
+Upbound also supports EKS Pod Identity configuration. EKS Pod Identities allow
+you to create a pod identity association with your Kubernetes namespace, a
+service account, and an IAM role, which allows the EKS control plane to
+automatically handle the credential exchange.
+
+**Create an IAM role**
+
+First, create an IAM role with appropriate permissions to access your S3 bucket:
+
+```json
+{
+    "Version": "2012-10-17",
+    "Statement": [
+        {
+            "Effect": "Allow",
+            "Action": [
+                "s3:GetObject",
+                "s3:PutObject",
+                "s3:ListBucket"
+            ],
+            "Resource": [
+                "arn:aws:s3:::${YOUR_BILLING_BUCKET}",
+                "arn:aws:s3:::${YOUR_BILLING_BUCKET}/*"
+            ]
+        }
+    ]
+}
+```
+
+**Configure your Space with Helm**
+
+When you install or upgrade your Space with Helm, add the billing values:
+
+```shell
+helm upgrade spaces spaces-helm-chart \
+  --set "billing.enabled=true" \
+  --set "billing.storage.provider=aws" \
+  --set "billing.storage.aws.region=${YOUR_AWS_REGION}" \
+  --set "billing.storage.aws.bucket=${YOUR_BILLING_BUCKET}" \
+  --set "billing.storage.secretRef.name="
+```
+
+**Create a Pod Identity Association**
+
+After Upbound provisions your control plane, create a Pod Identity Association
+with the `aws` CLI:
+
+```shell
+aws eks create-pod-identity-association \
+  --cluster-name ${YOUR_CLUSTER_NAME} \
+  --namespace ${YOUR_CONTROL_PLANE_NAMESPACE} \
+  --service-account vector \
+  --role-arn arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:role/${YOUR_BILLING_ROLE_NAME}
+```
+
+
+
+
+
+Upbound supports workload-identity configurations in Azure with Azure's built-in
+workload identity feature.
+ +First, enable the OIDC issuer and workload identity in your AKS cluster: + +```shell +az aks update --resource-group ${YOUR_RESOURCE_GROUP} --name ${YOUR_AKS_CLUSTER_NAME} --enable-oidc-issuer --enable-workload-identity +``` + +Next, find and store the OIDC issuer URL as an environment variable: + +```shell +export AKS_OIDC_ISSUER="$(az aks show --name ${YOUR_AKS_CLUSTER_NAME} --resource-group ${YOUR_RESOURCE_GROUP} --query "oidcIssuerProfile.issuerUrl" --output tsv)" +``` + +Create a new managed identity to associate with the billing component: + +```shell +az identity create --name billing-identity --resource-group ${YOUR_RESOURCE_GROUP} --location ${YOUR_LOCATION} +``` + +Retrieve the client ID and store it as an environment variable: + +```shell +export USER_ASSIGNED_CLIENT_ID="$(az identity show --name billing-identity --resource-group ${YOUR_RESOURCE_GROUP} --query clientId -otsv)" +``` + +Grant the managed identity you created to access your Azure Storage account: + +```shell +az role assignment create --role "Storage Blob Data Contributor" --assignee $USER_ASSIGNED_CLIENT_ID --scope /subscriptions/${YOUR_SUBSCRIPTION_ID}/resourceGroups/${YOUR_RESOURCE_GROUP}/providers/Microsoft.Storage/storageAccounts/${YOUR_STORAGE_ACCOUNT} +``` + +In your control plane, pass the `--set` flag with the Spaces Helm chart +parameters for the billing component: + +```shell +--set "billing.enabled=true" +--set "billing.storage.provider=azure" +--set "billing.storage.azure.storageAccount=${SPACES_BILLING_STORAGE_ACCOUNT}" +--set "billing.storage.azure.container=${SPACES_BILLING_STORAGE_CONTAINER}" +--set "billing.storage.secretRef.name=" +--set controlPlanes.vector.serviceAccount.customAnnotations."azure\.workload\.identity/client-id"="${SPACES_BILLING_APP_ID}" +--set controlPlanes.vector.pod.customLabels."azure\.workload\.identity/use"="true" +``` + +Create a federated credential to establish trust between the managed identity +and your AKS OIDC provider: + +```shell +az identity federated-credential create \ + --name billing-federated-identity \ + --identity-name billing-identity \ + --resource-group ${YOUR_RESOURCE_GROUP} \ + --issuer ${AKS_OIDC_ISSUER} \ + --subject system:serviceaccount:${YOUR_CONTROL_PLANE_NAMESPACE}:vector +``` + + + + + +Upbound supports workload-identity configurations in GCP with IAM principal +identifiers or service account impersonation. + +#### IAM principal identifiers + +IAM principal identifiers allow you to grant permissions directly to +Kubernetes service accounts without additional annotation. Upbound recommends +this approach for ease-of-use and flexibility. + +First, enable Workload Identity Federation on your GKE cluster: + +```shell +gcloud container clusters update ${YOUR_CLUSTER_NAME} \ + --workload-pool=${YOUR_PROJECT_ID}.svc.id.goog \ + --region=${YOUR_REGION} +``` + +Next, configure your Spaces installation with the Spaces Helm chart parameters: + +```shell +--set "billing.enabled=true" +--set "billing.storage.provider=gcp" +--set "billing.storage.gcp.bucket=${YOUR_BILLING_BUCKET}" +--set "billing.storage.secretRef.name=" +``` + +:::important +You **must** set the `billing.storage.secretRef.name` to an empty string to +enable workload identity for the billing component. 
+::: + +Grant the necessary permissions to your Kubernetes service account: + +```shell +gcloud projects add-iam-policy-binding ${YOUR_PROJECT_ID} \ + --member="principalSet://iam.googleapis.com/projects/${YOUR_PROJECT_NUMBER}/locations/global/workloadIdentityPools/${YOUR_PROJECT_ID}.svc.id.goog/attribute.kubernetes_namespace/${YOUR_CONTROL_PLANE_NAMESPACE}/attribute.kubernetes_service_account/vector" \ + --role="roles/storage.objectAdmin" +``` + +Enable uniform bucket-level access on your storage bucket: + +```shell +gcloud storage buckets update gs://${YOUR_BILLING_BUCKET} --uniform-bucket-level-access +``` + +#### Service account impersonation + +Service account impersonation allows you to link a Kubernetes service account to +a GCP service account. The Kubernetes service account assumes the permissions of +the GCP service account you specify. + +Enable workload id federation on your GKE cluster: + +```shell +gcloud container clusters update ${YOUR_CLUSTER_NAME} \ + --workload-pool=${YOUR_PROJECT_ID}.svc.id.goog \ + --region=${YOUR_REGION} +``` + +Next, create a dedicated service account for your billing operations: + +```shell +gcloud iam service-accounts create billing-sa \ + --project=${YOUR_PROJECT_ID} +``` + +Grant storage permissions to the service account you created: + +```shell +gcloud projects add-iam-policy-binding ${YOUR_PROJECT_ID} \ + --member="serviceAccount:billing-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com" \ + --role="roles/storage.objectAdmin" +``` + +Link the Kubernetes service account to the GCP service account: + +```shell +gcloud iam service-accounts add-iam-policy-binding \ + billing-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com \ + --role="roles/iam.workloadIdentityUser" \ + --member="serviceAccount:${YOUR_PROJECT_ID}.svc.id.goog[${YOUR_CONTROL_PLANE_NAMESPACE}/vector]" +``` + +In your control plane, pass the `--set` flag with the Spaces Helm chart +parameters for the billing component: + +```shell +--set "billing.enabled=true" +--set "billing.storage.provider=gcp" +--set "billing.storage.gcp.bucket=${YOUR_BILLING_BUCKET}" +--set "billing.storage.secretRef.name=" +--set controlPlanes.vector.serviceAccount.customAnnotations."iam\.gke\.io/gcp-service-account"="billing-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com" +``` + + + +## Verify your configuration + +After you apply the configuration use `kubectl` to verify the service account +has the correct annotation: + +```shell +kubectl get serviceaccount vector -n ${YOUR_CONTROL_PLANE_NAMESPACE} -o yaml +``` + +Verify the `vector` pod is running: + +```shell +kubectl get pods -n ${YOUR_CONTROL_PLANE_NAMESPACE} | grep vector +``` + +## Restart workload + + + +You must manually restart a workload's pod when you add the +`eks.amazonaws.com/role-arn key` annotation to the running pod's service +account. + +This restart enables the EKS pod identity webhook to inject the necessary +environment for using IRSA. + + + + + +You must manually restart a workload's pod when you add the workload identity annotations to the running pod's service account. + +This restart enables the workload identity webhook to inject the necessary +environment for using Azure workload identity. + + + + + +GCP workload identity doesn't require pod restarts after configuration changes. 
+If you do need to restart the workload, use the `kubectl` command to force the +component restart: + + + +```shell +kubectl rollout restart deployment vector +``` + + +## Use cases + + +Using workload identity authentication for billing eliminates the need for static +credentials in your cluster as well as the overhead of credential rotation. +These benefits are helpful in: + +* Resource usage tracking across teams/projects +* Cost allocation for multi-tenant environments +* Financial auditing requirements +* Capacity billing and resource optimization +* Automated billing workflows + +## Next steps + +Now that you have workload identity configured for the billing component, visit +the [Billing guide][billing-guide] for more information. + +Other workload identity guides are: +* [Backup and restore][backuprestore] +* [Shared Secrets][secrets] + +[billing-guide]: /spaces/howtos/self-hosted/billing +[backuprestore]: /spaces/howtos/self-hosted/workload-id/backup-restore-config +[secrets]: /spaces/howtos/self-hosted/workload-id/eso-config diff --git a/spaces_versioned_docs/version-v1.14/howtos/self-hosted/workload-id/eso-config.md b/spaces_versioned_docs/version-v1.14/howtos/self-hosted/workload-id/eso-config.md new file mode 100644 index 000000000..c1418c171 --- /dev/null +++ b/spaces_versioned_docs/version-v1.14/howtos/self-hosted/workload-id/eso-config.md @@ -0,0 +1,503 @@ +--- +title: Shared Secrets Workload ID +weight: 1 +description: Configure workload identity for Spaces Shared Secrets +--- +import GlobalLanguageSelector, { CodeBlock } from '@site/src/components/GlobalLanguageSelector'; + + + + + + + +Workload-identity authentication lets you use access policies to grant your +self-hosted Space cluster access to your cloud providers. Workload identity +authentication grants temporary AWS credentials to your Kubernetes pod based on +a service account. Assigning IAM roles and service accounts allows the pod to +assume the IAM role dynamically and much more securely than static credentials. + +This guide walks you through creating an IAM trust role policy and applying it to your EKS +cluster for secret sharing with Kubernetes. + + + + + +Workload-identity authentication lets you use access policies to grant your +self-hosted Space cluster access to your cloud providers. Workload identity +authentication grants temporary Azure credentials to your Kubernetes pod based on +a service account. Assigning managed identities and service accounts allows the pod to +authenticate with Azure resources dynamically and much more securely than static credentials. + +This guide walks you through creating a managed identity and federated credential for your AKS +cluster for shared secrets in your Space cluster. + + + + + +Workload-identity authentication lets you use access policies to grant your +self-hosted Space cluster access to your cloud providers. Workload identity +authentication grants temporary GCP credentials to your Kubernetes pod based on +a service account. Assigning IAM roles and service accounts allows the pod to +access cloud resources dynamically and much more securely than static +credentials. + +This guide walks you through configuring workload identity for your GKE +cluster's Shared Secrets component. 
+ + + +## Prerequisites + + +To set up a workload-identity, you'll need: + + +- A self-hosted Space cluster +- Administrator access in your cloud provider +- Helm and `kubectl` + + +## About the Shared Secrets component + + + + +The External Secrets Operator (ESO) runs in each control plane's host namespace as `external-secrets-controller`. It needs to access +your external secrets management service like AWS Secrets Manager. + +To configure your shared secrets workflow controller, you must: + +* Annotate the Kubernetes service account to associate it with a cloud-side + principal (such as an IAM role, service account, or enterprise application). The workload must then + use this service account. +* Label the workload (pod) to allow the injection of a temporary credential set, + enabling authentication. + + + + + +The External Secrets Operator (ESO) component runs in each control plane's host +namespace as `external-secrets-controller`. It synchronizes secrets from +external APIs into Kubernetes secrets. Shared secrets allow you to manage +credentials outside your Kubernetes cluster while making them available to your +application + + + + + +The External Secrets Operator (ESO) component runs in each control plane's host +namespace as `external-secrets-controller`. It synchronizes secrets from +external APIs into Kubernetes secrets. Shared secrets allow you to manage +credentials outside your Kubernetes cluster while making them available to your +application + + + +## Configuration + + + +Upbound supports workload-identity configurations in AWS with IAM Roles for +Service Accounts or EKS pod identity association. + +#### IAM Roles for Service Accounts (IRSA) + +With IRSA, you can associate a Kubernetes service account in an EKS cluster with +an AWS IAM role. Upbound authenticates workloads with that service account as +the IAM role using temporary credentials instead of static role credentials. +IRSA relies on AWS `AssumeRoleWithWebIdentity` `STS` to exchange OIDC ID tokens with +the IAM role's temporary credentials. IRSA uses the `eks.amazon.aws/role-arn` +annotation to link the service account and the IAM role. + +**Create an IAM role and trust policy** + +First, create an IAM role with appropriate permissions to access AWS Secrets Manager: + +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "secretsmanager:GetSecretValue", + "secretsmanager:DescribeSecret", + "ssm:GetParameter" + ], + "Resource": [ + "arn:aws:secretsmanager:${YOUR_REGION}:${YOUR_AWS_ACCOUNT_ID}:secret:${YOUR_SECRET_PREFIX}*", + "arn:aws:ssm:${YOUR_REGION}:${YOUR_AWS_ACCOUNT_ID}:parameter/${YOUR_PARAMETER_PREFIX}*" + ] + } + ] +} +``` + +You must configure the IAM role trust policy with the exact match for each +provisioned control plane. 
An example of a trust policy for a single control
+plane is below:
+
+```json
+{
+    "Version": "2012-10-17",
+    "Statement": [
+        {
+            "Effect": "Allow",
+            "Principal": {
+                "Federated": "arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:oidc-provider/${YOUR_OIDC_PROVIDER}"
+            },
+            "Action": "sts:AssumeRoleWithWebIdentity",
+            "Condition": {
+                "StringEquals": {
+                    "${YOUR_OIDC_PROVIDER}:aud": "sts.amazonaws.com"
+                },
+                "StringLike": {
+                    "${YOUR_OIDC_PROVIDER}:sub": "system:serviceaccount:*:external-secrets-controller"
+                }
+            }
+        }
+    ]
+}
+```
+
+**Configure the EKS OIDC provider**
+
+Next, ensure your EKS cluster has an OIDC identity provider:
+
+```shell
+eksctl utils associate-iam-oidc-provider --cluster ${YOUR_CLUSTER_NAME} --approve
+```
+
+**Apply the IAM role**
+
+In your control plane, pass the `--set` flag with the Spaces Helm chart
+parameters for the shared secrets component:
+
+```shell
+--set controlPlanes.sharedSecrets.serviceAccount.customAnnotations."eks\.amazonaws\.com/role-arn"="arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:role/${YOUR_ESO_ROLE_NAME}"
+```
+
+This command allows the shared secrets component to authenticate with your
+dedicated IAM role in your EKS cluster environment.
+
+#### EKS pod identities
+
+Upbound also supports EKS Pod Identity configuration. EKS Pod Identities allow
+you to create a pod identity association with your Kubernetes namespace, a
+service account, and an IAM role, which allows the EKS control plane to
+automatically handle the credential exchange.
+
+**Create an IAM role**
+
+First, create an IAM role with appropriate permissions to access AWS Secrets Manager:
+
+```json
+{
+    "Version": "2012-10-17",
+    "Statement": [
+        {
+            "Effect": "Allow",
+            "Action": [
+                "secretsmanager:GetSecretValue",
+                "secretsmanager:DescribeSecret",
+                "ssm:GetParameter"
+            ],
+            "Resource": [
+                "arn:aws:secretsmanager:${YOUR_AWS_REGION}:${YOUR_AWS_ACCOUNT_ID}:secret:${YOUR_SECRET_PREFIX}*",
+                "arn:aws:ssm:${YOUR_AWS_REGION}:${YOUR_AWS_ACCOUNT_ID}:parameter/${YOUR_PARAMETER_PREFIX}*"
+            ]
+        }
+    ]
+}
+```
+
+**Configure your Space with Helm**
+
+When you install or upgrade your Space with Helm, add the shared secrets value:
+
+```shell
+helm upgrade spaces spaces-helm-chart \
+  --set "sharedSecrets.enabled=true"
+```
+
+**Create a Pod Identity Association**
+
+After Upbound provisions your control plane, create a Pod Identity Association
+with the `aws` CLI:
+
+```shell
+aws eks create-pod-identity-association \
+  --cluster-name ${YOUR_CLUSTER_NAME} \
+  --namespace ${YOUR_CONTROL_PLANE_NAMESPACE} \
+  --service-account external-secrets-controller \
+  --role-arn arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:role/${YOUR_ROLE_NAME}
+```
+
+
+
+
+
+Upbound supports workload-identity configurations in Azure with Azure's built-in
+workload identity feature.
+
+First, enable the OIDC issuer and workload identity in your AKS cluster:
+
+```shell
+az aks update --resource-group ${YOUR_RESOURCE_GROUP} --name ${YOUR_AKS_CLUSTER_NAME} --enable-oidc-issuer --enable-workload-identity
+```
+
+Next, find and store the OIDC issuer URL as an environment variable:
+
+```shell
+export AKS_OIDC_ISSUER="$(az aks show --name ${YOUR_AKS_CLUSTER_NAME} --resource-group ${YOUR_RESOURCE_GROUP} --query "oidcIssuerProfile.issuerUrl" --output tsv)"
+```
+
+Create a new managed identity to associate with the shared secrets component:
+
+```shell
+az identity create --name secrets-identity --resource-group ${YOUR_RESOURCE_GROUP} --location ${YOUR_LOCATION}
+```
+
+Retrieve the client ID and store it as an environment variable:
+
+```shell
+export USER_ASSIGNED_CLIENT_ID="$(az identity show --name secrets-identity --resource-group ${YOUR_RESOURCE_GROUP} --query clientId -otsv)"
+```
+
+Grant the managed identity you created access to your Azure Key Vault:
+
+```shell
+az keyvault set-policy --name ${YOUR_KEY_VAULT_NAME} \
+  --resource-group ${YOUR_RESOURCE_GROUP} \
+  --object-id $(az identity show --name secrets-identity --resource-group ${YOUR_RESOURCE_GROUP} --query principalId -otsv) \
+  --secret-permissions get list
+```
+
+In your control plane, pass the `--set` flag with the Spaces Helm chart
+parameters for the shared secrets component:
+
+```shell
+--set controlPlanes.sharedSecrets.serviceAccount.customAnnotations."azure\.workload\.identity/client-id"="${USER_ASSIGNED_CLIENT_ID}"
+--set controlPlanes.sharedSecrets.pod.customLabels."azure\.workload\.identity/use"="true"
+```
+
+Next, create a federated credential to establish trust between the managed identity
+and your AKS OIDC provider:
+
+```shell
+az identity federated-credential create \
+  --name secrets-federated-identity \
+  --identity-name secrets-identity \
+  --resource-group ${YOUR_RESOURCE_GROUP} \
+  --issuer ${AKS_OIDC_ISSUER} \
+  --subject system:serviceaccount:${YOUR_CONTROL_PLANE_NAMESPACE}:external-secrets-controller
+```
+
+
+
+
+
+Upbound supports workload-identity configurations in GCP with IAM principal
+identifiers or service account impersonation.
+
+#### IAM principal identifiers
+
+IAM principal identifiers allow you to grant permissions directly to
+Kubernetes service accounts without additional annotation. Upbound recommends
+this approach for ease-of-use and flexibility.
+
+First, enable Workload Identity Federation on your GKE cluster:
+
+```shell
+gcloud container clusters update ${YOUR_CLUSTER_NAME} \
+  --workload-pool=${YOUR_PROJECT_ID}.svc.id.goog \
+  --region=${YOUR_REGION}
+```
+
+Next, grant the necessary permissions to your Kubernetes service account:
+
+```shell
+gcloud projects add-iam-policy-binding ${YOUR_PROJECT_ID} \
+  --member="principalSet://iam.googleapis.com/projects/${YOUR_PROJECT_NUMBER}/locations/global/workloadIdentityPools/${YOUR_PROJECT_ID}.svc.id.goog/attribute.kubernetes_namespace/${YOUR_CONTROL_PLANE_NAMESPACE}/attribute.kubernetes_service_account/external-secrets-controller" \
+  --role="roles/secretmanager.secretAccessor"
+```
+
+#### Service account impersonation
+
+Service account impersonation allows you to link a Kubernetes service account to
+a GCP service account. The Kubernetes service account assumes the permissions of
+the GCP service account you specify.
+ +Enable workload id federation on your GKE cluster: + +```shell +gcloud container clusters update ${YOUR_CLUSTER_NAME} \ + --workload-pool=${YOUR_PROJECT_ID}.svc.id.goog \ + --region=${YOUR_REGION} +``` + +Next, create a dedicated service account for your secrets operations: + +```shell +gcloud iam service-accounts create secrets-sa \ + --project=${YOUR_PROJECT_ID} +``` + +Grant secret access permissions to the service account you created: + +```shell +gcloud projects add-iam-policy-binding ${YOUR_PROJECT_ID} \ + --member="serviceAccount:secrets-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com" \ + --role="roles/secretmanager.secretAccessor" +``` + +Link the Kubernetes service account to the GCP service account: + +```shell +gcloud iam service-accounts add-iam-policy-binding \ + secrets-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com \ + --role="roles/iam.workloadIdentityUser" \ + --member="serviceAccount:${YOUR_PROJECT_ID}.svc.id.goog[${YOUR_CONTROL_PLANE_NAMESPACE}/external-secrets-controller]" +``` + +In your control plane, pass the `--set` flag with the Spaces Helm chart +parameters for the shared secrets component: + +```shell +--set controlPlanes.sharedSecrets.serviceAccount.customAnnotations."iam\.gke\.io/gcp-service-account"="secrets-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com" +``` + + + +## Verify your configuration + +After you apply the configuration use `kubectl` to verify the service account +has the correct annotation: + +```shell +kubectl get serviceaccount external-secrets-controller -n ${YOUR_CONTROL_PLANE_NAMESPACE} -o yaml +``` + + + +Verify the `external-secrets` pod is running correctly: + +```shell +kubectl get pods -n ${YOUR_CONTROL_PLANE_NAMESPACE} | grep external-secrets +``` + + + + + +Verify the External Secrets Operator pod is running correctly: + +```shell +kubectl get pods -n ${YOUR_CONTROL_PLANE_NAMESPACE} | grep external-secrets +``` + + + + + +Verify the `external-secrets` pod is running correctly: + +```shell +kubectl get pods -n ${YOUR_CONTROL_PLANE_NAMESPACE} | grep external-secrets +``` + + + +## Restart workload + + + +You must manually restart a workload's pod when you add the +`eks.amazonaws.com/role-arn key` annotation to the running pod's service +account. + +This restart enables the EKS pod identity webhook to inject the necessary +environment for using IRSA. + + + + + +You must manually restart a workload's pod when you add the workload identity annotations to the running pod's service account. + +This restart enables the workload identity webhook to inject the necessary +environment for using Azure workload identity. + + + + + +GCP workload identity doesn't require pod restarts after configuration changes. +If you do need to restart the workload, use the `kubectl` command to force the +component restart: + + + +```shell +kubectl rollout restart deployment external-secrets +``` + +## Use cases + + + + +Shared secrets with workload identity eliminates the need for static credentials +in your cluster. These benefits are particularly helpful in: + +* Secure application credentials management +* Database connection string storage +* API token management +* Compliance with secret rotation security standards +* Multi-environment configuration with centralized secret management + + + + + +Using workload identity authentication for shared secrets eliminates the need for static +credentials in your cluster as well as the overhead of credential rotation. 
+These benefits are particularly helpful in:
+
+* Secure application credentials management
+* Database connection string storage
+* API token management
+* Compliance with secret rotation security standards
+
+
+
+## Next steps
+
+Now that you have workload identity configured for the shared secrets component, visit
+the [Shared Secrets][eso-guide] guide for more information.
+
+Other workload identity guides are:
+* [Backup and restore][backuprestore]
+* [Billing][billing]
+
+[eso-guide]: /spaces/howtos/secrets-management
+[backuprestore]: /spaces/howtos/self-hosted/workload-id/backup-restore-config
+[billing]: /spaces/howtos/self-hosted/workload-id/billing-config
diff --git a/spaces_versioned_docs/version-v1.14/howtos/simulations.md b/spaces_versioned_docs/version-v1.14/howtos/simulations.md
new file mode 100644
index 000000000..26cb0e657
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.14/howtos/simulations.md
@@ -0,0 +1,110 @@
+---
+title: Simulate changes to your Control Plane Projects
+sidebar_position: 100
+description: Use the Up CLI to mock operations before deploying to your environments.
+---
+
+:::info API Version Information
+This guide covers Simulations, available in v1.10+ (GA since v1.13).
+
+For API specifications on the Simulations CRD, see the [Spaces API Reference](../../../reference/apis/spaces-api/).
+:::
+
+:::important
+The Simulations feature is in private preview. For more information, [reach out to Upbound][reach-out-to-upbound].
+:::
+
+Control plane simulations allow you to preview changes to your resources before
+applying them to your control planes. Like a plan or dry-run operation,
+simulations expose the impact of updates to compositions or claims without
+changing your actual resources.
+
+A control plane simulation creates a temporary copy of your control plane and
+returns a preview of the desired changes. The simulation change plan helps you
+reduce the risk of unexpected behavior based on your changes.
+
+## Simulation benefits
+
+Control planes are dynamic systems that automatically reconcile resources to
+match your desired state. Simulations provide visibility into this
+reconciliation process by showing:
+
+
+* New resources to create
+* Existing resources to change
+* Existing resources to delete
+* How configuration changes propagate through the system
+
+These insights are crucial when planning complex changes or upgrading Crossplane
+packages.
+
+## Requirements
+
+Simulations are available to select customers on Upbound Cloud with Team
+Tier or higher. For more information, [reach out to Upbound][reach-out-to-upbound-1].
+
+## How to simulate your control planes
+
+Before you start a simulation, build your project and use the `up project run`
+command to run your control plane.
+
+Use the `up project simulate` command with your control plane name to start the
+simulation:
+
+```shell
+up project simulate <control-plane-name> --complete-after=60s --terminate-on-finish
+```
+
+The `complete-after` flag determines how long to run the simulation before it completes and calculates the results.
Depending on the change, a simulation may not complete within your defined interval, leaving unaffected resources marked as `unchanged`.
+
+The `terminate-on-finish` flag terminates the simulation after the time
+you set, deleting the control plane that ran the simulation.
+
+At the end of your simulation, your CLI returns:
+* A summary of the resources created, modified, or deleted
+* Diffs for each resource affected
+
+## View your simulation in the Upbound Console
+You can also view your simulation results in the Upbound Console:
+
+1. Navigate to your base control plane in the Upbound Console
+2. Select the "Simulations" tab in the menu
+3. Select a simulation object to see a change list of all
+   affected resources.
+
+The Console provides visual indications of changes:
+
+- Created Resources: Marked with green
+- Modified Resources: Marked with yellow
+- Deleted Resources: Marked with red
+- Unchanged Resources: Displayed in gray
+
+![Upbound Console Simulation](/img/simulations.png)
+
+## Considerations
+
+Simulations are a **private preview** feature.
+
+Be aware of the following limitations:
+
+- Simulations can't predict the exact behavior of external systems due to the
+  complexity and non-deterministic reconciliation pattern in Crossplane.
+
+- The only completion criterion for a simulation is time. Your simulation may not
+  receive a conclusive result within that interval. Upbound recommends the
+  default `60s` value.
+
+- Providers don't run in simulations. Simulations can't compose resources that
+  rely on the status of Managed Resources.
+
+
+The Upbound team is working to improve these limitations. Your feedback is always appreciated.
+
+## Next steps
+
+For more information, follow the [tutorial][tutorial] on Simulations.
+
+
+[tutorial]: /manuals/cli/howtos/simulations
+[reach-out-to-upbound]: https://www.upbound.io/contact-us
+[reach-out-to-upbound-1]: https://www.upbound.io/contact-us
diff --git a/spaces_versioned_docs/version-v1.14/overview/_category_.json b/spaces_versioned_docs/version-v1.14/overview/_category_.json
new file mode 100644
index 000000000..54bb16430
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.14/overview/_category_.json
@@ -0,0 +1,4 @@
+{
+  "label": "Overview",
+  "position": 0
+}
diff --git a/spaces_versioned_docs/version-v1.14/overview/index.md b/spaces_versioned_docs/version-v1.14/overview/index.md
new file mode 100644
index 000000000..7b79f6e44
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.14/overview/index.md
@@ -0,0 +1,14 @@
+---
+title: Spaces Overview
+sidebar_position: 0
+---
+
+# Upbound Spaces
+
+Welcome to the Upbound Spaces documentation. This section contains comprehensive documentation for the Spaces API and Spaces operations across all supported versions (v1.9 through v1.15).
+
+## Get Started
+
+- **[Concepts](/spaces/concepts/control-planes)** - Core concepts for Spaces
+- **[How-To Guides](/spaces/howtos/auto-upgrade)** - Step-by-step guides for operating Spaces
+- **[API Reference](/spaces/reference/)** - API specifications and resources
diff --git a/spaces_versioned_docs/version-v1.14/reference/_category_.json b/spaces_versioned_docs/version-v1.14/reference/_category_.json
new file mode 100644
index 000000000..4a6a139c4
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.14/reference/_category_.json
@@ -0,0 +1,5 @@
+{
+  "label": "Spaces API",
+  "position": 1,
+  "collapsed": true
+}
diff --git a/spaces_versioned_docs/version-v1.14/reference/index.md b/spaces_versioned_docs/version-v1.14/reference/index.md
new file mode 100644
index 000000000..5e68b0768
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.14/reference/index.md
@@ -0,0 +1,72 @@
+---
+title: Spaces API Reference
+description: Documentation for the Spaces API resources (v1.14)
+sidebar_position: 1
+---
+import CrdDocViewer from '@site/src/components/CrdViewer';
+
+
+This page documents the Custom Resource Definitions (CRDs) for the Spaces API.
+
+
+## Control Planes
+### Control Planes
+
+
+## Observability
+### Shared Telemetry Configs
+
+
+## `pkg`
+### Controller Revisions
+
+
+### Controller Runtime Configs
+
+
+### Controllers
+
+
+### Remote Configuration Revisions
+
+
+### Remote Configurations
+
+
+## Policy
+### Shared Upbound Policies
+
+
+## References
+### Referenced Objects
+
+
+## Scheduling
+### Environments
+
+
+## Secrets
+### Shared External Secrets
+
+
+### Shared Secret Stores
+
+
+## Simulations
+
+
+## Spaces Backups
+### Backups
+
+
+### Backup Schedules
+
+
+### Shared Backup Configs
+
+
+### Shared Backups
+
+
+### Shared Backup Schedules
+
diff --git a/spaces_versioned_docs/version-v1.15/concepts/_category_.json b/spaces_versioned_docs/version-v1.15/concepts/_category_.json
new file mode 100644
index 000000000..4b8667e29
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.15/concepts/_category_.json
@@ -0,0 +1,7 @@
+{
+  "label": "Concepts",
+  "position": 2,
+  "collapsed": true
+}
+
+
diff --git a/spaces_versioned_docs/version-v1.15/concepts/control-planes.md b/spaces_versioned_docs/version-v1.15/concepts/control-planes.md
new file mode 100644
index 000000000..7066343de
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.15/concepts/control-planes.md
@@ -0,0 +1,227 @@
+---
+title: Control Planes
+weight: 1
+description: An overview of control planes in Upbound
+---
+
+
+Control planes in Upbound are fully isolated Crossplane control plane instances that Upbound manages for you. This means Upbound manages:
+
+- the underlying lifecycle of infrastructure (compute, memory, and storage) required to power your instance.
+- scaling of the infrastructure.
+- the maintenance of the core Crossplane components that make up a control plane.
+
+This lets users focus on building their APIs and operating their control planes, while Upbound handles the rest. Each control plane has its own dedicated API server connecting users to their control plane.
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+).
+
+For detailed information about Control Plane API specifications and CRD fields, see the [Spaces API Reference](../../../reference/apis/spaces-api/).
+:::
+
+## Control plane architecture
+
+![Managed Control Plane Architecture](/img/mcp.png)
+
+Along with underlying infrastructure, Upbound manages the Crossplane system components. You don't need to manage the Crossplane API server or core resource controllers because Upbound manages your control plane lifecycle from creation to deletion.
+
+### Crossplane API
+
+Each control plane offers a unified endpoint. You interact with your control plane through Kubernetes and Crossplane API calls. Each control plane runs a Kubernetes API server to handle API requests. You can make API calls in the following ways:
+
+- Direct calls: HTTP/gRPC
+- Indirect calls: the up CLI, Kubernetes clients such as kubectl, or the Upbound Console.
+
+Like in Kubernetes, the API server is the hub for all communication for the control plane. All internal components such as system processes and provider controllers act as clients of the API server.
+
+Your API requests tell Crossplane your desired state for the resources your control plane manages. Crossplane attempts to constantly maintain that state. Crossplane lets you configure objects in the API either imperatively or declaratively.
+
+### Crossplane versions and features
+
+Upbound automatically upgrades Crossplane system components on control planes to new Crossplane versions for updated features and improvements in the open source project. With [automatic upgrades][automatic-upgrades], you choose the cadence at which Upbound automatically upgrades the system components in your control plane. You can also choose to manually upgrade your control plane to a different Crossplane version.
+
+For detailed information on versions and upgrades, refer to the [release notes][release-notes] and the automatic upgrade documentation. If you don't enroll a control plane in a release channel, Upbound doesn't apply automatic upgrades.
+
+Features considered "alpha" in Crossplane are by default not supported in a control plane unless otherwise specified.
+
+### Hosting environments
+
+Every control plane in Upbound belongs to a [control plane group][control-plane-group]. Control plane groups are a logical grouping of one or more control planes with shared objects (such as secrets or backup configuration). Every group resides in a [Space][space] in Upbound; Spaces are hosting environments for control planes.
+
+Think of a Space as being conceptually the same as an AWS, Azure, or GCP region. Regardless of the Space type you run a control plane in, the core experience is identical.
+
+## Management
+
+### Create a control plane
+
+You can create a new control plane from the Upbound Console, [up CLI][up-cli], or with Kubernetes clients such as `kubectl`.
+
+
+
+
+
+To use the CLI, run the following:
+
+```shell
+up ctp create <controlplane-name>
+```
+
+To learn more about control plane-related commands in `up`, go to the [CLI reference][cli-reference] documentation.
+
+
+
+You can create and manage control planes declaratively in Upbound. Before you
+begin, ensure you're logged into Upbound and set the correct context:
+
+```bash
+up login
+# Example: acmeco/upbound-gcp-us-west-1/default
+up ctx ${yourOrganization}/${yourSpace}/${yourGroup}
+```
+
+```yaml
+#controlplane-a.yaml
+apiVersion: spaces.upbound.io/v1beta1
+kind: ControlPlane
+metadata:
+  name: controlplane-a
+spec:
+  crossplane:
+    autoUpgrade:
+      channel: Rapid
+```
+
+```bash
+kubectl apply -f controlplane-a.yaml
+```
+
+
+
+
+
+### Connect directly to your control plane
+
+Each control plane offers a unified endpoint. You interact with your control plane through Kubernetes and Crossplane API calls. Each control plane runs a Kubernetes API server to handle API requests.
+
+You can connect to a control plane's API server directly via the up CLI. Use the [`up ctx`][up-ctx] command to set your kubeconfig's current context to a control plane:
+
+```shell
+# Example: acmeco/upbound-gcp-us-west-1/default/ctp1
+up ctx ${yourOrganization}/${yourSpace}/${yourGroup}/${yourControlPlane}
+```
+
+To disconnect from your control plane and revert your kubeconfig's current context to the previous entry, run the following:
+
+```shell
+up ctx ..
+```
+
+You can also generate a `kubeconfig` file for a control plane with [`up ctx -f`][up-ctx-f].
+
+```shell
+up ctx ${yourOrganization}/${yourSpace}/${yourGroup}/${yourControlPlane} -f - > ctp-kubeconfig.yaml
+```
+
+:::tip
+To learn more about how to use `up ctx` to navigate different contexts in Upbound, read the [CLI documentation][cli-documentation].
+:::
+
+## Configuration
+
+When you create a new control plane, Upbound provides you with a fully isolated instance of Crossplane. Configure your control plane by installing packages that extend its capabilities, such as the ability to create and manage the lifecycle of new types of infrastructure resources.
+
+You're encouraged to install any Crossplane package type (Providers, Configurations, Functions) available in the [Upbound Marketplace][upbound-marketplace] on your control planes.
+
+### Install packages
+
+Below are a couple of ways to install Crossplane packages on your control plane.
+
+
+
+
+
+
+Use the `up` CLI to install Crossplane packages from the [Upbound Marketplace][upbound-marketplace-1] on your control planes. Connect directly to your control plane via `up ctx`. Then, to install a provider:
+
+```shell
+up ctp provider install xpkg.upbound.io/upbound/provider-family-aws
+```
+
+To install a Configuration:
+
+```shell
+up ctp configuration install xpkg.upbound.io/upbound/platform-ref-aws
+```
+
+To install a Function:
+
+```shell
+up ctp function install xpkg.upbound.io/crossplane-contrib/function-kcl
+```
+
+
+You can use kubectl to directly apply any Crossplane manifest. Below is an example for installing a Crossplane provider:
+
+```yaml
+cat <<EOF | kubectl apply -f -
+apiVersion: pkg.crossplane.io/v1
+kind: Provider
+metadata:
+  name: provider-family-aws
+spec:
+  package: xpkg.upbound.io/upbound/provider-family-aws:<version>
+EOF
+```
+
+
+
+For production-grade scenarios, it's recommended you configure your control plane declaratively via Git plus a Continuous Delivery (CD) engine such as Argo. For guidance on this topic, read [GitOps with control planes][gitops-with-control-planes].
+
+
+
+
+
+
+### Configure Crossplane ProviderConfigs
+
+#### ProviderConfigs with OpenID Connect
+
+Use OpenID Connect (`OIDC`) to authenticate to Upbound control planes without credentials. OIDC lets your control plane exchange short-lived tokens directly with your cloud provider. Read how to [connect control planes to external services][connect-control-planes-to-external-services] to learn more.
+
+#### Generic ProviderConfigs
+
+The Upbound Console doesn't allow direct editing of ProviderConfigs that don't support `Upbound` authentication. To edit these ProviderConfigs on your control plane, connect to the control plane directly by following the instructions in the previous section and using `kubectl`.
+
+### Configure secrets
+
+Upbound gives users the ability to configure the synchronization of secrets from external stores into control planes. Configure this capability at the group-level, explained in the [Spaces documentation][spaces-documentation].
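+
+To give a sense of the shape of this configuration, below is a minimal sketch of a group-scoped `SharedSecretStore` that projects secrets from an external store into matching control planes. The provider block and field names under `spec` are illustrative assumptions; consult the [Spaces documentation][spaces-documentation] for the exact schema.
+
+```yaml
+# Illustrative sketch only: the spec.provider fields below are assumptions.
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: vault-store
+  namespace: default        # the control plane group
+spec:
+  # Select which control planes in the group receive this store.
+  controlPlaneSelector:
+    labelSelectors:
+      - matchLabels:
+          environment: production
+  provider:
+    vault:
+      server: https://vault.example.com   # hypothetical external store endpoint
+      path: secret
+```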
+
+### Configure backups
+
+Upbound gives users the ability to configure backup schedules, take impromptu backups, and conduct self-service restore operations. Configure this capability at the group-level, explained in the [Spaces documentation][spaces-documentation-1].
+
+### Configure telemetry
+
+
+Upbound gives users the ability to configure the collection of telemetry (logs, metrics, and traces) in their control planes. Using Upbound's built-in [OTEL][otel] support, you can stream this data out to your preferred observability solution. Configure this capability at the group-level, explained in the [Spaces documentation][spaces-documentation-2].
+
+
+
+[automatic-upgrades]: /spaces/howtos/auto-upgrade
+[release-notes]: https://github.com/upbound/universal-crossplane/releases
+[control-plane-group]: /spaces/concepts/groups
+[space]: /spaces/overview
+[up-cli]: /reference/cli-reference
+[cli-reference]: /reference/cli-reference
+[up-ctx]: /reference/cli-reference
+[up-ctx-f]: /reference/cli-reference
+[cli-documentation]: /manuals/cli/concepts/contexts
+[upbound-marketplace]: https://marketplace.upbound.io
+[upbound-marketplace-1]: https://marketplace.upbound.io
+[gitops-with-control-planes]: /spaces/howtos/cloud-spaces/gitops
+[connect-control-planes-to-external-services]: /manuals/platform/howtos/oidc
+[spaces-documentation]: /spaces/howtos/secrets-management
+[spaces-documentation-1]: /spaces/howtos/backup-and-restore
+[otel]: https://opentelemetry.io
+[spaces-documentation-2]: /spaces/howtos/observability
diff --git a/spaces_versioned_docs/version-v1.15/concepts/deployment-modes.md b/spaces_versioned_docs/version-v1.15/concepts/deployment-modes.md
new file mode 100644
index 000000000..f5e718f88
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.15/concepts/deployment-modes.md
@@ -0,0 +1,53 @@
+---
+title: Deployment Modes
+sidebar_position: 10
+description: An overview of deployment modes for Spaces
+---
+
+Upbound Spaces can be deployed and used in a variety of modes:
+
+- **Cloud Spaces:** Multi-tenant Upbound-hosted, Upbound-managed Space environment. Cloud Spaces provide a typical SaaS experience.
+- **[Dedicated Spaces][dedicated-spaces]:** Single-tenant Upbound-hosted, Upbound-managed Space environment. Dedicated Spaces provide a SaaS experience, with additional isolation guarantees that your workloads run in a fully isolated context.
+- **[Managed Spaces][managed-spaces]:** Single-tenant customer-hosted, Upbound-managed Space environment. Managed Spaces provide a SaaS-like experience, with additional guarantees of all hosting infrastructure being served from your own cloud account.
+- **[Self-Hosted Spaces][self-hosted-spaces]:** Single-tenant customer-hosted, customer-managed Space environment. This is a fully self-hosted, self-managed software experience for using Spaces. Upbound delivers the Spaces software and you run it yourself.
+
+The Upbound platform uses a federated model to connect each Space back to a
+central service called the [Upbound Console][console], which is deployed and
+managed by Upbound.
+
+By default, customers have access to a set of Cloud Spaces.
+
+## Supported clouds
+
+You can host Upbound Spaces on Amazon Web Services (AWS), Microsoft Azure,
+and Google Cloud Platform (GCP). Regardless of the hosting platform, you can use
+Spaces to deploy control planes that manage the lifecycle of your resources.
+
+## Supported regions
+
+The following tables list the cloud service provider regions supported by Upbound.
+
+### GCP
+
+| Region | Location |
+| --- | --- |
+| `us-west-1` | Western US (Oregon) |
+| `us-central-1` | Central US (Iowa) |
+| `eu-west-3` | Western Europe (Frankfurt) |
+
+### AWS
+
+| Region | Location |
+| --- | --- |
+| `us-east-1` | Eastern US (Northern Virginia) |
+
+### Azure
+
+| Region | Location |
+| --- | --- |
+| `us-east-1` | Eastern US (Virginia) |
+
+[dedicated-spaces]: /spaces/howtos/cloud-spaces/dedicated-spaces-deployment
+[managed-spaces]: /spaces/howtos/self-hosted/managed-spaces-deployment
+[self-hosted-spaces]: /spaces/howtos/self-hosted/self-hosted-spaces-deployment
+[console]: /manuals/console/upbound-console/
diff --git a/spaces_versioned_docs/version-v1.15/concepts/groups.md b/spaces_versioned_docs/version-v1.15/concepts/groups.md
new file mode 100644
index 000000000..d2ccacdb3
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.15/concepts/groups.md
@@ -0,0 +1,115 @@
+---
+title: Control Plane Groups
+sidebar_position: 2
+description: An introduction to Control Plane Groups in Upbound
+plan: "enterprise"
+---
+
+
+
+In Upbound, Control Plane Groups (or just 'groups') are a logical grouping of one or more control planes with shared resources like [secrets][secrets] or [backups][backups]. It's a mechanism for isolating these groups of resources within a single [Space][space]. All role-based access control in Upbound happens at the control plane group-level.
+
+## When to use multiple groups
+
+You should use groups in environments where there's a need to have Crossplane manage infrastructure across multiple cloud accounts or projects. For users who only need to deploy and manage resources in a couple of cloud accounts, you shouldn't need to think about groups at all.
+
+Groups are a way to divide access in Upbound between multiple teams. Think of a group as being analogous to a Kubernetes _namespace_.
+
+## The 'default' group
+
+Every Cloud Space in Upbound has a group named _default_ available.
+
+## Working with groups
+
+### View groups
+
+You can list groups in a Space using:
+
+```shell
+up group list
+```
+
+If you're operating in a single-tenant Space and have access to the underlying cluster, you can list namespaces that have the group label:
+
+```shell
+kubectl get namespaces -l spaces.upbound.io/group=true
+```
+
+### Set the group for a request
+
+Several commands in _up_ have a group context. To set the group for a request, use the `--group` flag:
+
+```shell
+up ctp list --group=team1
+```
+```shell
+up ctp create new-ctp --group=team2
+```
+
+### Set the group preference
+
+The _up_ CLI operates upon a single [Upbound context][upbound-context]. Whatever context gets set is then used as the preference for other commands. An Upbound context is capable of pointing at a variety of altitudes:
+
+1. A Space in Upbound
+2. A group within a Space
+3. A control plane within a group
+
+To set the group preference, use `up ctx` to choose a group as your preferred Upbound context.
For example:
+
+```shell
+# This sets the context for the up CLI to the default group in an Upbound-managed Cloud Space (gcp-us-west-1) for an organization called 'acmeco'
+up ctx acmeco/upbound-gcp-us-west-1/default/
+```
+
+### Create a group
+
+To create a group, login to Upbound and set your context to your desired Space:
+
+```shell
+up login
+up ctx '<organization>/<space>'
+# Example: up ctx acmeco/upbound-gcp-us-west-1
+```
+
+
+Create a group:
+
+```shell
+up group create my-new-group
+```
+
+### Delete a group
+
+To delete a group, login to Upbound and set your context to your desired Space:
+
+```shell
+up login
+up ctx '<organization>/<space>'
+# Example: up ctx acmeco/upbound-gcp-us-west-1
+```
+
+Delete a group:
+
+```shell
+up group delete my-new-group
+```
+
+### Protected groups
+
+Once a control plane gets created in a group, Upbound enforces a protection policy on the group to prevent accidental deletion. To delete a group that has control planes in it, you should first delete all control planes in the group.
+
+## Groups in the context of single-tenant Spaces
+
+Upbound offers a variety of deployment models to use the product. If you deploy your own single-tenant Upbound Space (whether connected or disconnected), you're self-hosting Upbound software in a Kubernetes cluster. In these environments, a control plane group maps to a corresponding namespace in the cluster which hosts the Space.
+
+Most Kubernetes clusters come with some set of predefined namespaces. Because a group maps to a corresponding Kubernetes namespace, every group must have a matching namespace in the cluster. When the Spaces software is newly installed, no groups exist. You _can_ elevate a Kubernetes namespace to become a group by doing the following:
+
+1. Creating a group with the same name as a preexisting Kubernetes namespace
+2. Creating a control plane in a preexisting Kubernetes namespace
+3. Labeling a Kubernetes namespace with the label `spaces.upbound.io/group=true`
+
+
+[secrets]: /spaces/howtos/secrets-management
+[backups]: /spaces/howtos/self-hosted/workload-id/backup-restore-config/
+[space]: /spaces/overview
+[upbound-context]: /manuals/cli/concepts/contexts
diff --git a/spaces_versioned_docs/version-v1.15/howtos/_category_.json b/spaces_versioned_docs/version-v1.15/howtos/_category_.json
new file mode 100644
index 000000000..d3a8547aa
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.15/howtos/_category_.json
@@ -0,0 +1,7 @@
+{
+  "label": "How-tos",
+  "position": 3,
+  "collapsed": true
+}
+
+
diff --git a/spaces_versioned_docs/version-v1.15/howtos/api-connector.md b/spaces_versioned_docs/version-v1.15/howtos/api-connector.md
new file mode 100644
index 000000000..a14468f52
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.15/howtos/api-connector.md
@@ -0,0 +1,413 @@
+---
+title: API Connector
+weight: 90
+description: Connect Kubernetes clusters to remote Crossplane control planes for resource synchronization
+aliases:
+  - /api-connector
+  - /concepts/api-connector
+---
+:::info API Version Information
+This guide covers API Connector, a preview feature available in all supported versions (v1.9-v1.14+).
+
+For related API specifications and available resources, see the [Spaces API Reference](../../../reference/apis/spaces-api/).
+:::
+
+:::warning
+API Connector is currently in **Preview**. The feature is under active
+development and subject to breaking changes. Use for testing and evaluation
+purposes only.
+:::
+
+API Connector enables seamless integration between Kubernetes application
+clusters consuming APIs and remote Crossplane control planes providing and
+reconciling APIs.
+
+You can use the API Connector to decouple where Crossplane is running (for
+example in an Upbound control plane), and where APIs are consumed
+(for example in an existing Kubernetes cluster). This gives you flexibility and
+consistency in your control plane operations.
+
+
+
+Unlike the [Control Plane Connector](ctp-connector.md), which offers only
+coarse-grained connectivity between app clusters and a control plane, API
+Connector offers fine-grained configuration of which APIs get offered, along with
+multi-cluster connectivity.
+
+## Architecture overview
+
+![API Connector Architecture](/img/api-connector.png)
+
+API Connector uses a **provider-consumer** model:
+
+- **Provider control plane**: The Upbound control plane that provides APIs and manages infrastructure.
+- **Consumer cluster**: Any Kubernetes cluster where its users want to use APIs provided by the provider control plane, without having to run Crossplane. API Connector gets installed in the consumer cluster and bidirectionally syncs API objects to the provider.
+
+### Key components
+
+**Custom Resource Definitions (CRDs)**:
+
+
+- `ClusterConnection`: Establishes a connection from the consumer to the provider cluster. Pulls bindable CRD APIs from the provider into the consumer cluster for use.
+
+- `ClusterAPIBinding`: Instructs API Connector to sync all API objects cluster-wide with a given API group to a given provider cluster.
+- `APIBinding`: Namespaced version of `ClusterAPIBinding`. Instructs API Connector to sync API objects within a given namespace and with a given API group to a given provider cluster.
+
+
+## Prerequisites
+
+Before using API Connector, ensure:
+
+1. **Consumer cluster** has network access to the provider control plane
+1. You have a license to use API Connector. If you are unsure, [contact Upbound][contact] or your sales representative.
+
+This guide walks through how to automate connecting your cluster to an Upbound
+control plane. You can also manually configure the API Connector.
+
+## Publishing APIs in the provider cluster
+
+
+
+
+First, log in to your provider control plane and choose which CRD APIs you want
+to make accessible to the consumer cluster's users. API Connector only syncs
+these "bindable" CRDs.
+
+
+
+
+
+
+Use the `up` CLI to login:
+
+```bash
+up login
+```
+
+Connect to your control plane:
+
+```bash
+up ctx <organization>/<space>/<group>/<controlplane>
+```
+
+Check what CRDs are available:
+
+```bash
+kubectl get crds
+```
+
+
+Label all CRDs you want to publish with the bindable label:
+
+
+```bash
+kubectl label crd <crd-name> 'connect.upbound.io/bindable'='true' --overwrite
+```
+
+
+
+
+Change context to the provider cluster:
+```bash
+kubectl config use-context <provider-cluster-context>
+```
+
+Check what CRDs are available:
+```bash
+kubectl get crds
+```
+
+
+Label all CRDs you want to publish with the bindable label:
+
+```bash
+kubectl label crd <crd-name> 'connect.upbound.io/bindable'='true' --overwrite
+```
+
+
+
+## Installation
+
+
+
+
+The up CLI provides the simplest installation method with automatic
+configuration:
+
+Make sure the current Kubeconfig context is set to the **provider control plane**:
+```bash
+up ctx <organization>/<space>/<group>/<controlplane>
+
+up controlplane api-connector install --consumer-kubeconfig <kubeconfig-path> [OPTIONS]
+```
+
+The command:
+1. Creates a Robot account (named `<name>`) in the Upbound Cloud organization `<organization>`,
+1. Gives the created robot account `admin` permissions to the provider control plane `<controlplane>`
+1. Generates a JWT token for the robot account, and stores it in a Kubernetes Secret in the consumer cluster.
+1. Installs the API Connector Helm chart in the consumer cluster.
+1. Creates a `ClusterConnection` object in the consumer cluster, referring to the newly generated Secret, so that API Connector can authenticate successfully to the provider control plane.
+1. API Connector pulls all published CRDs from the previous step into the consumer cluster.
+
+**Example**:
+```bash
+up controlplane api-connector install \
+  --consumer-kubeconfig ~/.kube/config \
+  --consumer-context my-cluster \
+  --upbound-token <token>
+```
+
+This command uses the provided token to authenticate with the **Provider control plane**
+and create a `ClusterConnection` resource in the **Consumer cluster** to connect to the
+**Provider control plane**.
+
+**Key Options**:
+- `--consumer-kubeconfig`: Path to consumer cluster kubeconfig (required)
+- `--consumer-context`: Context name for consumer cluster (required)
+- `--name`: Custom name for connection resources (optional)
+- `--upbound-token`: API token for authentication (optional)
+- `--upgrade`: Upgrade existing installation (optional)
+- `--version`: Specific version to install (optional)
+
+
+
+
+For manual installation or custom configurations:
+
+```bash
+helm upgrade --install api-connector oci://xpkg.upbound.io/spaces-artifacts/api-connector \
+  --namespace upbound-system \
+  --create-namespace \
+  --version <version> \
+  --set consumerClusterDisplayName=<consumer-cluster-name>
+```
+
+### Authentication methods
+
+API Connector supports two authentication methods:
+
+
+
+
+For Upbound Spaces integration:
+
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  name: spaces-secret
+  namespace: upbound-system
+type: Opaque
+stringData:
+  token: <upbound-api-token>
+  organization: <organization>
+  spacesBaseURL: <spaces-base-url>
+  controlPlaneGroupName: <group>
+  controlPlaneName: <controlplane>
+```
+
+
+
+For direct cluster access:
+
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  name: provider-kubeconfig
+  namespace: upbound-system
+type: Opaque
+data:
+  kubeconfig: <base64-encoded-kubeconfig>
+```
+
+
+
+
+### Connection setup
+
+Create a `ClusterConnection` to establish connectivity:
+
+
+
+
+```yaml
+apiVersion: connect.upbound.io/v1alpha1
+kind: ClusterConnection
+metadata:
+  name: spaces-connection
+  namespace: upbound-system
+spec:
+  secretRef:
+    kind: UpboundRobotToken
+    name: spaces-secret
+    namespace: upbound-system
+  crdManagement:
+    pullBehavior: Pull
+```
+
+
+
+
+```yaml
+apiVersion: connect.upbound.io/v1alpha1
+kind: ClusterConnection
+metadata:
+  name: provider-connection
+  namespace: upbound-system
+spec:
+  secretRef:
+    kind: KubeConfig
+    name: provider-kubeconfig
+    namespace: upbound-system
+  crdManagement:
+    pullBehavior: Pull
+```
+
+
+
+
+
+
+
+### Configuration
+
+Bind APIs to make them available in your consumer cluster:
+
+```yaml
+apiVersion: connect.upbound.io/v1alpha1
+kind: ClusterAPIBinding
+metadata:
+  name: <resource.group>
+spec:
+  connectionRef:
+    kind: ClusterConnection
+    name: <connection-name> # Or --name value
+```
+
+
+
+
+The `ClusterAPIBinding` name must match the **Resource.Group** (name of the CustomResourceDefinition) of the CRD you want to bind.
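+
+For namespace-scoped syncing, the namespaced `APIBinding` kind described earlier follows the same pattern. Below is a minimal sketch, assuming its `connectionRef` fields mirror `ClusterAPIBinding`; the namespace and API group shown are illustrative:
+
+```yaml
+# Sketch: syncs objects of the bound API group only within this namespace.
+apiVersion: connect.upbound.io/v1alpha1
+kind: APIBinding
+metadata:
+  name: nopresources.nop.example.org   # must match the CRD name (Resource.Group)
+  namespace: default
+spec:
+  connectionRef:
+    kind: ClusterConnection
+    name: spaces-connection
+```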
+
+
+## Usage example
+
+After configuration, you can create API objects (in the consumer cluster) that
+will be synchronized to the provider cluster:
+
+```yaml
+apiVersion: nop.example.org/v1alpha1
+kind: NopResource
+metadata:
+  name: my-resource
+  namespace: default
+spec:
+  coolField: "Synchronized resource"
+  compositeDeletePolicy: Foreground
+```
+
+Verify the resource status:
+
+```bash
+kubectl get nopresource my-resource -o yaml
+
+```
+When the `APIBound=True` condition is present, it means that the API object has
+been synced to the provider cluster, and is being reconciled there. Whenever the
+API object in the provider cluster gets status updates (for example
+`Ready=True`), that status is synced back to the consumer cluster.
+
+Switch contexts to the provider cluster to see the API object being created:
+
+```bash
+up ctx <organization>/<space>/<group>/<controlplane>
+# or kubectl config use-context <provider-cluster-context>
+```
+
+```bash
+kubectl get nopresource my-resource -o yaml
+```
+
+Note that in the provider cluster, the API object is labeled with information on
+where the API object originates from, and `connect.upbound.io/managed=true`.
+
+## Monitoring and troubleshooting
+
+### Check connection status
+
+```bash
+kubectl get clusterconnection
+```
+
+Expected output:
+```
+NAME                STATUS   MESSAGE
+spaces-connection   Ready    Provider controlplane is available
+```
+
+### View available APIs
+
+```bash
+kubectl get clusterconnection spaces-connection -o jsonpath='{.status.offeredAPIs[*].name}'
+```
+
+### Check API binding status
+
+```bash
+kubectl get clusterapibinding
+```
+
+### Debug resource synchronization
+
+```bash
+kubectl describe <resource-kind> <resource-name>
+```
+
+## Removal
+
+### Using the up CLI
+
+```bash
+up controlplane api-connector uninstall \
+  --consumer-kubeconfig ~/.kube/config \
+  --all
+```
+
+The `--all` flag removes all resources including connections and secrets.
+Without the flag, only the runtime resources are removed; connections and
+secrets remain.
+
+:::note
+Uninstall doesn't remove any API objects in the provider control plane. If you
+want to clean up all API objects there, delete all API objects from the consumer
+cluster before API Connector uninstallation, and wait for the objects to get
+deleted.
+:::
+
+
+### Using Helm
+
+```bash
+helm uninstall api-connector -n upbound-system
+```
+
+## Limitations
+
+- **Preview feature**: Subject to breaking changes. Not yet production grade.
+- **CRD updates**: CRDs are pulled once but not automatically updated. If multiple Crossplane clusters offer the same CRD API, API changes must be synchronized out of band, for example using a [Crossplane Configuration](https://docs.crossplane.io/latest/packages/).
+- **Network requirements**: Consumer cluster must have direct network access to provider cluster.
+- **Wide permissions needed in consumer cluster**: Because API Connector doesn't know up front the names of the APIs it needs to reconcile, it currently runs with full "root" privileges in the consumer cluster.
+
+- **Connector polling**: API Connector checks for drift between the consumer and provider cluster
+  periodically through polling. The poll interval can be changed with the `pollInterval` Helm value.
+
+
+## Advanced configuration
+
+### Multiple connections
+
+You can connect to multiple provider clusters simultaneously by creating multiple `ClusterConnection` resources with different names and configurations.
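+
+As a sketch, two connections to different provider clusters can reuse the secret kinds shown above; the names below are illustrative:
+
+```yaml
+apiVersion: connect.upbound.io/v1alpha1
+kind: ClusterConnection
+metadata:
+  name: staging-connection
+  namespace: upbound-system
+spec:
+  secretRef:
+    kind: UpboundRobotToken      # Upbound Spaces authentication
+    name: staging-spaces-secret
+    namespace: upbound-system
+---
+apiVersion: connect.upbound.io/v1alpha1
+kind: ClusterConnection
+metadata:
+  name: prod-connection
+  namespace: upbound-system
+spec:
+  secretRef:
+    kind: KubeConfig             # direct cluster access
+    name: prod-kubeconfig
+    namespace: upbound-system
+---
+# Each binding then selects its target provider via connectionRef.
+apiVersion: connect.upbound.io/v1alpha1
+kind: ClusterAPIBinding
+metadata:
+  name: nopresources.nop.example.org
+spec:
+  connectionRef:
+    kind: ClusterConnection
+    name: staging-connection
+```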
+
+[contact]: https://www.upbound.io/contact-us
diff --git a/spaces_versioned_docs/version-v1.15/howtos/auto-upgrade.md b/spaces_versioned_docs/version-v1.15/howtos/auto-upgrade.md
new file mode 100644
index 000000000..249056fb4
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.15/howtos/auto-upgrade.md
@@ -0,0 +1,131 @@
+---
+title: Automatically upgrade control planes
+sidebar_position: 50
+description: How to configure automatic upgrades of Crossplane in a control plane
+plan: "standard"
+---
+
+
+
+Upbound Spaces can automatically upgrade the version of Upbound Crossplane in managed control plane instances. You can edit the `spec.crossplane.autoUpgrade` field in your `ControlPlane` specification with the available release channels below.
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+). The `spec.crossplane.autoUpgrade` field has been available since v1.9.
+
+For ControlPlane API specifications, see the [Spaces API Reference](../../../reference/apis/spaces-api/).
+:::
+
+| Channel | Description | Example |
+|------------|-----------------------------------|----------------------------------------------------------------------|
+| **None** | Disables auto upgrades. | _Uses version specified in `spec.crossplane.version`._ |
+| **Patch** | Upgrades to the latest supported patch release. | _Control plane version 1.12.2-up.2 auto upgrades to 1.12.3-up.1 upon release._ |
+| **Stable** | Default setting. Upgrades to the latest supported patch release on minor version _N-1_ where N is the latest supported minor version. | _If the latest supported minor version is 1.14, auto upgrades to the latest patch release of 1.13, for example 1.13.2-up.3._ |
+| **Rapid** | Upgrades to the latest supported patch release on the latest supported minor version. | _If the latest supported minor version is 1.14, auto upgrades to the latest patch release of 1.14, for example 1.14.5-up.1._ |
+
+
+:::warning
+
+The `Rapid` channel is only recommended for users willing to accept the risk of new features and potentially breaking changes.
+
+:::
+
+## Examples
+
+The specs below are examples of how to edit the `autoUpgrade` channel in your `ControlPlane` specification.
+
+To run a control plane with the `Rapid` auto upgrade channel, your spec should look like this:
+
+```yaml
+apiVersion: spaces.upbound.io/v1beta1
+kind: ControlPlane
+metadata:
+  name: example-ctp
+spec:
+  crossplane:
+    autoUpgrade:
+      channel: Rapid
+  writeConnectionSecretToRef:
+    name: kubeconfig-example-ctp
+```
+
+To run a control plane with a pinned version of Crossplane, specify it in the `version` field:
+
+```yaml
+apiVersion: spaces.upbound.io/v1beta1
+kind: ControlPlane
+metadata:
+  name: example-ctp
+spec:
+  crossplane:
+    version: 1.14.3-up.1
+    autoUpgrade:
+      channel: None
+  writeConnectionSecretToRef:
+    name: kubeconfig-example-ctp
+```
+
+## Supported Crossplane versions
+
+Spaces supports the three most recent [minor versions][preceding-minor-versions] of Crossplane. For example, if the last supported minor version is `1.14`, minor versions `1.13` and `1.12` are also supported. Versions older than the three most recent minor versions aren't supported. Only supported Crossplane versions are valid specifications for new control planes.
+
+Current Crossplane version support by Spaces version:
+
+| Spaces Version | Crossplane Version Min | Crossplane Version Max |
+|:--------------:|:----------------------:|:----------------------:|
+| 1.2 | 1.13 | 1.15 |
+| 1.3 | 1.13 | 1.15 |
+| 1.4 | 1.14 | 1.16 |
+| 1.5 | 1.14 | 1.16 |
+| 1.6 | 1.14 | 1.16 |
+| 1.7 | 1.14 | 1.16 |
+| 1.8 | 1.15 | 1.17 |
+| 1.9 | 1.16 | 1.18 |
+| 1.10 | 1.16 | 1.18 |
+| 1.11 | 1.16 | 1.18 |
+| 1.12 | 1.17 | 1.19 |
+
+
+Upbound offers extended support for all installed Crossplane versions released within a 12 month window since the last Spaces release. Contact your Upbound sales representative for more information on version support.
+
+
+:::warning
+
+If the auto upgrade channel is `Stable` or `Rapid`, the Crossplane version will always stay within the support window after auto upgrade. If set to `Patch` or `None`, the minor version may be outside the support window. You are responsible for upgrading to a supported version.
+
+:::
+
+To view the support status of a control plane instance, use `kubectl get ctp`.
+
+```bash
+kubectl get ctp
+NAME          CROSSPLANE VERSION   SUPPORTED   READY   MESSAGE   AGE
+example-ctp   1.13.2-up.3          True        True              31m
+
+```
+
+Unsupported versions return `SUPPORTED: False`.
+
+```bash
+kubectl get ctp
+NAME          CROSSPLANE VERSION   SUPPORTED   READY   MESSAGE   AGE
+example-ctp   1.11.5-up.1          False       True              31m
+
+```
+
+For more detail, use the `-o yaml` flag.
+
+```bash
+kubectl get controlplanes.spaces.upbound.io example-ctp -o yaml
+status:
+  conditions:
+  ...
+  - lastTransitionTime: "2024-01-23T06:36:10Z"
+    message: Crossplane version 1.11.5-up.1 is outside of the support window.
+      Oldest supported minor version is 1.12.
+    reason: UnsupportedCrossplaneVersion
+    status: "False"
+    type: Supported
+```
+
+
+[preceding-minor-versions]: /reference/usage/lifecycle/#maintenance-and-updates
diff --git a/spaces_versioned_docs/version-v1.15/howtos/automation-and-gitops/_category_.json b/spaces_versioned_docs/version-v1.15/howtos/automation-and-gitops/_category_.json
new file mode 100644
index 000000000..b65481af6
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.15/howtos/automation-and-gitops/_category_.json
@@ -0,0 +1,8 @@
+{
+  "label": "Automation & GitOps",
+  "position": 11,
+  "collapsed": true,
+  "customProps": {
+    "plan": "business"
+  }
+}
diff --git a/spaces_versioned_docs/version-v1.15/howtos/automation-and-gitops/overview.md b/spaces_versioned_docs/version-v1.15/howtos/automation-and-gitops/overview.md
new file mode 100644
index 000000000..57eeb15fc
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.15/howtos/automation-and-gitops/overview.md
@@ -0,0 +1,138 @@
+---
+title: Automation and GitOps Overview
+sidebar_label: Overview
+sidebar_position: 1
+description: Guide to automating control plane deployments with GitOps and Argo CD
+plan: "business"
+---
+
+Automating control plane deployments with GitOps enables declarative, version-controlled infrastructure management. This section covers integrating GitOps workflows with Upbound control planes using Argo CD and related tools.
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+). GitOps patterns are compatible across all versions of Upbound Spaces.
+
+For details on deployment models and their differences, see the [Deployment Modes](../../concepts/deployment-modes.md) concept guide.
+:::
+
+## What is GitOps?
+
+GitOps is an approach for managing infrastructure by:
+- **Declaratively describing** desired system state in Git
+- **Using controllers** to continuously reconcile actual state with desired state
+- **Treating Git as the source of truth** for all configuration and deployments
+
+Upbound control planes are fully compatible with GitOps patterns and we strongly recommend integrating GitOps in the platforms you build on Upbound.
+
+## Key Concepts
+
+### Argo CD
+[Argo CD](https://argo-cd.readthedocs.io/) is a popular Kubernetes-native GitOps controller. It continuously monitors Git repositories and automatically applies changes to your infrastructure when commits are detected.
+
+### Deployment Models
+
+The way you configure GitOps depends on your deployment model:
+
+| Aspect | Cloud Spaces | Self-Hosted Spaces |
+|--------|--------------|-------------------|
+| **Access Method** | Upbound API with tokens | Kubernetes native (secrets/kubeconfig) |
+| **Configuration** | Kubeconfig via `up` CLI | Control plane connection secrets |
+| **Setup Complexity** | More involved (API integration) | Simpler (native Kubernetes) |
+| **Typical Use Case** | Managing Upbound resources | Managing workloads on control planes |
+
+## Getting Started
+
+**Choose your path based on your deployment model:**
+
+### Cloud Spaces
+If you're using Upbound Cloud Spaces (Dedicated or Managed):
+1. Start with [GitOps with Upbound Control Planes](../cloud-spaces/gitops-on-upbound.md)
+2. Learn how to integrate Argo CD with Cloud Spaces
+3. Manage both control plane infrastructure and Upbound resources declaratively
+
+### Self-Hosted Spaces
+If you're running self-hosted Spaces:
+1. Start with [GitOps with ArgoCD in Self-Hosted Spaces](../self-hosted/gitops-with-argocd.md)
+2. Learn how to configure control plane connection secrets
+3. Manage workloads deployed to your control planes
+
+## Common Workflows
+
+### Workflow 1: Managing Control Planes with GitOps
+Create and manage control planes themselves declaratively using provider-kubernetes:
+
+```yaml
+apiVersion: kubernetes.crossplane.io/v1alpha2
+kind: Object
+metadata:
+  name: my-controlplane
+spec:
+  forProvider:
+    manifest:
+      apiVersion: spaces.upbound.io/v1beta1
+      kind: ControlPlane
+      # ... control plane configuration
+```
+
+### Workflow 2: Managing Workloads on Control Planes
+Deploy applications and resources to control planes using standard Kubernetes GitOps patterns:
+
+```yaml
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: my-app
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: my-app
+  namespace: my-app
+# ... deployment configuration
+```
+
+### Workflow 3: Managing Upbound Resources
+Use provider-upbound to manage Upbound IAM and repository resources:
+
+- Teams
+- Robots and their team memberships
+- Repositories and permissions
+
+## Advanced Topics
+
+### Argo CD Plugin for Upbound
+Learn more in the [ArgoCD Plugin guide](../self-hosted/use-argo.md) for enhanced integration with self-hosted Spaces.
+
+### Declarative Control Plane Creation
+See [Declaratively create control planes](../self-hosted/declarative-ctps.md) for advanced automation patterns.
+
+### Consuming Control Plane APIs
+Understand how to [consume control plane APIs in your app cluster](../mcp-connector-guide.md) with Argo CD.
+
+## Prerequisites
+
+Before implementing GitOps with control planes, ensure you have:
+
+**For Cloud Spaces:**
+- Access to Upbound Cloud Spaces
+- `up` CLI installed and configured
+- API token with appropriate permissions
+- Argo CD or similar GitOps controller running
+- Familiarity with Kubernetes RBAC
+
+**For Self-Hosted Spaces:**
+- Self-hosted Spaces deployed and running
+- Argo CD deployed in your infrastructure
+- Kubectl access to the cluster hosting Spaces
+- Understanding of control plane architecture
+
+## Next Steps
+
+1. **Choose your deployment model** above
+2. **Review the relevant getting started guide**
3. **Set up your GitOps controller** (Argo CD)
+4. **Deploy your first automated control plane**
+5. **Explore advanced topics** as needed
+
+:::tip
+Start with simple deployments to test your GitOps workflow before moving to production. Use [simulations](../simulations.md) to preview changes before applying them.
+:::
diff --git a/spaces_versioned_docs/version-v1.15/howtos/backup-and-restore.md b/spaces_versioned_docs/version-v1.15/howtos/backup-and-restore.md
new file mode 100644
index 000000000..3b8d026cb
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.15/howtos/backup-and-restore.md
@@ -0,0 +1,530 @@
+---
+title: Backup and restore
+sidebar_position: 13
+description: Configure and manage backups in your Upbound Space.
+plan: "enterprise"
+---
+
+
+
+Upbound's _Shared Backups_ is a built-in backup and restore feature. Shared Backups lets you configure automatic schedules for taking snapshots of your control planes. You can restore data from these backups by making new control planes. This guide explains how to use Shared Backups for disaster recovery or upgrade scenarios.
+
+:::info API Version Information & Available Versions
+This guide applies to **all supported versions** (v1.9-v1.15+).
+
+**Select your API version**:
+- [Spaces API v1.15 (Latest)](../../../next/reference/apis/spaces-api/)
+- [Spaces API v1.14](../../../1.14/reference/apis/spaces-api/)
+- [Spaces API v1.13](../../../1.13/reference/apis/spaces-api/)
+- [Spaces API v1.12](../../../1.12/reference/apis/spaces-api/)
+- [Spaces API v1.11](../../../1.11/reference/apis/spaces-api/)
+- [Spaces API v1.10](../../../1.10/reference/apis/spaces-api/)
+- [Spaces API v1.9](../../../1.9/reference/apis/spaces-api/)
+:::
+
+## Benefits
+
+The Shared Backups feature provides the following benefits:
+
+* Automatic backups for control planes without any operational overhead
+* Backup schedules for multiple control planes in a group
+* Shared Backups are available across all hosting environments of Upbound (Disconnected, Connected or Cloud Spaces)
+
+
+## Configure a Shared Backup Config
+
+
+[SharedBackupConfig][sharedbackupconfig] is a [group-scoped][group-scoped] resource. You should create it in a group containing one or more control planes. This resource configures the storage details and provider. Whenever a backup executes (either by schedule or manually initiated), it references a SharedBackupConfig to tell it where to store the snapshot.
+
+
+### Backup config provider
+
+
+The `spec.objectStorage.provider` and `spec.objectStorage.config` fields configure:
+
+* The object storage provider
+* The path to the provider
+* The credentials needed to communicate with the provider
+
+You can only set one provider. Upbound currently supports AWS, Azure, and GCP as providers.
+
+
+`spec.objectStorage.config` is a freeform map of configuration options for the object storage provider. See [Thanos object storage][thanos-object-storage] for more information on the formats for each supported cloud provider. `spec.bucket` and `spec.provider` override the required values in the config.
+
+
+
+#### AWS as a storage provider
+
+:::important
+For Cloud Spaces, static credentials are currently the only supported auth method.
+:::
+
+This example demonstrates how to use AWS as a storage provider for your backups:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackupConfig
+metadata:
+  name: default
+  namespace: default
+spec:
+  objectStorage:
+    provider: AWS
+    bucket: spaces-backup-bucket
+    config:
+      endpoint: s3.eu-west-2.amazonaws.com
+      region: eu-west-2
+    credentials:
+      source: Secret
+      secretRef:
+        name: bucket-creds
+        key: creds
+```
+
+
+This example assumes you've already created an S3 bucket called "spaces-backup-bucket" in the AWS `eu-west-2` region. The account credentials to access the bucket should exist in a secret of the same namespace as the Shared Backup Config.
+
+#### Azure as a storage provider
+
+:::important
+For Cloud Spaces, static credentials are currently the only supported auth method.
+:::
+
+This example demonstrates how to use Azure as a storage provider for your backups:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackupConfig
+metadata:
+  name: default
+  namespace: default
+spec:
+  objectStorage:
+    provider: Azure
+    bucket: upbound-backups
+    config:
+      storage_account: upbackupstore
+      container: upbound-backups
+      endpoint: blob.core.windows.net
+    credentials:
+      source: Secret
+      secretRef:
+        name: bucket-creds
+        key: creds
+```
+
+
+This example assumes you've already created an Azure storage account called `upbackupstore` and blob `upbound-backups`. The storage account key to access the blob should exist in a secret of the same namespace as the Shared Backup Config.
+
+
+#### GCP as a storage provider
+
+:::important
+For Cloud Spaces, static credentials are currently the only supported auth method.
+:::
+
+This example demonstrates how to use Google Cloud Storage as a storage provider for your backups:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackupConfig
+metadata:
+  name: default
+  namespace: default
+spec:
+  objectStorage:
+    provider: GCP
+    bucket: spaces-backup-bucket
+    credentials:
+      source: Secret
+      secretRef:
+        name: bucket-creds
+        key: creds
+```
+
+
+This example assumes you've already created a Cloud Storage bucket called "spaces-backup-bucket" and a service account with access to this bucket. The key file should exist in a secret of the same namespace as the Shared Backup Config.
+
+
+## Configure a Shared Backup Schedule
+
+
+[SharedBackupSchedule][sharedbackupschedule] is a [group-scoped][group-scoped-1] resource. You should create it in a group containing one or more control planes. This resource defines a backup schedule for control planes within its corresponding group.
+
+Below is an example of a Shared Backup Schedule that takes backups every day of all control planes having `environment: production` labels:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackupSchedule
+metadata:
+  name: daily-schedule
+  namespace: default
+spec:
+  schedule: "@daily"
+  configRef:
+    kind: SharedBackupConfig
+    name: default
+  controlPlaneSelector:
+    labelSelectors:
+      - matchLabels:
+          environment: production
+```
+
+### Define a schedule
+
+The `spec.schedule` field is a [Cron-formatted][cron-formatted] string. Some common examples are below:
+
+
+| Entry | Description |
+| ----------------- | ------------------------------------------------------------------------------------------------- |
+| `@hourly` | Run once an hour. |
+| `@daily` | Run once a day. |
+| `@weekly` | Run once a week. |
+| `0 0/4 * * *` | Run every 4 hours. |
+| `0/15 * * * 1-5` | Run every fifteenth minute on Monday through Friday. |
+| `@every 1h30m10s` | Run every 1 hour, 30 minutes, and 10 seconds. Hour is the largest measurement of time for @every. |
+
+
+### Exclude resources from the backup
+
+The `spec.excludedResources` field is an array of resource names to exclude from each backup.
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackupSchedule
+metadata:
+  name: daily-schedule
+spec:
+  excludedResources:
+    - "xclusters.aws.platformref.upbound.io"
+    - "xdatabase.aws.platformref.upbound.io"
+    - "xrolepolicyattachment.iam.aws.crossplane.io"
+```
+
+:::warning
+You must specify resource names in lowercase "resource.group" format (for example, `xclusters.aws.platformref.upbound.io`). Using only the resource kind (for example, `XCluster`) isn't supported.
+:::
+
+### Suspend a schedule
+
+Use the `spec.suspend` field to suspend the schedule. A suspended schedule creates no new backups, but allows running backups to complete.
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackupSchedule
+metadata:
+  name: daily-schedule
+spec:
+  suspend: true
+```
+
+### Set the time to live
+
+Set the `spec.ttl` field to define the time to live for the backup. After this time, the backup is eligible for garbage collection. If this field isn't set, the backup isn't garbage collected. The time to live is a duration, for example, `168h` for 7 days.
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackupSchedule
+metadata:
+  name: daily-schedule
+spec:
+  ttl: 168h # Backup is garbage collected after 7 days
+```
+:::tip
+By default, this setting doesn't delete uploaded files. Review the next section to define
+the deletion policy.
+:::
+
+### Define the deletion policy
+
+Set the `spec.deletionPolicy` to define backup deletion actions, including the
+deletion of the backup file from the bucket. The deletion policy value defaults
+to `Orphan`. Set it to `Delete` to remove uploaded files in the bucket. For more
+information on the backup and restore process, review the [Spaces API
+documentation][spaces-api-documentation].
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackupSchedule
+metadata:
+  name: daily-schedule
+spec:
+  ttl: 168h # Backup is garbage collected after 7 days
+  deletionPolicy: Delete # Defaults to Orphan
+```
+
+### Garbage collect backups when the schedule gets deleted
+
+Set `spec.useOwnerReferencesInBackup` to `true` to garbage collect associated backups when the shared schedule gets deleted.
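+
+For example, the following schedule's backups are garbage collected along with the schedule itself; this sketch combines the field with the schedule shape shown earlier:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackupSchedule
+metadata:
+  name: daily-schedule
+spec:
+  schedule: "@daily"
+  useOwnerReferencesInBackup: true # backups are deleted when this schedule is deleted
+```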
+
+### Control plane selection
+
+To configure which control planes in a group you want to create a backup schedule for, use the `spec.controlPlaneSelector` field. You can either use `labelSelectors` or the `names` of a control plane directly. A control plane matches if any of the label selectors match.
+
+This example matches all control planes in the group that have `environment: production` as a label:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackupSchedule
+metadata:
+  name: my-backup-schedule
+spec:
+  controlPlaneSelector:
+    labelSelectors:
+      - matchLabels:
+          environment: production
+```
+
+You can use the more complex `matchExpressions` to match labels based on an expression. This example matches control planes that have label `environment: production` or `environment: staging`:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackupSchedule
+metadata:
+  name: my-backup-schedule
+spec:
+  controlPlaneSelector:
+    labelSelectors:
+      - matchExpressions:
+          - { key: environment, operator: In, values: [production,staging] }
+```
+
+You can also specify the names of control planes directly:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackupSchedule
+metadata:
+  name: my-backup-schedule
+spec:
+  controlPlaneSelector:
+    names:
+      - controlplane-dev
+      - controlplane-staging
+      - controlplane-prod
+```
+
+
+## Configure a Shared Backup
+
+
+
+[SharedBackup][sharedbackup] is a [group-scoped][group-scoped-2] resource. You should create it in a group containing one or more control planes. This resource causes backups to occur for control planes within its corresponding group.
+
+Below is an example of a Shared Backup that takes a backup of all control planes having `environment: production` labels:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackup
+metadata:
+  name: my-backup
+  namespace: default
+spec:
+  configRef:
+    kind: SharedBackupConfig
+    name: default
+  controlPlaneSelector:
+    labelSelectors:
+      - matchLabels:
+          environment: production
+```
+
+### Exclude resources from the backup
+
+The `spec.excludedResources` field is an array of resource names to exclude from each backup.
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackup
+metadata:
+  name: my-backup
+spec:
+  excludedResources:
+    - "xclusters.aws.platformref.upbound.io"
+    - "xdatabase.aws.platformref.upbound.io"
+    - "xrolepolicyattachment.iam.aws.crossplane.io"
+```
+
+:::warning
+You must specify resource names in lowercase "resource.group" format (for example, `xclusters.aws.platformref.upbound.io`). Using only the resource kind (for example, `XCluster`) isn't supported.
+:::
+
+### Set the time to live
+
+Set the `spec.ttl` field to define the time to live for the backup. After this time, the backup is eligible for garbage collection. If this field isn't set, the backup isn't garbage collected. The time to live is a duration, for example, `168h` for 7 days.
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackup
+metadata:
+  name: my-backup
+spec:
+  ttl: 168h # Backup is garbage collected after 7 days
+```
+
+
+### Garbage collect backups on Shared Backup deletion
+
+
+
+Set `spec.useOwnerReferencesInBackup` to define whether to garbage collect associated backups when a shared backup gets deleted. If set to `true`, backups are garbage collected when the shared backup gets deleted.
+
+### Control plane selection
+
+To configure which control planes in a group you want to create a backup for, use the `spec.controlPlaneSelector` field. You can either use `labelSelectors` or the `names` of a control plane directly. A control plane matches if any of the label selectors match.
+
+This example matches all control planes in the group that have `environment: production` as a label:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackup
+metadata:
+  name: my-backup
+spec:
+  controlPlaneSelector:
+    labelSelectors:
+      - matchLabels:
+          environment: production
+```
+
+You can use the more complex `matchExpressions` to match labels based on an expression. This example matches control planes that have label `environment: production` or `environment: staging`:

+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackup
+metadata:
+  name: my-backup
+spec:
+  controlPlaneSelector:
+    labelSelectors:
+      - matchExpressions:
+          - { key: environment, operator: In, values: [production,staging] }
+```
+
+You can also specify the names of control planes directly:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackup
+metadata:
+  name: my-backup
+spec:
+  controlPlaneSelector:
+    names:
+      - controlplane-dev
+      - controlplane-staging
+      - controlplane-prod
+```
+
+## Create a manual backup
+
+[Backup][backup] is a [group-scoped][group-scoped-3] resource that causes a single backup to occur for a control plane in its corresponding group.
+
+Below is an example of a manual Backup of a control plane:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: Backup
+metadata:
+  name: my-backup
+  namespace: default
+spec:
+  configRef:
+    kind: SharedBackupConfig
+    name: default
+  controlPlane: my-awesome-ctp
+  deletionPolicy: Delete
+```
+
+The backup specification's `deletionPolicy` field defines backup deletion actions,
+including the deletion of the backup file from the bucket. The value
+defaults to `Orphan`. Set it to `Delete` to remove uploaded files
+in the bucket.
+For more information on the backup and restore process, review the [Spaces API documentation][spaces-api-documentation-1].
+
+
+### Choose a control plane to backup
+
+The `spec.controlPlane` field defines which control plane to execute a backup against.
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: Backup
+metadata:
+  name: my-backup
+  namespace: default
+spec:
+  controlPlane: my-awesome-ctp
+```
+
+If the control plane doesn't exist, the backup fails after multiple failed retry attempts.
+
+### Exclude resources from the backup
+
+The `spec.excludedResources` field is an array of resource names to exclude from the manual backup.
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: Backup
+metadata:
+  name: my-backup
+spec:
+  excludedResources:
+    - "xclusters.aws.platformref.upbound.io"
+    - "xdatabase.aws.platformref.upbound.io"
+    - "xrolepolicyattachment.iam.aws.crossplane.io"
+```
+
+:::warning
+You must specify resource names in lowercase "resource.group" format (for example, `xclusters.aws.platformref.upbound.io`). Using only the resource kind (for example, `XCluster`) isn't supported.
+:::
+
+### Set the time to live
+
+Set the `spec.ttl` field to define the time to live for the backup. After this time, the backup is eligible for garbage collection. If this field isn't set, the backup isn't garbage collected. The time to live is a duration, for example, `168h` for 7 days.
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: Backup
+metadata:
+  name: my-backup
+spec:
+  ttl: 168h # Backup is garbage collected after 7 days
+```
+
+## Restore a control plane from a backup
+
+You can restore a control plane's state from a backup. Below is an example of creating a new control plane from a previous backup called `restore-me`:
+
+
+```yaml
+apiVersion: spaces.upbound.io/v1beta1
+kind: ControlPlane
+metadata:
+  name: my-awesome-restored-ctp
+  namespace: default
+spec:
+  restore:
+    source:
+      kind: Backup
+      name: restore-me
+```
+
+
+[group-scoped]: /spaces/concepts/groups
+[group-scoped-1]: /spaces/concepts/groups
+[group-scoped-2]: /spaces/concepts/groups
+[group-scoped-3]: /spaces/concepts/groups
+[sharedbackupconfig]: /reference/apis/spaces-api/latest
+[thanos-object-storage]: https://thanos.io/tip/thanos/storage.md/
+[sharedbackupschedule]: /reference/apis/spaces-api/latest
+[cron-formatted]: https://en.wikipedia.org/wiki/Cron
+[spaces-api-documentation]: /reference/apis/spaces-api/v1_9
+[sharedbackup]: /reference/apis/spaces-api/latest
+[backup]: /reference/apis/spaces-api/latest
+[spaces-api-documentation-1]: /reference/apis/spaces-api/v1_9
+
+
+
diff --git a/spaces_versioned_docs/version-v1.15/howtos/cloud-spaces/_category_.json b/spaces_versioned_docs/version-v1.15/howtos/cloud-spaces/_category_.json
new file mode 100644
index 000000000..1e1869a38
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.15/howtos/cloud-spaces/_category_.json
@@ -0,0 +1,10 @@
+{
+  "label": "Cloud Spaces",
+  "position": 1,
+  "collapsed": true,
+  "customProps": {
+    "plan": "standard"
+  }
+}
+
+
diff --git a/spaces_versioned_docs/version-v1.15/howtos/cloud-spaces/dedicated-spaces-deployment.md b/spaces_versioned_docs/version-v1.15/howtos/cloud-spaces/dedicated-spaces-deployment.md
new file mode 100644
index 000000000..ebad9493e
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.15/howtos/cloud-spaces/dedicated-spaces-deployment.md
@@ -0,0 +1,33 @@
+---
+title: Dedicated Spaces
+sidebar_position: 4
+description: A guide to Upbound Dedicated Spaces
+plan: business
+---
+
+
+## Benefits
+
+Dedicated Spaces offer the following benefits:
+
+- **Single-tenancy.** A control plane space where Upbound guarantees you're the only tenant operating in the environment.
+- **Connectivity to your private network.** Establish secure network connections between your Dedicated Cloud Space running in Upbound and your own resources behind your private network.
+- **Reduced Overhead.** Offload day-to-day operational burdens to Upbound while focusing on your job of building your platform.
+
+## Architecture
+
+A Dedicated Space is a deployment of the Upbound Spaces software inside an
+Upbound-controlled cloud account and network. The control planes you run there
+are isolated to your own single-tenant environment.
+
+The diagram below illustrates the high-level architecture of Upbound Dedicated Spaces:
+
+![Upbound Managed Spaces arch](/img/managed-arch-gcp.png)
+
+## How to get access to Dedicated Spaces
+
+If you have an interest in Upbound Dedicated Spaces, contact
+[Upbound][contact-us]. We can chat more about your
+requirements and see if Dedicated Spaces are a good fit for you.
+
+[contact-us]: https://www.upbound.io/contact-us
+[managed-space]: /spaces/howtos/self-hosted/managed-spaces-deployment
diff --git a/spaces_versioned_docs/version-v1.15/howtos/cloud-spaces/gitops-on-upbound.md b/spaces_versioned_docs/version-v1.15/howtos/cloud-spaces/gitops-on-upbound.md
new file mode 100644
index 000000000..fa59a8dce
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.15/howtos/cloud-spaces/gitops-on-upbound.md
@@ -0,0 +1,318 @@
+---
+title: GitOps with Upbound Control Planes
+sidebar_position: 80
+description: An introduction to doing GitOps with control planes on Upbound Cloud Spaces
+tier: "business"
+---
+
+:::info Deployment Model
+This guide applies to **Upbound Cloud Spaces** (Dedicated and Managed Spaces). For self-hosted Spaces deployments, see [GitOps with ArgoCD in Self-Hosted Spaces](/spaces/howtos/self-hosted/gitops-with-argocd/).
+:::
+
+GitOps is an approach for managing a system by declaratively describing desired resources' configurations in Git and using controllers to realize the desired state. Upbound's control planes are compatible with this pattern and it's strongly recommended you integrate GitOps in the platforms you build on Upbound.
+
+
+## Integrate with Argo CD
+
+
+[Argo CD][argo-cd] is a project in the Kubernetes ecosystem commonly used for GitOps. You can use it in tandem with Upbound control planes to achieve GitOps flows. The sections below explain how to integrate these tools with Upbound.
+
+### Generate a kubeconfig for your control plane
+
+Use the up CLI to [generate a kubeconfig][generate-a-kubeconfig] for your control plane.
+
+```bash
+up ctx <organization>/<space>/<group>/<control-plane> -f - > context.yaml
+```
+
+### Create an API token
+
+
+You need a personal access token (PAT). You create PATs on a per-user basis in the Upbound Console. Go to [My Account - API tokens][my-account-api-tokens] and select Create New Token. Give the token a name and save the secret value somewhere safe.
+
+
+### Add the up CLI init container to Argo
+
+Create a new file called `up-plugin-values.yaml` and paste the following YAML:
+
+```yaml
+controller:
+  volumes:
+    - name: up-plugin
+      emptyDir: {}
+    - name: up-home
+      emptyDir: {}
+
+  volumeMounts:
+    - name: up-plugin
+      mountPath: /usr/local/bin/up
+      subPath: up
+    - name: up-home
+      mountPath: /home/argocd/.up
+
+  initContainers:
+    - name: up-plugin
+      image: xpkg.upbound.io/upbound/up-cli:v0.39.0
+      command: ["cp"]
+      args:
+        - /usr/local/bin/up
+        - /plugin/up
+      volumeMounts:
+        - name: up-plugin
+          mountPath: /plugin
+
+server:
+  volumes:
+    - name: up-plugin
+      emptyDir: {}
+    - name: up-home
+      emptyDir: {}
+
+  volumeMounts:
+    - name: up-plugin
+      mountPath: /usr/local/bin/up
+      subPath: up
+    - name: up-home
+      mountPath: /home/argocd/.up
+
+  initContainers:
+    - name: up-plugin
+      image: xpkg.upbound.io/upbound/up-cli:v0.39.0
+      command: ["cp"]
+      args:
+        - /usr/local/bin/up
+        - /plugin/up
+      volumeMounts:
+        - name: up-plugin
+          mountPath: /plugin
+```
+
+### Install or upgrade Argo using the values file
+
+Install or upgrade Argo via Helm, including the values from the `up-plugin-values.yaml` file:
+
+```bash
+helm upgrade --install -n argocd -f up-plugin-values.yaml --reuse-values argocd argo/argo-cd
+```
+
+
+### Configure Argo CD
+
+
+To configure Argo CD for annotation-based resource tracking, edit the Argo CD ConfigMap in the Argo CD namespace.
+Add `application.resourceTrackingMethod: annotation` to the data section as shown below.
+This setting prevents Argo CD's auto pruning from deleting Crossplane resources.
+
+Next, configure the [auto respect RBAC for the Argo CD controller][auto-respect-rbac-for-the-argo-cd-controller].
+By default, Argo CD attempts to discover some Kubernetes resource types that don't exist in a control plane.
+You must configure Argo CD to respect the cluster's RBAC rules so that Argo CD can sync.
+Add `resource.respectRBAC: normal` to the data section as shown below.
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: argocd-cm
+data:
+  ...
+  application.resourceTrackingMethod: annotation
+  resource.respectRBAC: normal
+```
+
+:::tip
+The `resource.respectRBAC` configuration above tells Argo to respect RBAC for _all_ cluster contexts. If you're using an Argo CD instance to manage more than only control planes, you should consider changing the `clusters` string match for the configuration to apply only to control planes. For example, if every control plane context name followed the convention of being named `controlplane-<name>`, you could set the string match to be `controlplane-*`
+:::
+
+
+### Create a cluster context definition
+
+
+Replace the variables and apply the following manifest to configure a new Argo cluster context definition.
+
+To configure Argo for a control plane in a Connected Space, replace `stringData.server` with the ingress URL of the control plane. This URL is what `up ctx` outputs.
+
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  name: my-control-plane
+  namespace: argocd
+  labels:
+    argocd.argoproj.io/secret-type: cluster
+type: Opaque
+stringData:
+  name: my-control-plane-context
+  server: https://<space>.spaces.upbound.io/apis/spaces.upbound.io/v1beta1/namespaces/<group>/controlplanes/<control-plane>/k8s
+  config: |
+    {
+      "execProviderConfig": {
+        "apiVersion": "client.authentication.k8s.io/v1",
+        "command": "up",
+        "args": [ "org", "token" ],
+        "env": {
+          "ORGANIZATION": "<organization>",
+          "UP_TOKEN": "<token>"
+        }
+      },
+      "tlsClientConfig": {
+        "insecure": false,
+        "caData": "<base64-encoded-ca-data>"
+      }
+    }
+```
+
+
+## GitOps for Upbound resources
+
+
+Like any other cloud service, you can drive the lifecycle of Upbound Cloud resources with Crossplane. This lets you establish GitOps flows to declaratively create and manage:
+
+- [control plane groups][control-plane-groups]
+- [control planes][control-planes]
+- [Upbound IAM resources][upbound-iam-resources]
+
+Use a control plane installed with [provider-upbound][provider-upbound] and [provider-kubernetes][provider-kubernetes] to achieve this.
+
+### Provider-upbound
+
+[Provider-upbound][provider-upbound-2] is a Crossplane provider built by Upbound to interact with Upbound resources. Use _provider-upbound_ to declaratively create and manage the lifecycle of IAM resources and repositories:
+
+- [Robots][robots] and their membership to teams
+- [Teams][teams]
+- [Repositories][repositories] and [permissions][permissions] on those repositories.
+
+:::tip
+This provider defines managed resources for control planes, their auth, and permissions. These resources are only applicable for customers who run in Upbound's **Legacy Spaces** control plane hosting environments. Customers should use _provider-kubernetes_, explained below, to manage the lifecycle of control planes with Crossplane.
+:::
+
+### Provider-kubernetes
+
+[Provider-kubernetes][provider-kubernetes-3] is a Crossplane provider that defines an [Object][object] resource. Use _Objects_ as general-purpose resources to wrap _any_ Kubernetes resource for Crossplane to manage.
+
+Upbound [Space APIs][space-apis] are Kube-like APIs and have implemented support for most Kubernetes-style API concepts.
You can use kubectl or any other Kubernetes-compatible tooling to interact with the API. This means you can use _provider-kubernetes_ to drive interactions with Space APIs.
+
+:::warning
+When interacting with a Cloud Space's API, the Kubernetes [watch][watch] feature **isn't implemented.** Argo CD requires _watch_ support to function as expected, meaning you can't point Argo directly at a Cloud Space until it's implemented.
+:::
+
+Use _provider-kubernetes_ to declaratively drive interactions with all [Space APIs][space-apis-1]. Wrap the desired API resource in an _Object_. See the example below for a control plane:
+
+```yaml
+apiVersion: kubernetes.crossplane.io/v1alpha2
+kind: Object
+metadata:
+  name: my-controlplane
+spec:
+  forProvider:
+    manifest:
+      apiVersion: spaces.upbound.io/v1beta1
+      kind: ControlPlane
+      metadata:
+        name: my-controlplane
+        namespace: default
+      spec:
+        crossplane:
+          autoUpgrade:
+            channel: Rapid
+```
+
+[Control plane groups][control-plane-groups-2] are a special case because they technically map to an underlying Kubernetes namespace. You should create a `kind: Namespace` with the `spaces.upbound.io/group` label to create a control plane group in a Space. See the example below:
+
+```yaml
+apiVersion: kubernetes.crossplane.io/v1alpha2
+kind: Object
+metadata:
+  name: group1
+spec:
+  forProvider:
+    manifest:
+      apiVersion: v1
+      kind: Namespace
+      metadata:
+        name: group1
+        labels:
+          spaces.upbound.io/group: "true"
+      spec: {}
+```
+
+### Configure auth for provider-kubernetes
+
+Like any other Crossplane provider, _provider-kubernetes_ requires a valid [ProviderConfig][providerconfig] to authenticate with Upbound before interacting with its APIs. Follow the steps below to configure auth for a ProviderConfig on a control plane that you want to use to interact with Upbound resources.
+
+1. Define an environment variable for the name of your Upbound org account. Use `up org list` to retrieve this value.
+```shell
+export UPBOUND_ACCOUNT="<your-org>"
+```
+
+2. Create a [personal access token][personal-access-token] and store it as an environment variable.
+```shell
+export UPBOUND_TOKEN="<your-token>"
+```
+
+3. Log in to Upbound.
+```shell
+up login
+```
+
+4. Create a kubeconfig for the desired Cloud Space instance you want to interact with.
+```shell
+export CONTROLPLANE_CONFIG=/tmp/controlplane-kubeconfig
+KUBECONFIG=$CONTROLPLANE_CONFIG up ctx $UPBOUND_ACCOUNT/upbound-gcp-us-west-1 # Replace this path with whichever Cloud Space you want to communicate with.
+```
+
+5. On the control plane you want to use to interact with Upbound resources, create a secret containing the credentials:
+```shell
+kubectl -n crossplane-system create secret generic cluster-config --from-file=kubeconfig=$CONTROLPLANE_CONFIG
+kubectl -n crossplane-system create secret generic upbound-credentials --from-literal=token=$UPBOUND_TOKEN
+```
+
+6. Create a ProviderConfig that references the credentials created in the prior step. Create this resource in your control plane:
+```yaml
+apiVersion: kubernetes.crossplane.io/v1alpha1
+kind: ProviderConfig
+metadata:
+  name: default
+spec:
+  credentials:
+    source: Secret
+    secretRef:
+      namespace: crossplane-system
+      name: cluster-config
+      key: kubeconfig
+  identity:
+    type: UpboundTokens
+    source: Secret
+    secretRef:
+      name: upbound-credentials
+      namespace: crossplane-system
+      key: token
+```
+
+You can now create _Objects_ in the control plane which wrap Space APIs.
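+
+As a quick end-to-end check, apply one of the _Object_ manifests above and watch it reconcile. A minimal sketch, assuming your kubeconfig points at the control plane holding the ProviderConfig and that the Object manifest is saved as `my-controlplane-object.yaml` (an illustrative filename):
+
+```bash
+# Create the Object that wraps the Space API resource.
+kubectl apply -f my-controlplane-object.yaml
+
+# provider-kubernetes reports the Object SYNCED/READY once it applies the
+# wrapped resource against the Space API (columns vary by provider version).
+kubectl get objects.kubernetes.crossplane.io
+kubectl describe objects.kubernetes.crossplane.io my-controlplane
+```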
+
+[generate-a-kubeconfig]: /manuals/cli/concepts/contexts
+[control-plane-groups]: /spaces/concepts/groups
+[control-planes]: /spaces/concepts/control-planes
+[upbound-iam-resources]: /manuals/platform/concepts/identity-management
+[space-apis]: /reference/apis/spaces-api/v1_9
+[space-apis-1]: /reference/apis/spaces-api/v1_9
+[control-plane-groups-2]: /spaces/concepts/groups
+
+
+[argo-cd]: https://argo-cd.readthedocs.io/en/stable/
+[my-account-api-tokens]: https://accounts.upbound.io/settings/tokens
+[auto-respect-rbac-for-the-argo-cd-controller]: https://argo-cd.readthedocs.io/en/stable/operator-manual/declarative-setup/#auto-respect-rbac-for-controller
+[spec-writeconnectionsecrettoref]: /reference/apis/spaces-api/latest
+[auto-respect-rbac-for-the-argo-cd-controller-1]: https://argo-cd.readthedocs.io/en/stable/operator-manual/declarative-setup/#auto-respect-rbac-for-controller
+[provider-upbound]: https://marketplace.upbound.io/providers/upbound/provider-upbound
+[provider-kubernetes]: https://marketplace.upbound.io/providers/upbound/provider-kubernetes
+[provider-upbound-2]: https://marketplace.upbound.io/providers/upbound/provider-upbound
+[robots]: https://marketplace.upbound.io/providers/upbound/provider-upbound/v0.8.0/resources/iam.upbound.io/Robot/v1alpha1
+[teams]: https://marketplace.upbound.io/providers/upbound/provider-upbound/v0.8.0/resources/iam.upbound.io/Team/v1alpha1
+[repositories]: https://marketplace.upbound.io/providers/upbound/provider-upbound/v0.8.0/resources/repository.upbound.io/Repository/v1alpha1
+[permissions]: https://marketplace.upbound.io/providers/upbound/provider-upbound/v0.8.0/resources/repository.upbound.io/Permission/v1alpha1
+[provider-kubernetes-3]: https://marketplace.upbound.io/providers/upbound/provider-kubernetes
+[object]: https://marketplace.upbound.io/providers/upbound/provider-kubernetes/v0.17.0/resources/kubernetes.crossplane.io/Object/v1alpha2
+[watch]: https://kubernetes.io/docs/reference/using-api/api-concepts/#watch-bookmarks
+[providerconfig]: https://marketplace.upbound.io/providers/upbound/provider-kubernetes/v0.17.0/resources/kubernetes.crossplane.io/ProviderConfig/v1alpha1
+[personal-access-token]: https://accounts.upbound.io/settings/tokens
diff --git a/spaces_versioned_docs/version-v1.15/howtos/control-plane-topologies.md b/spaces_versioned_docs/version-v1.15/howtos/control-plane-topologies.md
new file mode 100644
index 000000000..9020e5a41
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.15/howtos/control-plane-topologies.md
@@ -0,0 +1,566 @@
+---
+title: Control Plane Topologies
+sidebar_position: 15
+description: Configure scheduling of composites to remote control planes
+---
+
+:::info API Version Information
+This guide is for the Control Plane Topology feature, which is in **private preview**. For customers with access to this feature, it applies to v1.12+.
+
+For related API specifications, see the [Spaces API Reference](../../../reference/apis/spaces-api/).
+:::
+
+:::important
+This feature is in private preview for select customers in Upbound Spaces. If you're interested in this deployment mode, please [contact us](https://www.upbound.io/support/contact).
+:::
+
+Upbound's _Control Plane Topology_ feature lets you build and deploy a platform
+of multiple control planes. These control planes work together for a unified platform
+experience.
+
+
+With the _Topology_ feature, you can install resource APIs that are
+reconciled by other control planes and configure the routing that occurs between
+control planes. You can also build compositions that reference other resources
+running on your control plane or elsewhere in Upbound.
+
+This guide explains how to use Control Plane Topology APIs to install and
+configure remote APIs, and to build compositions that reference other resources.
+
+## Benefits
+
+The Control Plane Topology feature provides the following benefits:
+
+* Decouple your platform architecture into independent offerings to improve your platform's software development lifecycle.
+* Install composite APIs from Configurations as CRDs which are fulfilled and reconciled by other control planes.
+* Route APIs to other control planes by configuring an _Environment_ resource, which defines a set of routable dimensions.
+
+## How it works
+
+
+Imagine the scenario where you want to let a user reference a subnet when creating a database instance. To your control plane, the `kind: database` and `kind: subnet` are independent resources. To you as the composition author, these resources have an important relationship. It may be that:
+
+- you don't want your user to ever be able to create a database without specifying a subnet.
+- you want to let them create a subnet when they create the database, if it doesn't exist.
+- you want to allow them to reuse a subnet that got created elsewhere or gets shared by another user.
+
+In each of these scenarios, you must resort to writing complex composition logic
+to handle each case. The problem is compounded when the resource exists in a
+context separate from the current control plane's context. Imagine a scenario
+where one control plane manages Database resources and a second control plane
+manages networking resources. With the _Topology_ feature, you can offload these
+concerns to Upbound machinery.
+
+
+![Control Plane Topology feature arch](/img/topology-arch.png)
+
+## Prerequisites
+
+Enable the Control Plane Topology feature in the Space you plan to run your control plane in:
+
+- Cloud Spaces: Not available yet
+- Connected Spaces: Space administrator must enable this feature
+- Disconnected Spaces: Space administrator must enable this feature
+
+
+
+## Compose resources with _ReferencedObjects_
+
+
+
+_ReferencedObject_ is a resource type available in an Upbound control plane that lets you reference other Kubernetes resources in Upbound.
+
+:::tip
+This feature is useful for composing resources that exist in a
+remote context, like another control plane. You can also use
+_ReferencedObjects_ to resolve references to any other Kubernetes object
+in the current control plane context. This could be a secret, another Crossplane
+resource, or more.
+:::
+
+### Declare the resource reference in your XRD
+
+To compose a _ReferencedObject_, you should start by adding a resource reference
+in your Composite Resource Definition (XRD). The convention for the resource
+reference follows the shape shown below:
+
+```yaml
+<resource>Ref:
+  type: object
+  properties:
+    apiVersion:
+      type: string
+      default: "<apiVersion>"
+      enum: [ "<apiVersion>" ]
+    kind:
+      type: string
+      default: "<kind>"
+      enum: [ "<kind>" ]
+    grants:
+      type: array
+      default: [ "Observe" ]
+      items:
+        type: string
+        enum: [ "Observe", "Create", "Update", "Delete", "*" ]
+    name:
+      type: string
+    namespace:
+      type: string
+  required:
+    - name
+```
+
+The `<resource>` prefix in `<resource>Ref` should be the kind of resource you want to reference.
The `apiVersion` and `kind` should be the associated API version and kind of the resource you want to reference.
+
+The `name` and `namespace` strings are inputs that let your users specify the resource instance.
+
+#### Grants
+
+The `grants` field is a special array that lets you give users the power to influence the behavior of the referenced resource. You can configure which of the available grants your users may select and which one is the default. Similar in behavior to [Crossplane management policies][crossplane-management-policies], each grant value does the following:
+
+- **Observe:** The composite may observe the state of the referenced resource.
+- **Create:** The composite may create the referenced resource if it doesn't exist.
+- **Update:** The composite may update the referenced resource.
+- **Delete:** The composite may delete the referenced resource.
+- **\*:** The composite has full control over the referenced resource.
+
+Here are some examples that show how it looks in practice:
+
+<details>
+<summary>Show example for defining the reference to another composite resource</summary>
+
+```yaml
+apiVersion: apiextensions.crossplane.io/v1
+kind: CompositeResourceDefinition
+metadata:
+  name: xsqlinstances.database.platform.upbound.io
+spec:
+  type: object
+  properties:
+    parameters:
+      type: object
+      properties:
+        networkRef:
+          type: object
+          properties:
+            apiVersion:
+              type: string
+              default: "networking.platform.upbound.io"
+              enum: [ "networking.platform.upbound.io" ]
+            grants:
+              type: array
+              default: [ "Observe" ]
+              items:
+                type: string
+                enum: [ "Observe" ]
+            kind:
+              type: string
+              default: "Network"
+              enum: [ "Network" ]
+            name:
+              type: string
+            namespace:
+              type: string
+          required:
+            - name
+```
+
+</details>
+
+<details>
+<summary>Show example for defining the reference to a secret</summary>
+
+```yaml
+apiVersion: apiextensions.crossplane.io/v1
+kind: CompositeResourceDefinition
+metadata:
+  name: xsqlinstances.database.platform.upbound.io
+spec:
+  type: object
+  properties:
+    parameters:
+      type: object
+      properties:
+        secretRef:
+          type: object
+          properties:
+            apiVersion:
+              type: string
+              default: "v1"
+              enum: [ "v1" ]
+            grants:
+              type: array
+              default: [ "Observe" ]
+              items:
+                type: string
+                enum: [ "Observe", "Create", "Update", "Delete", "*" ]
+            kind:
+              type: string
+              default: "Secret"
+              enum: [ "Secret" ]
+            name:
+              type: string
+            namespace:
+              type: string
+          required:
+            - name
+```
+
+</details>
+ +### Manually add the jsonPath + +:::important +This step is a known limitation of the preview. We're working on tooling that +removes the need for authors to do this step. +::: + +During the preview timeframe of this feature, you must add an annotation by hand +to the XRD. In your XRD's `metadata.annotations`, set the +`references.upbound.io/schema` annotation. It should be a JSON string in the +following format: + +```json +{ + "apiVersion": "references.upbound.io/v1alpha1", + "kind": "ReferenceSchema", + "references": [ + { + "jsonPath": ".spec.parameters.secretRef", + "kinds": [ + { + "apiVersion": "v1", + "kind": "Secret" + } + ] + } + ] +} +``` + +Flatten this JSON into a string and set the annotation on your XRD. View the +example below for an illustration: + +
+<details>
+<summary>Show example setting the references.upbound.io/schema annotation</summary>
+
+```yaml
+apiVersion: apiextensions.crossplane.io/v1
+kind: CompositeResourceDefinition
+metadata:
+  name: xthings.networking.acme.com
+  annotations:
+    references.upbound.io/schema: '{"apiVersion":"references.upbound.io/v1alpha1","kind":"ReferenceSchema","references":[{"jsonPath":".spec.secretRef","kinds":[{"apiVersion":"v1","kind":"Secret"}]},{"jsonPath":".spec.configMapRef","kinds":[{"apiVersion":"v1","kind":"ConfigMap"}]}]}'
+```
+
+</details>
+
+<details>
+<summary>Show example for setting multiple references in the references.upbound.io/schema annotation</summary>
+
+```yaml
+apiVersion: apiextensions.crossplane.io/v1
+kind: CompositeResourceDefinition
+metadata:
+  name: xthings.networking.acme.com
+  annotations:
+    references.upbound.io/schema: '{"apiVersion":"references.upbound.io/v1alpha1","kind":"ReferenceSchema","references":[{"jsonPath":".spec.parameters.secretRef","kinds":[{"apiVersion":"v1","kind":"Secret"}]},{"jsonPath":".spec.parameters.configMapRef","kinds":[{"apiVersion":"v1","kind":"ConfigMap"}]}]}'
+```
+
+</details>
+
+
+You can use a VSCode extension like [vscode-pretty-json][vscode-pretty-json] to make this task easier.
+
+
+### Compose a _ReferencedObject_
+
+To pair with the resource reference declared in your XRD, you must compose the referenced resource. Use the _ReferencedObject_ resource type to bring the resource into your composition. _ReferencedObject_ has the following schema:
+
+```yaml
+apiVersion: references.upbound.io/v1alpha1
+kind: ReferencedObject
+spec:
+  managementPolicies:
+    - Observe
+  deletionPolicy: Orphan
+  composite:
+    apiVersion: <apiVersion>
+    kind: <kind>
+    name: <name>
+    jsonPath: .spec.parameters.secretRef
+```
+
+The `spec.composite.apiVersion` and `spec.composite.kind` should match the API version and kind of the `compositeTypeRef` declared in your composition. The `spec.composite.name` should be the name of the composite resource instance.
+
+The `spec.composite.jsonPath` should be the path to the root of the resource ref you declared in your XRD.
+
+<details>
+<summary>Show example for composing a resource reference to a secret</summary>
+
+```yaml
+apiVersion: apiextensions.crossplane.io/v1
+kind: Composition
+metadata:
+  name: demo-composition
+spec:
+  compositeTypeRef:
+    apiVersion: networking.acme.com/v1alpha1
+    kind: XThing
+  mode: Pipeline
+  pipeline:
+    - step: patch-and-transform
+      functionRef:
+        name: crossplane-contrib-function-patch-and-transform
+      input:
+        apiVersion: pt.fn.crossplane.io/v1beta1
+        kind: Resources
+        resources:
+          - name: secret-ref-object
+            base:
+              apiVersion: references.upbound.io/v1alpha1
+              kind: ReferencedObject
+              spec:
+                managementPolicies:
+                  - Observe
+                deletionPolicy: Orphan
+                composite:
+                  apiVersion: networking.acme.com/v1alpha1
+                  kind: XThing
+                  name: TO_BE_PATCHED
+                  jsonPath: .spec.parameters.secretRef
+            patches:
+              - type: FromCompositeFieldPath
+                fromFieldPath: metadata.name
+                toFieldPath: spec.composite.name
+```
+
+</details>
+
+By declaring a resource reference in your XRD, Upbound handles resolution of the desired resource.
+
+## Deploy APIs
+
+To configure routing resource requests between control planes, you need to deploy APIs in at least two control planes.
+
+### Deploy into a service-level control plane
+
+Package the APIs you build into a Configuration package and deploy it on a
+control plane in an Upbound Space. In Upbound, it's common to refer to the
+control plane where the Configuration package is deployed as a **service-level
+control plane**. This control plane runs the controllers that process the API
+requests and provision underlying resources. In a later section, you learn how
+you can use _Topology_ features to [configure routing][configure-routing].
+
+### Deploy as Remote APIs on a platform control plane
+
+You should use the same package source as deployed in the **service-level
+control planes**, but this time deploy the Configuration in a separate control
+plane as a _RemoteConfiguration_. The _RemoteConfiguration_ installs Kubernetes
+CustomResourceDefinitions for the APIs defined in the Configuration package, but
+no controllers get deployed.
+
+### Install a _RemoteConfiguration_
+
+_RemoteConfiguration_ is a resource type available in Upbound managed control
+planes that acts like a Crossplane [Configuration][configuration]
+package. Unlike standard Crossplane Configurations, which install XRDs,
+compositions, and functions into a desired control plane, _RemoteConfigurations_
+install only the CRDs for claimable composite resource types.
+
+#### Install directly
+
+Install a _RemoteConfiguration_ by defining the following and applying it to
+your control plane:
+
+```yaml
+apiVersion: pkg.upbound.io/v1alpha1
+kind: RemoteConfiguration
+metadata:
+  name: <name>
+spec:
+  package: <package>
+```
+
+#### Declare as a project dependency
+
+You can declare _RemoteConfigurations_ as dependencies in your control plane's
+[project file][project-file]. Use the up CLI to add the dependency, providing
+the `--remote` flag:
+
+```bash
+up dep add <package> --remote
+```
+
+This command adds a declaration in the `spec.apiDependencies` stanza of your
+project's `upbound.yaml` as demonstrated below:
+
+```yaml
+apiVersion: meta.dev.upbound.io/v1alpha1
+kind: Project
+metadata:
+  name: service-controlplane
+spec:
+  apiDependencies:
+    - configuration: xpkg.upbound.io/upbound/remote-configuration
+      version: '>=v0.0.0'
+  dependsOn:
+    - provider: xpkg.upbound.io/upbound/provider-kubernetes
+      version: '>=v0.0.0'
+```
+
+Like a Configuration, a _RemoteConfigurationRevision_ gets created when the
+package gets installed on a control plane. Unlike Configurations, XRDs and
+compositions **don't** get installed by a _RemoteConfiguration_. Only the CRDs
+for claimable composite types get installed and Crossplane thereafter manages
+their lifecycle. You can tell when a CRD gets installed by a
+_RemoteConfiguration_ because it has the `internal.scheduling.upbound.io/remote:
+true` label:
+
+```yaml
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  name: things.networking.acme.com
+  labels:
+    internal.scheduling.upbound.io/remote: "true"
+```
+
+## Use an _Environment_ to route resources
+
+_Environment_ is a resource type available in Upbound control planes that works
+in tandem with resources installed by _RemoteConfigurations_. _Environment_ is a
+namespace-scoped resource that lets you configure how to route remote resources
+to other control planes by a set of user-defined dimensions.
+
+### Define a routing dimension
+
+To establish a routing dimension between two control planes, you must do two
+things:
+
+1. Label the service control plane with the name and value of a dimension.
+2. Configure an environment on another control plane with a dimension matching the field and value of the service control plane.
+
+The example below demonstrates the creation of a service control plane with a
+`region` dimension:
+
+```yaml
+apiVersion: spaces.upbound.io/v1beta1
+kind: ControlPlane
+metadata:
+  labels:
+    dimension.scheduling.upbound.io/region: "us-east-1"
+  name: prod-1
+  namespace: default
+spec: {}
+```
+
+Upbound's Spaces controller keeps an inventory of all declared dimensions and
+listens for control planes to route to them.
+
+### Create an _Environment_
+
+Next, create an _Environment_ on a separate control plane, referencing the
+dimension from before. The example below demonstrates routing all remote
+resource requests in the `default` namespace of the control plane based on a
+single `region` dimension:
+
+```yaml
+apiVersion: scheduling.upbound.io/v1alpha1
+kind: Environment
+metadata:
+  name: default
+  namespace: default
+spec:
+  dimensions:
+    region: us-east-1
+```
+
+You can specify whichever dimensions you want. The example below demonstrates
+multiple dimensions:
+
+```yaml
+apiVersion: scheduling.upbound.io/v1alpha1
+kind: Environment
+metadata:
+  name: default
+  namespace: default
+spec:
+  dimensions:
+    region: us-east-1
+    env: prod
+    offering: databases
+```
+
+For the routing controller to select a given service control plane, _all_
+dimensions must match.
+
+You can specify dimension overrides on a per-resource group basis. This lets you
+configure default routing rules for a given _Environment_ and override routing
+on a per-offering basis.
+
+```yaml
+apiVersion: scheduling.upbound.io/v1alpha1
+kind: Environment
+metadata:
+  name: default
+  namespace: default
+spec:
+  dimensions:
+    region: us-east-1
+  resourceGroups:
+    - name: database.platform.upbound.io # database
+      dimensions:
+        region: "us-east-1"
+        env: "prod"
+        offering: "databases"
+    - name: networking.platform.upbound.io # networks
+      dimensions:
+        region: "us-east-1"
+        env: "prod"
+        offering: "networks"
+```
+
+### Confirm the configured route
+
+After you create an _Environment_ on a control plane, the routes selected get
+reported in the _Environment's_ `.status.resourceGroups`. This is illustrated
+below:
+
+```yaml
+apiVersion: scheduling.upbound.io/v1alpha1
+kind: Environment
+metadata:
+  name: default
+...
+status:
+  resourceGroups:
+    - name: database.platform.upbound.io # database
+      proposed:
+        controlPlane: ctp-1
+        group: default
+        space: upbound-gcp-us-central1
+        dimensions:
+          region: "us-east-1"
+          env: "prod"
+          offering: "databases"
+```
+
+If you don't see a response in the `.status.resourceGroups`, it indicates that
+no match was found or that an error occurred while establishing the route.
+
+:::tip
+There's no limit to the number of control planes you can route to. You can also
+stack routing and form your own topology of control planes, with multiple layers
+of routing.
+:::
+
+### Limitations
+
+
+Routing from one control plane to another is currently scoped to control planes
+that exist in a single Space. You can't route resource requests to control
+planes that exist on a cross-Space boundary.
+
+
+[project-file]: /manuals/cli/howtos/project
+[contact-us]: https://www.upbound.io/usage/support/contact
+[crossplane-management-policies]: https://docs.crossplane.io/latest/managed-resources/managed-resources/#managementpolicies
+[vscode-pretty-json]: https://marketplace.visualstudio.com/items?itemName=chrismeyers.vscode-pretty-json
+[configure-routing]: #use-an-environment-to-route-resources
+[configuration]: https://docs.crossplane.io/latest/packages/providers
diff --git a/spaces_versioned_docs/version-v1.15/howtos/ctp-connector.md b/spaces_versioned_docs/version-v1.15/howtos/ctp-connector.md
new file mode 100644
index 000000000..b2cc48c49
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.15/howtos/ctp-connector.md
@@ -0,0 +1,508 @@
+---
+title: Control Plane Connector
+sidebar_position: 80
+description: A guide for how to connect a Kubernetes app cluster to a control plane in Upbound using the Control Plane connector feature
+plan: "standard"
+---
+
+
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+). Control Plane Connector functionality is consistent across versions.
+
+For related API specifications and resources, see the [Spaces API Reference](../../../reference/apis/spaces-api/).
+:::
+
+Control Plane Connector connects arbitrary Kubernetes application clusters outside of
+Upbound Spaces to your control planes running in Upbound Spaces.
+This lets you interact with your control plane's API from the app cluster. The claim APIs and the namespaced XR APIs
+you define via CompositeResourceDefinitions (XRDs) in the control plane are available in
+your app cluster alongside Kubernetes workload APIs like Pod. Control Plane Connector
+enables the same experience as a locally installed Crossplane.
+
+![control plane connector operations flow](/img/ConnectorFlow.png)
+
+### Control Plane Connector operations
+
+Control Plane Connector leverages the [Kubernetes API AggregationLayer][kubernetes-api-aggregationlayer]
+to create an extension API server and serve the claim APIs and the namespaced XR APIs in the control plane. It
+discovers the claim APIs and the namespaced XR APIs available in the control plane and registers corresponding
+APIService resources on the app cluster. Those APIService resources refer to the
+extension API server of Control Plane Connector.
+
+The claim APIs and the namespaced XR APIs are available in your Kubernetes cluster, just like all native
+Kubernetes APIs.
+
+The Control Plane Connector processes every request targeting the claim APIs and the namespaced XR APIs and makes the
+relevant requests to the connected control plane.
+
+Only the connected control plane stores and processes all claims and namespaced XRs created in the app
+cluster, eliminating any storage use at the application cluster. The control plane
+connector provisions a target namespace at the control plane for the app cluster and stores
+all claims and namespaced XRs in this target namespace.
+
+For managing the claims and namespaced XRs, the Control Plane Connector creates a unique identifier for a
+resource by combining input parameters from claims, including:
+- `metadata.name`
+- `metadata.namespace`
+- the cluster name
+
+
+It employs SHA-256 hashing to generate a hash value and then extracts the first
+16 characters of that hash. This ensures the resulting identifier remains within
+the 64-character limit in Kubernetes.
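+
+As a sketch, you can reproduce an identifier with standard shell tools, assuming the `<name>-x-<namespace>-x-<cluster-id>` concatenation shown in the example below:
+
+```bash
+# Hash the concatenated claim name, namespace, and cluster ID, then keep
+# the first 16 hex characters of the SHA-256 digest.
+echo -n "my-bucket-x-test-x-00000000-0000-0000-0000-000000000000" \
+  | sha256sum | cut -c1-16
+# Prefix the result with "claim-" (or "nxr-" for a namespaced XR) to get
+# the name used on the control plane.
+```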
+
+
+For instance, if a claim named `my-bucket` exists in the `test` namespace in
+`cluster-dev`, the system calculates the SHA-256 hash from
+`my-bucket-x-test-x-00000000-0000-0000-0000-000000000000` and takes the first 16
+characters. The control plane side then names the claim `claim-c603e518969b413e`.
+
+For namespaced XRs, the process is similar, only the prefix is different.
+The name becomes `nxr-c603e518969b413e`.
+
+
+### Installation
+
+
+
+
+
+Log in with the up CLI:
+
+```bash
+up login
+```
+
+Connect your app cluster to a namespace in an Upbound control plane with `up controlplane connector install <control-plane> <claim-namespace>`. This command creates a user token and installs the Control Plane Connector to your cluster. It's recommended you create a values file called `connector-values.yaml` and provide the following below. Select the tab according to which environment your control plane is running in.
+
+
+
+
+
+
+```yaml
+upbound:
+  # This is your org account in Upbound e.g. the name displayed after executing `up org list`
+  account: <your-org>
+  # This is a personal access token generated in the Upbound Console
+  token: <your-token>
+
+spaces:
+  # Use this host if your control plane runs in Upbound's GCP Cloud Space; otherwise use upbound-aws-us-east-1.spaces.upbound.io
+  host: "upbound-gcp-us-west-1.spaces.upbound.io"
+  insecureSkipTLSVerify: true
+  controlPlane:
+    # The name of the control plane you want the Connector to attach to
+    name: <control-plane>
+    # The control plane group the control plane resides in
+    group: <group>
+    # The namespace within the control plane to sync claims from the app cluster to. NOTE: This must be created before you install the connector.
+    claimNamespace: <claim-namespace>
+```
+
+
+
+
+
+1. Create a [kubeconfig][kubeconfig] for the control plane. Update your Upbound context to the path for your desired control plane.
+```shell
+up login
+up ctx <your-org>/upbound-gcp-us-central-1/default/your-control-plane
+up ctx . -f - > context.yaml
+```
+
+2. Write it to a secret in the cluster where you plan to
+install the Control Plane Connector.
+```shell
+kubectl create secret generic my-controlplane-kubeconfig --from-file=context.yaml
+```
+
+3. Reference this secret in the
+`spaces.controlPlane.kubeconfigSecret` field below.
+
+```yaml
+spaces:
+  controlPlane:
+    # The namespace within the control plane to sync claims from the app cluster to. NOTE: This must be created before you install the connector.
+    claimNamespace: <claim-namespace>
+    kubeconfigSecret:
+      name: my-controlplane-kubeconfig
+      key: kubeconfig
+```
+
+
+
+
+
+
+Provide the values file above when you run the CLI command:
+
+
+```bash
+up controlplane connector install my-control-plane my-app-ns-1 --file=connector-values.yaml
+```
+
+The Claim APIs and the namespaced XR APIs from your control plane are now visible in the cluster.
+You can verify this with `kubectl api-resources`.
+
+```bash
+kubectl api-resources
+```
+
+### Uninstall
+
+Disconnect an app cluster that you previously installed the Control Plane Connector on by
+running the following:
+
+```bash
+up ctp connector uninstall <control-plane> <claim-namespace>
+```
+
+This command uninstalls the helm chart for the Control Plane Connector from an app
+cluster. It moves any claims in the app cluster into the control plane
+at the specified namespace.
+
+:::tip
+Make sure your kubeconfig's current context is pointed at the app cluster where
+you want to uninstall Control Plane Connector from.
+:::
+
+
+
+
+It's recommended you create a values file called `connector-values.yaml` and
Select the tab according to which environment your +control plane is running in. + + + + + + +```yaml +upbound: + # This is your org account in Upbound e.g. the name displayed after executing `up org list` + account: + # This is a personal access token generated in the Upbound Console + token: + +spaces: + # Upbound GCP US-West-1 upbound-gcp-us-west-1.spaces.upbound.io + # Upbound AWS US-East-1 upbound-aws-us-east-1.spaces.upbound.io + # Upbound GCP US-Central-1 upbound-gcp-us-central-1.spaces.upbound.io + host: "" + insecureSkipTLSVerify: true + controlPlane: + # The name of the control plane you want the Connector to attach to + name: + # The control plane group the control plane resides in + group: + # The namespace within the control plane to sync claims from the app cluster to. + # NOTE: This must be created before you install the connector. + claimNamespace: +``` + + + + +Create a [kubeconfig][kubeconfig-1] for the +control plane. Write it to a secret in the cluster where you plan to +install the Control Plane Connector to. Reference this secret in the +`spaces.controlPlane.kubeconfigSecret` field below. + +```yaml +spaces: + controlPlane: + # The namespace within the control plane to sync claims from the app cluster to. NOTE: This must be created before you install the connector. + claimNamespace: + kubeconfigSecret: + name: my-controlplane-kubeconfig + key: kubeconfig +``` + + + + + + +Provide the values file above when you `helm install` the Control Plane Connector: + + +```bash +helm install --wait mcp-connector oci://xpkg.upbound.io/spaces-artifacts/mcp-connector -n kube-system -f connector-values.yaml +``` +:::tip +Create an API token from the Upbound user account settings page in the console by following [these instructions][these-instructions]. +::: + +### Uninstall + +You can uninstall Control Plane Connector with Helm by running the following: + +```bash +helm uninstall mcp-connector +``` + + + + + +### Example usage + +This example creates a control plane using [Configuration +EKS][configuration-eks]. `KubernetesCluster` is +available as a claim API in your control plane. The following is [an +example][an-example] +object you can create in your control plane. + +```yaml +apiVersion: k8s.starter.org/v1alpha1 +kind: KubernetesCluster +metadata: + name: my-cluster + namespace: default +spec: + id: my-cluster + parameters: + nodes: + count: 3 + size: small + services: + operators: + prometheus: + version: "34.5.1" + writeConnectionSecretToRef: + name: my-cluster-kubeconfig +``` + +After connecting your Kubernetes app cluster to the control plane, you +can create the `KubernetesCluster` object in your app cluster. Although your +local cluster has an Object, the actual resources is in your managed control +plane inside Upbound. + +```bash {copy-lines="3"} +# Applying the claim YAML above. +# kubectl is set up to talk with your Kubernetes cluster. +kubectl apply -f claim.yaml + + +kubectl get claim -A +NAME SYNCED READY CONNECTION-SECRET AGE +my-cluster True True my-cluster-kubeconfig 2m +``` + +Once Kubernetes creates the object, view the console to see your object. + +![Claim by connector in console](/img/ClaimInConsole.png) + +You can interact with the object through your cluster just as if it +lives in your cluster. + +### Migration to control planes + +This guide details the migration of a Crossplane installation to Upbound-managed +control planes using the Control Plane Connector to manage claims on an application +cluster. 
+
+![migration flow application cluster to control plane](/img/ConnectorMigration.png)
+
+#### Export all resources
+
+Before proceeding, ensure that you have set the correct kubecontext for your application
+cluster.
+
+```bash
+up controlplane migration export --pause-before-export --output=my-export.tar.gz --yes
+```
+
+This command performs the following:
+- Pauses all claim, composite, and managed resources before export.
+- Scans the control plane for resource types.
+- Exports Crossplane and native resources.
+- Archives the exported state into `my-export.tar.gz`.
+
+Example output:
+```bash
+Exporting control plane state...
+  ✓ Pausing all claim resources before export... 1 resources paused! ⏸️
+  ✓ Pausing all composite resources before export... 7 resources paused! ⏸️
+  ✓ Pausing all managed resources before export... 34 resources paused! ⏸️
+  ✓ Scanning control plane for types to export... 231 types found! 👀
+  ✓ Exporting 231 Crossplane resources...125 resources exported! 📤
+  ✓ Exporting 3 native resources...19 resources exported! 📤
+  ✓ Archiving exported state... archived to "my-export.tar.gz"! 📦
+
+Successfully exported control plane state!
+```
+
+#### Import all resources
+
+Restore the exported resources into the target control plane, which serves as the
+destination for the Control Plane Connector.
+
+
+Log into Upbound and select the correct context:
+
+```bash
+up login
+up ctx <your-org>/<space>/<group>
+up ctp create ctp-a
+```
+
+Output:
+```bash
+ctp-a created
+```
+
+Verify that the core Crossplane version on the new managed control plane matches
+the version on the application cluster.
+
+Use the following command to import the resources:
+```bash
+up controlplane migration import -i my-export.tar.gz \
+  --unpause-after-import \
+  --mcp-connector-cluster-id=my-appcluster \
+  --mcp-connector-claim-namespace=my-appcluster
+```
+
+This command:
+- Restores base resources
+- Waits for XRDs and packages to establish
+- Imports claims and XRs
+- Finalizes the import and resumes managed resources
+
+Note: `--mcp-connector-cluster-id` needs to be unique per application cluster,
+and `--mcp-connector-claim-namespace` is the namespace the system creates during
+the import.
+
+Example output:
+```bash
+Importing control plane state...
+  ✓ Reading state from the archive... Done! 👀
+  ✓ Importing base resources... 56 resources imported!📥
+  ✓ Waiting for XRDs... Established! ⏳
+  ✓ Waiting for Packages... Installed and Healthy! ⏳
+  ✓ Importing remaining resources... 88 resources imported! 📥
+  ✓ Finalizing import... Done! 🎉
+  ✓ Unpausing managed resources ... Done! ▶️
+
+Successfully imported control plane state!
+```
+
+#### Verify imported claims
+
+
+The Control Plane Connector renames all claims and adds additional labels to them.
+
+```bash
+kubectl get claim -A
+```
+
+Example output:
+```bash
+NAMESPACE       NAME                                                         SYNCED   READY   CONNECTION-SECRET             AGE
+my-appcluster   cluster.aws.platformref.upbound.io/claim-e708ff592b974f51   True     True    platform-ref-aws-kubeconfig   3m17s
+```
+
+Inspect the labels:
+```bash
+kubectl get -n my-appcluster cluster.aws.platformref.upbound.io/claim-e708ff592b974f51 -o yaml | yq .metadata.labels
+```
+
+Example output:
+```bash
+mcp-connector.upbound.io/app-cluster: my-appcluster
+mcp-connector.upbound.io/app-namespace: default
+mcp-connector.upbound.io/app-resource-name: example
+```
+
+#### Clean up the app cluster
+
+Remove all Crossplane-related resources from the application cluster, including:
+
+- Managed Resources
+- Claims
+- Compositions
+- XRDs
+- Packages (Functions, Configurations, Providers)
+- Crossplane and all associated CRDs
+
+
+#### Install Control Plane Connector
+
+
+Follow the preceding installation guide and configure the `connector-values.yaml`:
+
+```yaml
+# NOTE: clusterID needs to match --mcp-connector-cluster-id used in the import on the managed control plane
+clusterID: my-appcluster
+upbound:
+  account: <your-org>
+  token: <your-token>
+
+spaces:
+  host: "<space-host>"
+  insecureSkipTLSVerify: true
+  controlPlane:
+    name: <control-plane>
+    group: <group>
+    # NOTE: This is the --mcp-connector-claim-namespace used during the import to the control plane
+    claimNamespace: my-appcluster
+```
+Once the Control Plane Connector installs, verify that resources exist in the application
+cluster:
+
+```bash
+kubectl api-resources | grep platform
+```
+
+Example output:
+```bash
+awslbcontrollers   aws.platform.upbound.io/v1alpha1       true   AWSLBController
+podidentities      aws.platform.upbound.io/v1alpha1       true   PodIdentity
+sqlinstances       aws.platform.upbound.io/v1alpha1       true   SQLInstance
+clusters           aws.platformref.upbound.io/v1alpha1    true   Cluster
+osss               observe.platform.upbound.io/v1alpha1   true   Oss
+apps               platform.upbound.io/v1alpha1           true   App
+```
+
+Confirm the claims stored in the control plane appear in the application cluster:
+
+```bash
+kubectl get claim -A
+```
+
+Example output:
+```bash
+NAMESPACE   NAME                                         SYNCED   READY   CONNECTION-SECRET             AGE
+default     cluster.aws.platformref.upbound.io/example   True     True    platform-ref-aws-kubeconfig   127m
+```
+
+With this guide, you migrated your Crossplane installation to
+Upbound managed control planes. This ensures seamless integration with your
+application cluster using the Control Plane Connector.
+
+### Connect multiple app clusters to a control plane
+
+Claims are stored in a unique namespace in the Upbound control plane.
+Each connected cluster gets its own control plane namespace.
+
+![Multi-cluster architecture with control plane connector](/img/ConnectorMulticlusterArch.png)
+
+There's no limit on the number of clusters connected to a single control plane.
+Control plane operators can see all their infrastructure in a central control
+plane.
+
+Without using control planes and Control Plane Connector, users have to install
+Crossplane and providers on every cluster. Each cluster requires configuration for
+providers with the necessary credentials. With a single control plane and multiple
+clusters connected through Upbound tokens, you don't need to give out any cloud
+credentials to the clusters.
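+
+To connect another app cluster to the same control plane, run the connector installation again on that cluster with its own identifiers. A minimal values sketch (all values are illustrative; the cluster ID and claim namespace must be unique per app cluster):
+
+```yaml
+# Each app cluster gets its own clusterID and its own claim namespace
+# in the shared control plane.
+clusterID: my-appcluster-2
+upbound:
+  account: <your-org>
+  token: <your-token>
+
+spaces:
+  host: "<space-host>"
+  insecureSkipTLSVerify: true
+  controlPlane:
+    name: <control-plane>
+    group: <group>
+    claimNamespace: my-appcluster-2
+```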
+
+[kubeconfig]: /manuals/cli/howtos/context-config/#generate-a-kubeconfig-for-a-control-plane-in-a-group
+[kubeconfig-1]: /spaces/concepts/control-planes/#connect-directly-to-your-control-plane
+[these-instructions]: /manuals/console/#create-a-personal-access-token
+[kubernetes-api-aggregationlayer]: https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation/
+[configuration-eks]: https://github.com/upbound/configuration-eks
+[an-example]: https://github.com/upbound/configuration-eks/blob/9f86b6d/.up/examples/cluster.yaml
diff --git a/spaces_versioned_docs/version-v1.15/howtos/debugging-a-ctp.md b/spaces_versioned_docs/version-v1.15/howtos/debugging-a-ctp.md
new file mode 100644
index 000000000..521271e40
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.15/howtos/debugging-a-ctp.md
@@ -0,0 +1,128 @@
+---
+title: Debugging issues on a control plane
+sidebar_position: 70
+description: A guide for how to debug resources on a control plane running in Upbound.
+---
+
+This guide provides troubleshooting guidance for how to identify and fix issues on a control plane.
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+). Debugging methods are consistent across versions.
+
+For related control plane specifications, see the [Spaces API Reference](../../../reference/apis/spaces-api/).
+:::
+
+## Start from Upbound Console
+
+
+The Upbound [Console][console] has a built-in control plane explorer experience
+that surfaces status and events for the resources on your control plane. The
+explorer is claim-based. Resources in this view exist only if they exist in the
+reference chain originating from a claim. This view is a helpful starting point
+if you are attempting to debug an issue originating from a claim.
+
+:::tip
+If you directly create Crossplane Managed Resources (`MR`s) or Composite
+Resources (`XR`s), they won't render in the explorer.
+:::
+
+### Example
+
+The example below uses the control plane explorer view to inspect why a claim for an EKS Cluster isn't healthy.
+
+#### Check the health status of claims
+
+Two claims branch off of the API type card: one shows a healthy green icon, while the other shows an unhealthy red icon.
+
+![Use control plane explorer view to see status of claims](/img/debug-overview.png)
+
+Select `More details` on the unhealthy claim card and Upbound shows details for the claim.
+
+![Use control plane explorer view to see details of claims](/img/debug-claim-more-details.png)
+
+Looking at the three events for this claim:
+
+- **ConfigureCompositeResource**: this event indicates Upbound created the claimed Composite Resource (`XR`).
+
+- **BindCompositeResource**: this indicates the Composite Resource (`XR`) that's being "claimed" isn't ready yet. A claim doesn't show `HEALTHY` until the XR it references is ready.
+
+- **ConfigureCompositeResource**: the error saying, `cannot apply composite resource...the object has been modified; please apply your changes to the latest version and try again` is a generic event from Crossplane resources. It's safe to ignore this error.
+
+Next, look at the `status` field of the rendered YAML for the resource.
+
+![Use control plane explorer view to see status details of claims](/img/debug-claim-status.png)
+
+The status reports a similar message as the event stream: this claim is waiting for a Composite Resource to be ready. Based on this, investigate the Composite Resource referenced by this claim next.
+
+#### Check the health status of the Composite Resource
+
+
+The control plane explorer only shows the claim cards by default. Selecting the claim card renders the rest of the Crossplane resource tree associated with the selected claim.
+
+
+The previous claim expands into this screenshot:
+
+![Use control plane explorer view to expand tree of claim](/img/debug-claim-expansion.png)
+
+This renders the XR referenced by the claim (along with all its references). You can see the XR is showing the same unhealthy status icon in its card. Notice the XR itself has two nested XRs. One of the nested XRs shows a healthy green icon on its card, while the other shows an unhealthy red icon. Like the claim, a Composite Resource doesn't show healthy until all referenced resources also show healthy.
+
+#### Inspecting Managed Resources
+
+Selecting `More details` to inspect one of the unhealthy Managed Resources shows the following:
+
+![Use control plane explorer view to view events for an MR](/img/debug-mr-event.png)
+
+This event reveals it's unhealthy because it's waiting on a reference to another Managed Resource. Searching the rendered YAML of the MR for this resource shows the following:
+
+![Use control plane explorer view to view status for an MR](/img/debug-mr-status.png)
+
+The rendered YAML shows this MR is referencing a sibling MR that shares the same controller. The same parent XR created both of these managed resources. Inspect the sibling MR to see what its status is.
+
+![Use control plane explorer view to view status for a sibling MR](/img/debug-mr-dependency-status.png)
+
+The sibling MR event stream shows the Provider processed the resource create request. Ignore the `CannotInitializeManagedResource` event. EKS clusters can take 15 minutes or more to provision in AWS. The root cause is that everything is fine: all the resources are still provisioning. Waiting longer and then looking at the control plane explorer again shows all resources are healthy. For reference, below is an example status field for a resource that's healthy and provisioned.
+
+```yaml
+...
+status:
+  atProvider:
+    id: team-b-app-cluster-bhwfb-hwtgs-20230403135452772300000008
+  conditions:
+    - lastTransitionTime: '2023-04-03T13:56:35Z'
+      reason: Available
+      status: 'True'
+      type: Ready
+    - lastTransitionTime: '2023-04-03T13:54:02Z'
+      reason: ReconcileSuccess
+      status: 'True'
+      type: Synced
+    - lastTransitionTime: '2023-04-03T13:54:53Z'
+      reason: Success
+      status: 'True'
+      type: LastAsyncOperation
+    - lastTransitionTime: '2023-04-03T13:54:53Z'
+      reason: Finished
+      status: 'True'
+      type: AsyncOperation
+```
+
+### Control plane explorer limitations
+
+The control plane explorer view is currently designed around claims (`XRC`s). The control plane explorer doesn't inspect other Crossplane resources. To inspect other Crossplane resources, use the `up` CLI.
+
+Some examples of Crossplane resources that require the `up` CLI:
+
+- Managed Resources that aren't associated with a claim
+- Composite Resources that aren't associated with a claim
+- The status of _deleting_ resources
+- ProviderConfigs
+- Provider events
+
+## Use direct CLI access
+
+If your preference is to use a terminal instead of a GUI, Upbound supports direct access to the API server of the control plane. Use [`up ctx`][up-ctx] to connect directly to your control plane.
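+
+A short sketch of that workflow (the context path and resource names are illustrative):
+
+```bash
+# Point kubectl at the control plane.
+up ctx <org>/<space>/<group>/<control-plane>
+
+# List every managed resource, including ones no claim references.
+kubectl get managed
+
+# Inspect provider configuration and recent events. Qualify the resource
+# with its API group if more than one provider is installed.
+kubectl get providerconfigs
+kubectl get events --sort-by=.lastTimestamp
+```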
+
+
+[console]: /manuals/console/upbound-console
+[up-ctx]: /reference/cli-reference
diff --git a/spaces_versioned_docs/version-v1.15/howtos/managed-service.md b/spaces_versioned_docs/version-v1.15/howtos/managed-service.md
new file mode 100644
index 000000000..40b983a76
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.15/howtos/managed-service.md
@@ -0,0 +1,23 @@
+---
+title: Managed Upbound control planes
+description: "Learn about the managed service capabilities of a Space"
+sidebar_position: 10
+---
+
+Control planes in Upbound are fully isolated [Upbound Crossplane][uxp] instances
+that Upbound manages for you. This means Upbound handles:
+
+- the underlying lifecycle of infrastructure (compute, memory, and storage) required to power your instance.
+- scaling of the infrastructure.
+- the maintenance of the core Upbound Crossplane components that make up a control plane.
+
+This lets users focus on building their APIs and operating their control planes,
+while Upbound handles the rest. Each control plane has its own dedicated API
+server connecting users to their control plane.
+
+## Learn about Upbound control planes
+
+Read the [concept][ctp-concept] documentation to learn about Upbound control planes.
+
+[uxp]: /manuals/uxp/overview
+[ctp-concept]: /spaces/concepts/control-planes
\ No newline at end of file
diff --git a/spaces_versioned_docs/version-v1.15/howtos/mcp-connector-guide.md b/spaces_versioned_docs/version-v1.15/howtos/mcp-connector-guide.md
new file mode 100644
index 000000000..8a3866d07
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.15/howtos/mcp-connector-guide.md
@@ -0,0 +1,169 @@
+---
+title: Consume control plane APIs in an app cluster with control plane connector
+sidebar_position: 99
+description: A tutorial to connect a Kubernetes app cluster to a control plane in a
+  self-hosted Space with the control plane connector
+---
+
+In this tutorial, you learn how to configure a Kubernetes app cluster to communicate with a control plane in an Upbound self-hosted Space.
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Control plane connector functionality is consistent across versions.
+
+For related API specifications and resources, see the [Spaces API Reference](../../../../../reference/apis/spaces-api/).
+:::
+
+The [control plane connector][control-plane-connector] bridges your Kubernetes application clusters, running outside of Upbound, to your control planes running in Upbound. This allows you to interact with your control plane's API right from the app cluster. The claim APIs you define via `CompositeResourceDefinitions` are available alongside Kubernetes workload APIs like `Pod`. In effect, control plane connector provides the same experience as a locally installed Crossplane.
+
+## Prerequisites
+
+To complete this tutorial, you need the following:
+
+- Have already deployed an Upbound Space.
+- Have already deployed a Kubernetes cluster (referred to as `app cluster`).
+
+## Create a control plane
+
+Create a new control plane in your self-hosted Space. Run the following command in a terminal:
+
+```bash
+up ctp create my-control-plane
+```
+
+Once the control plane is ready, connect to it.
+
+```bash
+up ctp connect my-control-plane
+```
+
+For convenience, install an Upbound [platform reference Configuration][platform-reference-configuration] from the marketplace. For production scenarios, replace this with your own Crossplane Configurations or compositions.
+
+```bash
+up ctp configuration install xpkg.upbound.io/upbound/platform-ref-aws:v1.4.0
+```
+
+## Fetch the control plane's connection details
+
+Run the following command in a terminal:
+
+```shell
+kubectl get secret kubeconfig-my-control-plane -n default -o jsonpath='{.data.kubeconfig}' | base64 -d > kubeconfig-my-control-plane.yaml
+```
+
+This command saves the kubeconfig for the control plane to a file in your working directory.
+
+## Install control plane connector in your app cluster
+
+Switch contexts to your Kubernetes app cluster. To install the control plane connector in your app cluster, you must first provide a secret containing your control plane's kubeconfig at install time. Run the following command in a terminal:
+
+:::important
+Make sure the following commands are executed against your **app cluster**, not your control plane.
+:::
+
+```bash
+kubectl create secret generic kubeconfig-my-control-plane -n kube-system --from-file=kubeconfig=./kubeconfig-my-control-plane.yaml
+```
+
+Set the environment variable below to configure which namespace _in your control plane_ you wish to sync the app cluster's claims to.
+
+```shell
+export CONNECTOR_CTP_NAMESPACE=app-cluster-1
+```
+
+Install the control plane connector in the app cluster and point it to your control plane.
+
+```bash
+up ctp connector install my-control-plane $CONNECTOR_CTP_NAMESPACE --control-plane-secret=kubeconfig-my-control-plane
+```
+
+## Inspect your app cluster
+
+After you install the control plane connector in the app cluster, you can see the APIs that live on the control plane. You can confirm this is the case by running the following command on your app cluster:
+
+```bash {copy-lines="1"}
+kubectl api-resources | grep upbound
+
+# The output should look like this:
+sqlinstances   aws.platform.upbound.io/v1alpha1       true   SQLInstance
+clusters       aws.platformref.upbound.io/v1alpha1    true   Cluster
+osss           observe.platform.upbound.io/v1alpha1   true   Oss
+apps           platform.upbound.io/v1alpha1           true   App
+```
+
+## Claim a database instance on your app cluster
+
+Create a database claim against the `SQLInstance` API and observe resources get created by your control plane. Apply the following resources to your app cluster:
+
+```yaml
+cat < --output
+ ```
+
+ The command exports your existing Crossplane control plane configuration/state into an archive file.
+
+:::note
+By default, the export command doesn't make any changes to your existing Crossplane control plane state, leaving it intact. Use the `--pause-before-export` flag to pause the reconciliation on managed resources before exporting the archive file.
+
+This safety mechanism ensures the control plane you migrate state to doesn't assume ownership of resources before you're ready.
+:::
+
+2. Use the control plane [create command][create-command] to create a managed
+control plane in Upbound:
+
+   ```bash
+   up controlplane create my-controlplane
+   ```
+
+3. Use [`up ctx`][up-ctx] to connect to the control plane created in the previous step:
+
+   ```bash
+   up ctx "///my-controlplane"
+   ```
+
+   The command configures your local `kubeconfig` to connect to the control plane.
+
+4. Run the following command to import the archive file into the control plane:
+
+   ```bash
+   up controlplane migration import --input
+   ```
+
+:::note
+By default, the import command leaves the control plane in an inactive state by pausing the reconciliation on managed
+resources. This pause gives you an opportunity to review the imported configuration/state before activating the control plane.
+Use the `--unpause-after-import` flag to change the default behavior and activate the control plane immediately after
+importing the archive file.
+:::
+
+5. Review and validate the imported configuration/state. When you are ready, activate your managed
+   control plane by running the following command:
+
+   ```bash
+   kubectl annotate managed --all crossplane.io/paused-
+   ```
+
+   At this point, you can delete the source Crossplane control plane.
+
+## CLI options
+
+### Filtering
+
+The migration tool captures the state of a control plane. The only filtering
+supported is Kubernetes namespace and Kubernetes resource type filtering.
+
+You can exclude namespaces using the `--exclude-namespaces` CLI option. This prevents the CLI from including unwanted resources in the export.
+
+```bash
+--exclude-namespaces=kube-system,kube-public,kube-node-lease,local-path-storage,...
+
+# A list of specific namespaces to exclude from the export. Defaults to 'kube-system', 'kube-public', 'kube-node-lease', and 'local-path-storage'.
+```
+
+You can exclude Kubernetes resource types by using the `--exclude-resources` CLI option:
+
+```bash
+--exclude-resources=EXCLUDE-RESOURCES,...
+
+# A list of resource types to exclude from the export in "resource.group" format. No resources are excluded by default.
+```
+
+For example, to exclude the CRDs installed by Crossplane functions (since they're not needed):
+
+```bash
+up controlplane migration export \
+  --exclude-resources=gotemplates.gotemplating.fn.crossplane.io,kclinputs.template.fn.crossplane.io
+```
+
+:::warning
+You must specify resource names in lowercase "resource.group" format (for example, `gotemplates.gotemplating.fn.crossplane.io`). Using only the resource kind (for example, `GoTemplate`) isn't supported.
+:::
+
+:::tip Function Input CRDs
+
+Exclude function input CRDs (`inputs.template.fn.crossplane.io`, `resources.pt.fn.crossplane.io`, `gotemplates.gotemplating.fn.crossplane.io`, `kclinputs.template.fn.crossplane.io`) from migration exports. Upbound automatically recreates these resources during import. Function input CRDs typically have owner references to function packages and may have restricted RBAC access. Upbound installs these CRDs during the import when function packages are restored.
+
+:::
+
+After export, you can also edit the archive file to include only the necessary resources.
+
+### Export non-Crossplane resources
+
+Use the `--include-extra-resources=` CLI option to select other CRD types to include in the export.
+
+### Set the kubecontext
+
+Currently `--context` isn't supported in the migration CLI. Use the `--kubeconfig` CLI option instead, pointing at a kubeconfig file that's set to the correct context. For example:
+
+```bash
+up controlplane migration export --kubeconfig
+```
+
+Use this in tandem with `up ctx` to manage the control plane context in a kubeconfig file:
+
+```bash
+up ctx --kubeconfig ~/.kube/config
+
+# To list the current context
+up ctx . --kubeconfig ~/.kube/config
+```
+
+## Export archive
+
+The migration CLI exports an archive upon successful completion. Below is an example export of a control plane that excludes several CRD types and skips the confirmation prompt. A file gets written to the working directory, unless you select another output file:
+
+ +View the example export + +```bash +$ up controlplane migration export --exclude-resources=gotemplates.gotemplating.fn.crossplane.io,kclinputs.template.fn.crossplane.io --yes +Exporting control plane state... +✓ Scanning control plane for types to export... 121 types found! 👀 +✓ Exporting 121 Crossplane resources...60 resources exported! 📤 +✓ Exporting 3 native resources...8 resources exported! 📤 +✓ Archiving exported state... archived to "xp-state.tar.gz"! 📦 +``` + +
+
+
+When an export occurs, a file named `xp-state.tar.gz` gets created in the working directory by default. You can unzip the file; the contents of the export are all plain-text YAML files.
+
+- Each CRD (for example `vpcs.ec2.aws.upbound.io`) gets its own directory, which contains:
+  - A `metadata.yaml` file that contains the Kubernetes object metadata and a list of the Kubernetes categories the resource belongs to
+  - A `cluster` directory that contains YAML manifests for all resources provisioned using the CRD
+
+Sample contents for a cluster with a single `XNetwork` Composite from
+[configuration-aws-network][configuration-aws-network] are shown below:
+
+ +View the example cluster content + +```bash +├── compositionrevisions.apiextensions.crossplane.io +│ ├── cluster +│ │ ├── kcl.xnetworks.aws.platform.upbound.io-4ca6a8a.yaml +│ │ └── xnetworks.aws.platform.upbound.io-9859a34.yaml +│ └── metadata.yaml +├── configurations.pkg.crossplane.io +│ ├── cluster +│ │ └── configuration-aws-network.yaml +│ └── metadata.yaml +├── deploymentruntimeconfigs.pkg.crossplane.io +│ ├── cluster +│ │ └── default.yaml +│ └── metadata.yaml +├── export.yaml +├── functions.pkg.crossplane.io +│ ├── cluster +│ │ ├── crossplane-contrib-function-auto-ready.yaml +│ │ ├── crossplane-contrib-function-go-templating.yaml +│ │ └── crossplane-contrib-function-kcl.yaml +│ └── metadata.yaml +├── internetgateways.ec2.aws.upbound.io +│ ├── cluster +│ │ └── borrelli-backup-test-xgl4q.yaml +│ └── metadata.yaml +├── mainroutetableassociations.ec2.aws.upbound.io +│ ├── cluster +│ │ └── borrelli-backup-test-t2qh7.yaml +│ └── metadata.yaml +├── namespaces +│ └── cluster +│ ├── crossplane-system.yaml +│ ├── default.yaml +│ └── upbound-system.yaml +├── providerconfigs.aws.upbound.io +│ ├── cluster +│ │ └── default.yaml +│ └── metadata.yaml +├── providerconfigusages.aws.upbound.io +│ ├── cluster +│ │ ├── 0a2a3ec6-ef13-45f9-9cf0-63af7f4a6b6b.yaml +...redacted +│ │ └── f7092b0f-3a78-4bfe-82c8-57e5085a9b11.yaml +│ └── metadata.yaml +├── providers.pkg.crossplane.io +│ ├── cluster +│ │ ├── upbound-provider-aws-ec2.yaml +│ │ └── upbound-provider-family-aws.yaml +│ └── metadata.yaml +├── routes.ec2.aws.upbound.io +│ ├── cluster +│ │ └── borrelli-backup-test-dt9cj.yaml +│ └── metadata.yaml +├── routetableassociations.ec2.aws.upbound.io +│ ├── cluster +│ │ ├── borrelli-backup-test-mr2sd.yaml +│ │ ├── borrelli-backup-test-ngq5h.yaml +│ │ ├── borrelli-backup-test-nrkgg.yaml +│ │ └── borrelli-backup-test-wq752.yaml +│ └── metadata.yaml +├── routetables.ec2.aws.upbound.io +│ ├── cluster +│ │ └── borrelli-backup-test-dv4mb.yaml +│ └── metadata.yaml +├── secrets +│ └── namespaces +│ ├── crossplane-system +│ │ ├── cert-token-signing-gateway-pub.yaml +│ │ ├── mxp-hostcluster-certs.yaml +│ │ ├── package-pull-secret.yaml +│ │ └── xgql-tls.yaml +│ └── upbound-system +│ └── aws-creds.yaml +├── securitygrouprules.ec2.aws.upbound.io +│ ├── cluster +│ │ ├── borrelli-backup-test-472f4.yaml +│ │ └── borrelli-backup-test-qftmw.yaml +│ └── metadata.yaml +├── securitygroups.ec2.aws.upbound.io +│ ├── cluster +│ │ └── borrelli-backup-test-w5jch.yaml +│ └── metadata.yaml +├── storeconfigs.secrets.crossplane.io +│ ├── cluster +│ │ └── default.yaml +│ └── metadata.yaml +├── subnets.ec2.aws.upbound.io +│ ├── cluster +│ │ ├── borrelli-backup-test-8btj6.yaml +│ │ ├── borrelli-backup-test-gbmrm.yaml +│ │ ├── borrelli-backup-test-m7kh7.yaml +│ │ └── borrelli-backup-test-nttt5.yaml +│ └── metadata.yaml +├── vpcs.ec2.aws.upbound.io +│ ├── cluster +│ │ └── borrelli-backup-test-7hwgh.yaml +│ └── metadata.yaml +└── xnetworks.aws.platform.upbound.io +├── cluster +│ └── borrelli-backup-test.yaml +└── metadata.yaml +43 directories, 87 files +``` + +
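+
+The archive is a standard gzipped tarball, so you can inspect it with ordinary tools. A minimal sketch, assuming the default `xp-state.tar.gz` file name:
+
+```shell
+# List the archive contents without extracting
+tar -tzf xp-state.tar.gz | head
+
+# Print the export manifest directly to stdout
+tar -xzf xp-state.tar.gz -O export.yaml
+
+# Extract everything into a working directory for editing
+mkdir xp-state && tar -xzf xp-state.tar.gz -C xp-state
+```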
+ + +The `export.yaml` file contains metadata about the export, including the configuration of the export, Crossplane information, and what's included in the export bundle. + +
+ +View the export + +```yaml +version: v1alpha1 +exportedAt: 2025-01-06T17:39:53.173222Z +options: + excludedNamespaces: + - kube-system + - kube-public + - kube-node-lease + - local-path-storage + includedResources: + - namespaces + - configmaps + - secrets + excludedResources: + - gotemplates.gotemplating.fn.crossplane.io + - kclinputs.template.fn.crossplane.io +crossplane: + distribution: universal-crossplane + namespace: crossplane-system + version: 1.17.3-up.1 + featureFlags: + - --enable-provider-identity + - --enable-environment-configs + - --enable-composition-functions + - --enable-usages +stats: + total: 68 + nativeResources: + configmaps: 0 + namespaces: 3 + secrets: 5 + customResources: + amicopies.ec2.aws.upbound.io: 0 + amilaunchpermissions.ec2.aws.upbound.io: 0 + amis.ec2.aws.upbound.io: 0 + availabilityzonegroups.ec2.aws.upbound.io: 0 + capacityreservations.ec2.aws.upbound.io: 0 + carriergateways.ec2.aws.upbound.io: 0 + compositeresourcedefinitions.apiextensions.crossplane.io: 0 + compositionrevisions.apiextensions.crossplane.io: 2 + compositions.apiextensions.crossplane.io: 0 + configurationrevisions.pkg.crossplane.io: 0 + configurations.pkg.crossplane.io: 1 +...redacted +``` + +
+
+### Skipped resources
+
+In addition to the resources excluded via CLI options, the following resources aren't
+included in the backup:
+
+- The `kube-root-ca.crt` ConfigMap, since this is cluster-specific
+- Resources directly managed via Helm (resources from ArgoCD's Helm implementation, which templates
+Helm manifests and then applies them, do get included in the backup). The migration creates the exclusion list by looking for:
+  - Any resource with the label `"app.kubernetes.io/managed-by" == "Helm"`
+  - Kubernetes Secrets with the label prefix `helm.sh/release`. For example, `helm.sh/release.v1`
+- Resources installed via a Crossplane package. These have an `ownerReference` with
+a prefix `pkg.crossplane.io`. The expectation is that during import, the Crossplane Package Manager bears responsibility for installing the resources.
+- Crossplane Locks: any `Lock.pkg.crossplane.io` resource isn't included in the
+export.
+
+## Restore
+
+The following is an example of a successful import run. At the end of the import, all Managed Resources are in a paused state.
+
+ +View the migration import + +```bash +$ up controlplane migration import +Importing control plane state... +✓ Reading state from the archive... Done! 👀 +✓ Importing base resources... 18 resources imported! 📥 +✓ Waiting for XRDs... Established! ⏳ +✓ Waiting for Packages... Installed and Healthy! ⏳ +✓ Importing remaining resources... 50 resources imported! 📥 +✓ Finalizing import... Done! 🎉 +``` + +
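+
+Before unpausing anything, you can confirm which resources the importer paused. A quick sketch using standard `kubectl`; the annotation name comes from the import behavior described below:
+
+```shell
+# Show each managed resource and whether it carries the paused annotation
+kubectl get managed -o custom-columns='NAME:.metadata.name,PAUSED:.metadata.annotations.crossplane\.io/paused'
+```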
+Your scenario may involve migrating resources that already exist through other automation on the platform. When executing an import in these circumstances, the importer applies the new manifests to the cluster. If a resource already exists, the restore overwrites its fields with the values from the backup.
+
+The importer restores all resources in the export archive. Managed Resources get imported with the `crossplane.io/paused: "true"` annotation set. Use the `--unpause-after-import` CLI argument to automatically un-pause resources that got
+paused during backup, or remove the annotation manually.
+
+### Restore order
+
+The importer restores resources based on their Kubernetes types. The restore order doesn't account for parent/child relationships.
+
+Because Crossplane Composites create new Managed Resources if not present on the cluster, all
+Claims, Composites and Managed Resources get imported in a paused state. You can un-pause them after the restore completes.
+
+The first step of import is installing Base Resources into the cluster. These resources (such as
+packages and XRDs) must be ready before proceeding with the import.
+Base Resources are:
+
+- Kubernetes Resources
+  - ConfigMaps
+  - Namespaces
+  - Secrets
+- Crossplane Resources
+  - ControllerConfigs: `controllerconfigs.pkg.crossplane.io`
+  - DeploymentRuntimeConfigs: `deploymentruntimeconfigs.pkg.crossplane.io`
+  - StoreConfigs: `storeconfigs.secrets.crossplane.io`
+- Crossplane Packages
+  - Providers: `providers.pkg.crossplane.io`
+  - Functions: `functions.pkg.crossplane.io`
+  - Configurations: `configurations.pkg.crossplane.io`
+
+Restore waits for the base resources to be `Ready` before moving on to the next step. Next, restore walks through the archive and restores all the manifests present.
+
+During import, the `crossplane.io/paused` annotation gets added to Managed Resources, Claims
+and Composites.
+
+To manually un-pause managed resources after an import, remove the annotation by running:
+
+```bash
+kubectl annotate managed --all crossplane.io/paused-
+```
+
+You can also run import again with the `--unpause-after-import` flag to remove the annotations.
+
+```bash
+up controlplane migration import --unpause-after-import
+```
+
+### Restoring resource status
+
+The importer applies the status of all resources during import, using the stored CRD version to determine whether that version defines a status field.
+
+
+[cli-command]: /reference/cli-reference
+[up-cli]: /reference/cli-reference
+[up-cli-1]: /manuals/cli/overview
+[create-command]: /reference/cli-reference
+[up-ctx]: /reference/cli-reference
+[configuration-aws-network]: https://marketplace.upbound.io/configurations/upbound/configuration-aws-network
diff --git a/spaces_versioned_docs/version-v1.15/howtos/observability.md b/spaces_versioned_docs/version-v1.15/howtos/observability.md
new file mode 100644
index 000000000..8fc5c3278
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.15/howtos/observability.md
@@ -0,0 +1,395 @@
+---
+title: Observability
+sidebar_position: 50
+description: A guide for how to use the integrated observability pipeline feature
+  in a Space.
+plan: "enterprise"
+---
+
+This guide explains how to configure observability in Upbound Spaces. Upbound
+provides integrated observability features built on
+[OpenTelemetry][opentelemetry] to collect, process, and export logs, metrics,
+and traces.
+
+Upbound Spaces offers two levels of observability:
+
+1.
**Space-level observability** - Observes the cluster infrastructure where Spaces software is installed (Self-Hosted only) +2. **Control plane observability** - Observes workloads running within individual control planes + + + + + +:::info API Version Information & Version Selector +This guide applies to all supported versions (v1.9-v1.15+). Observability features evolved: + +- **v1.9-v1.12**: SharedTelemetryConfig available (alpha until v1.11) +- **v1.11+**: Observability promoted to stable with logs export support +- **v1.14+**: Both space-level and control-plane observability GA + +**View API Reference for Your Version**: +| Version | Status | Link | +|---------|--------|------| +| v1.15 | Latest | [Spaces API v1.15](../../../next/reference/apis/spaces-api/) | +| v1.14 | Supported | [Spaces API v1.14](../../../1.14/reference/apis/spaces-api/) | +| v1.13 | Supported | [Spaces API v1.13](../../../1.13/reference/apis/spaces-api/) | +| v1.12 | Supported | [Spaces API v1.12](../../../1.12/reference/apis/spaces-api/) | +| v1.11 | Supported | [Spaces API v1.11](../../../1.11/reference/apis/spaces-api/) | +| v1.10 | Limited | [Spaces API v1.10](../../../1.10/reference/apis/spaces-api/) | +| v1.9 | EOL | [Spaces API v1.9](../../../1.9/reference/apis/spaces-api/) | + +For version support policy and feature availability, see and . +::: + +:::important +**Space-level observability** (available since v1.6.0, GA in v1.14.0): +- Disabled by default +- Requires manual enablement and configuration +- Self-Hosted Spaces only + +**Control plane observability** (available since v1.13.0, GA in v1.14.0): +- Enabled by default +- No additional configuration required +::: + + + + +## Prerequisites + + +**Control plane observability** is enabled by default. No additional setup is +required. + + + +### Self-hosted Spaces + +1. **Enable the observability feature** when installing Spaces: + ```bash + up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \ + ... + --set "observability.enabled=true" + ``` + +Set `features.alpha.observability.enabled=true` instead if using Spaces version +before `v1.14.0`. + +2. **Install OpenTelemetry Operator** (required for Space-level observability): + ```bash + kubectl apply -f https://github.com/open-telemetry/opentelemetry-operator/releases/download/v0.116.0/opentelemetry-operator.yaml + ``` + + :::important + If running Spaces `v1.11` or later, use OpenTelemetry Operator `v0.110.0` or later due to breaking changes. + ::: + + +## Space-level Observability + +Space-level observability is only available for self-hosted Spaces and allows +administrators to observe the cluster infrastructure. + +### Configuration + +Configure Space-level observability using the `spacesCollector` value in your +Spaces Helm chart: + +```yaml +observability: + spacesCollector: + config: + exporters: + otlphttp: + endpoint: "" + headers: + api-key: YOUR_API_KEY + exportPipeline: + logs: + - otlphttp + metrics: + - otlphttp +``` + +This configuration exports metrics and logs from: + +- Crossplane installation +- Spaces infrastructure (controller, API, router, etc.) + +### Router metrics + +The Spaces router uses Envoy as a reverse proxy and automatically exposes +metrics when you enable Space-level observability. 
These metrics provide visibility into:
+
+- Traffic routing to control planes and services
+- Request status codes, timeouts, and retries
+- Circuit breaker state preventing cascading failures
+- Client connection patterns and request volume
+- Request latency (P50, P95, P99)
+
+For more information about available metrics, example queries, and how to enable
+this feature, see the [Space-level observability guide][space-level-o11y].
+
+## Control plane observability
+
+Control plane observability collects telemetry data from workloads running
+within individual control planes using `SharedTelemetryConfig` resources.
+
+The pipeline deploys [OpenTelemetry Collectors][opentelemetry-collectors] per
+control plane, defined by a `SharedTelemetryConfig` at the group level.
+Collectors pass data to external observability backends.
+
+:::important
+From Spaces `v1.13` and beyond, telemetry only includes user-facing control
+plane workloads (Crossplane, providers, functions).
+
+Self-hosted users can include system workloads (`api-server`, `etcd`) by setting
+`observability.collectors.includeSystemTelemetry=true` in Helm.
+:::
+
+:::important
+Spaces validates `SharedTelemetryConfig` resources before applying them by
+sending telemetry to the configured exporters. For self-hosted Spaces, ensure that
+`spaces-controller` can reach the exporter endpoints.
+:::
+
+### `SharedTelemetryConfig`
+
+`SharedTelemetryConfig` is a group-scoped custom resource that defines telemetry
+configuration for control planes.
+
+#### New Relic example
+
+```yaml
+apiVersion: observability.spaces.upbound.io/v1alpha1
+kind: SharedTelemetryConfig
+metadata:
+  name: newrelic
+  namespace: default
+spec:
+  controlPlaneSelector:
+    labelSelectors:
+      - matchLabels:
+          org: foo
+  exporters:
+    otlphttp:
+      endpoint: https://otlp.nr-data.net
+      headers:
+        api-key: YOUR_API_KEY
+  exportPipeline:
+    metrics: [otlphttp]
+    traces: [otlphttp]
+    logs: [otlphttp]
+```
+
+#### Datadog example
+
+```yaml
+apiVersion: observability.spaces.upbound.io/v1alpha1
+kind: SharedTelemetryConfig
+metadata:
+  name: datadog
+  namespace: default
+spec:
+  controlPlaneSelector:
+    labelSelectors:
+      - matchLabels:
+          org: foo
+  exporters:
+    datadog:
+      api:
+        site: ${DATADOG_SITE}
+        key: ${DATADOG_API_KEY}
+  exportPipeline:
+    metrics: [datadog]
+    traces: [datadog]
+    logs: [datadog]
+```
+
+### Control plane selection
+
+Use `spec.controlPlaneSelector` to specify which control planes should use the
+telemetry configuration.
+
+#### Label-based selection
+
+```yaml
+spec:
+  controlPlaneSelector:
+    labelSelectors:
+      - matchLabels:
+          environment: production
+```
+
+#### Expression-based selection
+
+```yaml
+spec:
+  controlPlaneSelector:
+    labelSelectors:
+      - matchExpressions:
+          - { key: environment, operator: In, values: [production,staging] }
+```
+
+#### Name-based selection
+
+```yaml
+spec:
+  controlPlaneSelector:
+    names:
+      - controlplane-dev
+      - controlplane-staging
+      - controlplane-prod
+```
+
+### Manage sensitive data
+
+:::important
+Available from Spaces `v1.10`
+:::
+
+Store sensitive data in Kubernetes secrets and reference them in your
+`SharedTelemetryConfig`:
+
+1. **Create the secret:**
+   ```bash
+   kubectl create secret generic sensitive -n \
+     --from-literal=apiKey='YOUR_API_KEY'
+   ```
+
+2. **Reference in SharedTelemetryConfig:**
+   ```yaml
+   apiVersion: observability.spaces.upbound.io/v1alpha1
+   kind: SharedTelemetryConfig
+   metadata:
+     name: newrelic
+   spec:
+     configPatchSecretRefs:
+       - name: sensitive
+         key: apiKey
+         path: exporters.otlphttp.headers.api-key
+     controlPlaneSelector:
+       labelSelectors:
+         - matchLabels:
+             org: foo
+     exporters:
+       otlphttp:
+         endpoint: https://otlp.nr-data.net
+         headers:
+           api-key: dummy # Replaced by secret value
+     exportPipeline:
+       metrics: [otlphttp]
+       traces: [otlphttp]
+       logs: [otlphttp]
+   ```
+
+### Telemetry processing
+
+:::important
+Available from Spaces `v1.11`
+:::
+
+Configure processing pipelines to transform telemetry data using the [transform
+processor][transform-processor].
+
+#### Add labels to metrics
+
+```yaml
+spec:
+  processors:
+    transform:
+      error_mode: ignore
+      metric_statements:
+        - context: datapoint
+          statements:
+            - set(attributes["newLabel"], "someLabel")
+  processorPipeline:
+    metrics: [transform]
+```
+
+#### Remove labels
+
+From metrics:
+```yaml
+processors:
+  transform:
+    metric_statements:
+      - context: datapoint
+        statements:
+          - delete_key(attributes, "kubernetes_namespace")
+```
+
+From logs:
+```yaml
+processors:
+  transform:
+    log_statements:
+      - context: log
+        statements:
+          - delete_key(attributes, "log.file.name")
+```
+
+#### Modify log messages
+
+```yaml
+processors:
+  transform:
+    log_statements:
+      - context: log
+        statements:
+          - set(attributes["original"], body)
+          - set(body, Concat(["log message:", body], " "))
+```
+
+### Monitor status
+
+Check the status of your `SharedTelemetryConfig`:
+
+```bash
+kubectl get stc
+NAME      SELECTED   FAILED   PROVISIONED   AGE
+datadog   1          0        1             63s
+```
+
+- `SELECTED`: Number of control planes selected
+- `FAILED`: Number of control planes that failed provisioning
+- `PROVISIONED`: Number of successfully running collectors
+
+For detailed status information:
+
+```bash
+kubectl describe stc
+```
+
+## Supported exporters
+
+Both Space-level and control plane observability support:
+
+- `datadog` - Datadog integration
+- `otlphttp` - General-purpose exporter (used by New Relic, among others)
+- `debug` - Troubleshooting
+
+## Considerations
+
+- **Control plane conflicts**: Each control plane can only use one `SharedTelemetryConfig`. Multiple configs selecting the same control plane conflict.
+- **Custom collector image**: Both Space-level and control plane observability use the same custom OpenTelemetry Collector image with supported exporters.
+- **Resource scope**: `SharedTelemetryConfig` resources are group-scoped, allowing different telemetry configurations per group.
+
+For more advanced configuration options, review the [Helm chart
+reference][helm-chart-reference] and [OpenTelemetry Transformation Language
+documentation][opentelemetry-transformation-language].
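+
+Putting these pieces together, the following is a minimal end-to-end sketch that pairs a processor pipeline with an exporter. The endpoint, API key, and label values are placeholders, not a working backend:
+
+```yaml
+apiVersion: observability.spaces.upbound.io/v1alpha1
+kind: SharedTelemetryConfig
+metadata:
+  name: otlp-with-transform
+  namespace: default
+spec:
+  # Select the control planes to observe
+  controlPlaneSelector:
+    labelSelectors:
+      - matchLabels:
+          environment: production
+  # Add a static label to every metric datapoint before export
+  processors:
+    transform:
+      error_mode: ignore
+      metric_statements:
+        - context: datapoint
+          statements:
+            - set(attributes["team"], "platform")
+  processorPipeline:
+    metrics: [transform]
+  # Ship the processed telemetry to an OTLP-compatible backend
+  exporters:
+    otlphttp:
+      endpoint: https://otlp.example.com
+      headers:
+        api-key: YOUR_API_KEY
+  exportPipeline:
+    metrics: [otlphttp]
+    logs: [otlphttp]
+```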
+
+
+[opentelemetry]: https://opentelemetry.io/
+[opentelemetry-collectors]: https://opentelemetry.io/docs/collector/
+[opentelemetry-collector-configuration]: https://opentelemetry.io/docs/collector/configuration/#exporters
+[opentelemetry-operator]: https://opentelemetry.io/docs/kubernetes/operator/
+[transform-processor]: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/transformprocessor/README.md
+[opentelemetry-transformation-language]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl
+[space-level-o11y]: /spaces/howtos/self-hosted/space-observability
+[helm-chart-reference]: /reference/helm-reference
+[opentelemetry-transformation-language-functions]: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/ottl/ottlfuncs/README.md
+[opentelemetry-transformation-language-contexts]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl/contexts
+[guide-on-ottl]: https://betterstack.com/community/guides/observability/ottl/#a-brief-overview-of-the-ottl-grammar
diff --git a/spaces_versioned_docs/version-v1.15/howtos/query-api.md b/spaces_versioned_docs/version-v1.15/howtos/query-api.md
new file mode 100644
index 000000000..78163de2f
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.15/howtos/query-api.md
@@ -0,0 +1,320 @@
+---
+title: Query API
+sidebar_position: 40
+description: Use the `up` CLI to query objects and resources
+---
+
+Upbound's Query API allows users to inspect objects and resources within their control planes. The read-only `up alpha query` and `up alpha get` CLI commands let you gather information about your control planes quickly and efficiently. These commands follow the [`kubectl` conventions][kubectl-conventions] for filtering, sorting, and retrieving information from your Space.
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+). Query API is available in Cloud Spaces since v1.6 and Self-Hosted since v1.8.
+
+For detailed deployment procedures, see [Deploy Query API infrastructure - Self-Hosted](./self-hosted/query-api.md). For version compatibility details, see the .
+:::
+
+## Using the Query API
+
+The Query API allows you to retrieve control plane information faster than traditional `kubectl` commands. This feature lets you debug your Crossplane resources with the CLI or within the Upbound Console's enhanced management views.
+
+### Query within a single control plane
+
+Use the `up alpha get` command to retrieve information about objects within the current control plane context. This command uses the **Query** endpoint and targets the current control plane.
+
+To switch between control plane groups, use the [`up ctx`][up-ctx] command and change to your desired context with the interactive prompt, or specify your control plane path directly:
+
+```shell
+up ctx ///
+```
+
+You can query within a single control plane with the [`up alpha get` command][up-alpha-get-command] to return more information about a given object within the current kubeconfig context.
+
+The `up alpha get` command can query resource types and aliases to return objects in your control plane.
+
+```shell
+up alpha get managed
+NAME                             READY   SYNCED   AGE
+custom-account1-5bv5j-sa         True    True     15m
+custom-cluster1-bq6dk-net        True    True     15m
+custom-account1-5bv5j-subnet     True    True     15m
+custom-cluster1-bq6dk-nodepool   True    True     15m
+custom-cluster1-bq6dk-cluster    True    True     15m
+custom-account1-5bv5j-net        True    True     15m
+custom-cluster1-bq6dk-subnet     True    True     15m
+custom-cluster1-bq6dk-sa         True    True     15m
+```
+
+The [`-A` flag][a-flag] queries for objects across all namespaces.
+
+```shell
+up alpha get configmaps -A
+NAMESPACE           NAME                                                    AGE
+crossplane-system   uxp-versions-config                                     18m
+crossplane-system   universal-crossplane-config                             18m
+crossplane-system   kube-root-ca.crt                                        18m
+upbound-system      kube-root-ca.crt                                        18m
+kube-system         kube-root-ca.crt                                        18m
+kube-system         coredns                                                 18m
+default             kube-root-ca.crt                                        18m
+kube-node-lease     kube-root-ca.crt                                        18m
+kube-public         kube-root-ca.crt                                        18m
+kube-system         kube-apiserver-legacy-service-account-token-tracking    18m
+kube-system         extension-apiserver-authentication                      18m
+```
+
+To query for [multiple resource types][multiple-resource-types], you can add the name or alias for the resource as a comma separated string.
+
+```shell
+up alpha get providers,providerrevisions
+
+NAME                                                                               HEALTHY   REVISION   IMAGE                                                    STATE    DEP-FOUND   DEP-INSTALLED   AGE
+providerrevision.pkg.crossplane.io/crossplane-contrib-provider-nop-ecc25c121431   True      1          xpkg.upbound.io/crossplane-contrib/provider-nop:v0.2.1   Active                               18m
+NAME                                                          INSTALLED   HEALTHY   PACKAGE                                                  AGE
+provider.pkg.crossplane.io/crossplane-contrib-provider-nop   True        True      xpkg.upbound.io/crossplane-contrib/provider-nop:v0.2.1  18m
+```
+
+### Query multiple control planes
+
+The [`up alpha query` command][up-alpha-query-command] returns a list of objects of any kind within all the control planes in your Space. This command uses either the **SpaceQuery** or **GroupQuery** endpoints depending on your query scope. The `-A` flag switches the query context from the group level to the entire Space.
+
+The `up alpha query` command accepts resources and aliases to return objects across your group or Space.
+
+```shell
+up alpha query crossplane
+
+NAME                                                                                         ESTABLISHED   OFFERED   AGE
+compositeresourcedefinition.apiextensions.crossplane.io/xnetworks.platform.acme.co          True          True      20m
+compositeresourcedefinition.apiextensions.crossplane.io/xaccountscaffolds.platform.acme.co  True          True      20m
+
+
+NAME                                                                         XR-KIND            XR-APIVERSION               AGE
+composition.apiextensions.crossplane.io/xaccountscaffolds.platform.acme.co   XAccountScaffold   platform.acme.co/v1alpha1   20m
+composition.apiextensions.crossplane.io/xnetworks.platform.acme.co           XNetwork           platform.acme.co/v1alpha1   20m
+
+
+NAME                                                                                         REVISION   XR-KIND            XR-APIVERSION               AGE
+compositionrevision.apiextensions.crossplane.io/xaccountscaffolds.platform.acme.co-5ae9da5   1          XAccountScaffold   platform.acme.co/v1alpha1   20m
+compositionrevision.apiextensions.crossplane.io/xnetworks.platform.acme.co-414ce80           1          XNetwork           platform.acme.co/v1alpha1   20m
+
+NAME                                                         READY   SYNCED   AGE
+nopresource.nop.crossplane.io/custom-cluster1-bq6dk-subnet   True    True     19m
+nopresource.nop.crossplane.io/custom-account1-5bv5j-net      True    True     19m
+
+## Output truncated...
+
+```
+
+The [`--sort-by` flag][sort-by-flag] lets you control the order of the returned results. You can express the sort key as a JSONPath expression that resolves to a string or integer field.
+ + +```shell +up alpha query crossplane -A --sort-by="{.metadata.name}" + +CONTROLPLANE NAME AGE +default/test deploymentruntimeconfig.pkg.crossplane.io/default 10m + +CONTROLPLANE NAME AGE TYPE DEFAULT-SCOPE +default/test storeconfig.secrets.crossplane.io/default 10m Kubernetes crossplane-system +``` + +To query for multiple resource types, you can add the name or alias for the resource as a comma separated string. + +```shell +up alpha query namespaces,configmaps -A + +CONTROLPLANE NAME AGE +default/test namespace/upbound-system 15m +default/test namespace/crossplane-system 15m +default/test namespace/kube-system 16m +default/test namespace/default 16m + +CONTROLPLANE NAMESPACE NAME AGE +default/test crossplane-system configmap/uxp-versions-config 15m +default/test crossplane-system configmap/universal-crossplane-config 15m +default/test crossplane-system configmap/kube-root-ca.crt 15m +default/test upbound-system configmap/kube-root-ca.crt 15m +default/test kube-system configmap/coredns 16m +default/test default configmap/kube-root-ca.crt 16m + +## Output truncated... + +``` + +The Query API also allows you to return resource types with specific [label columns][label-columns]. + +```shell +up alpha query composite -A --label-columns=crossplane.io/claim-namespace + +CONTROLPLANE NAME SYNCED READY COMPOSITION AGE CLAIM-NAMESPACE +query-api-test/test xeks.argo.discover.upbound.io/test-k7xbk False xeks.argo.discover.upbound.io 51d default + +CONTROLPLANE NAME EXTERNALDNS SYNCED READY COMPOSITION AGE CLAIM-NAMESPACE +spaces-clusters/controlplane-query-api-test-spaces-playground xexternaldns.externaldns.platform.upbound.io/spaces-cluster-0-xd8v2-lhnl7 6.34.2 True True xexternaldns.externaldns.platform.upbound.io 19d default +default/query-api-test xexternaldns.externaldns.platform.upbound.io/space-awg-kine-f7dxq-nkk2q 6.34.2 True True xexternaldns.externaldns.platform.upbound.io 55d default + +## Output truncated... + +``` + +### Query API request format + +The CLI can also return a version of your query request with the [`--debug` flag][debug-flag]. This flag returns the API spec request for your query. + +```shell +up alpha query composite -A -d + +apiVersion: query.spaces.upbound.io/v1alpha1 +kind: SpaceQuery +metadata: + creationTimestamp: null +spec: + cursor: true + filter: + categories: + - composite + controlPlane: {} + limit: 500 + objects: + controlPlane: true + table: {} + page: {} +``` + +For more complex queries, you can interact with the Query API like a Kubernetes-style API by creating a query and applying it with `kubectl`. + +The example below is a query for `claim` resources in every control plane from oldest to newest and returns specific information about those claims. + + +```yaml +apiVersion: query.spaces.upbound.io/v1alpha1 +kind: SpaceQuery +spec: + filter: + categories: + - claim + order: + - creationTimestamp: Asc + cursor: true + count: true + objects: + id: true + controlPlane: true + object: + kind: true + apiVersion: true + metadata: + name: true + uid: true + spec: + containers: + image: true +``` + + +The Query API is served by the Spaces API endpoint. You can use `up ctx` to +switch the kubectl context to the Spaces API ingress. After that, you can use +`kubectl create` and receive the `response` for your query parameters. 
+ + +```shell +kubectl create -f spaces-query.yaml -o yaml +``` + +Your `response` should look similar to this example: + +```yaml {copy-lines="none"} +apiVersion: query.spaces.upbound.io/v1alpha1 +kind: SpaceQuery +metadata: + creationTimestamp: "2024-08-08T14:41:46Z" + name: default +response: + count: 3 + cursor: + next: "" + page: 0 + pageSize: 100 + position: 0 + objects: + - controlPlane: + name: query-api-test + namespace: default + id: default/query-api-test/823b2781-7e70-4d91-a6f0-ee8f455d67dc + object: + apiVersion: spaces.platform.upbound.io/v1alpha1 + kind: Space + metadata: + name: space-awg-kine + resourceVersion: "803868" + uid: 823b2781-7e70-4d91-a6f0-ee8f455d67dc + spec: {} + - controlPlane: + name: test-1 + namespace: test + id: test/test-1/08a573dd-851a-42cc-a600-b6f6ed37ee8d + object: + apiVersion: argo.discover.upbound.io/v1alpha1 + kind: EKS + metadata: + name: test-1 + resourceVersion: "4270320" + uid: 08a573dd-851a-42cc-a600-b6f6ed37ee8d + spec: {} + - controlPlane: + name: controlplane-query-api-test-spaces-playground + namespace: spaces-clusters + id: spaces-clusters/controlplane-query-api-test-spaces-playground/b5a6770f-1f85-4d09-8990-997c84bd4159 + object: + apiVersion: spaces.platform.upbound.io/v1alpha1 + kind: Space + metadata: + name: spaces-cluster-0 + resourceVersion: "1408337" + uid: b5a6770f-1f85-4d09-8990-997c84bd4159 + spec: {} +``` + + +## Query API Explorer + + + +import CrdDocViewer from '@site/src/components/CrdViewer'; + +### Query + +The Query resource allows you to query objects in a single control plane. + + + +### GroupQuery + +The GroupQuery resource allows you to query objects across a group of control planes. + + + +### SpaceQuery + +The SpaceQuery resource allows you to query objects across all control planes in a space. + + + + + + +[documentation]: /spaces/howtos/self-hosted/query-api +[up-ctx]: /reference/cli-reference +[up-alpha-get-command]: /reference/cli-reference +[a-flag]: /reference/cli-reference +[multiple-resource-types]: /reference/cli-reference +[up-alpha-query-command]: /reference/cli-reference +[sort-by-flag]: /reference/cli-reference +[label-columns]: /reference/cli-reference +[debug-flag]: /reference/cli-reference +[kubectl-conventions]: https://kubernetes.io/docs/reference/kubectl/generated/kubectl_get/ diff --git a/spaces_versioned_docs/version-v1.15/howtos/secrets-management.md b/spaces_versioned_docs/version-v1.15/howtos/secrets-management.md new file mode 100644 index 000000000..88e730ae5 --- /dev/null +++ b/spaces_versioned_docs/version-v1.15/howtos/secrets-management.md @@ -0,0 +1,719 @@ +--- +title: Secrets Management +sidebar_position: 20 +description: A guide for how to configure synchronizing external secrets into control + planes in a Space. +--- + +Upbound's _Shared Secrets_ is a built in secrets management feature that +provides an integrated way to manage secrets across your platform. It allows you +to store sensitive data like passwords and certificates for your managed control +planes as secrets in an external secret store. + +This feature is a wrapper around the [External Secrets Operator (ESO)][external-secrets-operator-eso] that pulls secrets from external vaults and distributes them across your platform. + +:::info API Version Information +This guide applies to all supported versions (v1.9-v1.14+). The SharedSecretStore and SharedExternalSecret CRDs have been stable since v1.9. + +For API specifications and configuration details, see the [Spaces API Reference](../../../reference/apis/spaces-api/). 
For version compatibility details, see the .
+:::
+
+## Benefits
+
+The Shared Secrets feature allows you to:
+
+* Access secrets from a variety of external secret stores without operational overhead
+* Configure synchronization for multiple control planes in a group
+* Store and manage all your secrets centrally
+* Use Shared Secrets across all Upbound environments (Cloud and Disconnected Spaces)
+* Synchronize secrets across groups of control planes while maintaining clear security boundaries
+* Manage secrets at scale programmatically while ensuring proper isolation and access control
+
+## Understanding the architecture
+
+The Shared Secrets feature uses a hierarchical approach to centrally manage
+secrets and effectively control their distribution.
+
+![Shared Secrets workflow diagram](/img/shared-secrets-workflow.png)
+
+1. The flow begins at the group level, where you define your secret sources and distribution rules
+2. These rules automatically create corresponding resources in your control planes
+3. In each control plane, specific namespaces receive the secrets
+4. Changes at the group level automatically propagate through this chain
+
+## Component configuration
+
+Upbound Shared Secrets consists of two components:
+
+1. **SharedSecretStore**: Defines connections to external secret providers
+2. **SharedExternalSecret**: Specifies which secrets to synchronize and where
+
+### Connect to an external vault
+
+The `SharedSecretStore` component is the connection point to your external
+secret vaults. It provisions ClusterSecretStore resources into control planes
+within the group.
+
+#### AWS Secrets Manager
+
+In this example, you'll create a `SharedSecretStore` to connect to AWS
+Secrets Manager in `us-west-2`. You'll then grant access to all control planes labeled with
+`environment: production` and make these secrets available in the `default` and
+`crossplane-system` namespaces.
+
+You can configure access to AWS Secrets Manager using static credentials or
+workload identity.
+
+:::important
+While the underlying ESO API supports more auth methods, static credentials are currently the only supported auth method in Cloud Spaces.
+:::
+
+##### Static credentials
+
+1. Use the AWS CLI to create access credentials.
+
+2. Store the access key ID and secret access key in a secret in the namespace you want to have access to the `SharedSecretStore`. The key names must match the ones the `SharedSecretStore` references:
+```shell
+kubectl create secret \
+  generic aws-credentials \
+  -n default \
+  --from-literal=access-key-id= \
+  --from-literal=secret-access-key=
+```
+
+3. Create a `SharedSecretStore` custom resource file called `secretstore.yaml` and paste the following configuration:
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: aws-secrets
+spec:
+  # Define which control planes should receive this configuration
+  controlPlaneSelector:
+    labelSelectors:
+      - matchLabels:
+          environment: production
+
+  # Define which namespaces within those control planes can access secrets
+  namespaceSelector:
+    names:
+      - default
+      - crossplane-system
+
+  # Configure the connection to AWS Secrets Manager
+  provider:
+    aws:
+      service: SecretsManager
+      region: us-west-2
+      auth:
+        secretRef:
+          accessKeyIDSecretRef:
+            name: aws-credentials
+            key: access-key-id
+          secretAccessKeySecretRef:
+            name: aws-credentials
+            key: secret-access-key
+```
+
+##### Workload Identity with IRSA
+
+You can also use AWS IAM Roles for Service Accounts (IRSA) depending on your
+organization's needs:
+
+1. Ensure you have deployed the Spaces software into an IRSA-enabled EKS cluster.
+2. Follow the AWS instructions to create an IAM OIDC provider with your EKS OIDC
+   provider URL.
+3. Determine the Spaces-generated `controlPlaneID` of your control plane:
+```shell
+kubectl get controlplane -o jsonpath='{.status.controlPlaneID}'
+```
+
+4. Create an IAM trust policy in your AWS account to match the control plane.
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Principal": {
+        "Federated": "arn:aws:iam:::oidc-provider/"
+      },
+      "Action": "sts:AssumeRoleWithWebIdentity",
+      "Condition": {
+        "StringEquals": {
+          ":aud": "sts.amazonaws.com",
+          ":sub": [
+            "system:serviceaccount:mxp--system:external-secrets-controller"]
+        }
+      }
+    }
+  ]
+}
```
+
+5. Update your Spaces deployment to annotate the SharedSecrets service account
+   with the role ARN.
+```shell
+up space upgrade ... \
+  --set controlPlanes.sharedSecrets.serviceAccount.customAnnotations."eks\.amazonaws\.com/role-arn"=""
+```
+
+6. Create a SharedSecretStore and reference the SharedSecrets service account:
+```yaml {copy-lines="all"}
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: aws-sm
+  namespace: default
+spec:
+  provider:
+    aws:
+      service: SecretsManager
+      region: 
+      auth:
+        jwt:
+          serviceAccountRef:
+            name: external-secrets-controller
+  controlPlaneSelector:
+    names:
+      - 
+  namespaceSelector:
+    names:
+      - default
+```
+
+When you create a `SharedSecretStore`, the underlying mechanism:
+
+1. Applies at the group level
+2. Determines which control planes should receive this configuration based on the `controlPlaneSelector`
+3. Automatically creates a ClusterSecretStore inside each identified control plane
+4. Maintains a connection in each control plane with the ClusterSecretStore
+   credentials and configuration from the parent SharedSecretStore
+
+Upbound automatically generates a ClusterSecretStore in each matching control
+plane when you create a SharedSecretStore.
+
+```yaml {copy-lines="none"}
+# Automatically created in each matching control plane
+apiVersion: external-secrets.io/v1beta1
+kind: ClusterSecretStore
+metadata:
+  name: aws-secrets # Name matches the parent SharedSecretStore
+spec:
+  provider:
+    upboundspaces:
+      storeRef:
+        name: aws-secrets
+```
+
+When you create the SharedSecretStore, the controller replaces the provider with
+a special provider called `upboundspaces`. This provider references the
+SharedSecretStore object in the Spaces API. This avoids copying the actual cloud
+credentials from Spaces to each control plane.
+This workflow allows you to configure the store connection once at the
+group level and automatically propagate it to each control plane. Individual control
+planes can use the store without exposure to the group-level configuration, and
+updating the SharedSecretStore updates all child ClusterSecretStores.
+
+#### Azure Key Vault
+
+:::important
+While the underlying ESO API supports more auth methods, static credentials are currently the only supported auth method in Cloud Spaces.
+:::
+
+##### Static credentials
+
+1. Use the Azure CLI to create a service principal and save the credentials in a file called `azure-credentials.json`:
+```json
+{
+  "appId": "myAppId",
+  "displayName": "myServicePrincipalName",
+  "password": "myServicePrincipalPassword",
+  "tenant": "myTenantId"
+}
+```
+
+2. Store the client ID and client secret as a Kubernetes secret. The key names must match the references in the `SharedSecretStore`:
+```shell
+kubectl create secret \
+  generic azure-secret-sp \
+  -n default \
+  --from-literal=ClientID= \
+  --from-literal=ClientSecret=
+```
+
+3. Create a SharedSecretStore referencing these credentials:
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: azure-kv
+spec:
+  provider:
+    azurekv:
+      tenantId: ""
+      vaultUrl: ""
+      authSecretRef:
+        clientId:
+          name: azure-secret-sp
+          key: ClientID
+        clientSecret:
+          name: azure-secret-sp
+          key: ClientSecret
+  controlPlaneSelector:
+    names:
+      - 
+  namespaceSelector:
+    names:
+      - default
+```
+
+##### Workload Identity
+
+You can also use Entra Workload Identity Federation to access Azure Key Vault
+without needing to manage secrets.
+
+To use Entra Workload ID with AKS:
+
+1. Deploy the Spaces software into a [workload identity-enabled AKS cluster][workload-identity-enabled-aks-cluster].
+2. Retrieve the OIDC issuer URL of the AKS cluster:
+```shell
+az aks show --name "" \
+  --resource-group "" \
+  --query "oidcIssuerProfile.issuerUrl" \
+  --output tsv
+```
+
+3. Use the Azure CLI to create a managed identity:
+```shell
+az identity create \
+  --name "" \
+  --resource-group "" \
+  --location "" \
+  --subscription ""
+```
+
+4. Look up the managed identity's client ID:
+```shell
+az identity show \
+  --resource-group "" \
+  --name "" \
+  --query 'clientId' \
+  --output tsv
+```
+
+5. Update your Spaces deployment to annotate the SharedSecrets service account with the associated Entra application client ID from the previous step:
+```shell
+up space upgrade ... \
+  --set controlPlanes.sharedSecrets.serviceAccount.customAnnotations."azure\.workload\.identity/client-id"="" \
+  --set-string controlPlanes.sharedSecrets.pod.customLabels."azure\.workload\.identity/use"="true"
+```
+
+6. Determine the Spaces-generated `controlPlaneID` of your control plane. When you deploy a `kind: controlplane` in a Space, the Spaces software deploys a set of pods in a new namespace following the format `mxp--system`.
+```shell
+kubectl get controlplane -o jsonpath='{.status.controlPlaneID}'
+```
+
+7. Create a federated identity credential.
+```shell
+FEDERATED_IDENTITY_CREDENTIAL_NAME=
+USER_ASSIGNED_IDENTITY_NAME=
+RESOURCE_GROUP=
+AKS_OIDC_ISSUER=
+CONTROLPLANE_ID=
+az identity federated-credential create --name ${FEDERATED_IDENTITY_CREDENTIAL_NAME} --identity-name "${USER_ASSIGNED_IDENTITY_NAME}" --resource-group "${RESOURCE_GROUP}" --issuer "${AKS_OIDC_ISSUER}" --subject system:serviceaccount:"mxp-${CONTROLPLANE_ID}-system:external-secrets-controller" --audience api://AzureADTokenExchange
+```
+
+8. Assign the `Key Vault Secrets User` role to the user-assigned managed identity that you created earlier.
This step gives the managed identity permission to read secrets from the key vault:
+```shell
+az role assignment create \
+  --assignee-object-id "${IDENTITY_PRINCIPAL_ID}" \
+  --role "Key Vault Secrets User" \
+  --scope "${KEYVAULT_RESOURCE_ID}" \
+  --assignee-principal-type ServicePrincipal
+```
+
+:::important
+You must manually restart a workload's pod when you add the annotation to the running pod's service account. The Entra workload identity mutating admission webhook requires a restart to inject the necessary environment.
+:::
+
+9. Create a `SharedSecretStore`. Replace `vaultUrl` with the URL of your Azure Key Vault instance. Replace `identityId` with the client ID of the managed identity created earlier:
+```yaml {copy-lines="all"}
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: azure-kv
+spec:
+  provider:
+    azurekv:
+      authType: WorkloadIdentity
+      vaultUrl: ""
+      identityId: ""
+  controlPlaneSelector:
+    names:
+      - 
+  namespaceSelector:
+    names:
+      - default
+```
+
+#### Google Cloud Secret Manager
+
+You can configure access to Google Cloud Secret Manager using static credentials or workload identity. Below are instructions for configuring either. See the [ESO provider API][eso-provider-api] for more information.
+
+:::important
+While the underlying ESO API supports more auth methods, static credentials are currently the only supported auth method in Cloud Spaces.
+:::
+
+##### Static credentials
+
+1. Use the [GCP CLI][gcp-cli] to create access credentials.
+2. Save the output in a file called `gcp-credentials.json`.
+3. Store the access credentials in a secret in the same namespace as the `SharedSecretStore`.
+   ```shell {label="kube-create-secret",copy-lines="all"}
+   kubectl create secret \
+     generic gcpsm-secret \
+     -n default \
+     --from-file=creds=./gcp-credentials.json
+   ```
+
+4. Create a `SharedSecretStore`, referencing the secret created earlier. Replace `projectID` with your GCP Project ID:
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: gcp-sm
+spec:
+  provider:
+    gcpsm:
+      auth:
+        secretRef:
+          secretAccessKeySecretRef:
+            name: gcpsm-secret
+            key: creds
+      projectID: 
+  controlPlaneSelector:
+    names:
+      - 
+  namespaceSelector:
+    names:
+      - default
+```
+
+:::tip
+The example above maps a Shared Secret Store into a single namespace of a single control plane. Read [control plane selection][control-plane-selection] and [namespace selection][namespace-selection] to learn how to map into one or more namespaces of one or more control planes.
+:::
+
+##### Workload identity with Service Accounts to IAM Roles
+
+To configure, grant the `roles/iam.workloadIdentityUser` role to the Kubernetes
+service account in the control plane namespace to impersonate the IAM service
+account.
+
+1. Ensure you've deployed Spaces on a [Workload Identity Federation-enabled][workload-identity-federation-enabled] GKE cluster.
+2. Determine the Spaces-generated `controlPlaneID` of your control plane. When you deploy a `kind: controlplane` in a Space, the Spaces software deploys a set of pods in a new namespace following the format `mxp--system`.
+```shell
+kubectl get controlplane -o jsonpath='{.status.controlPlaneID}'
+```
+
+3. Create a GCP IAM service account with the [GCP CLI][gcp-cli-1]:
+```shell
+gcloud iam service-accounts create \
+  --project=
+```
+
+4. Grant the IAM service account the role to access GCP Secret Manager:
+```shell
+SA_NAME=
+IAM_SA_PROJECT_ID=
+gcloud projects add-iam-policy-binding IAM_SA_PROJECT_ID \
+  --member "serviceAccount:SA_NAME@IAM_SA_PROJECT_ID.iam.gserviceaccount.com" \
+  --role roles/secretmanager.secretAccessor
+```
+
+5. When you enable the Shared Secrets feature, a service account gets created in each control plane for the External Secrets Operator. Apply a [GCP IAM policy binding][gcp-iam-policy-binding] to associate this service account with the desired GCP IAM role.
+```shell
+PROJECT_ID=
+PROJECT_NUMBER=
+CONTROLPLANE_ID=
+gcloud projects add-iam-policy-binding projects/${PROJECT_ID} \
+  --role "roles/iam.workloadIdentityUser" \
+  --member=principal://iam.googleapis.com/projects/${PROJECT_NUMBER}/locations/global/workloadIdentityPools/${PROJECT_ID}.svc.id.goog/subject/ns/mxp-${CONTROLPLANE_ID}-system/sa/external-secrets-controller
+```
+
+6. Update your Spaces deployment to annotate the SharedSecrets service account with the GCP IAM service account's identifier:
+```shell
+up space upgrade ... \
+  --set controlPlanes.sharedSecrets.serviceAccount.customAnnotations."iam\.gke\.io/gcp-service-account"=""
+```
+
+7. Create a `SharedSecretStore`. Replace `projectID` with your GCP Project ID:
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: gcp-sm
+spec:
+  provider:
+    gcpsm:
+      projectID: 
+  controlPlaneSelector:
+    names:
+      - 
+  namespaceSelector:
+    names:
+      - default
+```
+
+:::tip
+The example above maps a Shared Secret Store into a single namespace of a single control plane. Read [control plane selection][control-plane-selection-1] and [namespace selection][namespace-selection-2] to learn how to map into one or more namespaces of one or more control planes.
+:::
+
+### Manage your secret distribution
+
+After you create your SharedSecretStore, you can define which secrets to
+distribute using SharedExternalSecret:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedExternalSecret
+metadata:
+  name: database-credentials
+  namespace: default
+spec:
+  # Select the same control planes as your SharedSecretStore
+  controlPlaneSelector:
+    labelSelectors:
+      - matchLabels:
+          environment: production
+
+  externalSecretSpec:
+    refreshInterval: 1h
+    secretStoreRef:
+      name: aws-secrets # References the SharedSecretStore name
+      kind: ClusterSecretStore
+    target:
+      name: db-credentials
+    data:
+      - secretKey: username
+        remoteRef:
+          key: prod/database/credentials
+          property: username
+      - secretKey: password
+        remoteRef:
+          key: prod/database/credentials
+          property: password
```
+
+This configuration:
+
+* Pulls database credentials from your external secret provider
+* Creates secrets in all production control planes
+* Refreshes the secrets every hour
+* Creates a secret called `db-credentials` in each control plane
+
+When you create a SharedExternalSecret at the group level, Upbound's system
+creates a template for the corresponding ClusterExternalSecrets in each selected
+control plane.
+
+The example below simulates the ClusterExternalSecret that Upbound creates:
+
+```yaml
+# Inside each matching control plane:
+apiVersion: external-secrets.io/v1beta1
+kind: ClusterExternalSecret
+metadata:
+  name: database-credentials
+spec:
+  refreshInterval: 1h
+  secretStoreRef:
+    name: aws-secrets
+    kind: ClusterSecretStore
+  data:
+    - secretKey: username
+      remoteRef:
+        key: prod/database/credentials
+        property: username
+```
+
+The hierarchy in this configuration is:
+
SharedExternalSecret (group level) defines what secrets to distribute
+2. ClusterExternalSecret (control plane level) manages the distribution within
+   each control plane
+3. Kubernetes Secrets (namespace level) are created in specified namespaces
+
+
+#### Control plane selection
+
+To configure which control planes in a group you want to project a SecretStore into, use the `spec.controlPlaneSelector` field. You can either use `labelSelectors` or the `names` of control planes directly. A control plane matches if any of the label selectors match.
+
+This example matches all control planes in the group that have `environment: production` as a label:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: my-secret-store
+spec:
+  controlPlaneSelector:
+    labelSelectors:
+      - matchLabels:
+          environment: production
+```
+
+You can use the more complex `matchExpressions` to match labels based on an expression. This example matches control planes that have label `environment: production` or `environment: staging`:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: my-secret-store
+spec:
+  controlPlaneSelector:
+    labelSelectors:
+      - matchExpressions:
+          - { key: environment, operator: In, values: [production,staging] }
+```
+
+You can also specify the names of control planes directly:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: my-secret-store
+spec:
+  controlPlaneSelector:
+    names:
+      - controlplane-dev
+      - controlplane-staging
+      - controlplane-prod
+```
+
+
+#### Namespace selection
+
+To configure which namespaces **within each matched control plane** to project the secret store into, use the `spec.namespaceSelector` field. The projected secret store only appears in the namespaces matching the provided selector. You can either use `labelSelectors` or the `names` of namespaces directly. A namespace matches if any of the label selectors match.
+
+**For all control planes matched by** `spec.controlPlaneSelector`, this example matches all namespaces in each selected control plane that have `team: team1` as a label:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: my-secret-store
+spec:
+  namespaceSelector:
+    labelSelectors:
+      - matchLabels:
+          team: team1
+```
+
+You can use the more complex `matchExpressions` to match labels based on an expression. This example matches namespaces that have label `team: team1` or `team: team2`:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: my-secret-store
+spec:
+  namespaceSelector:
+    labelSelectors:
+      - matchExpressions:
+          - { key: team, operator: In, values: [team1,team2] }
+```
+
+You can also specify the names of namespaces directly:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: my-secret-store
+spec:
+  namespaceSelector:
+    names:
+      - team1-namespace
+      - team2-namespace
+```
+
+## Configure secrets directly in a control plane
+
+
+The sections above explain how to use group-scoped resources to project secrets into multiple control planes. You can also use ESO API types directly in a control plane, as you would in standalone Crossplane or Kubernetes.
+
+
+See the [ESO documentation][eso-documentation] for a full guide on using the API types. 
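+
+For illustration, a minimal control-plane-scoped sketch follows. It assumes an AWS Secrets Manager backend, a pre-created `aws-creds` secret holding static credentials, and the example secret path used earlier in this guide; all names are placeholders:
+
+```yaml
+# Applied with kubectl while your kubecontext targets a single control plane,
+# not at the group level.
+apiVersion: external-secrets.io/v1beta1
+kind: SecretStore
+metadata:
+  name: aws-sm
+  namespace: default
+spec:
+  provider:
+    aws:
+      service: SecretsManager
+      region: us-east-1
+      auth:
+        secretRef:
+          accessKeyIDSecretRef:
+            name: aws-creds            # placeholder: create this secret first
+            key: access-key
+          secretAccessKeySecretRef:
+            name: aws-creds
+            key: secret-access-key
+---
+apiVersion: external-secrets.io/v1beta1
+kind: ExternalSecret
+metadata:
+  name: db-credentials
+  namespace: default
+spec:
+  refreshInterval: 1h
+  secretStoreRef:
+    name: aws-sm
+    kind: SecretStore
+  target:
+    name: db-credentials               # Kubernetes secret created in this namespace
+  data:
+    - secretKey: password
+      remoteRef:
+        key: prod/database/credentials
+        property: password
+```
+
+Unlike the group-scoped `SharedSecretStore`, these resources live entirely inside one control plane, so no control plane or namespace selectors apply.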
+
+## Best practices
+
+When you configure secrets management in your Upbound environment, keep the
+following best practices in mind:
+
+**Use consistent labeling schemes** across your control planes for predictable
+and manageable secret distribution.
+
+**Organize your secrets** in your external provider using a hierarchical
+structure that mirrors your control plane organization.
+
+**Set appropriate refresh intervals** based on your security requirements and the
+nature of the secrets.
+
+**Use namespace selection sparingly** to limit secret distribution to only the
+namespaces that need them.
+
+**Use separate tokens for each environment.** Keep them in distinct
+SharedSecretStores. Users could bypass SharedExternalSecret selectors by
+creating ClusterExternalSecrets directly in control planes, which grants access
+to all secrets available to that token.
+
+**Document your secret management architecture**, including which control planes
+should receive which secrets.
+
+[control-plane-selection]: #control-plane-selection
+[namespace-selection]: #namespace-selection
+[control-plane-selection-1]: #control-plane-selection
+[namespace-selection-2]: #namespace-selection
+
+[external-secrets-operator-eso]: https://external-secrets.io
+[workload-identity-enabled-aks-cluster]: https://learn.microsoft.com/en-us/azure/aks/workload-identity-deploy-cluster
+[eso-provider-api]: https://external-secrets.io/latest/provider/google-secrets-manager/
+[gcp-cli]: https://cloud.google.com/iam/docs/creating-managing-service-account-keys
+[workload-identity-federation-enabled]: https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity#enable_on_clusters_and_node_pools
+[gcp-cli-1]: https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity#kubernetes-sa-to-iam
+[gcp-iam-policy-binding]: https://cloud.google.com/sdk/gcloud/reference/iam/service-accounts/add-iam-policy-binding
+[eso-documentation]: https://external-secrets.io/latest/introduction/getting-started/
diff --git a/spaces_versioned_docs/version-v1.15/howtos/self-hosted/_category_.json b/spaces_versioned_docs/version-v1.15/howtos/self-hosted/_category_.json
new file mode 100644
index 000000000..5bf23bb0a
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.15/howtos/self-hosted/_category_.json
@@ -0,0 +1,11 @@
+{
+  "label": "Self-Hosted Spaces",
+  "position": 2,
+  "collapsed": true,
+  "customProps": {
+    "plan": "business"
+  }
+
+}
+
+
diff --git a/spaces_versioned_docs/version-v1.15/howtos/self-hosted/administer-features.md b/spaces_versioned_docs/version-v1.15/howtos/self-hosted/administer-features.md
new file mode 100644
index 000000000..ce878014e
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.15/howtos/self-hosted/administer-features.md
@@ -0,0 +1,121 @@
+---
+title: Administer features
+sidebar_position: 12
+description: Enable and disable features in Spaces
+---
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Feature flags and their availability may vary by version.
+
+For detailed feature availability across versions, see the reference docs for your Spaces version.
+:::
+
+This guide shows how to enable or disable features in your self-hosted Space.
+
+## Shared secrets
+
+**Status:** Preview
+
+This feature is enabled by default in Cloud Spaces.
+
+To enable this feature in a self-hosted Space, set
+`features.alpha.sharedSecrets.enabled=true` when installing the Space:
+
+```bash
+up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
+  ...
+ --set "features.alpha.sharedSecrets.enabled=true" \ +``` + + +## Observability + +**Status:** GA +**Available from:** Spaces v1.13+ + +This feature is enabled by default in Cloud Spaces. + + + +To enable this feature in a self-hosted Space, set +`observability.enabled=true` (`features.alpha.observability.enabled=true` before `v1.14.0`) when installing the Space: + +```bash +up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \ + ... + --set "observability.enabled=true" \ +``` + +The observability feature collects telemetry data from user-facing control +plane workloads like: + +* Crossplane +* Providers +* Functions + +Self-hosted Spaces users can add control plane system workloads such as the +`api-server`, `etcd` by setting the +`observability.collectors.includeSystemTelemetry` Helm flag to true. + +### Sensitive data + +To avoid exposing sensitive data in the `SharedTelemetryConfig` resource, use +Kubernetes secrets to store the sensitive data and reference the secret in the +`SharedTelemetryConfig` resource. + +Create the secret in the same namespace/group as the `SharedTelemetryConfig` +resource. The example below uses `kubectl create secret` to create a new secret: + +```bash +kubectl create secret generic sensitive -n \ + --from-literal=apiKey='YOUR_API_KEY' +``` + +Next, reference the secret in the `SharedTelemetryConfig` resource: + +```yaml +apiVersion: observability.spaces.upbound.io/v1alpha1 +kind: SharedTelemetryConfig +metadata: + name: newrelic +spec: + configPatchSecretRefs: + - name: sensitive + key: apiKey + path: exporters.otlphttp.headers.api-key + controlPlaneSelector: + labelSelectors: + - matchLabels: + org: foo + exporters: + otlphttp: + endpoint: https://otlp.nr-data.net + headers: + api-key: dummy # This value is replaced by the secret value, can be omitted + exportPipeline: + metrics: [otlphttp] + traces: [otlphttp] + logs: [otlphttp] +``` + +The `configPatchSecretRefs` field in the `spec` specifies the secret `name`, +`key`, and `path` values to inject the secret value in the +`SharedTelemetryConfig` resource. + +## Shared backups + +As of Spaces `v.12.0`, this feature is enabled by default. + +To disable in a self-hosted Space, pass the `features.alpha.sharedBackup.enabled=false` as a Helm chart value. +`--set "features.alpha.sharedBackup.enabled=false"` + +## Query API + +**Status:** Preview +The Query API is available in the Cloud Space offering and enabled by default. + +Query API is required for self-hosted deployments with connected Spaces. See the +related [documentation][documentation] +to enable this feature. + +[documentation]: /spaces/howtos/query-api/ diff --git a/spaces_versioned_docs/version-v1.15/howtos/self-hosted/attach-detach.md b/spaces_versioned_docs/version-v1.15/howtos/self-hosted/attach-detach.md new file mode 100644 index 000000000..1465921cf --- /dev/null +++ b/spaces_versioned_docs/version-v1.15/howtos/self-hosted/attach-detach.md @@ -0,0 +1,198 @@ +--- +title: Connect or disconnect a Space +sidebar_position: 12 +description: Enable and connect self-hosted Spaces to the Upbound console +--- +:::info API Version Information +This guide applies to **Spaces v1.8.0 and later** (Self-Hosted only). Connection to Upbound console requires Query API and RBAC to be enabled. + +For version-specific features and requirements, see the . Query API setup details, see [Deploy Query API infrastructure](./query-api.md). +::: + +:::important +This feature is in preview. 
Starting in Spaces `v1.8.0` and later, you must +deploy and [enable the Query API][enable-the-query-api] and [enable Upbound +RBAC][enable-upbound-rbac] to connect a Space to Upbound. +::: + +[Upbound][upbound] allows you to connect self-hosted Spaces and enables a streamlined operations and debugging experience in your Console. + +## Usage + +### Connect + +Before you begin, make sure you have: + +- An existing Upbound [organization][organization] in Upbound SaaS. +- The `up` CLI installed and logged into your organization +- `kubectl` installed with the kubecontext of your self-hosted Space cluster. +- A `token.json` license, provided by your Upbound account representative. +- You enabled the [Query API][query-api] in the self-hosted Space. + +Create a new `UPBOUND_SPACE_NAME`. If you don't create a name, `up` automatically generates one for you: + +```ini +export UPBOUND_SPACE_NAME=your-self-hosted-space +``` + +#### With up CLI + +:::tip +The command tries to connect the Space to the org account context pointed at by your `up` CLI profile. Make sure you've logged into Upbound SaaS with `up login -a ` before trying to connect the Space. +::: + +Connect the Space to the Console: + +```bash +up space connect "${UPBOUND_SPACE_NAME}" +``` + +This command installs a Connect agent, creates a service account, and configures permissions in your Upbound cloud organization in the `upbound-system` namespace of your Space. + +#### With Helm + +Export your Upbound org account name to an environment variable called `UPBOUND_ORG_NAME`. You can see this value by running `up org list` after logging on to Upbound. + +```ini +export UPBOUND_ORG_NAME=your-org-name +``` + +Create a new robot token and export it to an environment variable called `UPBOUND_TOKEN`: + +```bash +up robot create "${UPBOUND_SPACE_NAME}" --description="Robot used for authenticating Space '${UPBOUND_SPACE_NAME}' with Upbound Connect" +export UPBOUND_TOKEN=$(up robot token create "$UPBOUND_SPACE_NAME" "$UPBOUND_SPACE_NAME" --file - | jq -r '.token') +``` + +:::note +Follow the [`jq` installation guide][jq-install] if your machine doesn't include +it by default. +::: + +Create a secret containing the robot token: + +```bash +kubectl create secret -n upbound-system generic connect-token --from-literal=token=${UPBOUND_TOKEN} +``` + +Specify your username and password for the helm OCI registry: + +```bash +jq -r .token $SPACES_TOKEN_PATH | helm registry login xpkg.upbound.io -u $(jq -r .accessId $SPACES_TOKEN_PATH) --password-stdin +``` + +In the same cluster where you installed the Spaces software, install the Upbound connect agent with your token secret. + +```bash +helm -n upbound-system upgrade --install agent \ + oci://xpkg.upbound.io/spaces-artifacts/agent \ + --version "0.0.0-441.g68777b9" \ + --set "image.repository=xpkg.upbound.io/spaces-artifacts/agent" \ + --set "registration.image.repository=xpkg.upbound.io/spaces-artifacts/register-init" \ + --set "imagePullSecrets[0].name=upbound-pull-secret" \ + --set "registration.enabled=true" \ + --set "space=${UPBOUND_SPACE_NAME}" \ + --set "organization=${UPBOUND_ORG_NAME}" \ + --set "tokenSecret=connect-token" \ + --wait +``` + + +#### View your Space in the Console + + +Go to the [Upbound Console][upbound-console], log in, and choose the newly connected Space from the Space selector dropdown. + +![A screenshot of the Upbound Console space selector dropdown](/img/attached-space.png) + +:::note +You can only connect a self-hosted Space to a single organization at a time. 
+:::
+
+### Disconnect
+
+#### With up CLI
+
+To disconnect a self-hosted Space, including one whose cluster no longer exists, run the following command:
+
+```bash
+up space disconnect "${UPBOUND_SPACE_NAME}"
+```
+
+If the Space still exists, this command uninstalls the Connect agent and deletes the associated service account and permissions.
+
+#### With Helm
+
+To disconnect a self-hosted Space, including one whose cluster no longer exists, run the following command:
+
+```bash
+helm delete -n upbound-system agent
+```
+
+Clean up the robot token you created for this self-hosted Space:
+
+```bash
+up robot delete "${UPBOUND_SPACE_NAME}" --force
+```
+
+## Security model
+
+### Architecture
+
+![An architectural diagram of a self-hosted Space attached to Upbound](/img/console-attach-architecture.jpg)
+
+:::note
+This diagram illustrates a self-hosted Space running in AWS connected to the global Upbound Console. The same model applies to a Space running in AKS, GKE, or other Kubernetes environments.
+:::
+
+### Data path
+
+Upbound uses a Pub/Sub model over TLS to communicate between Upbound's global
+console and your self-hosted Space. A self-hosted Space establishes a secure
+connection with `connect.upbound.io`, `api.upbound.io`, and `auth.upbound.io` and subscribes to an
+endpoint.
+
+:::important
+Add `connect.upbound.io`, `api.upbound.io`, and `auth.upbound.io` to your organization's list of
+allowed endpoints.
+:::
+
+The
+Upbound Console communicates with the Space through that endpoint. The data flow
+is:
+
+1. Users sign in to the Upbound Console, redirecting to authenticate with an organization's configured Identity Provider via SSO.
+2. Once authenticated, actions in the Console, like listing control planes or viewing specific resource types from a control plane, generate requests. These requests post as messages to the Upbound Connect service.
+3. A user's self-hosted Space polls the Upbound Connect service periodically for new messages, verifies the authenticity of each message, and fulfills the request it contains.
+4. A user's self-hosted Space returns the results of the request to the Upbound Connect service and the Console renders the results in the user's browser session.
+
+**Upbound never stores data originating from a self-hosted Space.** The data is transient and only exposed in the user's browser session. The Console needs this data to render your resources and control planes in the UI.
+
+### Data transmitted
+
+Users interact with the Upbound Console to generate request queries to the Upbound Connect Service while exploring, managing, or debugging a self-hosted Space. These requests send data back to the user's browser session in the Console, including:
+
+* Metadata for the Space
+* Metadata for control planes in the Space
+* Configuration manifests for various resource types within your Space: Crossplane managed resources, composite resources, composite resource claims, Upbound shared secrets, Upbound shared backups, Crossplane providers, ProviderConfigs, Configurations, and Crossplane composition functions.
+
+:::important
+This data only concerns resource configuration. The data _inside_ the managed
+resources in your Space isn't visible at any point.
+:::
+
+**Upbound can't see your data.** Upbound doesn't have access to session-based data rendered for your users in the Upbound Console. Upbound has no information about your self-hosted Space, other than that you've connected a self-hosted Space. 
+
+### Threat vectors
+
+Only users with editor or administrative permissions can make changes through the Console, such as creating or deleting control planes or groups.
+
+
+[enable-the-query-api]: /spaces/howtos/self-hosted/query-api
+[enable-upbound-rbac]: /manuals/platform/concepts/authorization/upbound-rbac
+[upbound]: /manuals/console/upbound-console
+[organization]: /manuals/platform/concepts/identity-management/organizations
+[query-api]: /spaces/howtos/self-hosted/query-api
+[jq-install]: https://jqlang.org/download/
+
+[upbound-console]: https://console.upbound.io
diff --git a/spaces_versioned_docs/version-v1.15/howtos/self-hosted/billing.md b/spaces_versioned_docs/version-v1.15/howtos/self-hosted/billing.md
new file mode 100644
index 000000000..145ff9f03
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.15/howtos/self-hosted/billing.md
@@ -0,0 +1,307 @@
+---
+title: Self-Hosted Space Billing
+sidebar_position: 50
+description: A guide for how billing works in an Upbound Space
+---
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Billing models may evolve between versions; see Capacity Licensing for alternative models.
+
+For version-specific features and capacity-based licensing reference specifications, see [Capacity Licensing](/spaces/howtos/self-hosted/capacity-licensing).
+:::
+
+Spaces are a self-hosting feature of Upbound's [flagship product][flagship-product] for platform teams to deploy control planes in their self-managed environments. You can install Spaces into any Kubernetes cluster in your own cloud account, on-premises data center, or on the edge. The pricing is usage-based and requires an Upbound account and subscription. The billing unit is a `Loop`.
+
+
+:::info
+This guide describes the traditional usage-based billing model using object storage. For disconnected or air-gapped environments, consider [Capacity Licensing](/spaces/howtos/self-hosted/capacity-licensing), which provides a simpler fixed-capacity model with local usage tracking.
+:::
+
+## Billing details
+
+Spaces **aren't connected** to Upbound's global service. To enable proper billing, the Spaces software ships a controller whose responsibility is to collect billing data from your Spaces deployment. The collection and storage of your billing data happens entirely within your environment; no data is automatically emitted back to Upbound's global service. This data gets written to object storage of your choice. AWS, Azure, and GCP are currently supported. The Spaces software exports billing usage data every ~15 seconds.
+
+Spaces customers must periodically provide the billing data to Upbound. Contact your Upbound sales representative to learn more.
+
+
+
+## AWS S3
+
+
+
+Configure billing to write to an S3 bucket by providing the following values at install-time. Create an S3 bucket if you don't already have one.
+
+### IAM policy
+
+You must create an IAM policy and attach it to the IAM user (for static credentials) or IAM role (for assumed
+roles). 
+ +The policy example below enables the necessary S3 permissions: + +```json +{ + "Sid":"EnableS3Permissions", + "Effect":"Allow", + "Action": [ + "s3:PutObject", + "s3:GetObject", + "s3:ListBucket", + "s3:DeleteObject" + ], + "Resource": [ + "arn:aws:s3:::your-bucket-name/*", + "arn:aws:s3:::your-bucket-name" + ] +}, +{ + "Sid": "ListBuckets", + "Effect": "Allow", + "Action": "s3:ListAllMyBuckets", + "Resource": "*" +} +``` + +### Authentication with static credentials + +In your Spaces install cluster, create a secret in the `upbound-system` +namespace. This secret must contain keys `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`. + +```bash +kubectl create secret generic billing-credentials -n upbound-system \ + --from-literal=AWS_ACCESS_KEY_ID= \ + --from-literal=AWS_SECRET_ACCESS_KEY= +``` + +Install the Space software, providing the billing details to the other required values. + + + + + + +```bash {hl_lines="2-6"} +helm -n upbound-system upgrade --install spaces ... \ + --set "billing.enabled=true" \ + --set "billing.storage.provider=aws" \ + --set "billing.storage.aws.region=" \ + --set "billing.storage.aws.bucket=" \ + --set "billing.storage.secretRef.name=billing-credentials" + ... +``` + + + + + +```bash {hl_lines="2-6"} +up space init ... \ + --set "billing.enabled=true" \ + --set "billing.storage.provider=aws" \ + --set "billing.storage.aws.region=" \ + --set "billing.storage.aws.bucket=" \ + --set "billing.storage.secretRef.name=billing-credentials" + ... +``` + + + + + + + +### Authentication with an IAM role + + +To use short-lived credentials with an assumed IAM role, create an IAM role with +established trust to the `vector`-serviceaccount in all `mxp-*-system` +namespaces. + +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "Federated": "arn:aws:iam::12345678912:oidc-provider/oidc.eks.eu-west-2.amazonaws.com/id/YOUROIDCPROVIDERID" + }, + "Action": "sts:AssumeRoleWithWebIdentity", + "Condition": { + "StringLike": { + "oidc.eks.eu-west-2.amazonaws.com/id/YOUROIDCPROVIDERID:sub": "system:serviceaccount:mxp-*-system:vector" + } + } + } + ] +} +``` + +For more information about workload identities, review the [Workload-identity +Configuration documentation][workload-identity-configuration-documentation] + + + + + + +```bash {hl_lines="2-7"} +helm -n upbound-system upgrade --install spaces ... \ + --set "billing.enabled=true" \ + --set "billing.storage.provider=aws" \ + --set "billing.storage.aws.region=" \ + --set "billing.storage.aws.bucket=" \ + --set "billing.storage.secretRef.name=" \ + --set "controlPlanes.vector.serviceAccount.customAnnotations[eks.amazonaws.com/role-arn]=" + ... +``` + + + + + +```bash {hl_lines="2-7"} +up space init ... \ + --set "billing.enabled=true" \ + --set "billing.storage.provider=aws" \ + --set "billing.storage.aws.region=" \ + --set "billing.storage.aws.bucket=" \ + --set "billing.storage.secretRef.name=" \ + --set "controlPlanes.vector.serviceAccount.customAnnotations[eks.amazonaws.com/role-arn]=" + ... +``` + + + + + + +*Note*: You must set `billing.storage.secretRef.name` to an empty string when using an assumed role. + + +## Azure blob storage + +Configure billing to write to a blob in Azure by providing the following values at install-time. Create a storage account and container if you don't already have one. + +Then, on the cluster where you installed the Spaces software, create a secret in `upbound-system`. 
This secret must contain keys `AZURE_TENANT_ID`, `AZURE_CLIENT_ID`, and `AZURE_CLIENT_SECRET`. Make sure to replace the values with details generated from your Azure account. + +```bash +kubectl create secret generic billing-credentials -n upbound-system \ + --from-literal=AZURE_TENANT_ID= \ + --from-literal=AZURE_CLIENT_ID= \ + --from-literal=AZURE_CLIENT_SECRET= +``` + +Install the Space software, providing the billing details to the other required values. + + + + + + +```bash {hl_lines="2-6"} +helm -n upbound-system upgrade --install spaces ... \ + --set "billing.enabled=true" \ + --set "billing.storage.provider=azure" \ + --set "billing.storage.azure.storageAccount=" \ + --set "billing.storage.azure.container=" \ + --set "billing.storage.secretRef.name=billing-credentials" + ... +``` + + + + + +```bash {hl_lines="2-6"} +up space init ... \ + --set "billing.enabled=true" \ + --set "billing.storage.provider=azure" \ + --set "billing.storage.azure.storageAccount=" \ + --set "billing.storage.azure.container=" \ + --set "billing.storage.secretRef.name=billing-credentials" + ... +``` + + + + + + + +## GCP Cloud Storage Buckets + + +Configure billing to write to a Cloud Storage bucket in GCP by providing the following values at install-time. Create a bucket if you don't already have one. + +Then, on the cluster where you installed the Spaces software, create a secret in `upbound-system`. This secret must contain the key `google_application_credentials`. Make sure to replace the value with a GCP service account key JSON generated from your GCP account. + +```bash +kubectl create secret generic billing-credentials -n upbound-system \ + --from-literal=google_application_credentials= +``` + +Install the Space software, providing the billing details to the other required values. + + + + + + +```bash {hl_lines="2-5"} +helm -n upbound-system upgrade --install spaces ... \ + --set "billing.enabled=true" \ + --set "billing.storage.provider=gcp" \ + --set "billing.storage.gcp.bucket=" \ + --set "billing.storage.secretRef.name=billing-credentials" + ... +``` + + + + + +```bash {hl_lines="2-5"} +up space init ... \ + --set "billing.enabled=true" \ + --set "billing.storage.provider=gcp" \ + --set "billing.storage.gcp.bucket=" \ + --set "billing.storage.secretRef.name=billing-credentials" + ... +``` + + + + + +## Export billing data to send to Upbound + +To prepare the billing data to send to Upbound, do the following: + +Ensure the current context of your kubeconfig points at the Spaces cluster. Then, run the [export][export] command. + + +:::important +Your current CLI must have read access to the bucket to run this command. +::: + + +The example below exports billing data stored in AWS: + +```bash +up space billing export --provider=aws \ + --bucket=spaces-billing-bucket \ + --account=your-upbound-org \ + --billing-month=2024-07 \ + --force-incomplete +``` + +The command creates a billing report that's zipped up in your current working directory. Send the output to your Upbound sales representative. + + +You can find full instructions and command options in the up [CLI reference][cli-reference] docs. 
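+
+If the export fails or the report looks incomplete, you can spot-check that the billing controller is writing usage data to the bucket at all. A sketch with the AWS CLI, assuming the example bucket name above:
+
+```bash
+# List the most recent billing objects in the bucket; adjust the
+# bucket name to match your own deployment.
+aws s3 ls s3://spaces-billing-bucket/ --recursive --human-readable | tail -20
+```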
+ + +[export]: /reference/cli-reference +[cli-reference]: /reference/cli-reference +[flagship-product]: https://www.upbound.io/platform +[workload-identity-configuration-documentation]: https://docs.upbound.io/operate/accounts/authentication/oidc-configuration diff --git a/spaces_versioned_docs/version-v1.15/howtos/self-hosted/capacity-licensing.md b/spaces_versioned_docs/version-v1.15/howtos/self-hosted/capacity-licensing.md new file mode 100644 index 000000000..a1dc6c101 --- /dev/null +++ b/spaces_versioned_docs/version-v1.15/howtos/self-hosted/capacity-licensing.md @@ -0,0 +1,591 @@ +--- +title: Capacity Licensing +sidebar_position: 60 +description: A guide for capacity-based licensing in self-hosted Spaces +plan: "enterprise" +--- + + + + + +This guide explains how to configure and monitor capacity-based licensing in +self-hosted Upbound Spaces. Capacity licensing provides a simplified billing +model for disconnected or air-gapped environments where automated usage +reporting isn't possible. + +:::info +Spaces `v1.15` and later support Capacity Licensing as an +alternative to the traditional usage-based billing model described in the +[Self-Hosted Space Billing][space-billing] guide. +::: + +## Overview + +Capacity licensing allows organizations to purchase a fixed capacity of +resources upfront. The Spaces software tracks usage locally and provides +visibility into consumption against your purchased capacity, all without +requiring external connectivity to Upbound's services. + +### Key concepts + +- **Resource Hours**: The primary billing unit representing all resources + managed by Crossplane over time. This includes managed resources, + composites (XRs), claims (XRCs), and all composed resources - essentially + everything Crossplane manages. The system aggregates resource counts over each + hour using trapezoidal integration to accurately account for changes in + resource count throughout the hour. +- **Operations**: The number of Operations invoked by Crossplane. +- **License Capacity**: The total amount of resource hours and operations included in your license. +- **Usage Tracking**: Continuous monitoring of consumption with real-time utilization percentages. + +### How it works + +1. Upbound provides you with a license file containing your purchased capacity +2. You configure a `SpaceLicense` in your Spaces cluster +3. The metering system automatically: + - Collects measurements from all control planes every minute + - Aggregates usage data into hourly intervals + - Stores usage data in a local PostgreSQL database + - Updates the `SpaceLicense` status with current consumption + +## Prerequisites + +### PostgreSQL database + +Capacity licensing requires a PostgreSQL database to store usage measurements. You can use: + +- An existing PostgreSQL instance +- A managed PostgreSQL service (AWS RDS, Azure Database, Google Cloud SQL) +- A PostgreSQL instance deployed in your cluster + +The database must be: + +- Accessible from the Spaces cluster +- Configured with a dedicated database and credentials + +#### Example: Deploy PostgreSQL with CloudNativePG + +If you don't have an existing PostgreSQL instance, you can deploy one in your +cluster using [CloudNativePG] (CNPG). CNPG is a Kubernetes operator that +manages PostgreSQL clusters. + +1. Install the CloudNativePG operator: + +```bash +kubectl apply -f https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/release-1.24/releases/cnpg-1.24.1.yaml +``` + +2. 
Create a PostgreSQL cluster for metering: + +```yaml +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: metering-postgres + namespace: upbound-system +spec: + instances: 1 + imageName: ghcr.io/cloudnative-pg/postgresql:16 + bootstrap: + initdb: + database: metering + owner: metering + postInitApplicationSQL: + - ALTER ROLE "metering" CREATEROLE; + storage: + size: 5Gi + # Optional: Configure resources for production use + # resources: + # requests: + # memory: "512Mi" + # cpu: "500m" + # limits: + # memory: "1Gi" + # cpu: "1000m" +--- +apiVersion: v1 +kind: Secret +metadata: + name: metering-postgres-app + namespace: upbound-system + labels: + cnpg.io/reload: "true" +stringData: + username: metering + password: "your-secure-password-here" +type: kubernetes.io/basic-auth +``` + +```bash +kubectl apply -f metering-postgres.yaml +``` + +3. Wait for the cluster to be ready: + +```bash +kubectl wait --for=condition=ready cluster/metering-postgres -n upbound-system --timeout=5m +``` + +4. You can access the PostgreSQL cluster at `metering-postgres-rw.upbound-system.svc.cluster.local:5432`. + +:::tip +For production deployments, consider: +- Increasing `instances` to 3 for high availability +- Configuring [backups] to object storage +- Setting appropriate resource requests and limits +- Using a dedicated storage class with good I/O performance +::: + +### License file + +Contact your Upbound sales representative to obtain a license file for your organization. The license file contains: +- Your unique license ID +- Purchased capacity (resource hours and operations) +- License validity period +- Any usage restrictions (such as cluster UUID pinning) + +## Configuration + +### Step 1: Create database credentials secret + +Create a Kubernetes secret containing your PostgreSQL password using the pgpass format: + +```bash +# Create a pgpass file with format: hostname:port:database:username:password +# Note: The database name and username must be 'metering' +# For CNPG clusters, use the read-write service endpoint: -rw..svc.cluster.local +echo "metering-postgres-rw.upbound-system.svc.cluster.local:5432:metering:metering:your-secure-password-here" > pgpass + +# Create the secret +kubectl create secret generic metering-postgres-credentials \ + -n upbound-system \ + --from-file=pgpass=pgpass + +# Clean up the pgpass file +rm pgpass +``` + +The secret must contain a single key: +- **`pgpass`**: PostgreSQL password file in the format `hostname:port:metering:metering:password` + +:::note +The database name and username are fixed as `metering`. Ensure your PostgreSQL instance has a database named `metering` with a user `metering` that has appropriate permissions. + +If you deployed PostgreSQL using CNPG as shown in the example above, the password should match what you set in the `metering-postgres-app` secret. +::: + +:::tip +For production environments, consider using external secret management solutions: +- [External Secrets Operator][eso] +- Cloud-specific secret managers (AWS Secrets Manager, Azure Key Vault, GCP Secret Manager) +::: + +### Step 2: Enable metering in Spaces + +Enable the metering feature when installing or upgrading Spaces: + + + + + +```bash {hl_lines="2-7"} +helm -n upbound-system upgrade --install spaces ... 
\ + --set "metering.enabled=true" \ + --set "metering.storage.postgres.connection.url=metering-postgres-rw.upbound-system.svc.cluster.local:5432" \ + --set "metering.storage.postgres.connection.credentials.secret.name=metering-postgres-credentials" \ + --set "metering.interval=1m" \ + --set "metering.workerCount=10" \ + --set "metering.aggregationInterval=1h" \ + --set "metering.measurementRetentionDays=30" + ... +``` + + + + + +```bash {hl_lines="2-7"} +up space init ... \ + --set "metering.enabled=true" \ + --set "metering.storage.postgres.connection.url=metering-postgres-rw.upbound-system.svc.cluster.local:5432" \ + --set "metering.storage.postgres.connection.credentials.secret.name=metering-postgres-credentials" \ + --set "metering.interval=1m" \ + --set "metering.workerCount=10" \ + --set "metering.aggregationInterval=1h" \ + --set "metering.measurementRetentionDays=30" + ... +``` + + + + + +#### Configuration options + +| Option | Default | Description | +|--------|---------|-------------| +| `metering.enabled` | `false` | Enable the metering feature | +| `metering.storage.postgres.connection.url` | - | PostgreSQL host and port (format: `host:port`, required) | +| `metering.storage.postgres.connection.credentials.secret.name` | - | Name of the secret containing PostgreSQL credentials (required) | +| `metering.storage.postgres.connection.sslmode` | `require` | SSL mode for PostgreSQL connection (`disable`, `allow`, `prefer`, `require`, `verify-ca`, `verify-full`) | +| `metering.storage.postgres.connection.ca.name` | - | Name of the secret containing CA certificate for TLS connections (optional) | +| `metering.interval` | `1m` | How often to collect measurements from control planes | +| `metering.workerCount` | `10` | Number of parallel workers for measurement collection | +| `metering.aggregationInterval` | `1h` | How often to aggregate measurements into hourly usage data | +| `metering.measurementRetentionDays` | `30` | Days to retain raw measurements (0 = indefinite) | + + +#### Database sizing and retention + +The metering system uses two PostgreSQL tables to track usage: + +**Raw measurements table** (`measurements`): +- Stores point-in-time snapshots collected every measurement interval (default: 1 minute) +- One row per control plane per interval +- Affected by the `measurementRetentionDays` setting +- Used for detailed auditing and troubleshooting + +**Aggregated usage table** (`hourly_usage`): +- Stores hourly aggregated resource hours and operations per license +- One row per hour per license +- Never deleted (required for accurate license tracking) +- Grows much slower than raw measurements + +##### Storage sizing guidelines + +Estimate your PostgreSQL storage needs based on these factors: + + +| Deployment Size | Control Planes | Measurement Interval | Retention Days | Raw Measurements | Indexes & Overhead | Total Storage | +|----------------|----------------|---------------------|----------------|------------------|-------------------|---------------| +| Small | 10 | 1m | 30 | ~85 MB | ~40 MB | **~125 MB** | +| Medium | 50 | 1m | 30 | ~430 MB | ~215 MB | **~645 MB** | +| Large | 200 | 1m | 30 | ~1.7 GB | ~850 MB | **~2.5 GB** | +| Large (90-day retention) | 200 | 1m | 90 | ~5.2 GB | ~2.6 GB | **~7.8 GB** | + +The aggregated hourly usage table adds minimal overhead (~50 KB per year per license). 
+ +**Formula for custom calculations**: +``` +Daily measurements per control plane = (24 * 60) / interval_minutes +Total rows = control_planes × daily_measurements × retention_days +Storage (MB) ≈ (total_rows × 200 bytes) / 1,048,576 × 1.5 (with indexes) +``` + +##### Retention behavior + +The `measurementRetentionDays` setting controls retention of raw measurement data: + +- **Default: 30 days** - Balances audit capabilities with storage efficiency +- **Set to 0**: Disables cleanup, retains all raw measurements indefinitely +- **Cleanup runs**: Every aggregation interval (default: hourly) +- **What's kept forever**: Aggregated hourly usage data (needed for license tracking) +- **What's cleaned up**: Raw point-in-time measurements older than retention period + +**Recommendations**: +- **30 days**: For most troubleshooting and short-term auditing +- **60 to 90 days**: For environments requiring extended audit trails +- **Unlimited (0)**: Only for environments with ample storage or specific compliance requirements + +:::note +Increasing retention period linearly increases storage requirements for raw measurements. The aggregated hourly data is always retained regardless of this setting. +::: + +### Step 3: Apply your license + +Use the `up` CLI to apply your license file: + +```bash +up space license apply /path/to/license.json +``` + +This command automatically: +- Creates a secret containing your license file in the `upbound-system` namespace +- Creates the `SpaceLicense` resource configured to use that secret + +:::tip +You can specify a different namespace for the license secret using the `--namespace` flag: +```bash +up space license apply /path/to/license.json --namespace my-namespace +``` +::: + +
+Alternative: Manual kubectl approach + +If you prefer not to use the `up` CLI, you can manually create the resources: + +1. Create the license secret: + +```bash +kubectl create secret generic space-license \ + -n upbound-system \ + --from-file=license.json=/path/to/license.json +``` + +2. Create the SpaceLicense resource: + +```yaml +apiVersion: admin.spaces.upbound.io/v1alpha1 +kind: SpaceLicense +metadata: + name: space +spec: + secretRef: + name: space-license + namespace: upbound-system + key: license.json +``` + +```bash +kubectl apply -f spacelicense.yaml +``` + +:::important +You **must** name the `SpaceLicense` resource `space`. This resource is a singleton and only one can exist in the cluster. +::: + +
+ +## Monitoring usage + +### Check license status + +Use the `up` CLI to view your license details and current usage: + +```bash +up space license show +``` + +Example output: + +``` +Spaces License Status: Valid (License is valid) + +Created: 2024-01-01T00:00:00Z +Expires: 2025-01-01T00:00:00Z + +Plan: enterprise + +Resource Hour Limit: 1000000 +Operation Limit: 500000 + +Enabled Features: +- spaces +- query-api +- backup-restore +``` + +The output shows: +- License validity status and any validation messages +- Creation and expiration dates +- Your commercial plan tier +- Capacity limits for resource hours and operations +- Enabled features in your license +- Any restrictions (such as cluster UUID pinning) + +
+Alternative: View detailed status with kubectl + +For detailed information including usage statistics, use kubectl: + +```bash +kubectl get spacelicense space -o yaml +``` + +Example output showing usage data: + +```yaml +apiVersion: admin.spaces.upbound.io/v1alpha1 +kind: SpaceLicense +metadata: + name: space +spec: + secretRef: + name: space-license + namespace: upbound-system +status: + conditions: + - type: LicenseValid + status: "True" + reason: Valid + message: "License is valid" + id: "lic_abc123xyz" + plan: "enterprise" + capacity: + resourceHours: 1000000 + operations: 500000 + usage: + resourceHours: 245680 + operations: 12543 + resourceHoursUtilization: "24.57%" + operationsUtilization: "2.51%" + firstMeasurement: "2024-01-15T10:00:00Z" + lastMeasurement: "2024-02-10T14:30:00Z" + createdAt: "2024-01-01T00:00:00Z" + expiresAt: "2025-01-01T00:00:00Z" + enabledFeatures: + - "spaces" + - "query-api" + - "backup-restore" +``` + +
+
+### Understanding the status fields
+
+| Field | Description |
+|-------|-------------|
+| `status.id` | Unique license identifier |
+| `status.plan` | Your commercial plan (community, standard, enterprise) |
+| `status.capacity` | Total capacity included in your license |
+| `status.usage.resourceHours` | Total resource hours consumed |
+| `status.usage.operations` | Total operations performed |
+| `status.usage.resourceHoursUtilization` | Percentage of resource hours capacity used |
+| `status.usage.operationsUtilization` | Percentage of operations capacity used |
+| `status.usage.firstMeasurement` | When usage tracking began |
+| `status.usage.lastMeasurement` | Most recent usage update |
+| `status.expiresAt` | License expiration date |
+
+### Monitor with kubectl
+
+Watch your license utilization in real time:
+
+```bash
+kubectl get spacelicense space -w
+```
+
+Short output format:
+
+```
+NAME    PLAN         VALID   REASON   AGE
+space   enterprise   True    Valid    45d
+```
+
+## Managing licenses
+
+### Updating your license
+
+To update your license with a new license file (for example, when renewing or upgrading capacity), apply the new license:
+
+```bash
+up space license apply /path/to/new-license.json
+```
+
+This command replaces the existing license secret and updates the SpaceLicense resource.
+
+### Removing a license
+
+To remove a license:
+
+```bash
+up space license remove
+```
+
+This command:
+- Prompts for confirmation before proceeding
+- Removes the license secret
+
+To skip the confirmation prompt, use the `--force` flag:
+
+```bash
+up space license remove --force
+```
+
+## Troubleshooting
+
+### License not updating
+
+If the license status doesn't update with usage data:
+
+1. **Check metering controller logs**:
+   ```bash
+   kubectl logs -n upbound-system deployment/spaces-controller -c metering
+   ```
+
+2. **Check if the system captures your measurements**:
+
+   ```bash
+   # Connect to PostgreSQL and query the measurements table
+   kubectl exec -it <postgres-pod> -- psql -U metering -d metering \
+     -c "SELECT COUNT(*) FROM measurements WHERE timestamp > NOW() - INTERVAL '1 hour';"
+   ```
+
+### High utilization warnings
+
+If you're approaching your capacity limits:
+
+1. **Review resource usage** by control plane to identify high consumers
+2. **Contact your Upbound sales representative** to discuss capacity expansion
+3. **Optimize managed resources** by cleaning up unused resources
+
+### License validation failures
+
+If your license shows as invalid:
+
+1. **Check expiration date**: `kubectl get spacelicense space -o jsonpath='{.status.expiresAt}'`
+2. **Verify license file integrity**: Ensure the secret contains valid JSON
+3. **Check for cluster UUID restrictions**: Upbound pins some licenses to
+   specific clusters
+4. **Review controller logs** for detailed error messages
+
+## Differences from traditional billing
+
+### Capacity licensing
+
+- ✅ Works in disconnected environments
+- ✅ Provides real-time usage visibility
+- ✅ No manual data export required
+- ❌ Requires PostgreSQL database
+- ✅ Fixed capacity model
+
+### Traditional billing (object storage)
+
+
+- ❌ Requires periodic manual export
+- ❌ Delayed visibility into usage
+- ✅ Works with S3/Azure Blob/GCS
+- ❌ Requires cloud storage access
+- ✅ Pay-as-you-go model
+
+## Best practices
+
+### Database management
+
+1. **Regular backups**: Back up your metering database regularly to preserve usage history
+2. **Monitor database size**: Set appropriate retention periods to manage storage growth
+3. 
**Use managed databases**: Consider managed PostgreSQL services for production +4. **Connection pooling**: Use connection pooling for better performance at scale + +### License management + +1. **Monitor utilization**: Set up alerts before reaching 80% capacity +2. **Plan renewals early**: Start renewal discussions 60 days before expiration +3. **Track grace periods**: Note the `gracePeriodEndsAt` date for planning +4. **Secure license files**: Treat license files as sensitive credentials + +### Operational monitoring + +1. **Set up dashboards**: Create Grafana dashboards for usage trends +2. **Enable alerting**: Configure alerts for high utilization and expiration +3. **Regular audits**: Periodically review usage patterns across control planes +4. **Capacity planning**: Use historical data to predict future capacity needs + +## Next steps + +- Learn about [Observability] to monitor your Spaces deployment +- Explore [Backup and Restore][backup-restore] to protect your control plane data +- Review [Self-Hosted Space Billing][space-billing] for the traditional billing model +- Contact [Upbound Sales][sales] to discuss capacity licensing options + + +[space-billing]: /spaces/howtos/self-hosted/billing +[CloudNativePG]: https://cloudnative-pg.io/ +[backups]: https://cloudnative-pg.io/documentation/current/backup_recovery/ +[backup-restore]: /spaces/howtos/backup-and-restore +[sales]: https://www.upbound.io/contact +[eso]: https://external-secrets.io/ +[Observability]: /spaces/howtos/observability + + diff --git a/spaces_versioned_docs/version-v1.15/howtos/self-hosted/certs.md b/spaces_versioned_docs/version-v1.15/howtos/self-hosted/certs.md new file mode 100644 index 000000000..e517c250e --- /dev/null +++ b/spaces_versioned_docs/version-v1.15/howtos/self-hosted/certs.md @@ -0,0 +1,274 @@ +--- +title: Istio Ingress Gateway With Custom Certificates +sidebar_position: 20 +description: Install self hosted spaces using istio ingress gateway in a Kind cluster +--- + +:::important +Prerequisites + +- Spaces Token available in a file +- `docker login xpkg.upbound.io -u -p ` +- [`istioctl`][istioctl] installation +- `jq` installation +::: + +This document describes the installation of a self hosted space on an example `kind` +cluster along with Istio Ingress Gateway and certificates. The service mesh and certificates +installation is transferable to self hosted spaces in arbitrary clouds. + +## Create a kind cluster + +```shell +cat < +## Install Istio + + + +:::important +This is an example and not recommended for use in production. +::: + + +1. Create the `istio-values.yaml` file + +```shell +cat > istio-values.yaml << 'EOF' +apiVersion: install.istio.io/v1alpha1 +kind: IstioOperator +spec: + hub: gcr.io/istio-release + components: + ingressGateways: + - enabled: true + name: istio-ingressgateway + k8s: + nodeSelector: + ingress-ready: "true" + overlays: + - apiVersion: apps/v1 + kind: Deployment + name: istio-ingressgateway + patches: + - path: spec.template.spec.containers.[name:istio-proxy].ports + value: + - containerPort: 8080 + hostPort: 80 + - containerPort: 8443 + hostPort: 443 +EOF +``` + +2. Install istio via `istioctl` + +```shell +istioctl install -f istio-values.yaml +``` + +## Create a self-signed Certificate via cert-manager + +:::important +This Certificate manifest creates a self-signed certificate for a proof of concept +environment and isn't recommended for production use cases. +::: + +1. 
Create the upbound-system namespace + +```shell +kubectl create namespace upbound-system +``` + +2. Create a self-signed certificate + +```shell +cat < +## Create an Istio Gateway and VirtualService + + + + +Configure an Istio Gateway and VirtualService to use TLS passthrough. + + +```shell +cat < spaces-values.yaml << 'EOF' +# Configure spaces-router to use the TLS secret created by cert-manager. +externalTLS: + tlsSecret: + name: example-tls-secret + caBundleSecret: + name: example-tls-secret + key: ca.crt +ingress: + provision: false + # Allow Istio Ingress Gateway to communicate to the spaces-router + namespaceLabels: + kubernetes.io/metadata.name: istio-system + podLabels: + app: istio-ingressgateway + istio: ingressgateway +EOF +``` + +2. Set the required environment variables + +```shell +# Update these according to your account/token file +export SPACES_TOKEN_PATH= +export UPBOUND_ACCOUNT= +# Replace SPACES_ROUTER_HOST with your Spaces ingress hostname +export SPACES_ROUTER_HOST="proxy.upbound-127.0.0.1.nip.io" +export SPACES_VERSION="1.14.1" +``` + +3. Create an image pull secret for Spaces + +```shell +kubectl -n upbound-system create secret docker-registry upbound-pull-secret \ + --docker-server=https://xpkg.upbound.io \ + --docker-username="$(jq -r .accessId $SPACES_TOKEN_PATH)" \ + --docker-password="$(jq -r .token $SPACES_TOKEN_PATH)" +``` + +4. Install the Spaces helm chart + +```shell +# Login to xpkg.upbound.io +jq -r .token $SPACES_TOKEN_PATH | helm registry login xpkg.upbound.io -u $(jq -r .accessId $SPACES_TOKEN_PATH) --password-stdin + +# Install spaces helm chart +helm -n upbound-system upgrade --install spaces \ + oci://xpkg.upbound.io/spaces-artifacts/spaces \ + --version "${SPACES_VERSION}" \ + --set "ingress.host=${SPACES_ROUTER_HOST}" \ + --set "account=${UPBOUND_ACCOUNT}" \ + --set "authentication.hubIdentities=true" \ + --set "authorization.hubRBAC=true" \ + --wait -f spaces-values.yaml +``` + +## Validate the installation + +Successful access of the `up` command to interact with your self hosted space validates the +certificate installation. + +- `up ctx .` + +You can also issue control plane creation, list and deletion commands. + +- `up ctp create cert-test` +- `up ctp list` +- `up ctx disconnected/kind-kind/default/cert-test && kubectl get namespace` +- `up ctp delete cert-test` + +:::note +If `up` can't connect to your control plane, follow [this guide to create a new profile][up-profile]. +::: + +## Troubleshooting + +Examine your certificate with `openssl`: + +```shell +openssl s_client -connect proxy.upbound-127.0.0.1.nip.io:443 -showcerts +``` + +[istioctl]: https://istio.io/latest/docs/ops/diagnostic-tools/istioctl/ +[up-profile]: /manuals/cli/howtos/profile-config/ diff --git a/spaces_versioned_docs/version-v1.15/howtos/self-hosted/configure-ha.md b/spaces_versioned_docs/version-v1.15/howtos/self-hosted/configure-ha.md new file mode 100644 index 000000000..ddf36c55e --- /dev/null +++ b/spaces_versioned_docs/version-v1.15/howtos/self-hosted/configure-ha.md @@ -0,0 +1,450 @@ +--- +title: Production Scaling and High Availability +description: Configure your Self-Hosted Space for production +sidebar_position: 5 +--- + + + +This guide explains how to configure an existing Upbound Space deployment for +production operation at scale. + +Use this guide when you're ready to deploy production scaling, high availability, +and monitoring in your Space. 
+ +:::info API Version Information +This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. + +For API specifications on ControlPlane resources and configurations, see the [Spaces API Reference](../../../../../reference/apis/spaces-api/). version compatibility details, see the . +::: + +## Prerequisites + +Before you begin scaling your Spaces deployment, make sure you have: + + +* A working Space deployment +* Cluster administrator access +* An understanding of load patterns and growth in your organization +* A familiarity with node affinity, tainting, and Horizontal Pod Autoscaling + (HPA) + + +## Production scaling strategy + + +In this guide, you will: + + + +* Create dedicated node pools for different component types +* Configure high-availability to ensure there are no single points of failure +* Set dynamic scaling for variable workloads +* Optimize your storage and component operations +* Monitor your deployment health and performance + +## Spaces architecture + +The basic Spaces workflow follows the pattern below: + + +![Spaces workflow][spaces-workflow] + +## Node architecture + +You can mitigate resource contention and improve reliability by separating system +components into dedicated node pools. + +### `etcd` dedicated nodes + +`etcd` performance directly impacts your entire Space, so isolate it for +consistent performance. + +1. Create a dedicated `etcd` node pool + + **Requirements:** + - **Minimum**: 3 nodes for HA + - **Instance type**: General purpose with high network throughput/low latency + - **Storage**: High performance storage (`etcd` is I/O sensitive) + +2. Taint `etcd` nodes to reserve them + + ```bash + kubectl taint nodes target=etcd:NoSchedule + ``` + +3. Configure `etcd` storage + + `etcd` is sensitive to storage I/O performance. Review the [`etcd` scaling + documentation][scaling] + for specific storage guidance. + +### API server dedicated nodes + +API servers handle all control plane requests and should run on dedicated +infrastructure. + +1. Create dedicated API server nodes + + **Requirements:** + - **Minimum**: 2 nodes for HA + - **Instance type**: Compute-optimized, memory-optimized, or general-purpose + - **Scaling**: Scale vertically based on API server load patterns + +2. Taint API server nodes + + ```bash + kubectl taint nodes target=apiserver:NoSchedule + ``` + +### Configure cluster autoscaling + +Enable cluster autoscaling for all node pools. + +For AWS EKS clusters, Upbound recommends using [`Karpenter`][karpenter] for +improved bin-packing and instance type selection. + +For GCP GKE clusters, follow the [GKE autoscaling][gke-autoscaling] guide. + +For Azure AKS clusters, follow the [AKS autoscaling][aks-autoscaling] guide. + + +## Configure high availability + +Ensure control plane components can survive node and zone failures. + +### Enable high availability mode + +1. Configure control planes for high availability + + ```yaml + controlPlanes: + ha: + enabled: true + ``` + + This configures control plane pods to run with multiple replicas and + associated pod disruption budgets. + +### Configure component distribution + +1. 
Set up API server pod distribution + + ```yaml + controlPlanes: + vcluster: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: target + operator: In + values: + - apiserver + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: app + operator: In + values: + - vcluster + topologyKey: "kubernetes.io/hostname" + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - vcluster + topologyKey: topology.kubernetes.io/zone + weight: 100 + ``` + +2. Configure `etcd` pod distribution + + ```yaml + controlPlanes: + etcd: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: target + operator: In + values: + - etcd + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: app + operator: In + values: + - vcluster-etcd + topologyKey: "kubernetes.io/hostname" + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - vcluster-etcd + topologyKey: topology.kubernetes.io/zone + weight: 100 + ``` + +### Configure tolerations + +Allow control plane pods to schedule on the tainted dedicated nodes (available +in Spaces v1.14+). + +1. Add tolerations for `etcd` pods + + ```yaml + controlPlanes: + etcd: + tolerations: + - key: "target" + operator: "Equal" + value: "etcd" + effect: "NoSchedule" + ``` + +2. Add tolerations for API server pods + + ```yaml + controlPlanes: + vcluster: + tolerations: + - key: "target" + operator: "Equal" + value: "apiserver" + effect: "NoSchedule" + ``` + + +## Configure autoscaling for Spaces components + + +Set up the Spaces system components to handle variable load automatically. + +### Scale API and `apollo` services + +1. Configure minimum replicas for availability + + ```yaml + api: + replicaCount: 2 + + features: + alpha: + apollo: + enabled: true + replicaCount: 2 + ``` + + Both services support horizontal and vertical scaling based on load patterns. + +### Configure router autoscaling + +The `spaces-router` is the entry point for all traffic and needs intelligent +scaling. + + +1. Enable Horizontal Pod Autoscaler + + ```yaml + router: + hpa: + enabled: true + minReplicas: 2 + maxReplicas: 8 + targetCPUUtilizationPercentage: 80 + targetMemoryUtilizationPercentage: 80 + ``` + +2. Monitor scaling factors + + **Router scaling behavior:** + - **Vertical scaling**: Scales based on number of control planes + - **Horizontal scaling**: Scales based on request volume + - **Resource monitoring**: Monitor CPU and memory usage + + + +### Configure controller scaling + +The `spaces-controller` manages Space-level resources and requires vertical +scaling. + +1. Configure adequate resources with headroom + + ```yaml + controller: + resources: + requests: + cpu: "500m" + memory: "1Gi" + limits: + cpu: "2000m" + memory: "4Gi" + ``` + + **Important**: The controller can spike when reconciling large numbers of + control planes, so provide adequate headroom for resource spikes. + +## Set up production storage + + +### Configure Query API database + + +1. 
Use a managed PostgreSQL database + + **Recommended services:** + - [AWS RDS][rds] + - [Google Cloud SQL][gke-sql] + - [Azure Database for PostgreSQL][aks-sql] + + **Requirements:** + - Minimum 400 IOPS performance + + +## Monitoring + + + +Monitor key metrics to ensure healthy scaling and identify issues quickly. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +### Control plane health + +Track these `spaces-controller` metrics: + +1. **Total control planes** + + ``` + spaces_control_plane_exists + ``` + + Tracks the total number of control planes in the system. + +2. **Degraded control planes** + + ``` + spaces_control_plane_degraded + ``` + + Returns control planes that don't have a `Synced`, `Ready`, and + `Healthy` state. + +3. **Stuck control planes** + + ``` + spaces_control_plane_stuck + ``` + + Control planes stuck in a provisioning state. + +4. **Deletion issues** + + ``` + spaces_control_plane_deletion_stuck + ``` + + Control planes stuck during deletion. + +### Alerting + +Configure alerts for critical scaling and health metrics: + +- **High error rates**: Alert when 4xx/5xx response rates exceed thresholds +- **Control plane health**: Alert when degraded or stuck control planes exceed acceptable counts + +## Architecture overview + +**Spaces System Components:** + +- **`spaces-router`**: Entry point for all endpoints, dynamically builds routes to control plane API servers +- **`spaces-controller`**: Reconciles Space-level resources, serves webhooks, works with `mxp-controller` for provisioning +- **`spaces-api`**: API for managing groups, control planes, shared secrets, and telemetry objects (accessed only through spaces-router) +- **`spaces-apollo`**: Hosts the Query API, connects to PostgreSQL database populated by `apollo-syncer` pods + + +**Control Plane Components (per control plane):** +- **`mxp-controller`**: Handles provisioning tasks, serves webhooks, installs UXP and `XGQL` +- **`XGQL`**: GraphQL API powering console views +- **`kube-state-metrics`**: Collects usage metrics for billing (updated by `mxp-controller` when CRDs change) +- **`vector`**: Works with `kube-state-metrics` to send usage data to external storage for billing +- **`apollo syncer`**: Syncs `etcd` data into PostgreSQL for the Query API + + +### `up ctx` workflow + + + up ctx workflow diagram + + +### Access a control plane API server via kubectl + + + kubectl workflow diagram + + +### Query API/Apollo + + + query API workflow diagram + + +## See also + +* [Upbound Spaces deployment requirements][deployment] +* [Upbound `etcd` scaling resources][scaling] + +[up-ctx-workflow]: /img/up-ctx-workflow.png +[kubectl]: /img/kubectl-workflow.png +[query-api]: /img/query-api-workflow.png +[spaces-workflow]: /img/up-basic-flow.png +[rds]: https://aws.amazon.com/rds/postgresql/ +[gke-sql]: https://cloud.google.com/kubernetes-engine/docs/tutorials/stateful-workloads/postgresql +[aks-sql]: https://learn.microsoft.com/en-us/azure/aks/deploy-postgresql-ha?tabs=azuredisk +[deployment]: https://docs.upbound.io/spaces/howtos/self-hosted/deployment-reqs/ +[karpenter]: https://docs.aws.amazon.com/eks/latest/best-practices/karpenter.html +[gke-autoscaling]: https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-autoscaler +[aks-autoscaling]: https://learn.microsoft.com/en-us/azure/aks/cluster-autoscaler-overview +[scaling]: https://docs.upbound.io/deploy/self-hosted-spaces/scaling-resources#scaling-etcd-storage diff --git a/spaces_versioned_docs/version-v1.15/howtos/self-hosted/controllers.md 
b/spaces_versioned_docs/version-v1.15/howtos/self-hosted/controllers.md new file mode 100644 index 000000000..692740638 --- /dev/null +++ b/spaces_versioned_docs/version-v1.15/howtos/self-hosted/controllers.md @@ -0,0 +1,389 @@ +--- +title: Controllers +weight: 250 +description: A guide to how to wrap and deploy an Upbound controller into control planes on Upbound. +--- + +:::important +This feature is in private preview for select customers in Upbound Spaces. If you're interested in this feature, please [contact us](https://www.upbound.io/contact-us). +::: + +Upbound's _Controllers_ feature lets you build and deploy control plane software from the Kubernetes ecosystem. With the _Controllers_ feature, you're not limited to just managing resource types defined by Crossplane. Now you can create resources from _CustomResourceDefinitions_ defined by other Kubernetes ecosystem tooling. + +This guide explains how to bundle and deploy control plane software from the Kubernetes ecosystem on a control plane in Upbound. + +## Benefits + +The Controllers feature provides the following benefits: + +* Deploy control plane software from the Kubernetes ecosystem. +* Use your control plane's package manager to handle the lifecycle of the control plane software and define dependencies between package. +* Build powerful compositions that combine both Crossplane and Kubernetes _CustomResources_. + +## How it works + +A _Controller_ is a package type that bundles control plane software from the Kubernetes ecosystem. Examples of such software includes: + +- Kubernetes policy engines +- CI/CD tooling +- Your own private custom controllers defined by your organization + +You build a _Controller_ package by wrapping a helm chart along with its requisite _CustomResourceDefinitions_. Your _Controller_ package gets pushed to an OCI registry, and from there you can apply it to a control plane like you would any other Crossplane package. Your control plane's package manager is responsible for managing the lifecycle of the software once applied. + +## Prerequisites + +Enable the Controllers feature in the Space you plan to run your control plane in: + +- Cloud Spaces: Not available yet +- Connected Spaces: Space administrator must enable this feature +- Disconnected Spaces: Space administrator must enable this feature + +Packaging a _Controller_ requires [up CLI][cli] `v0.39.0` or later. + + + +## Build a _Controller_ package + + + +_Controllers_ are a package type that get administered by your control plane's package manager. + +### Prepare the package + +To define a _Controller_, you need a Helm chart. This guide assumes the control plane software you want to build into a _Controller_ already has a Helm chart available. + +Start by making a working directory to assemble the necessary parts: + +```ini +mkdir controller-package +cd controller-package +``` + +Inside the working directory, pull the Helm chart: + +```shell +export CHART_REPOSITORY= +export CHART_NAME= +export CHART_VERSION= + +helm pull $CHART_NAME --repo $CHART_REPOSITORY --version $CHART_VERSION +``` + +Be sure to update the Helm chart repository, name, and version with your own. 
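+
+For example, to wrap a hypothetical policy engine chart, you could use values
+like the following (these are illustrative, not requirements; any Helm chart
+that ships CRDs and a controller works):
+
+```shell
+# Illustrative chart coordinates; substitute your own
+export CHART_REPOSITORY=https://kyverno.github.io/kyverno/
+export CHART_NAME=kyverno
+export CHART_VERSION=3.2.6
+
+helm pull $CHART_NAME --repo $CHART_REPOSITORY --version $CHART_VERSION
+```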
+ +Move the Helm chart into its own folder: + +```ini +mkdir helm +mv $CHART_NAME-$CHART_VERSION.tgz helm/chart.tgz +``` + +Unpack the CRDs from the Helm chart into their own directory: + +```shell +export RELEASE_NAME= +export RELEASE_NAMESPACE= + +mkdir crds +helm template $RELEASE_NAME helm/chart.tgz -n $RELEASE_NAMESPACE --include-crds | \ + yq e 'select(.kind == "CustomResourceDefinition")' - | \ + yq -s '("crds/" + .metadata.name + ".yaml")' - +``` +Be sure to update the Helm release name, and namespace with your own. + +:::info +The instructions above assume your CRDs get deployed as part of your Helm chart. If they're deployed another way, you need to manually copy your CRDs instead. +::: + +Create a `crossplane.yaml` with your controller metadata: + +```yaml +cat < crossplane.yaml +apiVersion: meta.pkg.upbound.io/v1alpha1 +kind: Controller +metadata: + annotations: + friendly-name.meta.crossplane.io: Controller + meta.crossplane.io/description: | + A brief description of what the controller does. + meta.crossplane.io/license: Apache-2.0 + meta.crossplane.io/maintainer: + meta.crossplane.io/readme: | + An explanation of your controller. + meta.crossplane.io/source: + name: +spec: + packagingType: Helm + helm: + releaseName: + releaseNamespace: + # Value overrides for the helm release can be provided below. + # values: + # foo: bar +EOF +``` + +Your controller's file structure should look like this: + +```ini +. +├── crds +│ ├── your-crd.yaml +│ ├── second-crd.yaml +│ └── another-crd.yaml +├── crossplane.yaml +└── helm + └── chart.tgz +``` + +### Package and push the _Controller_ + +At the root of your controller's working directory, build the contents into an xpkg: + +```ini +up xpkg build +``` + +This causes an xpkg to get saved to your current directory with a name like `controller-f7091386b4c0.xpkg`. + +Push the package to your desired OCI registry: + +```shell +export UPBOUND_ACCOUNT= +export CONTROLLER_NAME= +export CONTROLLER_VERSION= +export XPKG_FILENAME= + +up xpkg push xpkg.upbound.io/$UPBOUND_ACCOUNT/$CONTROLLER_NAME:$CONTROLLER_VERSION -f $XPKG_FILENAME +``` + + + +## Deploy a _Controller_ package + + + +:::important +_Controllers_ are only installable on control planes running Crossplane `v1.19.0` or later. +::: + +Set your kubecontext to the desired control plane in Upbound. Change the package path to the OCI registry you pushed it to. Then, deploy the _Controller_ directly: + +```shell +export CONTROLLER_NAME= +export CONTROLLER_VERSION= + +cat < crossplane.yaml +apiVersion: meta.pkg.upbound.io/v1alpha1 +kind: Controller +metadata: + annotations: + friendly-name.meta.crossplane.io: Controller ArgoCD + meta.crossplane.io/description: | + The ArgoCD Controller enables continuous delivery and declarative configuration + management for Kubernetes applications using GitOps principles. + meta.crossplane.io/license: Apache-2.0 + meta.crossplane.io/maintainer: Upbound Maintainers + meta.crossplane.io/readme: | + ArgoCD is a declarative GitOps continuous delivery tool for Kubernetes that + follows the GitOps methodology to manage infrastructure and application + configurations. + meta.crossplane.io/source: https://github.com/argoproj/argo-cd + name: argocd +spec: + packagingType: Helm + helm: + releaseName: argo-cd + releaseNamespace: argo-system + # values: + # foo: bar +EOF +``` + +Your controller's file structure should look like this: + +```ini +. 
+├── crds +│ ├── applications.argoproj.io.yaml +│ ├── applicationsets.argoproj.io.yaml +│ └── appprojects.argoproj.io.yaml +├── crossplane.yaml +└── helm + └── chart.tgz +``` + +### Package and push controller-argocd + +At the root of your controller's working directory, build the contents into an xpkg: + +```ini +up xpkg build +``` + +This causes an xpkg to get saved to your current directory with a name like `argocd-f7091386b4c0.xpkg`. + +Push the package to your desired OCI registry: + +```shell +export UPBOUND_ACCOUNT= +export CONTROLLER_NAME=controller-argocd +export CONTROLLER_VERSION=v7.8.8 +export XPKG_FILENAME= + +up xpkg push --create xpkg.upbound.io/$UPBOUND_ACCOUNT/$CONTROLLER_NAME:$CONTROLLER_VERSION -f $XPKG_FILENAME +``` + +### Deploy controller-argocd to a control plane + +Set your kubecontext to the desired control plane in Upbound. Change the package path to the OCI registry you pushed it to. Then, deploy the _Controller_ directly: + +```ini +cat < + +## Frequently asked questions + +
+Can I package any software or are there any prerequisites to be a Controller?
+
+We define a *Controller* as software that has at least one Custom Resource Definition (CRD) and a Kubernetes controller for that CRD. This is the minimum requirement to be a *Controller*, and checks enforce it at packaging time.
+
+ +
+How can I package my software as a Controller?
+
+Currently, Helm charts are the only supported underlying package format for *Controllers*. As long as you have a Helm chart, you can package it as a *Controller*.
+
+If you don't have a Helm chart, you can't package the software as a *Controller*. We may extend this to support other packaging formats like Kustomize in the future.
+
+ +
+Can I package Crossplane XRDs/Compositions as a Helm chart to deploy as a Controller?
+
+This is not recommended. For packaging Crossplane XRDs and Compositions, we recommend using the `Configuration` package format. A Helm chart containing only Crossplane XRDs/Compositions does not qualify as a *Controller*.
+
+ +
+How can I override the Helm values when deploying a Controller? + +Overriding the Helm values is possible at two levels: +- During packaging time, in the package manifest file. +- At runtime, using a `ControllerRuntimeConfig` resource (similar to Crossplane `DeploymentRuntimeConfig`). + +
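+
+For example, a packaging-time override goes under `spec.helm.values` in the
+package's `crossplane.yaml`. A minimal sketch, where the value keys shown are
+hypothetical chart values rather than a fixed schema:
+
+```yaml
+spec:
+  packagingType: Helm
+  helm:
+    releaseName: my-controller
+    releaseNamespace: my-controller-system
+    # Hypothetical chart values overridden at packaging time
+    values:
+      replicaCount: 2
+      logLevel: debug
+```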
+ +
+How can I configure the Helm release name and namespace for the controller?
+
+Right now, it's not possible to configure these at runtime. The package author sets the release name and namespace during packaging, so they're hardcoded inside the package. Unlike a regular application deployed by a Helm chart, a *Controller* can only be deployed once in a given control plane, so relying on predefined release names and namespaces is generally safe. We may consider exposing these in `ControllerRuntimeConfig` later, but we would like to keep this opinionated unless there are strong reasons to do so.
+
+ +
+Can I deploy more than one instance of a Controller package?
+
+No, this is not possible. A *Controller* package introduces CRDs, which are cluster-scoped objects. Just as you can't deploy more than one instance of the same Crossplane Provider package today, you can't deploy more than one instance of a *Controller*.
+
+ +
+Do I need a specific Crossplane version to run Controllers? + +Yes, you need to use Crossplane v1.19.0 or later to use *Controllers*. This is because of the changes in the Crossplane codebase to support third-party package formats in dependencies. + +Spaces `v1.12.0` supports Crossplane `v1.19` in the *Rapid* release channel. + +
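+
+A quick way to confirm the Crossplane version of a control plane is to inspect
+the core Crossplane deployment. This sketch assumes your kubecontext points at
+the control plane and that Crossplane runs in the conventional
+`crossplane-system` namespace:
+
+```shell
+# Print the Crossplane core image; the tag is the Crossplane version
+kubectl get deployment crossplane --namespace crossplane-system \
+  --output jsonpath='{.spec.template.spec.containers[0].image}'
+```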
+ +
+Can I deploy Controllers outside of an Upbound control plane? With UXP? + +No, *Controllers* are a proprietary package format and are only available for control planes running in Spaces hosting environments in Upbound. + +
+
+
+[cli]: /manuals/uxp/overview
+
diff --git a/spaces_versioned_docs/version-v1.15/howtos/self-hosted/ctp-audit-logs.md b/spaces_versioned_docs/version-v1.15/howtos/self-hosted/ctp-audit-logs.md
new file mode 100644
index 000000000..52f52c776
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.15/howtos/self-hosted/ctp-audit-logs.md
@@ -0,0 +1,549 @@
+---
+title: Control plane audit logging
+---
+
+This guide explains how to enable and configure audit logging for control planes
+in Self-Hosted Upbound Spaces.
+
+Starting in Spaces `v1.14.0`, each control plane contains an API server that
+supports audit log collection. You can use audit logging to track creation,
+updates, and deletions of Crossplane resources. Control plane audit logs
+use observability features to collect audit logs with `SharedTelemetryConfig` and
+send logs to an OpenTelemetry (`OTEL`) collector.
+
+:::info API Version Information
+This guide applies to **Spaces v1.14.0 and later** (Self-Hosted only). Audit logging is not available in earlier versions.
+
+For API specifications on observability resources, see the [Spaces API Reference - v1.14](../../../../../1.14/reference/apis/spaces-api/).
+:::
+
+## Prerequisites
+
+Before you begin, make sure you have:
+
+* Spaces `v1.14.0` or greater
+* Admin access to your Spaces host cluster
+* `kubectl` configured to access the host cluster
+* `helm` installed
+* `yq` installed
+* `up` CLI installed and logged in to your organization
+
+## Enable observability
+
+Observability graduated to General Availability (GA) in `v1.14.0` but is disabled by
+default.
+
+### Before `v1.14`
+
+To enable the GA Observability feature, upgrade your Spaces installation to `v1.14.0`
+or later and update your installation settings to use the new flag:
+
+```diff
+helm upgrade spaces upbound/spaces -n upbound-system \
+- --set "features.alpha.observability.enabled=true"
++ --set "observability.enabled=true"
+```
+
+### After `v1.14`
+
+To enable the GA Observability feature for `v1.14.0` and later, pass the feature
+flag:
+
+```sh
+helm upgrade spaces upbound/spaces -n upbound-system \
+  --set "observability.enabled=true"
+```
+
+To confirm Observability is enabled, run the `helm get values` command:
+
+```shell
+helm get values --namespace upbound-system spaces | yq .observability
+```
+
+Your output should return:
+
+```shell-noCopy
+enabled: true
+```
+
+## Install an observability backend
+
+:::note
+If you already have an observability backend in your environment, skip to the
+next section.
+:::
+
+For this guide, you'll use Grafana's `docker-otel-lgtm` bundle to validate audit log
+generation. For production environments, configure a dedicated observability
+backend like Datadog, Splunk, or an enterprise-grade Grafana stack.
+
+First, make sure your `kubectl` context points to your Spaces host cluster:
+
+```shell
+kubectl config current-context
+```
+
+The output should return your cluster name.
+
+Next, install `docker-otel-lgtm` as a deployment using port-forwarding to
+connect to Grafana.
Create a manifest file and paste the +following configuration: + +```yaml title="otel-lgtm.yaml" +apiVersion: v1 +kind: Namespace +metadata: + name: observability +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: otel-lgtm + name: otel-lgtm + namespace: observability +spec: + ports: + - name: grpc + port: 4317 + protocol: TCP + targetPort: 4317 + - name: http + port: 4318 + protocol: TCP + targetPort: 4318 + - name: grafana + port: 3000 + protocol: TCP + targetPort: 3000 + selector: + app: otel-lgtm +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: otel-lgtm + labels: + app: otel-lgtm + namespace: observability +spec: + replicas: 1 + selector: + matchLabels: + app: otel-lgtm + template: + metadata: + labels: + app: otel-lgtm + spec: + containers: + - name: otel-lgtm + image: grafana/otel-lgtm + ports: + - containerPort: 4317 + - containerPort: 4318 + - containerPort: 3000 +``` + +Next, apply the manifest: + +```shell +kubectl apply --filename otel-lgtm.yaml +``` + +Your output should return the resources: + +```shell +namespace/observability created + service/otel-lgtm created + deployment.apps/otel-lgtm created +``` + +To verify your resources deployed, use `kubectl get` to display resources with +an `ACTIVE` or `READY` status. + +Next, forward the Grafana port: + +```shell +kubectl port-forward svc/otel-lgtm --namespace observability 3000:3000 +``` + +Now you can access the Grafana UI at http://localhost:3000. + + +## Create an audit-enabled control plane + +To enable audit logging for a control plane, you need to label it so the +`SharedTelemetryConfig` can identify and apply audit settings. This section +creates a new control plane with the `audit-enabled: "true"` label. The +`audit-enabled: "true"` label marks this control plane for audit logging. The +`SharedTelemetryConfig` (created in the next section) finds control planes with +this label and enables audit logging on them. + +Create a new manifest file and paste the configuration below: + +
+```yaml title="ctp-audit.yaml" +apiVersion: v1 +kind: Namespace +metadata: + name: audit-test +--- +apiVersion: spaces.upbound.io/v1beta1 +kind: ControlPlane +metadata: + labels: + audit-enabled: "true" + name: ctp1 + namespace: audit-test +spec: + writeConnectionSecretToRef: + name: kubeconfig-ctp1 + namespace: audit-test +``` +
+
+The `metadata.labels` section contains the `audit-enabled` setting.
+
+Apply the manifest:
+
+```shell
+kubectl apply --filename ctp-audit.yaml
+```
+
+Confirm your control plane reaches the `READY` status:
+
+```shell
+kubectl get --filename ctp-audit.yaml
+```
+
+## Create a `SharedTelemetryConfig`
+
+The `SharedTelemetryConfig` applies to all control plane objects in a namespace,
+enabling audit logging and routing logs to your `OTEL` endpoint.
+
+Create a `SharedTelemetryConfig` manifest file and paste the configuration
+below:
+
+```yaml title="sharedtelemetryconfig.yaml" +apiVersion: observability.spaces.upbound.io/v1alpha1 +kind: SharedTelemetryConfig +metadata: + name: apiserver-audit + namespace: audit-test +spec: + apiServer: + audit: + enabled: true + exporters: + otlphttp: + endpoint: http://otel-lgtm.observability:4318 + exportPipeline: + logs: [otlphttp] + controlPlaneSelector: + labelSelectors: + - matchLabels: + audit-enabled: "true" +``` +
+
+This configuration:
+
+* Sets `apiServer.audit.enabled` to `true`
+* Configures the `otlphttp` exporter to point to the `docker-otel-lgtm` service
+* Uses `controlPlaneSelector` to match any control plane in the namespace with the `audit-enabled` label set to `true`
+
+:::note
+You can configure the `SharedTelemetryConfig` to select control planes in
+several ways. For more information on control plane selection, see the [control
+plane selection][ctp-selection] documentation.
+:::
+
+Apply the `SharedTelemetryConfig`:
+
+```shell
+kubectl apply --filename sharedtelemetryconfig.yaml
+```
+
+Confirm the configuration selected the control plane:
+
+```shell
+kubectl get --filename sharedtelemetryconfig.yaml
+```
+
+The output should return `SELECTED` as `1` and `VALIDATED` as `TRUE`.
+
+For more detailed status information, use `kubectl get`:
+
+```shell
+kubectl get --filename sharedtelemetryconfig.yaml --output yaml | yq .status
+```
+
+## Generate and monitor audit events
+
+You enabled telemetry on your new control plane and can now generate events to
+test the audit logging. This guide uses the `nop-provider` to simulate resource
+operations.
+
+Switch your `up` context to the new control plane:
+
+```shell
+up ctx <organization>/<space>/<group>/<control-plane>
+```
+
+Create a new Provider manifest:
+
+```yaml title="provider-nop.yaml"
+apiVersion: pkg.crossplane.io/v1
+kind: Provider
+metadata:
+  name: crossplane-contrib-provider-nop
+spec:
+  package: xpkg.upbound.io/crossplane-contrib/provider-nop:v0.4.0
+```
+
+Apply the provider manifest:
+
+```shell
+kubectl apply --filename provider-nop.yaml
+```
+
+Verify the provider installed and reports a `HEALTHY` status of `TRUE`.
+
+Apply an example resource to kick off event generation:
+
+```shell
+kubectl apply --filename https://raw.githubusercontent.com/crossplane-contrib/provider-nop/refs/heads/main/examples/nopresource.yaml
+```
+
+In your Grafana dashboard, navigate to **Drilldown** > **Logs** under the
+Grafana menu.
+
+Filter for `controlplane-audit` log messages.
+
+Create a query to find `create` events on `nopresources` by filtering:
+
+* The `verb` field for `create` events
+* The `objectRef_resource` field to match the Kind `nopresources`
+
+Review the audit log results. The log stream displays:
+
+* The client applying the create operation
+* The resource kind
+* Client details
+* The response code
+
+Expand the example below for an audit log entry:
+
+ Audit log entry + +```json +{ + "level": "Metadata", + "auditID": "51bbe609-14ad-4874-be78-1289c10d506a", + "stage": "ResponseComplete", + "requestURI": "/apis/nop.crossplane.io/v1alpha1/nopresources?fieldManager=kubectl-client-side-apply&fieldValidation=Strict", + "verb": "create", + "user": { + "username": "kubernetes-admin", + "groups": ["system:masters", "system:authenticated"] + }, + "impersonatedUser": { + "username": "upbound:spaces:host:masterclient", + "groups": [ + "system:authenticated", + "upbound:controlplane:admin", + "upbound:spaces:host:system:masters" + ] + }, + "sourceIPs": ["10.244.0.135", "127.0.0.1"], + "userAgent": "kubectl/v1.32.2 (darwin/arm64) kubernetes/67a30c0", + "objectRef": { + "resource": "nopresources", + "name": "example", + "apiGroup": "nop.crossplane.io", + "apiVersion": "v1alpha1" + }, + "responseStatus": { "metadata": {}, "code": 201 }, + "requestReceivedTimestamp": "2025-09-19T23:03:24.540067Z", + "stageTimestamp": "2025-09-19T23:03:24.557583Z", + "annotations": { + "authorization.k8s.io/decision": "allow", + "authorization.k8s.io/reason": "RBAC: allowed by ClusterRoleBinding \"controlplane-admin\" of ClusterRole \"controlplane-admin\" to Group \"upbound:controlplane:admin\"" + } + } +``` +
+ +## Customize the audit policy + +Spaces `v1.14.0` includes a default audit policy. You can customize this policy +by creating a configuration file and passing the values to +`observability.collectors.apiServer.auditPolicy` in the helm values file. + +An example custom audit policy: + +```yaml +observability: + controlPlanes: + apiServer: + auditPolicy: | + apiVersion: audit.k8s.io/v1 + kind: Policy + rules: + # ============================================================================ + # RULE 1: Exclude health check and version endpoints + # ============================================================================ + - level: None + nonResourceURLs: + - '/healthz*' + - '/readyz*' + - /version + # ============================================================================ + # RULE 2: ConfigMaps - Write operations only + # ============================================================================ + - level: Metadata + resources: + - group: "" + resources: + - configmaps + verbs: + - create + - update + - patch + - delete + omitStages: + - RequestReceived + - ResponseStarted + # ============================================================================ + # RULE 3: Secrets - ALL operations + # ============================================================================ + - level: Metadata + resources: + - group: "" + resources: + - secrets + verbs: + - get + - list + - watch + - create + - update + - patch + - delete + omitStages: + - RequestReceived + - ResponseStarted + # ============================================================================ + # RULE 4: Global exclusion of read-only operations + # ============================================================================ + - level: None + verbs: + - get + - list + - watch + # ========================================================================== + # RULE 5: Exclude standard Kubernetes resources from write operation logging + # ========================================================================== + - level: None + resources: + - group: "" + - group: "apps" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "storage.k8s.io" + - group: "batch" + - group: "autoscaling" + - group: "metrics.k8s.io" + - group: "node.k8s.io" + - group: "scheduling.k8s.io" + - group: "coordination.k8s.io" + - group: "discovery.k8s.io" + - group: "events.k8s.io" + - group: "flowcontrol.apiserver.k8s.io" + - group: "internal.apiserver.k8s.io" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "admissionregistration.k8s.io" + verbs: + - create + - update + - patch + - delete + # ============================================================================ + # RULE 6: Catch-all for ALL custom resources and any missed resources + # ============================================================================ + - level: Metadata + verbs: + - create + - update + - patch + - delete + omitStages: + - RequestReceived + - ResponseStarted + # ============================================================================ + # RULE 7: Final catch-all - exclude everything else + # ============================================================================ + - level: None + omitStages: + - RequestReceived + - ResponseStarted +``` +You can apply this policy during Spaces installation or upgrade using the helm values file. + +Audit policies use rules evaluated in order from top to bottom where the first +matching rule applies. 
Control plane audit policies follow Kubernetes conventions and use the +following logging levels: + +* **None** - Don't log events matching this rule +* **Metadata** - Log request metadata (user, timestamp, resource, verb) but not request or response bodies +* **Request** - Log metadata and request body but not response body +* **RequestResponse** - Log metadata, request body, and response body + +For more information, review the Kubernetes [Auditing] documentation. + +## Disable audit logging + +You can disable audit logging on a control plane by removing it from the +`SharedTelemetryConfig` selector or by deleting the `SharedTelemetryConfig`. + +### Disable for specific control planes + +Remove the `audit-enabled` label from control planes that should stop sending audit logs: + +```bash +kubectl label controlplane --namespace audit-enabled- +``` + +The `SharedTelemetryConfig` no longer selects this control plane, and audit log collection stops. + +### Disable for all control planes + +Delete the `SharedTelemetryConfig` to stop audit logging for all control planes it manages: + +```bash +kubectl delete sharedtelemetryconfig --namespace +``` + +[ctp-selection]: /spaces/howtos/observability/#control-plane-selection +[Auditing]: https://kubernetes.io/docs/tasks/debug/debug-cluster/audit/ diff --git a/spaces_versioned_docs/version-v1.15/howtos/self-hosted/declarative-ctps.md b/spaces_versioned_docs/version-v1.15/howtos/self-hosted/declarative-ctps.md new file mode 100644 index 000000000..2c3e5331b --- /dev/null +++ b/spaces_versioned_docs/version-v1.15/howtos/self-hosted/declarative-ctps.md @@ -0,0 +1,110 @@ +--- +title: Declaratively create control planes +sidebar_position: 99 +description: A tutorial to configure a Space with Argo to declaratively create and + manage control planes +--- + +In this tutorial, you learn how to configure [Argo CD][argo-cd] to communicate with a self-hosted Space. This flow allows you to declaratively create and manage control planes from Git. Argo CD is a continuous delivery tool for Kubernetes that you can use to drive GitOps flows for your control plane infrastructure. + +:::info API Version Information +This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. + +For API specifications on ControlPlane resources and their declarative creation, see the [Spaces API Reference](../../../../../reference/apis/spaces-api/). version compatibility details, see the . +::: + +## Prerequisites + +To complete this tutorial, you need the following: + +- Have already deployed an Upbound Space. +- Have already deployed an instance of Argo CD on a Kubernetes cluster. + +## Connect your Space to Argo CD + +Fetch the kubeconfig for the Space cluster, the Kubernetes cluster where you installed the Upbound Spaces software. You must add the Space cluster as a context to Argo. + +```ini +export SPACES_CLUSTER_SERVER="https://url" +export SPACES_CLUSTER_NAME="cluster" +``` + +Switch contexts to the Kubernetes cluster where you've installed Argo. Create a secret on the Argo cluster whose data contains the connection details of the Space cluster. + +:::important +Make sure the following commands are executed against your **Argo** cluster, not your Space cluster. +::: + +Run the following command in a terminal: + +```yaml +cat < +When you install a Crossplane provider on a control plane, memory gets consumed +according to the number of custom resources it defines. 
Upbound [Official Provider families][official-provider-families] provide higher fidelity control +to platform teams to install providers for only the resources they need, +reducing the bloat of needlessly installing unused custom resources. Still, you +must factor provider memory usage into your calculations to ensure you've +rightsized the memory available in your Spaces cluster. + + +:::important +Be careful not to conflate `managed resource` with `custom resource definition`. +The former is an "instance" of an external resource in Crossplane, while the +latter defines the API schema of that resource. +::: + +It's estimated that each custom resource definition consumes ~3 MB of memory. +The calculation is: + +```bash +number_of_managed_resources_defined_in_provider x 3 MB = memory_required +``` + +For example, if you plan to use [provider-aws-ec2][provider-aws-ec2], [provider-aws-s3][provider-aws-s3], and [provider-aws-iam][provider-aws-iam], the resulting calculation is: + +```bash +provider-aws-ec2: 98 x 3 MB = 294 MB +provider-aws-s3: 23 x 3 MB = 69 MB +provider-aws-iam 22 x 3 MB = 66 MB +--- +total memory: 429 MB +``` + +In this scenario, you should budget ~430 MB of memory for provider usage on this control plane. + +:::tip +Do this calculation for each provider you plan to install on your control plane. +Then do this calculation for each control plane you plan to run in your Space. +::: + + +#### Total memory usage + +Add the memory usage from the previous sections. Given the preceding examples, +they result in a recommendation to budget ~1 GB memory for each control plane +you plan to run in the Space. + +:::important + +The 1 GB recommendation is an example. +You should input your own provider requirements to arrive at a final number for +your own deployment. + +::: + +### CPU considerations + +#### Managed resource CPU usage + +The number of managed resources under management by a control plane is the largest contributing factor for CPU usage in a Space. CPU usage scales linearly according to the number of managed resources under management by your control plane. In Upbound's testing, CPU usage requirements _does_ vary from provider to provider. Using the Upbound Official Provider families as a baseline: + + +| Provider | MR create operation (CPU core seconds) | MR update or reconciliation operation (CPU core seconds) | +| ---- | ---- | ---- | +| provider-family-aws | 10 | 2 to 3 | +| provider-family-gcp | 7 | 1.5 | +| provider-family-azure | 7 to 10 | 1.5 to 3 | + + +When resources are in a non-ready state, Crossplane providers reconcile often (as fast as every 15 seconds). Once a resource reaches `READY`, each Crossplane provider defaults to a 10 minute poll interval. Given this, a 16-core machine has `16x10x60 = 9600` CPU core seconds available. Interpreting this table: + +- A single control plane that needs to create 100 AWS MRs concurrently would consume 1000 CPU core seconds, or about 1.5 cores. +- A single control plane that continuously reconciles 100 AWS MRs once they've reached a `READY` state would consume 300 CPU core seconds, or a little under half a core. + +Since `provider-family-aws` has the highest recorded numbers for CPU time required, you can use that as an upper limit in your calculations. + +Using these calculations and extrapolating values, given a 16 core machine, it's recommended you don't exceed a single control plane managing 1000 MRs. Suppose you plan to run 10 control planes, each managing 1000 MRs. 
You want to make sure your node pool has capacity for 160 cores. If you are using a machine type that has 16 cores per machine, that would mean having a node pool of size 10. If you are using a machine type that has 32 cores per machine, that would mean having a node pool of size 5. + +#### Cloud API latency + +Oftentimes, you are using Crossplane providers to talk to external cloud APIs. Those external cloud APIs often have global API rate limits (examples: [Azure limits][azure-limits], [AWS EC2 limits][aws-ec2-limits]). + +For Crossplane providers built on [Upjet][upjet] (such as Upbound Official Provider families), these providers use Terraform under the covers. They expose some knobs (such as `--max-reconcile-rate`) you can use to tweak reconciliation rates. + +### Resource buffers + +The guidance in the preceding sections explains how to calculate CPU and memory usage requirements for: + +- a set of control planes in a Space +- tuned to the number of providers you plan to use +- according to the number of managed resource instances you plan to have managed by your control planes + +Upbound recommends budgeting an extra buffer of 20% to your resource capacity calculations. The numbers shared in the preceding sections don't account for peaks or surges since they're based off average measurements. Upbound recommends budgeting this buffer to account for these things. + +## Deploying more than one Space + +You are welcome to deploy more than one Space. You just need to make sure you have a 1:1 mapping of Space to Kubernetes clusters. Spaces are by their nature constrained to a single Kubernetes Cluster, which are regional entities. If you want to offer control planes in multiple cloud environments or multiple public clouds entirely, these are justifications for deploying >1 Spaces. + +## Cert-manager + +A Spaces deployment uses the [Certificate Custom Resource] from cert-manager to +provision certificates within the Space. This establishes a nice API boundary +between what your platform may need and the Certificate requirements of a +Space. + + +In the event you would like more control over the issuing Certificate Authority +for your deployment or the deployment of cert-manager itself, this guide is for +you. + + +### Deploying + +An Upbound Space deployment doesn't have any special requirements for the +cert-manager deployment itself. The only expectation is that cert-manager and +the corresponding Custom Resources exist in the cluster. + +You should be free to install cert-manager in the cluster in any way that makes +sense for your organization. You can find some [installation ideas] in the +cert-manager docs. + +### Issuers + +A default Upbound Space install includes a [ClusterIssuer]. This `ClusterIssuer` +is a `selfSigned` issuer that other certificates are minted from. You have a +couple of options available to you for changing the default deployment of the +Issuer: +1. Changing the issuer name. +2. Providing your own ClusterIssuer. + + +#### Changing the issuer name + +The `ClusterIssuer` name is controlled by the `certificates.space.clusterIssuer` +Helm property. You can adjust this during installation by providing the +following parameter (assuming your new name is 'SpaceClusterIssuer'): +```shell +--set "certificates.space.clusterIssuer=SpaceClusterIssuer" +``` + + + +#### Providing your own ClusterIssuer + +To provide your own `ClusterIssuer`, you need to first setup your own +`ClusterIssuer` in the cluster. 
The cert-manager docs have a variety of options +for providing your own. See the [Issuer Configuration] docs for more details. + +Once you have your own `ClusterIssuer` set up in the cluster, you need to turn +off the deployment of the `ClusterIssuer` included in the Spaces deployment. +To do that, provide the following parameter during installation: +```shell +--set "certificates.provision=false" +``` + +###### Considerations +If your `ClusterIssuer` has a name that's different from the default name that +the Spaces installation expects ('spaces-selfsigned'), you need to also specify +your `ClusterIssuer` name during install using: +```shell +--set "certificates.space.clusterIssuer=" +``` + +## Ingress + +To route requests from an external client (kubectl, ArgoCD, etc) to a +control plane, a Spaces deployment includes a default [Ingress] manifest. In +order to ease getting started scenarios, the current `Ingress` includes +configurations (properties and annotations) that assume that you installed the +commonly used [ingress-nginx ingress controller] in the cluster. This section +walks you through using a different `Ingress`, if that's something that your +organization needs. + +### Default manifest + +An example of what the current `Ingress` manifest included in a Spaces install +is below: + +```yaml +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: mxe-router-ingress + namespace: upbound-system + annotations: + nginx.ingress.kubernetes.io/use-regex: "true" + nginx.ingress.kubernetes.io/ssl-redirect: "false" + nginx.ingress.kubernetes.io/proxy-send-timeout: "3600" + nginx.ingress.kubernetes.io/proxy-read-timeout: "3600" + nginx.ingress.kubernetes.io/proxy-request-buffering: "off" + nginx.ingress.kubernetes.io/proxy-body-size: "0" + nginx.ingress.kubernetes.io/proxy-http-version: "1.1" + nginx.ingress.kubernetes.io/backend-protocol: "HTTPS" + nginx.ingress.kubernetes.io/proxy-ssl-verify: "on" + nginx.ingress.kubernetes.io/proxy-ssl-secret: "upbound-system/mxp-hostcluster-certs" + nginx.ingress.kubernetes.io/proxy-ssl-name: spaces-router + nginx.ingress.kubernetes.io/configuration-snippet: | + more_set_headers "X-Request-Id: $req_id"; + more_set_headers "Request-Id: $req_id"; + more_set_headers "Audit-Id: $req_id"; +spec: + ingressClassName: nginx + tls: + - hosts: + - {{ .Values.ingress.host }} + secretName: mxe-router-tls + rules: + - host: {{ .Values.ingress.host }} + http: + paths: + - path: "/v1/controlPlanes" + pathType: Prefix + backend: + service: + name: spaces-router + port: + name: http +``` + +The notable pieces are: +1. Namespace + + + +This property represents the namespace that the spaces-router is deployed to. +In most cases this is `upbound-system`. + + + +2. proxy-ssl-* annotations + +The spaces-router pod terminates TLS using certificates located in the +mxp-hostcluster-certs `Secret` located in the `upbound-system` `Namespace`. + +3. proxy-* annotations + +Requests coming into the ingress-controller can be variable depending on what +the client is requesting. For example, `kubectl get crds` has different +requirements for the connection compared to a 'watch', for example +`kubectl get pods -w`. The ingress-controller is configured to be able to +account for either scenario. + + +4. configuration-snippets + +These commands add headers to the incoming requests that help with telemetry +and diagnosing problems within the system. + +5. 
Rules
+
+Requests coming into the control planes use a `/v1/controlPlanes` prefix and
+need to be routed to the spaces-router.
+
+### Using a different ingress manifest
+
+Operators can choose to use an `Ingress` manifest and ingress controller that
+makes the most sense for their organization. To turn off deploying the default
+`Ingress` manifest, provide the following parameter during installation:
+```shell
+--set "ingress.provision=false"
+```
+
+#### Considerations
+
+Operators need to take the following considerations into account when
+disabling the default `Ingress` deployment.
+
+1. Ensure the custom `Ingress` manifest is placed in the same namespace as the
+`spaces-router` pod.
+2. Ensure that the ingress is configured to use the `spaces-router` as a secure
+backend and that the secret used is the mxp-hostcluster-certs secret.
+3. Ensure that the ingress is configured to handle long-lived connections.
+4. Ensure that the routing rule sends requests prefixed with
+`/v1/controlPlanes` to the `spaces-router` using the `http` port.
+
+[cert-manager]: https://cert-manager.io/
+[Certificate Custom Resource]: https://cert-manager.io/docs/usage/certificate/
+[ClusterIssuer]: https://cert-manager.io/docs/concepts/issuer/
+[ingress-nginx ingress controller]: https://kubernetes.github.io/ingress-nginx/deploy/
+[installation ideas]: https://cert-manager.io/docs/installation/
+[Ingress]: https://kubernetes.io/docs/concepts/services-networking/ingress/
+[Issuer Configuration]: https://cert-manager.io/docs/configuration/
+[official-provider-families]: /manuals/packages/providers/provider-families
+[aws-eks]: https://aws.amazon.com/eks/
+[google-cloud-gke]: https://cloud.google.com/kubernetes-engine
+[microsoft-aks]: https://azure.microsoft.com/en-us/products/kubernetes-service
+[upbound-account]: https://www.upbound.io/register/?utm_source=docs&utm_medium=cta&utm_campaign=docs_spaces
+[provider-aws-ec2]: https://marketplace.upbound.io/providers/upbound/provider-aws-ec2
+[provider-aws-s3]: https://marketplace.upbound.io/providers/upbound/provider-aws-s3
+[provider-aws-iam]: https://marketplace.upbound.io/providers/upbound/provider-aws-iam
+[azure-limits]: https://learn.microsoft.com/en-us/azure/azure-resource-manager/management/request-limits-and-throttling
+[aws-ec2-limits]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/throttling.html#throttling-limits-rate-based
+[upjet]: https://github.com/upbound/upjet
diff --git a/spaces_versioned_docs/version-v1.15/howtos/self-hosted/dr.md b/spaces_versioned_docs/version-v1.15/howtos/self-hosted/dr.md
new file mode 100644
index 000000000..67ecbfecf
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.15/howtos/self-hosted/dr.md
@@ -0,0 +1,412 @@
+---
+title: Disaster Recovery
+sidebar_position: 13
+description: Configure Space-wide backups for disaster recovery.
+---
+
+:::info API Version Information
+This guide applies to **Spaces v1.9.0 and later** (Self-Hosted only). Space Backups is alpha in v1.9.0 through v1.13.x and generally available (and enabled by default) starting in v1.14.0.
+
+- **v1.9.0-v1.13.x**: Alpha feature (requires manual enablement)
+- **v1.14.0+**: GA (enabled by default)
+
+For control-plane backups, see [Backup and Restore](../backup-and-restore.md).
+:::
+
+:::important
+For Connected and Disconnected Spaces, this feature requires Spaces `v1.9.0` and, starting with `v1.14.0`, Spaces enables it by default.
To enable it on versions earlier than `v1.14.0`, set `features.alpha.spaceBackup.enabled=true` when you install Spaces:
+
+```bash
+up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
+  ...
+  --set "features.alpha.spaceBackup.enabled=true"
+```
+:::
+
+Upbound's _Space Backups_ is a built-in Space-wide backup and restore feature. This guide explains how to configure Space Backups and how to restore from one of them in case of disaster recovery.
+
+This feature is meant for Space administrators. Group or Control Plane users can leverage [Shared Backups][shared-backups] to back up and restore their ControlPlanes.
+
+## Benefits
+
+The Space Backups feature provides the following benefits:
+
+* Automatic backups for all resources in a Space and all resources in control planes, without any operational overhead.
+* Backup schedules.
+* Selectors to specify resources to back up.
+
+## Prerequisites
+
+Enable the Space Backups feature in the Space:
+
+- Cloud Spaces: Not accessible to users.
+- Connected Spaces: Space administrator must enable this feature.
+- Disconnected Spaces: Space administrator must enable this feature.
+
+## Configure a Space Backup Config
+
+[SpaceBackupConfig][spacebackupconfig] is a cluster-scoped resource. This resource configures the storage details and provider. Whenever a backup executes (either scheduled or manually initiated), it references a SpaceBackupConfig that tells it where to store the snapshot.
+
+### Backup config provider
+
+The `spec.objectStorage.provider` and `spec.objectStorage.config` fields configure:
+
+* The object storage provider
+* The path to the provider
+* The credentials needed to communicate with the provider
+
+You can only set one provider. Upbound currently supports AWS, Azure, and GCP as providers.
+
+`spec.objectStorage.config` is a freeform map of configuration options for the object storage provider. See [Thanos object storage][thanos-object-storage] for more information on the formats for each supported cloud provider. `spec.bucket` and `spec.provider` override the required values in the config.
+
+#### AWS as a storage provider
+
+This example demonstrates how to use AWS as a storage provider for your backups:
+
+```yaml
+apiVersion: admin.spaces.upbound.io/v1alpha1
+kind: SpaceBackupConfig
+metadata:
+  name: default
+spec:
+  objectStorage:
+    provider: AWS
+    bucket: spaces-backup-bucket
+    config:
+      endpoint: s3.eu-west-2.amazonaws.com
+      region: eu-west-2
+    credentials:
+      source: Secret
+      secretRef:
+        name: bucket-creds
+        namespace: upbound-system
+        key: creds
+```
+
+This example assumes you've already created an S3 bucket called
+`spaces-backup-bucket` in the `eu-west-2` AWS region. To access the bucket,
+define the account credentials as a Secret in the specified Namespace
+(`upbound-system` in this example).
+
+#### Azure as a storage provider
+
+This example demonstrates how to use Azure as a storage provider for your backups:
+
+```yaml
+apiVersion: admin.spaces.upbound.io/v1alpha1
+kind: SpaceBackupConfig
+metadata:
+  name: default
+  namespace: default
+spec:
+  objectStorage:
+    provider: Azure
+    bucket: upbound-backups
+    config:
+      storage_account: upbackupstore
+      container: upbound-backups
+      endpoint: blob.core.windows.net
+    credentials:
+      source: Secret
+      secretRef:
+        name: bucket-creds
+        namespace: upbound-system
+        key: creds
+```
+
+This example assumes you've already created an Azure storage account called
+`upbackupstore` and blob `upbound-backups`.
To access the blob, +define the account credentials as a Secret in the specified Namespace +(`upbound-system` in this example). + + +#### GCP as a storage provider + +This example demonstrates how to use Google Cloud Storage as a storage provider for your backups: + +```yaml +apiVersion: admin.spaces.upbound.io/v1alpha1 +kind: SpaceBackupConfig +metadata: + name: default + namespace: default +spec: + objectStorage: + provider: GCP + bucket: spaces-backup-bucket + credentials: + source: Secret + secretRef: + name: bucket-creds + namespace: upbound-system + key: creds +``` + + +This example assumes you've already created a Cloud bucket called +"spaces-backup-bucket" and a service account with access to this bucket. Define the key file as a Secret in the specified Namespace +(`upbound-system` in this example). + + +## Configure a Space Backup Schedule + + +[SpaceBackupSchedule][spacebackupschedule] is a cluster-scoped resource. This resource defines a backup schedule for the whole Space. + +Below is an example of a Space Backup Schedule running every day. It backs up all groups having `environment: production` labels and all control planes in those groups having `backup: please` labels. + +```yaml +apiVersion: admin.spaces.upbound.io/v1alpha1 +kind: SpaceBackupSchedule +metadata: + name: daily-schedule +spec: + schedule: "@daily" + configRef: + kind: SpaceBackupConfig + name: default + match: + groups: + labelSelectors: + - matchLabels: + environment: production + controlPlanes: + labelSelectors: + - matchLabels: + backup: please +``` + +### Define a schedule + +The `spec.schedule` field is a [Cron-formatted][cron-formatted] string. Some common examples are below: + +| Entry | Description | +| ----------------- | ------------------------------------------------------------------------------------------------- | +| `@hourly` | Run once an hour. | +| `@daily` | Run once a day. | +| `@weekly` | Run once a week. | +| `0 0/4 * * *` | Run every 4 hours. | +| `0/15 * * * 1-5` | Run every fifteenth minute on Monday through Friday. | +| `@every 1h30m10s` | Run every 1 hour, 30 minutes, and 10 seconds. Hour is the largest measurement of time for @every. | + +### Suspend a schedule + +Use `spec.suspend` field to suspend the schedule. It creates no new backups, but allows running backups to complete. + +```yaml +apiVersion: admin.spaces.upbound.io/v1alpha1 +kind: SpaceBackupSchedule +metadata: + name: daily-schedule +spec: + suspend: true +... +``` + +### Garbage collect backups when the schedule gets deleted + +Set the `spec.useOwnerReferencesInBackup` to garbage collect associated `SpaceBackup` when a `SpaceBackupSchedule` gets deleted. If set to true, backups are garbage collected when the schedule gets deleted. + +### Set the time to live + +Set the `spec.ttl` field to define the time to live for the backup. After this time, the backup is eligible for garbage collection. If this field isn't set, the backup isn't garbage collected. + +The time to live is a duration, for example, `168h` for 7 days. + +```yaml +apiVersion: admin.spaces.upbound.io/v1alpha1 +kind: SpaceBackupSchedule +metadata: + name: daily-schedule +spec: + ttl: 168h # Backup is garbage collected after 7 days +... +``` + +## Selecting space resources to backup + +By default, a SpaceBackup selects all groups and, for each of them, all control planes, secrets, and any other group-scoped resources. + +By setting `spec.match`, you can include only specific groups, control planes, secrets, or other Space resources in the backup. 
+ +By setting `spec.exclude`, you can filter out some matched Space API resources from the backup. + +### Including space resources in a backup + +Different fields are available to include resources based on labels or names: +- `spec.match.groups` to include only some groups in the backup. +- `spec.match.controlPlanes` to include only some control planes in the backup. +- `spec.match.secrets` to include only some secrets in the backup. +- `spec.match.extras` to include only some extra resources in the backup. + +```yaml +apiVersion: admin.spaces.upbound.io/v1alpha1 +kind: SpaceBackup +metadata: + name: my-backup +spec: + configRef: + kind: SpaceBackupConfig + name: default + match: + groups: + labelSelectors: + - matchLabels: + environment: production + controlPlanes: + labelSelectors: + - matchLabels: + backup: please + secrets: + names: + - my-secret + extras: + - apiGroup: "spaces.upbound.io" + kind: "SharedBackupConfig" + names: + - my-shared-backup +``` + +### Excluding Space resources from the backup + +Use the `spec.exclude` field to exclude matched Space API resources from the backup. + +Different fields are available to exclude resources based on labels or names: +- `spec.exclude.groups` to exclude some groups from the backup. +- `spec.exclude.controlPlanes` to exclude some control planes from the backup. +- `spec.exclude.secrets` to exclude some secrets from the backup. +- `spec.exclude.extras` to exclude some extra resources from the backup. + +```yaml +apiVersion: admin.spaces.upbound.io/v1alpha1 +kind: SpaceBackup +metadata: + name: my-backup +spec: + ttl: 168h # Backup is garbage collected after 7 days + configRef: + kind: SpaceBackupConfig + name: default + match: + groups: + labelSelectors: + - matchLabels: + environment: production + exclude: + groups: + names: + - not-this-one-please +``` + +### Exclude resources in control planes' backups + +By default, it backs up all resources in a selected control plane. + +Use the `spec.controlPlaneBackups.excludedResources` field to exclude resources from control planes' backups. + +```yaml +apiVersion: admin.spaces.upbound.io/v1alpha1 +kind: SpaceBackup +metadata: + name: my-backup +spec: + ttl: 168h # Backup is garbage collected after 7 days + configRef: + kind: SpaceBackupConfig + name: default + controlPlaneBackups: + excludedResources: + - secrets + - buckets.s3.aws.upbound.io +``` + +## Create a manual backup + +[SpaceBackup][spacebackup] is a cluster-scoped resource that causes a single backup to occur for the whole Space. + +Below is an example of a manual SpaceBackup: + +```yaml +apiVersion: admin.spaces.upbound.io/v1alpha1 +kind: SpaceBackup +metadata: + name: my-backup +spec: + configRef: + kind: SpaceBackupConfig + name: default + deletionPolicy: Delete +``` + + +The backup specification `DeletionPolicy` defines backup deletion actions, +including the deletion of the backup file from the bucket. The `Deletion Policy` +value defaults to `Orphan`. Set it to `Delete` to remove uploaded files +in the bucket. +For more information on the backup and restore process, review the [Spaces API documentation][spaces-api-documentation]. + +### Set the time to live + +Set the `spec.ttl` field to define the time to live for the backup. After this time, the backup is eligible for garbage collection. If this field isn't set, the backup isn't garbage collected. The time to live is a duration, for example, `168h` for 7 days. 
+
+```yaml
+apiVersion: admin.spaces.upbound.io/v1alpha1
+kind: SpaceBackup
+metadata:
+  name: my-backup
+spec:
+  ttl: 168h # Backup is garbage collected after 7 days
+...
+```
+
+## Restore from a space backup
+
+Space Backup and Restore focuses only on disaster recovery. The restore procedure assumes a new Space installation with no existing resources. The restore procedure is idempotent, so you can run it multiple times without any side effects in case of failures.
+
+To restore a Space from an existing Space Backup, follow these steps:
+
+1. Install Spaces from scratch as needed.
+2. Create a `SpaceBackupConfig` as needed to access the SpaceBackup from the object storage, for example named `my-backup-config`.
+3. Select the backup you want to restore from, for example `my-backup`.
+4. Run the following command to restore the Space:
+
+```shell
+export SPACE_BACKUP_CONFIG=my-backup-config
+export SPACE_BACKUP=my-backup
+kubectl exec -ti -n upbound-system deployments/spaces-controller -c spaces -- hyperspace restore $SPACE_BACKUP $SPACE_BACKUP_CONFIG
+```
+
+### Restore specific control planes
+
+:::important
+This feature is available from Spaces v1.11.
+:::
+
+Instead of restoring the whole Space, you can choose to restore specific control planes
+from a backup using the `--controlplanes` flag. You can also use
+the `--skip-space-restore` flag to skip restoring Space objects.
+This allows Spaces admins to restore individual control planes without
+needing to restore the entire Space.
+
+```shell
+export SPACE_BACKUP_CONFIG=my-backup-config
+export SPACE_BACKUP=my-backup
+kubectl exec -ti -n upbound-system deployments/spaces-controller -c spaces \
+  -- hyperspace restore $SPACE_BACKUP $SPACE_BACKUP_CONFIG --controlplanes default/ctp1,default/ctp2 --skip-space-restore
+```
+
+[shared-backups]: /spaces/howtos/self-hosted/workload-id/backup-restore-config/
+[spacebackupconfig]: /reference/apis/spaces-api/v1_9
+[thanos-object-storage]: https://thanos.io/tip/thanos/storage.md/
+[spacebackupschedule]: /reference/apis/spaces-api/v1_9
+[cron-formatted]: https://en.wikipedia.org/wiki/Cron
+[spacebackup]: /reference/apis/spaces-api/v1_9
+[spaces-api-documentation]: /reference/apis/spaces-api/v1_9
+
diff --git a/spaces_versioned_docs/version-v1.15/howtos/self-hosted/gitops-with-argocd.md b/spaces_versioned_docs/version-v1.15/howtos/self-hosted/gitops-with-argocd.md
new file mode 100644
index 000000000..004247a10
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.15/howtos/self-hosted/gitops-with-argocd.md
@@ -0,0 +1,142 @@
+---
+title: GitOps with ArgoCD in Self-Hosted Spaces
+sidebar_position: 80
+description: Set up GitOps workflows with Argo CD in self-hosted Spaces
+plan: "business"
+---
+
+:::info Deployment Model
+This guide applies to **self-hosted Spaces** deployments. For Upbound Cloud Spaces, see [GitOps with Upbound Control Planes](/spaces/howtos/cloud-spaces/gitops-on-upbound/).
+:::
+
+GitOps is an approach for managing a system by declaratively describing desired resources' configurations in Git and using controllers to realize the desired state. Upbound's control planes are compatible with this pattern, and it's strongly recommended you integrate GitOps into the platforms you build on Upbound.
+
+## Integrate with Argo CD
+
+[Argo CD][argo-cd] is a project in the Kubernetes ecosystem commonly used for
+GitOps. You can use it in tandem with Upbound control planes to achieve GitOps
+flows. The sections below explain how to integrate these tools with Upbound.
+
+### Configure connection secrets for control planes
+
+You can configure control planes to write their connection details to a secret.
+Do this by setting the
+[`spec.writeConnectionSecretToRef`][spec-writeconnectionsecrettoref] field in a
+control plane manifest. For example:
+
+```yaml
+apiVersion: spaces.upbound.io/v1beta1
+kind: ControlPlane
+metadata:
+  name: ctp1
+  namespace: default
+spec:
+  writeConnectionSecretToRef:
+    name: kubeconfig-ctp1
+    namespace: default
+```
+
+
+### Configure Argo CD
+
+
+To configure Argo CD for annotation resource tracking, edit the Argo CD
+ConfigMap in the Argo CD namespace. Add `application.resourceTrackingMethod:
+annotation` to the data section as below.
+
+Next, configure the [auto respect RBAC for the Argo CD
+controller][auto-respect-rbac-for-the-argo-cd-controller-1]. By default, Argo CD
+attempts to discover some Kubernetes resource types that don't exist in a
+control plane. You must configure Argo CD to respect the cluster's RBAC rules so
+that Argo CD can sync. Add `resource.respectRBAC: normal` to the data section as
+below.
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: argocd-cm
+data:
+  ...
+  application.resourceTrackingMethod: annotation
+  resource.respectRBAC: normal
+```
+
+:::tip
+The `resource.respectRBAC` configuration above tells Argo to respect RBAC for
+_all_ cluster contexts. If you're using an Argo CD instance to manage more than
+only control planes, you should consider changing the `clusters` string match
+for the configuration to apply only to control planes. For example, if every
+control plane context name followed the convention of being named
+`controlplane-`, you could set the string match to be `controlplane-*`.
+:::
+
+
+### Create a cluster context definition
+
+
+Once the control plane is ready, extract the following values from the secret
+containing the kubeconfig:
+
+```bash
+kubeconfig_content=$(kubectl get secrets kubeconfig-ctp1 -n default -o jsonpath='{.data.kubeconfig}' | base64 -d)
+server=$(echo "$kubeconfig_content" | grep 'server:' | awk '{print $2}')
+bearer_token=$(echo "$kubeconfig_content" | grep 'token:' | awk '{print $2}')
+ca_data=$(echo "$kubeconfig_content" | grep 'certificate-authority-data:' | awk '{print $2}')
+```
+
+Generate a new secret in the cluster where you installed Argo, using the prior
+values extracted:
+
+```yaml
+cat <
+
+import GlobalLanguageSelector, { CodeBlock } from '@site/src/components/GlobalLanguageSelector';
+
+
+
+
+:::important
+This feature is only available for select Business Critical customers. You can't
+set up your own Managed Space without the assistance of Upbound. If you're
+interested in this deployment mode, please [contact us][contact].
+:::
+
+
+
+A Managed Space deployed on AWS is a single-tenant deployment of a control plane
+space in your AWS organization in an isolated sub-account. With Managed Spaces,
+you can use the same API, CLI, and Console that Upbound offers, with the benefit
+of running entirely in a cloud account that you own and Upbound manages for you.
+
+The following guide walks you through setting up a Managed Space in your AWS
+organization. If you have any questions while working through this guide,
+contact your Upbound Account Representative for help.
+
+
+
+
+
+A Managed Space deployed on GCP is a single-tenant deployment of a control plane
+space in your GCP organization in an isolated project.
With Managed Spaces, you
+can use the same API, CLI, and Console that Upbound offers, with the benefit of
+running entirely in a cloud account that you own and Upbound manages for you.
+
+The following guide walks you through setting up a Managed Space in your GCP
+organization. If you have any questions while working through this guide,
+contact your Upbound Account Representative for help.
+
+
+
+
+## Managed Space on your cloud architecture
+
+
+
+A Managed Space is a deployment of the Upbound Spaces software inside an
+Upbound-controlled sub-account in your AWS cloud environment. The Spaces
+software runs in this sub-account, orchestrated by Kubernetes. Backups and
+billing data get stored inside bucket or blob storage in the same sub-account.
+The control planes deployed and controlled by the Spaces software run on the
+Kubernetes cluster which gets deployed into the sub-account.
+
+The diagram below illustrates the high-level architecture of Upbound Managed Spaces:
+
+![Upbound Managed Spaces arch](/img/managed-arch-aws.png)
+
+The Spaces software gets deployed on an EKS Cluster in the region of your
+choice. This EKS cluster is where your control planes are ultimately run.
+Upbound also deploys two buckets: one for the collection of the billing data and
+one for control plane backups.
+
+Upbound doesn't have access to other sub-accounts or your organization-level
+settings in your cloud environment. Outside of your cloud organization, Upbound
+runs the Upbound Console, which includes the Upbound API and web application,
+including the dashboard you see at `console.upbound.io`. By default, all
+connections are encrypted, but public. Optionally, you can use private network
+connectivity through [AWS PrivateLink][aws-privatelink].
+
+
+
+
+
+
+A Managed Space is a deployment of the Upbound Spaces software inside an
+Upbound-controlled project in your GCP cloud environment. The Spaces software
+runs in this project, orchestrated by Kubernetes. Backups and billing data get
+stored inside bucket or blob storage in the same project. The control planes
+deployed and controlled by the Spaces software run on the Kubernetes cluster
+which gets deployed into the project.
+
+The diagram below illustrates the high-level architecture of Upbound Managed Spaces:
+
+![Upbound Managed Spaces arch](/img/managed-arch-gcp.png)
+
+The Spaces software gets deployed on a GKE Cluster in the region of your choice.
+This GKE cluster is where your control planes are ultimately run. Upbound also
+deploys two cloud buckets: one for the collection of the billing data and one
+for control plane backups.
+
+Upbound doesn't have access to other projects or your organization-level
+settings in your cloud environment. Outside of your cloud organization, Upbound
+runs the Upbound Console, which includes the Upbound API and web application,
+including the dashboard you see at `console.upbound.io`. By default, all
+connections are encrypted, but public. Optionally, you can use private network
+connectivity through [GCP Private Service
+Connect][gcp-private-service-connect].
+
+
+
+## Prerequisites
+
+- An organization created on Upbound
+
+
+
+- You should have a preexisting AWS organization to complete this guide.
+- You must create a new AWS sub-account. Read the [AWS documentation][aws-documentation] to learn how to create a new sub-account in an existing organization on AWS, or see the CLI sketch after this list.
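+
+If you prefer the AWS CLI over the console, the following is a minimal sketch of
+creating a sub-account with AWS Organizations. The email address and account
+name below are placeholder values; substitute your own.
+
+```bash
+# Create a new member account in the current AWS organization
+# (the email and account name are placeholders).
+aws organizations create-account \
+  --email aws-upbound-space@example.com \
+  --account-name "upbound-managed-space"
+
+# Account creation is asynchronous; poll for pending requests to
+# confirm when the sub-account is ready.
+aws organizations list-create-account-status --states IN_PROGRESS
+```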
+
+After the sub-account information gets provided to Upbound, **don't change it
+any further.** Any changes made to the sub-account or the resources created by
+Upbound for the purposes of the Managed Space deployments void the SLA you have
+with Upbound. If you want to make configuration changes, contact your Upbound
+Solutions Architect.
+
+
+
+
+
+- You should have a preexisting GCP organization with an active Cloud Billing account to complete this guide.
+- You must create a new GCP project. Read the [GCP documentation][gcp-documentation] to learn how to create a new project in an existing organization on GCP.
+
+After the project information gets provided to Upbound, **don't change it any
+further.** Any changes made to the project or the resources created by Upbound
+for the purposes of the Managed Space deployments void the SLA you have with
+Upbound. If you want to make configuration changes, contact your Upbound
+Solutions Architect.
+
+
+
+
+
+
+## Set up cross-account management
+
+Upbound supports using AWS Key Management Service with cross-account IAM
+permissions. This enables the isolation of keys so the infrastructure operated
+by Upbound has limited access to symmetric keys.
+
+In the KMS key's account, apply the baseline key policy:
+
+```json
+{
+  "Sid": "Allow Upbound to use this key",
+  "Effect": "Allow",
+  "Principal": {
+    "AWS": ["[Managed Space sub-account ID]"]
+  },
+  "Action": ["kms:Encrypt", "kms:Decrypt", "kms:ReEncrypt*", "kms:GenerateDataKey*", "kms:DescribeKey"],
+  "Resource": "*"
+}
+```
+
+You need another key policy to let the sub-account create persistent resources
+with the KMS key:
+
+```json
+{
+  "Sid": "Allow attachment of persistent resources for an Upbound Managed Space",
+  "Effect": "Allow",
+  "Principal": {
+    "AWS": "[Managed Space sub-account ID]"
+  },
+  "Action": ["kms:CreateGrant", "kms:ListGrants", "kms:RevokeGrant"],
+  "Resource": "*",
+  "Condition": {
+    "Bool": {
+      "kms:GrantIsForAWSResource": "true"
+    }
+  }
+}
+```
+
+### Configure PrivateLink
+
+By default, all connections to the Upbound Console are encrypted, but public.
+AWS PrivateLink is a feature that allows VPC peering whereby your traffic
+doesn't traverse the public internet. To have this configured, contact your
+Upbound Account Representative.
+
+
+
+
+
+## Enable APIs
+
+Enable the following APIs in the new project:
+
+- Kubernetes Engine API
+- Cloud Resource Manager API
+- Compute Engine API
+- Cloud DNS API
+
+:::tip
+Read how to enable APIs in a GCP project [here][here].
+:::
+
+## Create a service account
+
+Create a service account in the new project. Name the service account
+`upbound-sa`. Give the service account the following roles:
+
+- Compute Admin
+- Project IAM Admin
+- Service Account Admin
+- DNS Administrator
+- Editor
+
+Select the service account you just created. Select **Keys**. Add a new key and
+select JSON. The key gets downloaded to your machine. Save this for later.
+
+## Create a DNS Zone
+
+Create a DNS Zone and set the **Zone type** to `Public`.
+
+### Configure Private Service Connect
+
+By default, all connections to the Upbound Console are encrypted, but public.
+GCP Private Service Connect is a feature that allows VPC peering whereby your
+traffic doesn't traverse the public internet. To have this configured, contact
+your Upbound Account Representative.
+
+
+
+## Provide information to Upbound
+
+Once these policies get attached to the key, contact your Upbound Account
+Representative and provide the following:
+
+
+
+- The full ARN of the KMS key.
+- The name of the organization that you created in Upbound. Use the up CLI command, `up org list`, to see this information.
+- Confirmation of which region in AWS you want the deployment to target.
+
+
+
+
+
+- The service account JSON key
+- The NS records associated with the DNS name created in the last step.
+- The name of the organization that you created in Upbound. Use the up CLI command, `up org list`, to see this information.
+- Confirmation of which region in GCP you want the deployment to target.
+
+
+
+Once Upbound has this information, the request gets processed in a business day.
+
+## Use your Managed Space
+
+Once the Managed Space gets deployed, you can see it in the Space selector when browsing your environment on [`console.upbound.io`][console-upbound-io].
+
+
+
+
+[contact]: https://www.upbound.io/contact-us
+[aws-privatelink]: #configure-privatelink
+[aws-documentation]: https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_create.html#orgs_manage_accounts_create-new
+[gcp-private-service-connect]: #configure-private-service-connect
+[gcp-documentation]: https://cloud.google.com/resource-manager/docs/creating-managing-organization
+[here]: https://cloud.google.com/apis/docs/getting-started#enabling_apis
+[console-upbound-io]: https://console.upbound.io/
diff --git a/spaces_versioned_docs/version-v1.15/howtos/self-hosted/oidc-configuration.md b/spaces_versioned_docs/version-v1.15/howtos/self-hosted/oidc-configuration.md
new file mode 100644
index 000000000..cbef4dc42
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.15/howtos/self-hosted/oidc-configuration.md
@@ -0,0 +1,289 @@
+---
+title: Configure OIDC
+sidebar_position: 20
+description: Configure OIDC in your Space
+---
+:::important
+This guide is only applicable for administrators who've deployed self-hosted Spaces. For general RBAC in Upbound, read [Upbound RBAC][upbound-rbac].
+:::
+
+Upbound uses the Kubernetes [Structured Authentication Configuration][structured-auth-config] to validate OIDC tokens sent to the API. Upbound stores this
+configuration as a `ConfigMap` and provides it to the Upbound router
+component during installation with Helm.
+
+This guide walks you through how to create and apply an authentication
+configuration to validate Upbound with an external identity provider. Each
+section focuses on a specific part of the configuration file.
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments.
+
+For details on authentication and access control across versions, and related platform authentication features, see the [Platform manual](../../../../platform/).
+:::
+
+## Creating the `AuthenticationConfiguration` file
+
+First, create a file called `config.yaml` with an `AuthenticationConfiguration`
+kind. The `AuthenticationConfiguration` is the initial authentication structure
+necessary for Upbound to communicate with your chosen identity provider.
+
+```yaml
+apiVersion: apiserver.config.k8s.io/v1beta1
+kind: AuthenticationConfiguration
+jwt:
+- issuer:
+    url: oidc-issuer-url
+    audiences:
+    - oidc-client-id
+  claimMappings: # optional
+    username:
+      claim: oidc-username-claim
+      prefix: oidc-username-prefix
+    groups:
+      claim: oidc-groups-claim
+      prefix: oidc-groups-prefix
+```
+
+
+For detailed configuration options, including the CEL-based token validation,
+review the feature [documentation][structured-auth-config].
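+
+As a minimal sketch of that CEL-based validation, the following adds a claim
+validation rule to the authenticator above. The issuer URL, audience, and the
+`hd` domain check are placeholder assumptions, not Upbound requirements:
+
+```yaml
+apiVersion: apiserver.config.k8s.io/v1beta1
+kind: AuthenticationConfiguration
+jwt:
+- issuer:
+    url: https://example.com
+    audiences:
+    - my-client-id
+  claimValidationRules:
+  # Reject tokens whose `hd` claim isn't the expected domain (placeholder rule).
+  - expression: 'claims.hd == "example.com"'
+    message: "the hd claim must be set to example.com"
+```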
+
+
+The `AuthenticationConfiguration` allows you to configure multiple JWT
+authenticators as separate issuers.
+
+### Configure an issuer
+
+The `jwt` array requires an `issuer` specification and typically contains:
+
+- A `username` claim mapping
+- A `groups` claim mapping
+
+Optionally, the configuration may also include:
+
+- A set of claim validation rules
+- A set of user validation rules
+
+The `issuer` URL must be unique across all configured authenticators.
+
+```yaml
+issuer:
+  url: https://example.com
+  discoveryUrl: https://discovery.example.com/.well-known/openid-configuration
+  certificateAuthority: |-
+    
+  audiences:
+  - client-id-a
+  - client-id-b
+  audienceMatchPolicy: MatchAny
+```
+
+By default, the authenticator assumes the OIDC Discovery URL is
+`{issuer.url}/.well-known/openid-configuration`. Most identity providers follow
+this structure, and you can omit the `discoveryUrl` field. To use a separate
+discovery service, specify the full path to the discovery endpoint in this
+field.
+
+If the CA for the Issuer isn't public, provide the PEM-encoded CA for the Discovery URL.
+
+At least one of the `audiences` entries must match the `aud` claim in the JWT.
+For OIDC tokens, this is the Client ID of the application attempting to access
+the Upbound API. Having multiple values set allows the same configuration to
+apply to multiple client applications, for example the `kubectl` CLI and an
+Internal Developer Portal.
+
+If you specify multiple `audiences`, `audienceMatchPolicy` must equal `MatchAny`.
+
+### Configure `claimMappings`
+
+#### Username claim mapping
+
+By default, the authenticator uses the `sub` claim as the user name. To override this, either:
+
+- specify *both* `claim` and `prefix`. `prefix` may be explicitly set to the empty string.
+or
+
+- specify a CEL `expression` to calculate the user name.
+
+```yaml
+claimMappings:
+  username:
+    claim: "sub"
+    prefix: "keycloak"
+    # Alternatively, replace claim/prefix with a CEL expression:
+    # expression: 'claims.username + ":external-user"'
+```
+
+
+#### Groups claim mapping
+
+By default, this configuration doesn't map groups, unless you either:
+
+- specify both `claim` and `prefix`. `prefix` may be explicitly set to the empty string.
+or
+
+- specify a CEL `expression` that returns a string or list of strings.
+
+
+```yaml
+claimMappings:
+  groups:
+    claim: "groups"
+    prefix: ""
+    # Alternatively, replace claim/prefix with a CEL expression:
+    # expression: 'claims.roles.split(",")'
+```
+
+
+### Validation rules
+
+
+Validation rules are outside the scope of this document. Review the
+[documentation][structured-auth-config] for more information. Examples include
+using CEL expressions to validate authentication such as:
+
+
+- Validating that a token claim has a specific value
+- Validating that a token has a limited lifetime
+- Ensuring usernames and groups don't contain reserved prefixes
+
+## Required claims
+
+To interact with Space and ControlPlane APIs, users must have the `upbound.io/aud` claim set to one of the following:
+
+| Upbound.io Audience | Notes |
+| -------------------------------------------------------- | -------------------------------------------------------------------- |
+| `[]` | No Access to Space-level or ControlPlane APIs |
+| `['upbound:spaces:api']` | This Identity is only for Space-level APIs |
+| `['upbound:spaces:controlplanes']` | This Identity is only for ControlPlane APIs |
+| `['upbound:spaces:api', 'upbound:spaces:controlplanes']` | This Identity is for both Space-level and ControlPlane APIs |
+
+
+You can set this claim in two ways:
+
+- Map it in the ID token in your identity provider.
+- Inject it in the authenticator with the `jwt.claimMappings.extra` array.
+
+For example:
+```yaml
+apiVersion: apiserver.config.k8s.io/v1beta1
+kind: AuthenticationConfiguration
+jwt:
+- issuer:
+    url: https://keycloak:8443/realms/master
+    certificateAuthority: |-
+      
+    audiences:
+    - master-realm
+    audienceMatchPolicy: MatchAny
+  claimMappings:
+    username:
+      claim: "preferred_username"
+      prefix: "keycloak:"
+    groups:
+      claim: "groups"
+      prefix: ""
+    extra:
+    - key: 'upbound.io/aud'
+      valueExpression: "['upbound:spaces:controlplanes', 'upbound:spaces:api']"
+```
+
+## Install the `AuthenticationConfiguration`
+
+Once you create an `AuthenticationConfiguration` file, store it as a
+`ConfigMap` in the host cluster for the Upbound Space. The commands below use
+`<configmap-name>` as a placeholder; pick a name and reference it at install time.
+
+```sh
+kubectl create configmap <configmap-name> -n upbound-system --from-file=config.yaml=./path/to/config.yaml
+```
+
+
+To enable OIDC authentication and disable Upbound IAM when installing the Space,
+reference the configuration and pass an empty value to the Upbound IAM issuer
+parameter:
+
+
+```sh
+up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
+  ...
+  --set "authentication.structuredConfig=<configmap-name>" \
+  --set "router.controlPlane.extraArgs[0]=--upbound-iam-issuer-url="
+```
+
+## Configure RBAC
+
+
+In this scenario, the external identity provider handles authentication, but
+permissions for Spaces and ControlPlane APIs use standard RBAC objects.
+
+### Spaces APIs
+
+The Spaces APIs include:
+```yaml
+- apiGroups:
+  - spaces.upbound.io
+  resources:
+  - controlplanes
+  - sharedexternalsecrets
+  - sharedsecretstores
+  - backups
+  - backupschedules
+  - sharedbackups
+  - sharedbackupconfigs
+  - sharedbackupschedules
+- apiGroups:
+  - observability.spaces.upbound.io
+  resources:
+  - sharedtelemetryconfigs
+```
+
+### ControlPlane APIs
+
+
+
+Crossplane specifies three [roles][crossplane-managed-clusterroles] for a
+ControlPlane: admin, editor, and viewer. These map to the verbs `admin`, `edit`,
+and `view` on the `controlplanes/k8s` resource in the `spaces.upbound.io` API
+group.
+
+
+### Control access
+
+The `groups` claim in the `AuthenticationConfiguration` allows you to control
+resource access when you create a `ClusterRoleBinding`. A `ClusterRole` defines
+the role's permissions, and a `ClusterRoleBinding` grants that role to a set of
+subjects.
+
+The example below allows `admin` permissions for all ControlPlanes to members of
+the `ctp-admins` group:
+
+```yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: allow-ctp-admin
+rules:
+- apiGroups:
+  - spaces.upbound.io
+  resources:
+  - controlplanes/k8s
+  verbs:
+  - admin
+```
+
+The following `ClusterRoleBinding` grants the role to the `ctp-admins` group:
+
+```yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: allow-ctp-admin
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: allow-ctp-admin
+subjects:
+- apiGroup: rbac.authorization.k8s.io
+  kind: Group
+  name: ctp-admins
+```
+
+[structured-auth-config]: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#using-authentication-configuration
+[crossplane-managed-clusterroles]: https://github.com/crossplane/crossplane/blob/master/design/design-doc-rbac-manager.md#managed-rbac-clusterroles
+[upbound-rbac]: /manuals/platform/concepts/authorization/upbound-rbac
diff --git a/spaces_versioned_docs/version-v1.15/howtos/self-hosted/proxies-config.md b/spaces_versioned_docs/version-v1.15/howtos/self-hosted/proxies-config.md
new file mode 100644
index 000000000..3802e4cb0
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.15/howtos/self-hosted/proxies-config.md
@@ -0,0 +1,31 @@
+---
+title: Proxied configuration
+sidebar_position: 20
+description: Configure Upbound within a proxied environment
+---
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments in proxied environments. Proxy configuration procedures are consistent across versions.
+:::
+
+
+
+When you install Upbound with Helm in a proxied environment, update the registry settings below to point to your internal registry.
+
+
+
+```bash
+helm -n upbound-system upgrade --install spaces \
+  oci://xpkg.upbound.io/spaces-artifacts/spaces \
+  --version "${SPACES_VERSION}" \
+  --set "ingress.host=${SPACES_ROUTER_HOST}" \
+  --set "account=${UPBOUND_ACCOUNT}" \
+  --set "authentication.hubIdentities=true" \
+  --set "authorization.hubRBAC=true" \
+  --set "registry=registry.company.corp/spaces" \
+  --set "controlPlanes.uxp.registryOverride=registry.company.corp/xpkg.upbound.io" \
+  --set "controlPlanes.uxp.repository=registry.company.corp/spaces" \
+  --wait
+```
diff --git a/spaces_versioned_docs/version-v1.15/howtos/self-hosted/query-api.md b/spaces_versioned_docs/version-v1.15/howtos/self-hosted/query-api.md
new file mode 100644
index 000000000..c112e9001
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.15/howtos/self-hosted/query-api.md
@@ -0,0 +1,396 @@
+---
+title: Deploy Query API infrastructure
+weight: 130
+description: Query API
+aliases:
+  - /all-spaces/self-hosted-spaces/query-api
+  - /self-hosted-spaces/query-api
+  - all-spaces/self-hosted-spaces/query-api
+---
+
+
+
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+). Query API has evolved across versions:
+
+- **Cloud Spaces**: Available since v1.6 (enabled by default)
+- **Self-Hosted**: Available since v1.8 (requires manual enablement)
+:::
+
+:::important
+
+This feature is in preview. The Query API is available in the Cloud Space offering since `v1.6` and enabled by default.
+
+Since `v1.8.0`, the Query API is also a requirement for connecting a Space. It's off by default in self-hosted Spaces; see below to enable it.
+
+:::
+
+Upbound's Query API allows users to inspect objects and resources within their control planes. The read-only `up alpha query` and `up alpha get` CLI commands let you gather information about your control planes quickly and efficiently. These commands follow the [`kubectl` conventions][kubectl-conventions] for filtering, sorting, and retrieving information from your Space.
+
+Query API requires a PostgreSQL database to store the data. You can use the default PostgreSQL instance provided by Upbound or bring your own PostgreSQL instance.
+
+## Managed setup
+
+:::tip
+If you don't have specific requirements for your setup, Upbound recommends following this approach.
+:::
+
+To enable this feature, set `features.alpha.apollo.enabled=true` and `apollo.apollo.storage.postgres.create=true` when installing Spaces.
+
+You also need to install CloudNativePG (`CNPG`) to provide the PostgreSQL instance. You can let the `up` CLI do this for you, or install it manually.
+
+For more customization, see the [Helm chart reference][helm-chart-reference]. You can modify the number
+of PostgreSQL instances, pooling instances, storage size, and more.
+
+If you have specific requirements not addressed in the Helm chart, see below for more information on how to bring your own [PostgreSQL setup][postgresql-setup].
+
+### Using the up CLI
+
+Before you begin, make sure you have the most recent version of the [`up` CLI installed][up-cli-installed].
+
+To enable this feature, set `features.alpha.apollo.enabled=true` and `apollo.apollo.storage.postgres.create=true` when installing Spaces:
+
+```bash
+up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
+  ...
+  --set "features.alpha.apollo.enabled=true" \
+  --set "apollo.apollo.storage.postgres.create=true"
+```
+
+`up space init` and `up space upgrade` install CloudNativePG automatically, if needed.
+
+### Helm chart
+
+If you are installing the Helm chart in some other way, you can manually install CloudNativePG in one of the [supported ways][supported-ways], for example:
+
+```shell
+kubectl apply --server-side -f \
+  https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.24.1/cnpg-1.24.1.yaml
+kubectl rollout status -n cnpg-system deployment cnpg-controller-manager -w --timeout 120s
+```
+
+Next, install the Spaces Helm chart with the necessary values, for example:
+
+```shell
+helm -n upbound-system upgrade --install spaces \
+  oci://xpkg.upbound.io/spaces-artifacts/spaces \
+  --version "${SPACES_VERSION}" \
+  ...
+  --set "features.alpha.apollo.enabled=true" \
+  --set "apollo.apollo.storage.postgres.create=true" \
+  --wait
+```
+
+## Self-hosted PostgreSQL configuration
+
+
+If your workflow requires more customization, you can provide your own
+PostgreSQL instance and configure credentials manually.
+
+Using your own PostgreSQL instance requires careful architecture consideration.
+Review the architecture and requirements guidelines.
+
+### Architecture
+
+Besides a PostgreSQL database, the Query API architecture uses the following components:
+* **Apollo Syncers**: Watch etcd for changes and sync them to PostgreSQL. One or more per control plane.
+* **Apollo Server**: Serves the Query API from the data in PostgreSQL. One or more per Space.
+
+The default setup also uses the `PgBouncer` connection pooler to manage connections from the syncers.
+
+```mermaid
+graph LR
+    User[User]
+
+    subgraph Cluster["Cluster (Spaces)"]
+        direction TB
+        Apollo[apollo]
+
+        subgraph ControlPlanes["Control Planes"]
+            APIServer[API Server]
+            Syncer[apollo-syncer]
+        end
+    end
+
+    PostgreSQL[(PostgreSQL)]
+
+    User -->|requests| Apollo
+
+    Apollo -->|connects| PostgreSQL
+    Apollo -->|creates schemas & users| PostgreSQL
+
+    Syncer -->|watches| APIServer
+    Syncer -->|writes| PostgreSQL
+
+    PostgreSQL -->|data| Apollo
+
+    style PostgreSQL fill:#e1f5ff,stroke:#333,stroke-width:2px,color:#000
+    style Apollo fill:#ffe1e1,stroke:#333,stroke-width:2px,color:#000
+    style Cluster fill:#f0f0f0,stroke:#333,stroke-width:2px,color:#000
+    style ControlPlanes fill:#fff,stroke:#666,stroke-width:1px,stroke-dasharray: 5 5,color:#000
+```
+
+
+Each component needs to connect to the PostgreSQL database.
+
+In the event of database issues, you can provide a new database and the syncers
+automatically repopulate the data.
+
+### Requirements
+
+* A PostgreSQL 16 instance or cluster.
+* A database, for example named `upbound`.
+* **Optional**: A dedicated user for the Apollo Syncers, for example named `syncer`. If you don't provide one, the Spaces Controller generates a dedicated set of credentials per syncer with the necessary permissions.
+* A dedicated **superuser or admin account** for the Apollo Server.
+* **Optional**: A connection pooler, like PgBouncer, to manage connections from the Apollo Syncers. If you didn't provide the optional users, you might have to configure the pooler to allow users to connect using the same credentials as PostgreSQL.
+* **Optional**: A read replica for the Apollo Syncers to connect to, to reduce load on the primary database; this might cause a slight delay in the data being available through the Query API.
+
+Below are examples of setups to get you started; you can mix and match them to suit your needs.
+
+### In-cluster setup
+
+:::tip
+
+If you don't have strong opinions on your setup, but still want full control over
+the resources created for customizations the Helm chart doesn't support, Upbound
+recommends the in-cluster setup.
+
+:::
+
+For more customization than the managed setup, you can use CloudNativePG for
+PostgreSQL in the same cluster.
+
+For in-cluster setup, manually deploy the operator in one of the [supported ways][supported-ways-1], for example:
+
+```shell
+kubectl apply --server-side -f \
+  https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.24.1/cnpg-1.24.1.yaml
+kubectl rollout status -n cnpg-system deployment cnpg-controller-manager -w --timeout 120s
+```
+
+Then create a `Cluster` and `Pooler` in the `upbound-system` namespace, for example:
+
+```shell
+kubectl create ns upbound-system
+
+kubectl apply -f - <
+
+### External setup
+
+
+:::tip
+
+If you want to run your PostgreSQL instance outside the cluster, but are fine with credentials being managed by the `apollo` user, this is the suggested way to proceed.
+
+:::
+
+When using this setup, you must manually create the required Secrets in the
+`upbound-system` namespace. The `apollo` user must have permissions to create
+schemas and users.
+
+```shell
+
+kubectl create ns upbound-system
+
+# A Secret containing the necessary credentials to connect to the PostgreSQL instance
+kubectl create secret generic spaces-apollo-pg-app -n upbound-system \
+  --from-literal=password=supersecret
+
+# A Secret containing the necessary CA certificate to verify the connection to the PostgreSQL instance
+kubectl create secret generic spaces-apollo-pg-ca -n upbound-system \
+  --from-file=ca.crt=/path/to/ca.crt
+```
+
+Next, install Spaces with the necessary settings:
+
+```shell
+export PG_URL=your-postgres-host:5432
+export PG_POOLED_URL=your-pgbouncer-host:5432 # this could be the same as above

+helm upgrade --install ... \
+  --set "features.alpha.apollo.enabled=true" \
+  --set "apollo.apollo.storage.postgres.create=false" \
+  --set "apollo.apollo.storage.postgres.connection.url=$PG_URL" \
+  --set "apollo.apollo.storage.postgres.connection.credentials.secret.name=spaces-apollo-pg-app" \
+  --set "apollo.apollo.storage.postgres.connection.credentials.format=basicauth" \
+  --set "apollo.apollo.storage.postgres.connection.ca.name=spaces-apollo-pg-ca" \
+  --set "apollo.apollo.storage.postgres.connection.syncer.url=$PG_POOLED_URL"
+```
+
+### External setup with all custom credentials
+
+For custom credentials with Apollo Syncers or Server, create a new secret in the
+`upbound-system` namespace:
+
+```shell
+export APOLLO_SYNCER_USER=syncer
+export APOLLO_SERVER_USER=apollo
+
+kubectl create ns upbound-system
+
+# A Secret containing the necessary credentials to connect to the PostgreSQL instance
+kubectl create secret generic spaces-apollo-pg-app -n upbound-system \
+  --from-literal=password=supersecret
+
+# A Secret containing the necessary CA certificate to verify the connection to the PostgreSQL instance
+kubectl create secret generic spaces-apollo-pg-ca -n upbound-system \
+  --from-file=ca.crt=/path/to/ca.crt
+
+# A Secret containing the necessary credentials for the Apollo Syncers to connect to the PostgreSQL instance.
+# These will be used by all Syncers in the Space.
+kubectl create secret generic spaces-apollo-pg-syncer -n upbound-system \
+  --from-literal=username=$APOLLO_SYNCER_USER \
+  --from-literal=password=supersecret
+
+# A Secret containing the necessary credentials for the Apollo Server to connect to the PostgreSQL instance.
+kubectl create secret generic spaces-apollo-pg-apollo -n upbound-system \
+  --from-literal=username=$APOLLO_SERVER_USER \
+  --from-literal=password=supersecret
+```
+
+Next, install Spaces with the necessary settings. The `connection.syncer.*`
+values configure the credentials for the syncers, and the `connection.apollo.*`
+values configure the credentials for the server:
+
+```shell
+export PG_URL=your-postgres-host:5432
+export PG_POOLED_URL=your-pgbouncer-host:5432 # this could be the same as above
+
+helm ... \
+  --set "features.alpha.apollo.enabled=true" \
+  --set "apollo.apollo.storage.postgres.create=false" \
+  --set "apollo.apollo.storage.postgres.connection.url=$PG_URL" \
+  --set "apollo.apollo.storage.postgres.connection.credentials.secret.name=spaces-apollo-pg-app" \
+  --set "apollo.apollo.storage.postgres.connection.credentials.format=basicauth" \
+  --set "apollo.apollo.storage.postgres.connection.ca.name=spaces-apollo-pg-ca" \
+  --set "apollo.apollo.storage.postgres.connection.syncer.url=$PG_POOLED_URL" \
+  --set "apollo.apollo.storage.postgres.connection.syncer.credentials.format=basicauth" \
+  --set "apollo.apollo.storage.postgres.connection.syncer.credentials.user=$APOLLO_SYNCER_USER" \
+  --set "apollo.apollo.storage.postgres.connection.syncer.credentials.secret.name=spaces-apollo-pg-syncer" \
+  --set "apollo.apollo.storage.postgres.connection.apollo.credentials.format=basicauth" \
+  --set "apollo.apollo.storage.postgres.connection.apollo.credentials.user=$APOLLO_SERVER_USER" \
+  --set "apollo.apollo.storage.postgres.connection.apollo.credentials.secret.name=spaces-apollo-pg-apollo" \
+  --set "apollo.apollo.storage.postgres.connection.apollo.url=$PG_POOLED_URL"
+```
+
+
+## Using the Query API
+
+
+See the [Query API documentation][query-api-documentation] for more information on how to use the Query API.
+
+
+
+
+[postgresql-setup]: #self-hosted-postgresql-configuration
+[up-cli-installed]: /manuals/cli/overview
+[query-api-documentation]: /spaces/howtos/query-api
+
+[helm-chart-reference]: /reference/helm-reference
+[kubectl-conventions]: https://kubernetes.io/docs/reference/kubectl/generated/kubectl_get/
+[supported-ways]: https://cloudnative-pg.io/documentation/current/installation_upgrade/
+[supported-ways-1]: https://cloudnative-pg.io/documentation/current/installation_upgrade/
+[cloudnativepg-documentation]: https://cloudnative-pg.io/documentation/1.24/storage/#configuration-via-a-pvc-template
+[postgresql-cluster]: https://cloudnative-pg.io/documentation/1.24/resource_management/
+[pooler]: https://cloudnative-pg.io/documentation/1.24/connection_pooling/#pod-templates
+[postgresql-cluster-2]: https://cloudnative-pg.io/documentation/1.24/replication/
+[pooler-3]: https://cloudnative-pg.io/documentation/1.24/connection_pooling/#high-availability-ha
+[postgresql-cluster-4]: https://cloudnative-pg.io/documentation/1.24/operator_capability_levels/#override-of-operand-images-through-the-crd
+[pooler-5]: https://cloudnative-pg.io/documentation/1.24/connection_pooling/#pod-templates
+[cloudnativepg-documentation-6]: https://cloudnative-pg.io/documentation/1.24/postgresql_conf/
diff --git a/spaces_versioned_docs/version-v1.15/howtos/self-hosted/scaling-resources.md b/spaces_versioned_docs/version-v1.15/howtos/self-hosted/scaling-resources.md
new file mode 100644
index 000000000..7bb04d2c2
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.15/howtos/self-hosted/scaling-resources.md
@@ -0,0 +1,184 @@
+---
+title: Scaling vCluster and etcd Resources
+weight: 950
+description: A guide for scaling vCluster and etcd resources in self-hosted Spaces
+aliases:
+  - /all-spaces/self-hosted-spaces/scaling-resources
+  - /spaces/scaling-resources
+---
+
+With large workloads or during control plane migration, you may encounter
+performance-impacting resource constraints. This guide explains how to scale
+vCluster and `etcd` resources for optimal performance in your self-hosted Space.
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Scaling procedures are consistent across versions.
+:::
+
+## Signs of resource constraints
+
+You may need to scale your vCluster or `etcd` resources if you observe:
+
+- API server timeout errors such as `http: Handler timeout`
+- Error messages about `too many requests` and requests to `try again later`
+- Operations like provider installation failing with errors like `cannot apply provider package secret`
+- vCluster pods experiencing continuous restarts
+- Degraded API performance with high resource volume
+
+
+## Scaling vCluster resources
+
+
+The vCluster component handles Kubernetes API requests for your control planes.
+Deployments with multiple control planes or providers may exceed default resource allocations.
+
+```yaml
+# Default settings
+controlPlanes.vcluster.resources.limits.cpu: "3000m"
+controlPlanes.vcluster.resources.limits.memory: "3960Mi"
+controlPlanes.vcluster.resources.requests.cpu: "170m"
+controlPlanes.vcluster.resources.requests.memory: "1320Mi"
+```
+
+For larger workloads, like migrating from an existing control plane with several
+providers, increase these resource limits in your Spaces `values.yaml` file.
+
+```yaml
+controlPlanes:
+  vcluster:
+    resources:
+      limits:
+        cpu: "4000m"   # Increase to 4 cores
+        memory: "6Gi"  # Increase to 6GB memory
+      requests:
+        cpu: "500m"    # Increase baseline CPU request
+        memory: "2Gi"  # Increase baseline memory request
+```
+
+## Scaling `etcd` storage
+
+Kubernetes relies on `etcd` performance, and `etcd` is sensitive to IOPS
+(input/output operations per second) bottlenecks. Upbound allocates `50Gi`
+volumes for `etcd` in cloud environments to ensure adequate IOPS performance.
+
+```yaml
+# Default setting
+controlPlanes.etcd.persistence.size: "5Gi"
+```
+
+For production environments or when migrating large control planes, increase
+`etcd` volume size and specify an appropriate storage class:
+
+```yaml
+controlPlanes:
+  etcd:
+    persistence:
+      size: "50Gi" # Recommended for production
+      storageClassName: "fast-ssd" # Use a high-performance storage class
+```
+
+### Storage class considerations
+
+For AWS:
+- Use GP3 volumes with adequate IOPS
+- For AWS GP3 volumes, IOPS scale with volume size (3000 IOPS baseline)
+- For optimal performance, provision at least 32Gi to support up to 16,000 IOPS
+
+For GCP and Azure:
+- Use SSD-based persistent disk types for optimal performance
+- Consider premium storage options for high-throughput workloads
+
+## Scaling Crossplane resources
+
+Crossplane manages provider resources in your control planes. You may need to increase provider resources for larger deployments:
+
+```yaml
+# Default settings
+controlPlanes.uxp.resourcesCrossplane.requests.cpu: "370m"
+controlPlanes.uxp.resourcesCrossplane.requests.memory: "400Mi"
+```
+
+
+For environments with many providers or managed resources:
+
+
+```yaml
+controlPlanes:
+  uxp:
+    resourcesCrossplane:
+      limits:
+        cpu: "1000m"    # Add CPU limit
+        memory: "1Gi"   # Add memory limit
+      requests:
+        cpu: "500m"     # Increase CPU request
+        memory: "512Mi" # Increase memory request
+```
+
+## High availability configuration
+
+For production environments, enable High Availability mode to ensure resilience:
+
+```yaml
+controlPlanes:
+  ha:
+    enabled: true
+```
+
+## Best practices for migration scenarios
+
+When migrating from existing control planes into a self-hosted Space:
+
+1. **Pre-scale resources**: Scale up resources before performing the migration
+2. **Monitor resource usage**: Watch resource consumption during and after migration with `kubectl top pods`
+3. **Scale incrementally**: If issues persist, increase resources incrementally until performance stabilizes
+4. **Consider storage performance**: `etcd` is sensitive to storage I/O performance
+
+## Helm values configuration
+
+Apply these settings through your Spaces Helm values file:
+
+```yaml
+controlPlanes:
+  vcluster:
+    resources:
+      limits:
+        cpu: "4000m"
+        memory: "6Gi"
+      requests:
+        cpu: "500m"
+        memory: "2Gi"
+  etcd:
+    persistence:
+      size: "50Gi"
+      storageClassName: "gp3" # Use your cloud provider's fast storage class
+  uxp:
+    resourcesCrossplane:
+      limits:
+        cpu: "1000m"
+        memory: "1Gi"
+      requests:
+        cpu: "500m"
+        memory: "512Mi"
+  ha:
+    enabled: true # For production environments
+```
+
+Apply the configuration using Helm:
+
+```bash
+helm upgrade --install spaces oci://xpkg.upbound.io/spaces-artifacts/spaces \
+  -f values.yaml \
+  -n upbound-system
+```
+
+## Considerations
+
+- **Provider count**: Each provider adds resource overhead - consider using provider families to optimize resource usage
+- **Managed resources**: The number of managed resources impacts CPU usage more than memory
+- **Vertical pod autoscaling**: Consider using vertical pod autoscaling in Kubernetes to automatically adjust resources based on usage
+- **Storage performance**: Storage performance is as important as capacity for etcd
+- **Network latency**: Low-latency connections between components improve performance
+
+
diff --git a/spaces_versioned_docs/version-v1.15/howtos/self-hosted/self-hosted-spaces-deployment.md b/spaces_versioned_docs/version-v1.15/howtos/self-hosted/self-hosted-spaces-deployment.md
new file mode 100644
index 000000000..e549e3939
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.15/howtos/self-hosted/self-hosted-spaces-deployment.md
@@ -0,0 +1,461 @@
+---
+title: Deployment Workflow
+sidebar_position: 3
+description: A quickstart guide for Upbound Spaces
+tier: "business"
+---
+import GlobalLanguageSelector, { CodeBlock } from '@site/src/components/GlobalLanguageSelector';
+
+
+
+
+
+This guide deploys a self-hosted Upbound cluster in AWS.
+
+
+
+
+
+This guide deploys a self-hosted Upbound cluster in Azure.
+
+
+
+
+
+This guide deploys a self-hosted Upbound cluster in GCP.
+
+
+
+Disconnected Spaces allows you to host control planes in your preferred environment.
+
+## Prerequisites
+
+To get started deploying your own Disconnected Space, you need:
+
+- An Upbound organization account string, provided by your Upbound account representative
+- A `token.json` license, provided by your Upbound account representative
+
+
+
+- An AWS account and the AWS CLI
+
+
+
+
+
+- An Azure account and the Azure CLI
+
+
+
+
+
+- A GCP account and the GCP CLI
+
+
+
+:::important
+Disconnected Spaces is a business-critical feature of Upbound and requires a license token to successfully complete the installation. [Contact Upbound][contact-upbound] if you want to try out Upbound with Disconnected Spaces.
+:::
+
+## Provision the hosting environment
+
+### Create a cluster
+
+
+
+Configure the name and target region you want the EKS cluster deployed to.
+
+```ini
+export SPACES_CLUSTER_NAME=upbound-space-quickstart
+export SPACES_REGION=us-east-1
+```
+
+Provision a 3-node cluster using eksctl.
+
+```bash
+cat <
+
+
+
+Configure the name and target region you want the AKS cluster deployed to.
+
+```ini
+export SPACES_RESOURCE_GROUP_NAME=upbound-space-quickstart
+export SPACES_CLUSTER_NAME=upbound-space-quickstart
+export SPACES_LOCATION=westus
+```
+
+Provision a new Azure resource group.
+ +```bash +az group create --name ${SPACES_RESOURCE_GROUP_NAME} --location ${SPACES_LOCATION} +``` + +Provision a 3-node cluster. + +```bash +az aks create -g ${SPACES_RESOURCE_GROUP_NAME} -n ${SPACES_CLUSTER_NAME} \ + --enable-managed-identity \ + --node-count 3 \ + --node-vm-size Standard_D4s_v4 \ + --enable-addons monitoring \ + --enable-msi-auth-for-monitoring \ + --generate-ssh-keys \ + --network-plugin kubenet \ + --network-policy calico +``` + +Get the kubeconfig of your AKS cluster. + +```bash +az aks get-credentials --resource-group ${SPACES_RESOURCE_GROUP_NAME} --name ${SPACES_CLUSTER_NAME} +``` + + + + + +Configure the name and target region you want the GKE cluster deployed to. + +```ini +export SPACES_PROJECT_NAME=upbound-spaces-project +export SPACES_CLUSTER_NAME=upbound-spaces-quickstart +export SPACES_LOCATION=us-west1-a +``` + +Create a new project and set it as the current project. + +```bash +gcloud projects create ${SPACES_PROJECT_NAME} +gcloud config set project ${SPACES_PROJECT_NAME} +``` + +Provision a 3-node cluster. + +```bash +gcloud container clusters create ${SPACES_CLUSTER_NAME} \ + --enable-network-policy \ + --num-nodes=3 \ + --zone=${SPACES_LOCATION} \ + --machine-type=e2-standard-4 +``` + +Get the kubeconfig of your GKE cluster. + +```bash +gcloud container clusters get-credentials ${SPACES_CLUSTER_NAME} --zone=${SPACES_LOCATION} +``` + + + +## Configure the pre-install + +### Set your Upbound organization account details + +Set your Upbound organization account string as an environment variable for use in future steps + +```ini +export UPBOUND_ACCOUNT= +``` + +### Set up pre-install configurations + +Export the path of the license token JSON file provided by your Upbound account representative. + +```ini {copy-lines="2"} +# Change the path to where you saved the token. +export SPACES_TOKEN_PATH="/path/to/token.json" +``` + +Set the version of Spaces software you want to install. + +```ini +export SPACES_VERSION= +``` + +Set the router host and cluster type. The `SPACES_ROUTER_HOST` is the domain name that's used to access the control plane instances. It's used by the ingress controller to route requests. + +```ini +export SPACES_ROUTER_HOST="proxy.upbound-127.0.0.1.nip.io" +``` + +:::important +Make sure to replace the placeholder text in `SPACES_ROUTER_HOST` and provide a real domain that you own. +::: + + +## Install the Spaces software + + +### Install cert-manager + +Install cert-manager. + +```bash +kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.11.0/cert-manager.yaml +kubectl wait deployment -n cert-manager cert-manager-webhook --for condition=Available=True --timeout=360s +``` + + + +### Install ALB Load Balancer + +```bash +helm install aws-load-balancer-controller aws-load-balancer-controller --namespace kube-system \ + --repo https://aws.github.io/eks-charts \ + --set clusterName=${SPACES_CLUSTER_NAME} \ + --set serviceAccount.create=false \ + --set serviceAccount.name=aws-load-balancer-controller \ + --wait +``` + + + +### Install ingress-nginx + +Starting with Spaces v1.10.0, you need to configure the ingress-nginx +controller to allow SSL-passthrough mode. You can do so by passing the +`--enable-ssl-passthrough=true` command-line option to the controller. 
+The following Helm install command enables this with the `controller.extraArgs` +parameter: + + + +```bash +helm upgrade --install ingress-nginx ingress-nginx \ + --create-namespace --namespace ingress-nginx \ + --repo https://kubernetes.github.io/ingress-nginx \ + --version 4.12.1 \ + --set 'controller.service.type=LoadBalancer' \ + --set 'controller.extraArgs.enable-ssl-passthrough=true' \ + --set 'controller.service.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-type=external' \ + --set 'controller.service.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-scheme=internet-facing' \ + --set 'controller.service.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-nlb-target-type=ip' \ + --set 'controller.service.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-healthcheck-protocol=http' \ + --set 'controller.service.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-healthcheck-path=/healthz' \ + --set 'controller.service.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-healthcheck-port=10254' \ + --wait +``` + + + + + +```bash +helm upgrade --install ingress-nginx ingress-nginx \ + --create-namespace --namespace ingress-nginx \ + --repo https://kubernetes.github.io/ingress-nginx \ + --version 4.12.1 \ + --set 'controller.service.type=LoadBalancer' \ + --set 'controller.extraArgs.enable-ssl-passthrough=true' \ + --set 'controller.service.annotations.service\.beta\.kubernetes\.io/azure-load-balancer-health-probe-request-path=/healthz' \ + --wait +``` + + + + + +```bash +helm upgrade --install ingress-nginx ingress-nginx \ + --create-namespace --namespace ingress-nginx \ + --repo https://kubernetes.github.io/ingress-nginx \ + --version 4.12.1 \ + --set 'controller.service.type=LoadBalancer' \ + --set 'controller.extraArgs.enable-ssl-passthrough=true' \ + --wait +``` + + + +### Install Upbound Spaces software + +Create an image pull secret so that the cluster can pull Upbound Spaces images. + +```bash +kubectl create ns upbound-system +kubectl -n upbound-system create secret docker-registry upbound-pull-secret \ + --docker-server=https://xpkg.upbound.io \ + --docker-username="$(jq -r .accessId $SPACES_TOKEN_PATH)" \ + --docker-password="$(jq -r .token $SPACES_TOKEN_PATH)" +``` + +Log in with Helm to be able to pull chart images for the installation commands. + +```bash +jq -r .token $SPACES_TOKEN_PATH | helm registry login xpkg.upbound.io -u $(jq -r .accessId $SPACES_TOKEN_PATH) --password-stdin +``` + +Install the Spaces software. + +```bash +helm -n upbound-system upgrade --install spaces \ + oci://xpkg.upbound.io/spaces-artifacts/spaces \ + --version "${SPACES_VERSION}" \ + --set "ingress.host=${SPACES_ROUTER_HOST}" \ + --set "account=${UPBOUND_ACCOUNT}" \ + --set "authentication.hubIdentities=true" \ + --set "authorization.hubRBAC=true" \ + --wait +``` + +### Create a DNS record + +:::important +If you chose to create a public ingress, you also need to create a DNS record for the load balancer of the public facing ingress. Do this before you create your first control plane. +::: + +Create a DNS record for the load balancer of the public facing ingress. 
To get the address for the Ingress, run the following:
+
+
+
+```bash
+kubectl get ingress \
+  -n upbound-system mxe-router-ingress \
+  -o jsonpath='{.status.loadBalancer.ingress[0].hostname}'
+```
+
+
+
+
+
+```bash
+kubectl get ingress \
+  -n upbound-system mxe-router-ingress \
+  -o jsonpath='{.status.loadBalancer.ingress[0].ip}'
+```
+
+
+
+
+
+```bash
+kubectl get ingress \
+  -n upbound-system mxe-router-ingress \
+  -o jsonpath='{.status.loadBalancer.ingress[0].ip}'
+```
+
+
+
+If the preceding command doesn't return a load balancer address, your provider may not have allocated it yet. Once it's available, add a DNS record for the `SPACES_ROUTER_HOST` to point to the given load balancer address. If it's an IPv4 address, add an A record. If it's a domain name, add a CNAME record.
+
+## Configure the up CLI
+
+With your kubeconfig pointed at the Kubernetes cluster where you installed
+Upbound Spaces, create a new profile in the `up` CLI. This profile interacts
+with your Space:
+
+```bash
+up profile create --use ${SPACES_CLUSTER_NAME} --type=disconnected --organization ${UPBOUND_ACCOUNT}
+```
+
+Optionally, log in to your Upbound account using the new profile so you can use the Upbound Marketplace with this profile as well:
+
+```bash
+up login
+```
+
+
+## Connect to your Space
+
+
+Use `up ctx` to create a kubeconfig context pointed at your new Space:
+
+```bash
+up ctx disconnected/$(kubectl config current-context)
+```
+
+## Create your first control plane
+
+You can now create a control plane with the `up` CLI:
+
+```bash
+up ctp create ctp1
+```
+
+You can also create a control plane with kubectl:
+
+```yaml
+cat <
+```yaml
+observability:
+  spacesCollector:
+    env:
+    - name: API_KEY
+      valueFrom:
+        secretKeyRef:
+          name: my-secret
+          key: api-key
+    config:
+      exporters:
+        otlphttp:
+          endpoint: ""
+          headers:
+            api-key: ${env:API_KEY}
+      exportPipeline:
+        logs:
+        - otlphttp
+        metrics:
+        - otlphttp
+        traces:
+        - otlphttp
+```
+
+
+You can export metrics, logs, and traces from your Crossplane installation, Spaces
+infrastructure (controller, API, router, etc.), provider-helm, and
+provider-kubernetes.
+
+### Router metrics
+
+The Spaces router component uses Envoy as a reverse proxy and exposes detailed
+metrics about request handling, circuit breakers, and connection pooling.
+Upbound collects these metrics in your Space after you enable Space-level
+observability.
+
+Envoy metrics in Upbound include:
+
+- **Upstream cluster metrics** - Request status codes, timeouts, retries, and latency for traffic to control planes and services
+- **Circuit breaker metrics** - Connection and request circuit breaker state for both `DEFAULT` and `HIGH` priority levels
+- **Downstream listener metrics** - Client connections and requests received
+- **HTTP connection manager metrics** - End-to-end HTTP request processing and latency
+
+For a complete list of available router metrics and example PromQL queries, see the [Router metrics reference][router-ref].
+
+### Router tracing
+
+The Spaces router generates distributed traces through OpenTelemetry integration,
+providing end-to-end visibility into request flow across the system. Use these
+traces to debug latency issues, understand request paths, and correlate errors
+across services.
+
+The router uses:
+
+- **Protocol**: OTLP (OpenTelemetry Protocol) over gRPC
+- **Service name**: `spaces-router`
+- **Transport**: TLS-encrypted connection to telemetry collector
+
+#### Trace configuration
+
+Enable tracing and configure the sampling rate with the following Helm values:
+
+```yaml
+observability:
+  enabled: true
+  tracing:
+    enabled: true
+    sampling:
+      rate: 0.1 # Sample 10% of new traces (0.0-1.0)
+```
+
+The sampling behavior depends on whether a parent trace context exists:
+
+- **With parent context**: If a `traceparent` header is present, the parent's
+  sampling decision is respected, enabling proper distributed tracing across services.
+- **Root spans**: For new traces without a parent, Envoy samples based on
+  `x-request-id` hashing. The default sampling rate is 10%.
+
+#### TLS configuration for external collectors
+
+To send traces to an external OTLP collector, configure the endpoint and TLS settings:
+
+```yaml
+observability:
+  enabled: true
+  tracing:
+    enabled: true
+    endpoint: "otlp-gateway.example.com"
+    port: 443
+    tls:
+      caBundleSecretRef: "custom-ca-secret"
+```
+
+If `caBundleSecretRef` is set, the router uses the CA bundle from the referenced
+Kubernetes secret. The secret must contain a key named `ca.crt` with the
+PEM-encoded CA bundle. If not set, the router uses the Spaces CA for the
+in-cluster collector.
+
+#### Custom trace tags
+
+The router adds custom tags to every span to enable filtering and grouping by
+control plane:
+
+| Tag | Source | Description |
+|-----|--------|-------------|
+| `controlplane.id` | `x-upbound-mxp-id` header | Control plane UUID |
+| `controlplane.name` | `x-upbound-mxp-host` header | Internal vcluster hostname |
+| `hostcluster.id` | `x-upbound-hostcluster-id` header | Host cluster identifier |
+
+These tags enable queries like "show all slow requests to control plane X" or
+"find errors for control planes in host cluster Y."
+
+#### Example trace
+
+The following example shows the attributes from a successful GET request:
+
+```text
+Span: ingress
+├─ Service: spaces-router
+├─ Duration: 8.025ms
+├─ Attributes:
+│  ├─ http.method: GET
+│  ├─ http.status_code: 200
+│  ├─ upstream_cluster: ctp-b2b37aaa-ee55-492c-ba0c-4d561a6325fa-api-cluster
+│  ├─ controlplane.id: b2b37aaa-ee55-492c-ba0c-4d561a6325fa
+│  ├─ controlplane.name: vcluster.mxp-b2b37aaa-ee55-492c-ba0c-4d561a6325fa-system
+│  └─ response_size: 1827
+```
+
+## Available metrics
+
+Space-level observability collects metrics from multiple infrastructure components:
+
+### Infrastructure component metrics
+
+- Crossplane controller metrics
+- Spaces controller, API, and router metrics
+- Provider metrics (provider-helm, provider-kubernetes)
+
+### Router metrics
+
+The router component exposes Envoy proxy metrics for monitoring traffic flow and
+service health.
Key metric categories include:
+
+- `envoy_cluster_upstream_rq_*` - Upstream request metrics (status codes, timeouts, retries, latency)
+- `envoy_cluster_circuit_breakers_*` - Circuit breaker state and capacity
+- `envoy_listener_downstream_*` - Client connection and request metrics
+- `envoy_http_downstream_*` - HTTP request processing metrics
+
+Example query to monitor total request rate:
+
+```promql
+sum(rate(envoy_cluster_upstream_rq_total{job="spaces-router-envoy"}[5m]))
+```
+
+Example query for P95 latency:
+
+```promql
+histogram_quantile(
+  0.95,
+  sum by (le) (
+    rate(envoy_cluster_upstream_rq_time_bucket{job="spaces-router-envoy"}[5m])
+  )
+)
+```
+
+For detailed router metrics documentation and more query examples, see the [Router metrics reference][router-ref].
+
+
+## OpenTelemetryCollector image
+
+
+Control plane (`SharedTelemetry`) and Space observability deploy the same custom
+OpenTelemetry Collector image. The OpenTelemetry Collector image supports the
+`otlphttp`, `datadog`, and `debug` exporters.
+
+For more information on observability configuration, review the [Helm chart reference][helm-chart-reference].
+
+## Observability in control planes
+
+Read the [observability documentation][observability-documentation] to learn
+about the features Upbound offers for collecting telemetry from control planes.
+
+
+## Router metrics reference {#router-ref}
+
+To avoid overwhelming observability tools with hundreds of Envoy metrics, an
+allow-list filters metrics to only the following metric families.
+
+### Upstream cluster metrics
+
+Metrics tracking requests sent from Envoy to configured upstream clusters.
+Individual control planes, spaces-api, and other services are each considered
+an upstream cluster. Use these metrics to monitor service health, identify
+upstream errors, and measure backend latency.
+
+| Metric | Description |
+|--------|-------------|
+| `envoy_cluster_upstream_rq_xx_total` | HTTP status codes (2xx, 3xx, 4xx, 5xx) with label `envoy_response_code_class` |
+| `envoy_cluster_upstream_rq_timeout_total` | Requests that timed out waiting for upstream |
+| `envoy_cluster_upstream_rq_retry_limit_exceeded_total` | Requests that exhausted retry attempts |
+| `envoy_cluster_upstream_rq_total` | Total upstream requests |
+| `envoy_cluster_upstream_rq_time_bucket` | Latency histogram (for P50/P95/P99 calculations) |
+| `envoy_cluster_upstream_rq_time_sum` | Sum of request durations |
+| `envoy_cluster_upstream_rq_time_count` | Count of requests |
+
+### Circuit breaker metrics
+
+
+
+Metrics tracking circuit breaker state and remaining capacity. Circuit breakers
+prevent cascading failures by limiting connections and concurrent requests to
+unhealthy upstreams. Two priority levels exist: `DEFAULT` for watch requests and
+`HIGH` for API requests.
+
+
+| Name | Description |
+|--------|-------------|
+| `envoy_cluster_circuit_breakers_default_cx_open` | `DEFAULT` priority connection circuit breaker open (gauge) |
+| `envoy_cluster_circuit_breakers_default_rq_open` | `DEFAULT` priority request circuit breaker open (gauge) |
+| `envoy_cluster_circuit_breakers_default_remaining_cx` | Available `DEFAULT` priority connections (gauge) |
+| `envoy_cluster_circuit_breakers_default_remaining_rq` | Available `DEFAULT` priority request slots (gauge) |
+| `envoy_cluster_circuit_breakers_high_cx_open` | `HIGH` priority connection circuit breaker open (gauge) |
+| `envoy_cluster_circuit_breakers_high_rq_open` | `HIGH` priority request circuit breaker open (gauge) |
+| `envoy_cluster_circuit_breakers_high_remaining_cx` | Available `HIGH` priority connections (gauge) |
+| `envoy_cluster_circuit_breakers_high_remaining_rq` | Available `HIGH` priority request slots (gauge) |
+
+### Downstream listener metrics
+
+Metrics tracking requests received from clients such as kubectl and API consumers.
+Use these metrics to monitor client connection patterns, overall request volume,
+and responses sent to external users.
+
+| Name | Description |
+|--------|-------------|
+| `envoy_listener_downstream_rq_xx_total` | HTTP status codes for responses sent to clients |
+| `envoy_listener_downstream_rq_total` | Total requests received from clients |
+| `envoy_listener_downstream_cx_total` | Total connections from clients |
+| `envoy_listener_downstream_cx_active` | Currently active client connections (gauge) |
+
+
+
+### HTTP connection manager metrics
+
+
+Metrics from Envoy's HTTP connection manager tracking end-to-end request
+processing. These metrics provide a comprehensive view of the HTTP request
+lifecycle including status codes and client-perceived latency.
+
+| Name | Description |
+|--------|-------------|
+| `envoy_http_downstream_rq_xx` | HTTP status codes (note: no `_total` suffix for this metric family) |
+| `envoy_http_downstream_rq_total` | Total HTTP requests received |
+| `envoy_http_downstream_rq_time_bucket` | Downstream request latency histogram |
+| `envoy_http_downstream_rq_time_sum` | Sum of downstream request durations |
+| `envoy_http_downstream_rq_time_count` | Count of downstream requests |
+
+[router-ref]: #router-ref
+[observability-documentation]: /spaces/howtos/observability
+[opentelemetry-collector]: https://opentelemetry.io/docs/collector/
+[opentelemetry-operator]: https://opentelemetry.io/docs/kubernetes/operator/
+[helm-chart-reference]: /reference/helm-reference
diff --git a/spaces_versioned_docs/version-v1.15/howtos/self-hosted/spaces-management.md b/spaces_versioned_docs/version-v1.15/howtos/self-hosted/spaces-management.md
new file mode 100644
index 000000000..3df61c306
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.15/howtos/self-hosted/spaces-management.md
@@ -0,0 +1,219 @@
+---
+title: Interacting with Disconnected Spaces
+sidebar_position: 10
+description: Common operations in Spaces
+---
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Spaces management procedures are consistent across versions.
+
+For version compatibility details, see the [Spaces release notes][spaces-release-notes].
+:::
+
+## Spaces management
+
+### Create a Space
+
+To install an Upbound Space, it's recommended you dedicate an entire Kubernetes cluster to the Space. You can use [up space init][up-space-init] to install an Upbound Space.
Below is an example:
+
+```bash
+up space init "v1.9.0"
+```
+:::tip
+For a full guide to get started with Spaces, read the [quickstart][quickstart] guide.
+:::
+
+You can also install the helm chart for Spaces directly. In order for a Spaces install to succeed, you must install some prerequisites first and configure them. This includes:
+
+- UXP
+- provider-helm and provider-kubernetes
+- cert-manager
+
+Furthermore, the Spaces chart requires a pull secret, which Upbound must provide to you.
+
+```bash
+helm -n upbound-system upgrade --install spaces \
+  oci://xpkg.upbound.io/spaces-artifacts/spaces \
+  --version "v1.9.0" \
+  --set "ingress.host=your-host.com" \
+  --set "clusterType=eks" \
+  --set "account=your-upbound-account" \
+  --wait
+```
+For a complete tutorial of the helm install, read one of the deployment guides for [AWS][aws], [Azure][azure], or [GCP][gcp], which cover the step-by-step process.
+
+### Upgrade a Space
+
+To upgrade a Space from one version to the next, use [up space upgrade][up-space-upgrade]. Spaces supports upgrading from version `ver x.N.*` to version `ver x.N+1.*`.
+
+```bash
+up space upgrade "v1.9.0"
+```
+
+You can also upgrade a Space by manually bumping the Helm chart version. Before
+upgrading, review the release notes for any breaking changes or
+special requirements:
+
+1. Review the release notes for the target version in the [Spaces Release Notes][spaces-release-notes]
+2. Upgrade the Space by updating the helm chart version:
+
+```bash
+helm -n upbound-system upgrade spaces \
+  oci://xpkg.upbound.io/spaces-artifacts/spaces \
+  --version "v1.9.0" \
+  --reuse-values \
+  --wait
+```
+
+For major version upgrades or configuration changes, extract your current values
+and adjust:
+
+```bash
+# Extract current values to a file
+helm -n upbound-system get values spaces > spaces-values.yaml
+
+# Upgrade with modified values
+helm -n upbound-system upgrade spaces \
+  oci://xpkg.upbound.io/spaces-artifacts/spaces \
+  --version "v1.9.0" \
+  -f spaces-values.yaml \
+  --wait
+```
+
+### Downgrade a Space
+
+To roll back a Space from one version to the previous, use [up space upgrade][up-space-upgrade-1]. Spaces supports downgrading from version `ver x.N.*` to version `ver x.N-1.*`.
+
+```bash
+up space upgrade --rollback
+```
+
+You can also downgrade a Space manually using Helm by specifying an earlier version:
+
+```bash
+helm -n upbound-system upgrade spaces \
+  oci://xpkg.upbound.io/spaces-artifacts/spaces \
+  --version "v1.8.0" \
+  --reuse-values \
+  --wait
+```
+
+When downgrading, make sure to:
+1. Check the [release notes][release-notes] for specific downgrade instructions
+2. Verify compatibility between the downgraded Space and any control planes
+3. Back up any critical data before proceeding
+
+### Uninstall a Space
+
+To uninstall a Space from a Kubernetes cluster, use [up space destroy][up-space-destroy]. A destroy operation uninstalls core components and orphans control planes and their associated resources.
+
+```bash
+up space destroy
+```
+
+## Control plane management
+
+You can manage control planes in a Space via the [up CLI][up-cli] or the Spaces-local Kubernetes API. When you install a Space, it defines a new API type, `kind: ControlPlane`, that you can use to create and manage control planes in the Space.
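+
+To confirm the Space cluster serves this API type, you can list the resources in
+its API group. This is a quick sanity check, assuming your kubeconfig points at
+the Spaces cluster:
+
+```bash
+kubectl api-resources --api-group=spaces.upbound.io
+```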
+
+### Create a control plane
+
+To create a control plane in a Space using `up`, run the following:
+
+```bash
+up ctp create ctp1
+```
+
+You can also declare a new control plane like the example below and apply it to your Spaces cluster:
+
+```yaml
+apiVersion: spaces.upbound.io/v1beta1
+kind: ControlPlane
+metadata:
+  name: ctp1
+  namespace: default
+spec:
+  writeConnectionSecretToRef:
+    name: kubeconfig-ctp1
+    namespace: default
+```
+
+This manifest:
+
+- Creates a new control plane in the Space called `ctp1`.
+- Publishes the kubeconfig to connect to the control plane to a secret in the Spaces cluster, called `kubeconfig-ctp1`.
+
+### Connect to a control plane
+
+To connect to a control plane in a Space using `up`, run the following:
+
+```bash
+up ctp connect new-control-plane
+```
+
+The command changes your kubeconfig's current context to the control plane you specify. If you want to change your kubeconfig back to a previous context, run:
+
+```bash
+up ctp disconnect
+```
+
+If you configured your control plane to publish connection details, you can also access it this way. Once the control plane is ready, use the secret (containing connection details) to connect to the API server of your control plane. Using the `kubeconfig-ctp1` secret from the earlier manifest:
+
+```bash
+kubectl get secret kubeconfig-ctp1 -n default -o jsonpath='{.data.kubeconfig}' | base64 -d > /tmp/ctp1.yaml
+```
+
+Reference the kubeconfig whenever you want to interact directly with the API server of the control plane (vs the Space's API server):
+
+```bash
+kubectl get providers --kubeconfig=/tmp/ctp1.yaml
+```
+
+### Configure a control plane
+
+Spaces offers a built-in feature that allows you to connect a control plane to a Git source. This experience is similar to running a control plane in [Upbound's SaaS environment][upbound-s-saas-environment]. Upbound recommends using the built-in Git integration to drive configuration of your control planes in a Space.
+
+Learn more in the [Spaces Git integration][spaces-git-integration] documentation.
+
+### List control planes
+
+To list all control planes in a Space using `up`, run the following:
+
+```bash
+up ctp list
+```
+
+Or you can use Kubernetes-style semantics to list the control planes:
+
+```bash
+kubectl get controlplanes
+```
+
+
+### Delete a control plane
+
+To delete a control plane in a Space using `up`, run the following:
+
+```bash
+up ctp delete ctp1
+```
+
+Or you can use Kubernetes-style semantics to delete the control plane:
+
+```bash
+kubectl delete controlplane ctp1
+```
+
+
+[up-space-init]: /reference/cli-reference
+[quickstart]: /
+[aws]: /spaces/howtos/self-hosted/self-hosted-spaces-deployment
+[azure]: /spaces/howtos/self-hosted/self-hosted-spaces-deployment
+[gcp]: /spaces/howtos/self-hosted/self-hosted-spaces-deployment
+[up-space-upgrade]: /reference/cli-reference
+[spaces-release-notes]: /reference/release-notes/spaces
+[up-space-upgrade-1]: /reference/cli-reference
+[release-notes]: /reference/release-notes/spaces
+[up-space-destroy]: /reference/cli-reference
+[up-cli]: /reference/cli-reference
+[upbound-s-saas-environment]: /spaces/howtos/self-hosted/spaces-management
+[spaces-git-integration]: /spaces/howtos/self-hosted/gitops
diff --git a/spaces_versioned_docs/version-v1.15/howtos/self-hosted/troubleshooting.md b/spaces_versioned_docs/version-v1.15/howtos/self-hosted/troubleshooting.md
new file mode 100644
index 000000000..8d1ca6517
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.15/howtos/self-hosted/troubleshooting.md
@@ -0,0 +1,132 @@
+---
+title: Troubleshooting
+sidebar_position: 100
+description: A guide for troubleshooting an issue that occurs in a Space
+---
+
+The guidance below helps you find solutions for issues you encounter when deploying and using an Upbound Space. Use these tips as a supplement to the observability metrics discussed in the [Observability][observability] page.
+
+## General tips
+
+Most issues fall into two general categories:
+
+1. issues with the Spaces management plane
+2. issues on a control plane
+
+If your control plane doesn't reach a `Ready` state, it's indicative of the former. If your control plane is in a created and running state, but resources aren't reconciling, it's indicative of the latter.
+
+### Spaces component layout
+
+Run `kubectl get pods -A` against the cluster hosting a Space. You should see a variety of pods across several namespaces.
It should look something like this:
+
+```bash
+NAMESPACE                                         NAME                                                              READY   STATUS    RESTARTS   AGE
+cert-manager                                      cert-manager-6d6769565c-mc5df                                     1/1     Running   0          25m
+cert-manager                                      cert-manager-cainjector-744bb89575-nw4fg                          1/1     Running   0          25m
+cert-manager                                      cert-manager-webhook-759d6dcbf7-ps4mq                             1/1     Running   0          25m
+ingress-nginx                                     ingress-nginx-controller-7f8ccfccc6-6szlp                         1/1     Running   0          25m
+kube-system                                       coredns-5d78c9869d-4p477                                          1/1     Running   0          26m
+kube-system                                       coredns-5d78c9869d-pdxt6                                          1/1     Running   0          26m
+kube-system                                       etcd-kind-control-plane                                           1/1     Running   0          26m
+kube-system                                       kindnet-8s7pq                                                     1/1     Running   0          26m
+kube-system                                       kube-apiserver-kind-control-plane                                 1/1     Running   0          26m
+kube-system                                       kube-controller-manager-kind-control-plane                        1/1     Running   0          26m
+kube-system                                       kube-proxy-l68r8                                                  1/1     Running   0          26m
+kube-system                                       kube-scheduler-kind-control-plane                                 1/1     Running   0          26m
+local-path-storage                                local-path-provisioner-6bc4bddd6b-qsdjt                           1/1     Running   0          26m
+mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system   coredns-5dc69d6447-f56rh-x-kube-system-x-vcluster                 1/1     Running   0          21m
+mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system   crossplane-6b6d67bc66-6b8nx-x-upbound-system-x-vcluster           1/1     Running   0          20m
+mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system   crossplane-rbac-manager-78f6fc7cb4-pjkhc-x-upbound-s-12253c3c4e   1/1     Running   0          20m
+mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system   kube-state-metrics-7f8f4dcc5b-8p8c4                               1/1     Running   0          22m
+mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system   mxp-gateway-68f546b9c8-xnz5j-x-upbound-system-x-vcluster          1/1     Running   0          20m
+mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system   mxp-ksm-config-54655667bb-hv9br                                   1/1     Running   0          22m
+mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system   mxp-readyz-5f7f97d967-b98bw                                       1/1     Running   0          22m
+mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system   otlp-collector-56d7d46c8d-g5sh5-x-upbound-system-x-vcluster       1/1     Running   0          20m
+mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system   vcluster-67c9fb8959-ppb2m                                         1/1     Running   0          22m
+mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system   vcluster-api-6bfbccc49d-ffgpj                                     1/1     Running   0          22m
+mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system   vcluster-controller-7cc6855656-8c46b                              1/1     Running   0          22m
+mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system   vcluster-etcd-0                                                   1/1     Running   0          22m
+mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system   vector-754b494b84-wljw4                                           1/1     Running   0          22m
+mxp-system                                        mxp-charts-chartmuseum-7587f77558-8tltb                           1/1     Running   0          23m
+upbound-system                                    crossplane-b4dc7b4c9-6hjh5                                        1/1     Running   0          25m
+upbound-system                                    crossplane-contrib-provider-helm-ce18dd03e6e4-7945d8985-4gcwr     1/1     Running   0          24m
+upbound-system                                    crossplane-contrib-provider-kubernetes-1f1e32c1957d-577756gs2x4   1/1     Running   0          24m
+upbound-system                                    crossplane-rbac-manager-d8cb49cbc-gbvvf                           1/1     Running   0          25m
+upbound-system                                    spaces-controller-6647677cf9-5zl5q                                1/1     Running   0          24m
+upbound-system                                    spaces-router-bc78c96d7-kzts2                                     2/2     Running   0          24m
+```
+
+What you are seeing is:
+
+- Pods in the `upbound-system` namespace are components required to run the management plane of the Space. This includes the `spaces-controller`, `spaces-router`, and the UXP installation.
+- Pods in the `mxp-{GUID}-system` namespace are components that collectively power a control plane. Notable call outs include pod names that look like `vcluster-api-{GUID}` and `vcluster-controller-{GUID}`, which are integral components of a control plane.
+- Pods in other notable namespaces, including `cert-manager` and `ingress-nginx`, are prerequisite components that support a Space's successful operation.
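+
+For example, to list only the host namespaces backing individual control planes,
+you can filter on the `mxp-` prefix described above. This is a convenience
+sketch, not a required step:
+
+```bash
+kubectl get namespaces | grep '^mxp-'
+```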
+
+
+
+### Troubleshooting tips for the Spaces management plane
+
+Start by getting the status of all the pods in a Space:
+
+1. Make sure the current context of your kubeconfig points at the Kubernetes cluster hosting your Space
+2. Get the status of all the pods in the Space:
+```bash
+kubectl get pods -A
+```
+3. Scan the `Status` column to see if any of the pods report a status besides `Running`.
+4. Scan the `Restarts` column to see if any of the pods have restarted.
+5. If you notice a status other than `Running` or see pods that restarted, you should investigate their events by running:
+```bash
+kubectl describe pod <pod-name> -n <namespace>
+```
+
+Next, inspect the status of objects and releases:
+
+1. Make sure the current context of your kubeconfig points at the Kubernetes cluster hosting your Space
+2. Inspect the objects in your Space. If any are unhealthy, describe those objects to get the events:
+```bash
+kubectl get objects
+```
+3. Inspect the releases in your Space. If any are unhealthy, describe those releases to get the events:
+```bash
+kubectl get releases
+```
+
+### Troubleshooting tips for control planes in a Space
+
+General troubleshooting in a control plane starts by fetching the events of the control plane:
+
+1. Make sure the current context of your kubeconfig points at the Kubernetes cluster hosting your Space
+2. Run the following to fetch your control planes:
+```bash
+kubectl get ctp
+```
+3. Describe the control plane by providing its name, found in the preceding step:
+```bash
+kubectl describe controlplanes.spaces.upbound.io <control-plane-name>
+```
+
+## Issues
+
+
+### Your control plane is stuck in a 'creating' state
+
+#### Error: unknown field "ports" in io.k8s.api.networking.v1.NetworkPolicySpec
+
+This error is emitted when the Spaces software attempts to install a Helm release named `control-plane-host-policies`. The full error is:
+
+_CannotCreateExternalResource failed to install release: unable to build kubernetes objects from release manifest: error validating "": error validating data: ValidationError(NetworkPolicy.spec): unknown field "ports" in io.k8s.api.networking.v1.NetworkPolicySpec_
+
+This error may be caused by running a Space on an earlier version of Kubernetes than is supported (`v1.26` or later). To resolve this issue, upgrade the host Kubernetes cluster to `v1.26` or later.
+
+### Your Spaces install fails
+
+#### Error: You tried to install a Space on a previous Crossplane installation
+
+If you try to install a Space on an existing cluster that previously had Crossplane or UXP on it, you may encounter errors. Due to how the Spaces installer tests for the presence of UXP, it may detect orphaned CRDs that weren't cleaned up by the previous uninstall of Crossplane. You may need to manually [remove old Crossplane CRDs][remove-old-crossplane-crds] for the installer to properly detect the UXP prerequisite.
+
+
+
+
+[observability]: /spaces/howtos/observability
+[remove-old-crossplane-crds]: https://docs.crossplane.io/latest/guides/uninstall-crossplane/
diff --git a/spaces_versioned_docs/version-v1.15/howtos/self-hosted/use-argo.md b/spaces_versioned_docs/version-v1.15/howtos/self-hosted/use-argo.md
new file mode 100644
index 000000000..0862feb13
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.15/howtos/self-hosted/use-argo.md
@@ -0,0 +1,228 @@
+---
+title: Use ArgoCD Plugin
+sidebar_position: 15
+description: A guide for integrating Argo with control planes in a Space.
+
+aliases:
+  - /all-spaces/self-hosted-spaces/use-argo
+  - /deploy/disconnected-spaces/use-argo-flux
+  - /all-spaces/self-hosted-spaces/use-argo-flux
+  - /connect/use-argo
+---
+
+
+:::info API Version Information
+This guide covers the ArgoCD Plugin, a preview feature available in all supported versions (v1.9-v1.14+) for self-hosted deployments.
+
+For general GitOps guidance, see [Automation and GitOps Overview](/spaces/howtos/automation-and-gitops/overview/).
+:::
+
+:::important
+This feature is in preview and is off by default. To enable it, set `features.alpha.argocdPlugin.enabled=true` when installing Spaces:
+
+```bash
+up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
+  ...
+  --set "features.alpha.argocdPlugin.enabled=true"
+```
+:::
+
+Spaces provides an optional plugin to assist with integrating a control plane in a Space with Argo CD. You must enable the plugin for the entire Space at Spaces install or upgrade time. The plugin's job is to propagate the connection details of each control plane in a Space to Argo CD. By default, Upbound stores these connection details in a Kubernetes secret named after the control plane. To run Argo CD across multiple namespaces, Upbound recommends enabling the `features.alpha.argocdPlugin.useUIDFormatForCTPSecrets` flag to use a UID-based format for secret names to avoid conflicts.
+
+:::tip
+For general guidance on integrating Upbound with GitOps flows, see [GitOps with Control Planes][gitops-with-control-planes].
+:::
+
+## On cluster Argo CD
+
+If you are running Argo CD on the same cluster as the Space, run the following to enable the plugin:
+
+
+
+
+
+
+```bash {hl_lines="3-5"}
+up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
+  --set "account=${UPBOUND_ACCOUNT}" \
+  --set "features.alpha.argocdPlugin.enabled=true" \
+  --set "features.alpha.argocdPlugin.useUIDFormatForCTPSecrets=true" \
+  --set "features.alpha.argocdPlugin.target.secretNamespace=argocd"
+```
+
+
+
+
+
+```bash {hl_lines="6-8"}
+helm -n upbound-system upgrade --install spaces \
+  oci://xpkg.upbound.io/spaces-artifacts/spaces \
+  --version "${SPACES_VERSION}" \
+  --set "ingress.host=${SPACES_ROUTER_HOST}" \
+  --set "account=${UPBOUND_ACCOUNT}" \
+  --set "features.alpha.argocdPlugin.enabled=true" \
+  --set "features.alpha.argocdPlugin.useUIDFormatForCTPSecrets=true" \
+  --set "features.alpha.argocdPlugin.target.secretNamespace=argocd" \
+  --wait
+```
+
+
+
+
+
+
+The important flags are:
+
+- `features.alpha.argocdPlugin.enabled=true`
+- `features.alpha.argocdPlugin.useUIDFormatForCTPSecrets=true`
+- `features.alpha.argocdPlugin.target.secretNamespace=argocd`
+
+The first flag enables the feature, the second uses UID-based names for the propagated connection secrets, and the third indicates the namespace on the cluster where you installed Argo CD.
+
+Be sure to [configure Argo][configure-argo] after it's installed.
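+
+To verify the plugin is propagating connection details, you can list the secrets
+in the Argo CD namespace. This check is a sketch; it assumes the plugin
+registers control planes as standard Argo CD cluster secrets, which carry the
+`argocd.argoproj.io/secret-type: cluster` label:
+
+```bash
+kubectl get secrets -n argocd -l argocd.argoproj.io/secret-type=cluster
+```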
+
+## External cluster Argo CD
+
+If you are running Argo CD on an external cluster from where you installed your Space, you need to provide some extra flags:
+
+
+
+
+
+
+```bash {hl_lines="6-8"}
+up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
+  --set "account=${UPBOUND_ACCOUNT}" \
+  --set "features.alpha.argocdPlugin.enabled=true" \
+  --set "features.alpha.argocdPlugin.useUIDFormatForCTPSecrets=true" \
+  --set "features.alpha.argocdPlugin.target.secretNamespace=argocd" \
+  --set "features.alpha.argocdPlugin.target.externalCluster.enabled=true" \
+  --set "features.alpha.argocdPlugin.target.externalCluster.secret.name=my-argo-cluster" \
+  --set "features.alpha.argocdPlugin.target.externalCluster.secret.key=kubeconfig"
+```
+
+
+
+
+
+```bash {hl_lines="9-11"}
+helm -n upbound-system upgrade --install spaces \
+  oci://xpkg.upbound.io/spaces-artifacts/spaces \
+  --version "${SPACES_VERSION}" \
+  --set "ingress.host=${SPACES_ROUTER_HOST}" \
+  --set "account=${UPBOUND_ACCOUNT}" \
+  --set "features.alpha.argocdPlugin.enabled=true" \
+  --set "features.alpha.argocdPlugin.useUIDFormatForCTPSecrets=true" \
+  --set "features.alpha.argocdPlugin.target.secretNamespace=argocd" \
+  --set "features.alpha.argocdPlugin.target.externalCluster.enabled=true" \
+  --set "features.alpha.argocdPlugin.target.externalCluster.secret.name=my-argo-cluster" \
+  --set "features.alpha.argocdPlugin.target.externalCluster.secret.key=kubeconfig" \
+  --wait
+```
+
+
+
+
+
+The extra flags are:
+
+- `features.alpha.argocdPlugin.target.externalCluster.enabled=true`
+- `features.alpha.argocdPlugin.target.externalCluster.secret.name=my-argo-cluster`
+- `features.alpha.argocdPlugin.target.externalCluster.secret.key=kubeconfig`
+
+These flags tell the plugin (running in Spaces) where your Argo CD instance is. After you've done this at install time, you also need to create a `Secret` on the Spaces cluster, as shown in the sketch at the end of this section. This secret must contain a kubeconfig pointing to your Argo CD instance. The secret needs to be in the same namespace as the `spaces-controller`, which is `upbound-system`.
+
+Once you enable the plugin and configure it, the plugin automatically propagates connection details for your control planes to your Argo CD instance. You can then target the control plane and use Argo to sync Crossplane-related objects to it.
+
+Be sure to [configure Argo][configure-argo-1] after it's installed.
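+
+As a sketch, you can create the kubeconfig secret from a local file. The file
+name below is hypothetical; the secret name and key must match the
+`externalCluster.secret.name` and `externalCluster.secret.key` values you set
+above:
+
+```bash
+kubectl -n upbound-system create secret generic my-argo-cluster \
+  --from-file=kubeconfig=./argocd-cluster-kubeconfig.yaml
+```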
+
+## Configure Argo
+
+Argo's default configuration causes it to try to query for resource kinds that don't exist in control planes. You should configure Argo's [general configmap][general-configmap] to include the resource group/kinds which make sense in the context of control planes. For example, the concept of `nodes` isn't exposed in control planes.
+
+To configure Argo CD, connect to the cluster where you've installed it and edit the configmap:
+
+```bash
+kubectl edit configmap argocd-cm -n argocd
+```
+
+Adjust the resource inclusions and exclusions under the `data` field of the configmap:
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: argocd-cm
+  namespace: argocd
+data:
+  resource.exclusions: |
+    - apiGroups:
+      - "*"
+      kinds:
+      - "*"
+      clusters:
+      - "*"
+  resource.inclusions: |
+    - apiGroups:
+      - "*"
+      kinds:
+      - Provider
+      - Configuration
+      clusters:
+      - "*"
+```
+
+The preceding configuration causes Argo to exclude syncing **all** resource group/kinds for **all** control planes, except Crossplane `providers` and `configurations`. You're encouraged to adjust the `resource.inclusions` to include the types that make sense for your control plane, such as an `XRD` you've built with Crossplane. You're also encouraged to customize the `clusters` pattern to selectively apply these exclusions/inclusions to control planes (for example, `control-plane-prod-*`).
+
+## Control plane connection secrets
+
+To deploy control planes through Argo CD, you need to configure the `writeConnectionSecretToRef` field in your control plane spec. This field specifies where to store the control plane's `kubeconfig` and makes connection details available to Argo CD.
+
+### Basic configuration
+
+In your control plane manifest, include the `writeConnectionSecretToRef` field:
+
+```yaml
+apiVersion: spaces.upbound.io/v1beta1
+kind: ControlPlane
+metadata:
+  name: my-control-plane
+  namespace: my-control-plane-group
+spec:
+  writeConnectionSecretToRef:
+    name: kubeconfig-my-control-plane
+    namespace: my-control-plane-group
+  # ... other control plane configuration
+```
+
+### Parameters
+
+The `writeConnectionSecretToRef` field requires two parameters:
+
+- `name`: A unique name for the secret containing the kubeconfig (`kubeconfig-my-control-plane`)
+- `namespace`: The Kubernetes namespace where you store the secret, which must match the metadata namespace. The system copies it into the `argocd` namespace when you set the `features.alpha.argocdPlugin.target.secretNamespace=argocd` configuration parameter.
+
+Control plane labels automatically propagate to the connection secret, which allows you to use label selectors in Argo CD for automated discovery and management.
+
+This configuration enables Argo CD to automatically discover and manage resources on your control planes.
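+
+As an illustration, a minimal Argo CD `Application` that targets a propagated
+control plane might look like the following. The repository URL, path, and
+control plane name are placeholders:
+
+```yaml
+apiVersion: argoproj.io/v1alpha1
+kind: Application
+metadata:
+  name: my-control-plane-config
+  namespace: argocd
+spec:
+  project: default
+  source:
+    repoURL: https://github.com/example-org/control-plane-config.git
+    targetRevision: main
+    path: .
+  destination:
+    # Matches the cluster secret the plugin propagated for the control plane
+    name: my-control-plane
+  syncPolicy:
+    automated: {}
+```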
+
+
+[gitops-with-control-planes]: /spaces/howtos/cloud-spaces/gitops
+[configure-argo]: #configure-argo
+[configure-argo-1]: #configure-argo
+[general-configmap]: https://argo-cd.readthedocs.io/en/stable/operator-manual/argocd-cm-yaml/
diff --git a/spaces_versioned_docs/version-v1.15/howtos/self-hosted/workload-id/_category_.json b/spaces_versioned_docs/version-v1.15/howtos/self-hosted/workload-id/_category_.json
new file mode 100644
index 000000000..c5ecc93f6
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.15/howtos/self-hosted/workload-id/_category_.json
@@ -0,0 +1,11 @@
+{
+  "label": "Workload Identity Configuration",
+  "position": 2,
+  "collapsed": true,
+  "customProps": {
+    "plan": "business"
+  }
+
+}
+
+
diff --git a/spaces_versioned_docs/version-v1.15/howtos/self-hosted/workload-id/backup-restore-config.md b/spaces_versioned_docs/version-v1.15/howtos/self-hosted/workload-id/backup-restore-config.md
new file mode 100644
index 000000000..935ca69ec
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.15/howtos/self-hosted/workload-id/backup-restore-config.md
@@ -0,0 +1,384 @@
+---
+title: Backup and Restore Workload ID
+weight: 1
+description: Configure workload identity for Spaces Backup and Restore
+---
+import GlobalLanguageSelector, { CodeBlock } from '@site/src/components/GlobalLanguageSelector';
+
+
+
+
+
+
+
+Workload-identity authentication lets you use access policies to grant your
+self-hosted Space cluster access to your cloud providers. Workload identity
+authentication grants temporary AWS credentials to your Kubernetes pod based on
+a service account. Assigning IAM roles and service accounts allows the pod to
+assume the IAM role dynamically and much more securely than static credentials.
+
+This guide walks you through creating an IAM trust role policy and applying it
+to your EKS cluster to handle backup and restore storage.
+
+
+
+
+
+Workload-identity authentication lets you use access policies to grant your
+self-hosted Space cluster access to your cloud providers. Workload identity
+authentication grants temporary Azure credentials to your Kubernetes pod based on
+a service account. Assigning managed identities and service accounts allows the pod to
+authenticate with Azure resources dynamically and much more securely than static credentials.
+
+This guide walks you through creating a managed identity and federated credential for your AKS
+cluster to handle backup and restore storage.
+
+
+
+
+
+Workload-identity authentication lets you use access policies to grant your
+self-hosted Space cluster access to your cloud providers. Workload identity
+authentication grants temporary GCP credentials to your Kubernetes pod based on
+a service account. Assigning IAM roles and service accounts allows the pod to
+access cloud resources dynamically and much more securely than static credentials.
+
+This guide walks you through configuring workload identity for your GKE
+cluster to handle backup and restore storage.
+
+
+
+## Prerequisites
+
+
+To set up workload identity, you'll need:
+
+
+- A self-hosted Space cluster
+- Administrator access in your cloud provider
+- Helm and `kubectl`
+
+## About the backup and restore component
+
+The `mxp-controller` component handles backup and restore workloads. It needs to
+access your cloud storage to store and retrieve backups. By default, this
+component runs in each control plane's host namespace.
+
+## Configuration
+
+
+
+Upbound supports workload-identity configurations in AWS with IAM Roles for
+Service Accounts and EKS pod identity association.
+
+#### IAM Roles for Service Accounts (IRSA)
+
+With IRSA, you can associate a Kubernetes service account in an EKS cluster with
+an AWS IAM role. Upbound authenticates workloads with that service account as
+the IAM role using temporary credentials instead of static role credentials.
+IRSA relies on the AWS STS `AssumeRoleWithWebIdentity` call to exchange OIDC ID tokens for
+the IAM role's temporary credentials. IRSA uses the `eks.amazonaws.com/role-arn`
+annotation to link the service account and the IAM role.
+
+First, create an IAM role with appropriate permissions to access your S3 bucket:
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Action": [
+        "s3:GetObject",
+        "s3:PutObject",
+        "s3:ListBucket",
+        "s3:DeleteObject"
+      ],
+      "Resource": [
+        "arn:aws:s3:::${YOUR_BACKUP_BUCKET}",
+        "arn:aws:s3:::${YOUR_BACKUP_BUCKET}/*"
+      ]
+    }
+  ]
+}
+```
+
+Next, ensure your EKS cluster has an OIDC identity provider:
+
+```shell
+eksctl utils associate-iam-oidc-provider --cluster ${YOUR_CLUSTER_NAME} --approve
+```
+
+Configure the IAM role trust policy with the namespace for each
+provisioned control plane.
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Principal": {
+        "Federated": "arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:oidc-provider/${YOUR_OIDC_PROVIDER}"
+      },
+      "Action": "sts:AssumeRoleWithWebIdentity",
+      "Condition": {
+        "StringEquals": {
+          "${YOUR_OIDC_PROVIDER}:aud": "sts.amazonaws.com",
+          "${YOUR_OIDC_PROVIDER}:sub": "system:serviceaccount:${YOUR_NAMESPACE}:mxp-controller"
+        }
+      }
+    }
+  ]
+}
+```
+
+In your control plane, pass the `--set` flag with the Spaces Helm chart
+parameters for the Backup and Restore component:
+
+```shell
+--set controlPlanes.mxpController.serviceAccount.annotations."eks\.amazonaws\.com/role-arn"="${SPACES_BR_IAM_ROLE_ARN}"
+```
+
+This command allows the backup and restore component to authenticate with your
+dedicated IAM role in your EKS cluster environment.
+
+#### EKS pod identities
+
+Upbound also supports EKS Pod Identity configuration. EKS Pod Identities allow
+you to create a pod identity association with your Kubernetes namespace, a
+service account, and an IAM role, which allows the EKS control plane to
+automatically handle the credential exchange.
+
+First, create an IAM role with appropriate permissions to access your S3 bucket:
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Action": [
+        "s3:GetObject",
+        "s3:PutObject",
+        "s3:ListBucket",
+        "s3:DeleteObject"
+      ],
+      "Resource": [
+        "arn:aws:s3:::${YOUR_BACKUP_BUCKET}",
+        "arn:aws:s3:::${YOUR_BACKUP_BUCKET}/*"
+      ]
+    }
+  ]
+}
+```
+
+When you install or upgrade your Space with Helm, add the backup/restore values:
+
+```shell
+helm upgrade spaces spaces-helm-chart \
+  --set "billing.enabled=true" \
+  --set "backup.enabled=true" \
+  --set "backup.storage.provider=aws" \
+  --set "backup.storage.aws.region=${YOUR_AWS_REGION}" \
+  --set "backup.storage.aws.bucket=${YOUR_BACKUP_BUCKET}"
+```
+
+After Upbound provisions your control plane, create a Pod Identity Association
+with the `aws` CLI:
+
+```shell
+aws eks create-pod-identity-association \
+  --cluster-name ${YOUR_CLUSTER_NAME} \
+  --namespace ${YOUR_CONTROL_PLANE_NAMESPACE} \
+  --service-account mxp-controller \
+  --role-arn arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:role/backup-restore-role
+```
+
+
+
+
+
+Upbound supports workload-identity configurations in Azure with Azure's built-in
+workload identity feature.
+
+#### Prepare your cluster
+
+First, enable the OIDC issuer and workload identity in your AKS cluster:
+
+```shell
+az aks update --resource-group ${YOUR_RESOURCE_GROUP} --name ${YOUR_AKS_CLUSTER_NAME} --enable-oidc-issuer --enable-workload-identity
+```
+
+Next, find and store the OIDC issuer URL as an environment variable:
+
+```shell
+export AKS_OIDC_ISSUER="$(az aks show --name ${YOUR_AKS_CLUSTER_NAME} --resource-group ${YOUR_RESOURCE_GROUP} --query "oidcIssuerProfile.issuerUrl" --output tsv)"
+```
+
+#### Create a User-Assigned Managed Identity
+
+Create a new managed identity to associate with the backup and restore component:
+
+```shell
+az identity create --name backup-restore-identity --resource-group ${YOUR_RESOURCE_GROUP} --location ${YOUR_LOCATION}
+```
+
+Retrieve the client ID and store it as an environment variable:
+
+```shell
+export USER_ASSIGNED_CLIENT_ID="$(az identity show --name backup-restore-identity --resource-group ${YOUR_RESOURCE_GROUP} --query clientId -otsv)"
+```
+
+Grant the managed identity you created access to your Azure Storage account:
+
+```shell
+az role assignment create \
+  --role "Storage Blob Data Contributor" \
+  --assignee ${USER_ASSIGNED_CLIENT_ID} \
+  --scope /subscriptions/${YOUR_SUBSCRIPTION_ID}/resourceGroups/${YOUR_RESOURCE_GROUP}/providers/Microsoft.Storage/storageAccounts/${YOUR_STORAGE_ACCOUNT}
+```
+
+#### Apply the managed identity role
+
+In your control plane, pass the `--set` flag with the Spaces Helm chart
+parameters for the backup and restore component:
+
+```shell
+--set controlPlanes.mxpController.serviceAccount.annotations."azure\.workload\.identity/client-id"="${USER_ASSIGNED_CLIENT_ID}"
+--set controlPlanes.mxpController.pod.customLabels."azure\.workload\.identity/use"="true"
+```
+
+#### Create a Federated Identity credential
+
+```shell
+az identity federated-credential create \
+  --name backup-restore-federated-identity \
+  --identity-name backup-restore-identity \
+  --resource-group ${YOUR_RESOURCE_GROUP} \
+  --issuer ${AKS_OIDC_ISSUER} \
+  --subject system:serviceaccount:${YOUR_CONTROL_PLANE_NAMESPACE}:mxp-controller
+```
+
+
+
+
+
+Upbound supports workload-identity configurations in GCP with IAM principal
+identifiers and service account impersonation.
+
+#### Prepare your cluster
+
+First, enable Workload Identity Federation on your GKE cluster:
+
+```shell
+gcloud container clusters update ${YOUR_CLUSTER_NAME} \
+  --workload-pool=${YOUR_PROJECT_ID}.svc.id.goog \
+  --region=${YOUR_REGION}
+```
+
+#### Create a Google Service Account
+
+Create a service account for the backup and restore component:
+
+```shell
+gcloud iam service-accounts create backup-restore-sa \
+  --display-name "Backup Restore Service Account" \
+  --project ${YOUR_PROJECT_ID}
+```
+
+Grant the service account access to your Google Cloud Storage bucket:
+
+```shell
+gcloud projects add-iam-policy-binding ${YOUR_PROJECT_ID} \
+  --member "serviceAccount:backup-restore-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com" \
+  --role "roles/storage.objectAdmin"
+```
+
+#### Configure Workload Identity
+
+Create an IAM binding to grant the Kubernetes service account access to the Google service account:
+
+```shell
+gcloud iam service-accounts add-iam-policy-binding \
+  backup-restore-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com \
+  --role roles/iam.workloadIdentityUser \
+  --member "serviceAccount:${YOUR_PROJECT_ID}.svc.id.goog[${YOUR_CONTROL_PLANE_NAMESPACE}/mxp-controller]"
+```
+
+#### Apply the service account configuration
+
+In your control plane, pass the `--set` flag with the Spaces Helm chart
+parameters for the backup and restore component:
+
+```shell
+--set controlPlanes.mxpController.serviceAccount.annotations."iam\.gke\.io/gcp-service-account"="backup-restore-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com"
+```
+
+
+
+## Verify your configuration
+
+After you apply the configuration, use `kubectl` to verify the service account
+has the correct annotation:
+
+```shell
+kubectl get serviceaccount mxp-controller -n ${YOUR_CONTROL_PLANE_NAMESPACE} -o yaml
+```
+
+Verify the `mxp-controller` pod is running:
+
+```shell
+kubectl get pods -n ${YOUR_CONTROL_PLANE_NAMESPACE} | grep mxp-controller
+```
+
+## Restart workload
+
+You must manually restart a workload's pod when you add the workload identity annotations to the running pod's service account.
+
+
+
+This restart enables the EKS pod identity webhook to inject the necessary
+environment for using IRSA.
+
+
+
+
+
+This restart enables the workload identity webhook to inject the necessary
+environment for using Azure workload identity.
+
+
+
+
+
+This restart enables the workload identity webhook to inject the necessary
+environment for using GCP workload identity.
+
+
+
+```shell
+kubectl rollout restart deployment mxp-controller -n ${YOUR_CONTROL_PLANE_NAMESPACE}
+```
+
+## Use cases
+
+
+Configuring backup and restore with workload identity eliminates the need for
+static credentials in your cluster and the overhead of credential rotation.
+These benefits are helpful in:
+
+* Disaster recovery scenarios
+* Control plane migration
+* Compliance requirements
+* Rollbacks after unsuccessful upgrades
+
+## Next steps
+
+Now that you have a workload identity configured for the backup and restore
+component, visit the [Backup Configuration][backup-restore-guide] documentation.
+
+Other workload identity guides are:
+* [Billing][billing]
+* [Shared Secrets][secrets]
+
+[backup-restore-guide]: /spaces/howtos/backup-and-restore
+[billing]: /spaces/howtos/self-hosted/workload-id/billing-config
+[secrets]: /spaces/howtos/self-hosted/workload-id/eso-config
diff --git a/spaces_versioned_docs/version-v1.15/howtos/self-hosted/workload-id/billing-config.md b/spaces_versioned_docs/version-v1.15/howtos/self-hosted/workload-id/billing-config.md
new file mode 100644
index 000000000..323a6122f
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.15/howtos/self-hosted/workload-id/billing-config.md
@@ -0,0 +1,454 @@
+---
+title: Billing Workload ID
+weight: 1
+description: Configure workload identity for Spaces Billing
+---
+import GlobalLanguageSelector, { CodeBlock } from '@site/src/components/GlobalLanguageSelector';
+
+
+
+
+
+
+
+Workload-identity authentication lets you use access policies to grant your
+self-hosted Space cluster access to your cloud providers. Workload identity
+authentication grants temporary AWS credentials to your Kubernetes pod based on
+a service account. Assigning IAM roles and service accounts allows the pod to
+assume the IAM role dynamically and much more securely than static credentials.
+
+This guide walks you through creating an IAM trust role policy and applying it to your EKS
+cluster for billing in your Space cluster.
+
+
+
+
+
+Workload-identity authentication lets you use access policies to grant your
+self-hosted Space cluster access to your cloud providers. Workload identity
+authentication grants temporary Azure credentials to your Kubernetes pod based on
+a service account. Assigning managed identities and service accounts allows the pod to
+authenticate with Azure resources dynamically and much more securely than static credentials.
+
+This guide walks you through creating a managed identity and federated credential for your AKS
+cluster for billing in your Space cluster.
+
+
+
+
+
+Workload-identity authentication lets you use access policies to grant your
+self-hosted Space cluster access to your cloud providers. Workload identity
+authentication grants temporary GCP credentials to your Kubernetes pod based on
+a service account. Assigning IAM roles and service accounts allows the pod to
+access cloud resources dynamically and much more securely than static
+credentials.
+
+This guide walks you through configuring workload identity for your GKE
+cluster's billing component.
+
+
+
+## Prerequisites
+
+
+To set up workload identity, you'll need:
+
+
+- A self-hosted Space cluster
+- Administrator access in your cloud provider
+- Helm and `kubectl`
+
+## About the billing component
+
+The `vector.dev` component handles billing metrics collection in Spaces. It
+stores account data in your cloud storage. By default, this component runs in
+each control plane's host namespace.
+
+## Configuration
+
+
+
+Upbound supports workload-identity configurations in AWS with IAM Roles for
+Service Accounts and EKS pod identity association.
+
+#### IAM Roles for Service Accounts (IRSA)
+
+With IRSA, you can associate a Kubernetes service account in an EKS cluster with
+an AWS IAM role. Upbound authenticates workloads with that service account as
+the IAM role using temporary credentials instead of static role credentials.
+IRSA relies on the AWS STS `AssumeRoleWithWebIdentity` call to exchange OIDC ID tokens for
+the IAM role's temporary credentials. IRSA uses the `eks.amazonaws.com/role-arn`
+annotation to link the service account and the IAM role.
+
+**Create an IAM role and trust policy**
+
+First, create an IAM role with appropriate permissions to access your S3 bucket:
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Action": [
+        "s3:GetObject",
+        "s3:PutObject",
+        "s3:ListBucket",
+        "s3:DeleteObject"
+      ],
+      "Resource": [
+        "arn:aws:s3:::${YOUR_BILLING_BUCKET}",
+        "arn:aws:s3:::${YOUR_BILLING_BUCKET}/*"
+      ]
+    }
+  ]
+}
```

+
+You must configure the IAM role trust policy with the exact match for each
+provisioned control plane. An example of a trust policy for a single control
+plane is below:
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Principal": {
+        "Federated": "arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:oidc-provider/${YOUR_OIDC_PROVIDER}"
+      },
+      "Action": "sts:AssumeRoleWithWebIdentity",
+      "Condition": {
+        "StringEquals": {
+          "${YOUR_OIDC_PROVIDER}:aud": "sts.amazonaws.com",
+          "${YOUR_OIDC_PROVIDER}:sub": "system:serviceaccount:${YOUR_NAMESPACE}:vector"
+        }
+      }
+    }
+  ]
+}
+```
+
+**Configure the EKS OIDC provider**
+
+Next, ensure your EKS cluster has an OIDC identity provider:
+
+```shell
+eksctl utils associate-iam-oidc-provider --cluster ${YOUR_CLUSTER_NAME} --approve
+```
+
+**Apply the IAM role**
+
+In your control plane, pass the `--set` flag with the Spaces Helm chart
+parameters for the Billing component:
+
+```shell
+--set "billing.enabled=true"
+--set "billing.storage.provider=aws"
+--set "billing.storage.aws.region=${YOUR_AWS_REGION}"
+--set "billing.storage.aws.bucket=${YOUR_BILLING_BUCKET}"
+--set "billing.storage.secretRef.name="
+--set controlPlanes.vector.serviceAccount.customAnnotations."eks\.amazonaws\.com/role-arn"="arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:role/${YOUR_BILLING_ROLE_NAME}"
+```
+
+:::important
+You **must** set the `billing.storage.secretRef.name` to an empty string to
+enable workload identity for the billing component.
+:::
+
+#### EKS pod identities
+
+Upbound also supports EKS Pod Identity configuration. EKS Pod Identities allow
+you to create a pod identity association with your Kubernetes namespace, a
+service account, and an IAM role, which allows the EKS control plane to
+automatically handle the credential exchange.
+
+**Create an IAM role**
+
+First, create an IAM role with appropriate permissions to access your S3 bucket:
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Action": [
+        "s3:GetObject",
+        "s3:PutObject",
+        "s3:ListBucket"
+      ],
+      "Resource": [
+        "arn:aws:s3:::${YOUR_BILLING_BUCKET}",
+        "arn:aws:s3:::${YOUR_BILLING_BUCKET}/*"
+      ]
+    }
+  ]
+}
+```
+
+**Configure your Space with Helm**
+
+When you install or upgrade your Space with Helm, add the billing values:
+
+```shell
+helm upgrade spaces spaces-helm-chart \
+  --set "billing.enabled=true" \
+  --set "billing.storage.provider=aws" \
+  --set "billing.storage.aws.region=${YOUR_AWS_REGION}" \
+  --set "billing.storage.aws.bucket=${YOUR_BILLING_BUCKET}" \
+  --set "billing.storage.secretRef.name="
+```
+
+**Create a Pod Identity Association**
+
+After Upbound provisions your control plane, create a Pod Identity Association
+with the `aws` CLI:
+
+```shell
+aws eks create-pod-identity-association \
+  --cluster-name ${YOUR_CLUSTER_NAME} \
+  --namespace ${YOUR_CONTROL_PLANE_NAMESPACE} \
+  --service-account vector \
+  --role-arn arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:role/${YOUR_BILLING_ROLE_NAME}
+```
+
+
+
+
+
+Upbound supports workload-identity configurations in Azure with Azure's built-in
+workload identity feature.
+
+First, enable the OIDC issuer and workload identity in your AKS cluster:
+
+```shell
+az aks update --resource-group ${YOUR_RESOURCE_GROUP} --name ${YOUR_AKS_CLUSTER_NAME} --enable-oidc-issuer --enable-workload-identity
+```
+
+Next, find and store the OIDC issuer URL as an environment variable:
+
+```shell
+export AKS_OIDC_ISSUER="$(az aks show --name ${YOUR_AKS_CLUSTER_NAME} --resource-group ${YOUR_RESOURCE_GROUP} --query "oidcIssuerProfile.issuerUrl" --output tsv)"
+```
+
+Create a new managed identity to associate with the billing component:
+
+```shell
+az identity create --name billing-identity --resource-group ${YOUR_RESOURCE_GROUP} --location ${YOUR_LOCATION}
+```
+
+Retrieve the client ID and store it as an environment variable:
+
+```shell
+export USER_ASSIGNED_CLIENT_ID="$(az identity show --name billing-identity --resource-group ${YOUR_RESOURCE_GROUP} --query clientId -otsv)"
+```
+
+Grant the managed identity you created access to your Azure Storage account:
+
+```shell
+az role assignment create --role "Storage Blob Data Contributor" --assignee $USER_ASSIGNED_CLIENT_ID --scope /subscriptions/${YOUR_SUBSCRIPTION_ID}/resourceGroups/${YOUR_RESOURCE_GROUP}/providers/Microsoft.Storage/storageAccounts/${YOUR_STORAGE_ACCOUNT}
+```
+
+In your control plane, pass the `--set` flag with the Spaces Helm chart
+parameters for the billing component:
+
+```shell
+--set "billing.enabled=true"
+--set "billing.storage.provider=azure"
+--set "billing.storage.azure.storageAccount=${SPACES_BILLING_STORAGE_ACCOUNT}"
+--set "billing.storage.azure.container=${SPACES_BILLING_STORAGE_CONTAINER}"
+--set "billing.storage.secretRef.name="
+--set controlPlanes.vector.serviceAccount.customAnnotations."azure\.workload\.identity/client-id"="${SPACES_BILLING_APP_ID}"
+--set controlPlanes.vector.pod.customLabels."azure\.workload\.identity/use"="true"
+```
+
+Create a federated credential to establish trust between the managed identity
+and your AKS OIDC provider:
+
+```shell
+az identity federated-credential create \
+  --name billing-federated-identity \
+  --identity-name billing-identity \
+  --resource-group ${YOUR_RESOURCE_GROUP} \
+  --issuer ${AKS_OIDC_ISSUER} \
+  --subject system:serviceaccount:${YOUR_CONTROL_PLANE_NAMESPACE}:vector
+```
+
+
+
+
+
+Upbound supports workload-identity configurations in GCP with IAM principal
+identifiers or service account impersonation.
+
+#### IAM principal identifiers
+
+IAM principal identifiers allow you to grant permissions directly to
+Kubernetes service accounts without additional annotation. Upbound recommends
+this approach for ease-of-use and flexibility.
+
+First, enable Workload Identity Federation on your GKE cluster:
+
+```shell
+gcloud container clusters update ${YOUR_CLUSTER_NAME} \
+  --workload-pool=${YOUR_PROJECT_ID}.svc.id.goog \
+  --region=${YOUR_REGION}
+```
+
+Next, configure your Spaces installation with the Spaces Helm chart parameters:
+
+```shell
+--set "billing.enabled=true"
+--set "billing.storage.provider=gcp"
+--set "billing.storage.gcp.bucket=${YOUR_BILLING_BUCKET}"
+--set "billing.storage.secretRef.name="
+```
+
+:::important
+You **must** set the `billing.storage.secretRef.name` to an empty string to
+enable workload identity for the billing component.
+:::
+
+Grant the necessary permissions to your Kubernetes service account:
+
+```shell
+gcloud projects add-iam-policy-binding ${YOUR_PROJECT_ID} \
+  --member="principalSet://iam.googleapis.com/projects/${YOUR_PROJECT_NUMBER}/locations/global/workloadIdentityPools/${YOUR_PROJECT_ID}.svc.id.goog/attribute.kubernetes_namespace/${YOUR_CONTROL_PLANE_NAMESPACE}/attribute.kubernetes_service_account/vector" \
+  --role="roles/storage.objectAdmin"
+```
+
+Enable uniform bucket-level access on your storage bucket:
+
+```shell
+gcloud storage buckets update gs://${YOUR_BILLING_BUCKET} --uniform-bucket-level-access
+```
+
+#### Service account impersonation
+
+Service account impersonation allows you to link a Kubernetes service account to
+a GCP service account. The Kubernetes service account assumes the permissions of
+the GCP service account you specify.
+
+Enable Workload Identity Federation on your GKE cluster:
+
+```shell
+gcloud container clusters update ${YOUR_CLUSTER_NAME} \
+  --workload-pool=${YOUR_PROJECT_ID}.svc.id.goog \
+  --region=${YOUR_REGION}
+```
+
+Next, create a dedicated service account for your billing operations:
+
+```shell
+gcloud iam service-accounts create billing-sa \
+  --project=${YOUR_PROJECT_ID}
+```
+
+Grant storage permissions to the service account you created:
+
+```shell
+gcloud projects add-iam-policy-binding ${YOUR_PROJECT_ID} \
+  --member="serviceAccount:billing-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com" \
+  --role="roles/storage.objectAdmin"
+```
+
+Link the Kubernetes service account to the GCP service account:
+
+```shell
+gcloud iam service-accounts add-iam-policy-binding \
+  billing-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com \
+  --role="roles/iam.workloadIdentityUser" \
+  --member="serviceAccount:${YOUR_PROJECT_ID}.svc.id.goog[${YOUR_CONTROL_PLANE_NAMESPACE}/vector]"
+```
+
+In your control plane, pass the `--set` flag with the Spaces Helm chart
+parameters for the billing component:
+
+```shell
+--set "billing.enabled=true"
+--set "billing.storage.provider=gcp"
+--set "billing.storage.gcp.bucket=${YOUR_BILLING_BUCKET}"
+--set "billing.storage.secretRef.name="
+--set controlPlanes.vector.serviceAccount.customAnnotations."iam\.gke\.io/gcp-service-account"="billing-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com"
+```
+
+
+
+## Verify your configuration
+
+After you apply the configuration, use `kubectl` to verify the service account
+has the correct annotation:
+
+```shell
+kubectl get serviceaccount vector -n ${YOUR_CONTROL_PLANE_NAMESPACE} -o yaml
+```
+
+Verify the `vector` pod is running:
+
+```shell
+kubectl get pods -n ${YOUR_CONTROL_PLANE_NAMESPACE} | grep vector
+```
+
+## Restart workload
+
+
+
+You must manually restart a workload's pod when you add the
+`eks.amazonaws.com/role-arn` annotation to the running pod's service
+account.
+
+This restart enables the EKS pod identity webhook to inject the necessary
+environment for using IRSA.
+
+
+
+
+
+You must manually restart a workload's pod when you add the workload identity annotations to the running pod's service account.
+
+This restart enables the workload identity webhook to inject the necessary
+environment for using Azure workload identity.
+
+
+
+
+
+GCP workload identity doesn't require pod restarts after configuration changes.
If you do need to restart the workload, use the `kubectl` command to force the
+component restart:
+
+
+
+```shell
+kubectl rollout restart deployment vector -n ${YOUR_CONTROL_PLANE_NAMESPACE}
+```
+
+
+## Use cases
+
+
+Using workload identity authentication for billing eliminates the need for static
+credentials in your cluster as well as the overhead of credential rotation.
+These benefits are helpful in:
+
+* Resource usage tracking across teams/projects
+* Cost allocation for multi-tenant environments
+* Financial auditing requirements
+* Capacity billing and resource optimization
+* Automated billing workflows
+
+## Next steps
+
+Now that you have workload identity configured for the billing component, visit
+the [Billing guide][billing-guide] for more information.
+
+Other workload identity guides are:
+* [Backup and restore][backuprestore]
+* [Shared Secrets][secrets]
+
+[billing-guide]: /spaces/howtos/self-hosted/billing
+[backuprestore]: /spaces/howtos/self-hosted/workload-id/backup-restore-config
+[secrets]: /spaces/howtos/self-hosted/workload-id/eso-config
diff --git a/spaces_versioned_docs/version-v1.15/howtos/self-hosted/workload-id/eso-config.md b/spaces_versioned_docs/version-v1.15/howtos/self-hosted/workload-id/eso-config.md
new file mode 100644
index 000000000..c1418c171
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.15/howtos/self-hosted/workload-id/eso-config.md
@@ -0,0 +1,503 @@
+---
+title: Shared Secrets Workload ID
+weight: 1
+description: Configure workload identity for Spaces Shared Secrets
+---
+import GlobalLanguageSelector, { CodeBlock } from '@site/src/components/GlobalLanguageSelector';
+
+
+
+
+
+
+
+Workload-identity authentication lets you use access policies to grant your
+self-hosted Space cluster access to your cloud providers. Workload identity
+authentication grants temporary AWS credentials to your Kubernetes pod based on
+a service account. Assigning IAM roles and service accounts allows the pod to
+assume the IAM role dynamically and much more securely than static credentials.
+
+This guide walks you through creating an IAM trust role policy and applying it to your EKS
+cluster for secret sharing with Kubernetes.
+
+
+
+
+
+Workload-identity authentication lets you use access policies to grant your
+self-hosted Space cluster access to your cloud providers. Workload identity
+authentication grants temporary Azure credentials to your Kubernetes pod based on
+a service account. Assigning managed identities and service accounts allows the pod to
+authenticate with Azure resources dynamically and much more securely than static credentials.
+
+This guide walks you through creating a managed identity and federated credential for your AKS
+cluster for shared secrets in your Space cluster.
+
+
+
+
+
+Workload-identity authentication lets you use access policies to grant your
+self-hosted Space cluster access to your cloud providers. Workload identity
+authentication grants temporary GCP credentials to your Kubernetes pod based on
+a service account. Assigning IAM roles and service accounts allows the pod to
+access cloud resources dynamically and much more securely than static
+credentials.
+
+This guide walks you through configuring workload identity for your GKE
+cluster's Shared Secrets component.
+
+
+
+## Prerequisites
+
+
+To set up workload identity, you'll need:
+
+
+- A self-hosted Space cluster
+- Administrator access in your cloud provider
+- Helm and `kubectl`
+
+
+## About the Shared Secrets component
+
+
+
+
+The External Secrets Operator (ESO) runs in each control plane's host namespace as `external-secrets-controller`. It needs to access
+your external secrets management service, such as AWS Secrets Manager.
+
+To configure your shared secrets workflow controller, you must:
+
+* Annotate the Kubernetes service account to associate it with a cloud-side
+  principal (such as an IAM role, service account, or enterprise application). The workload must then
+  use this service account.
+* Label the workload (pod) to allow the injection of a temporary credential set,
+  enabling authentication.
+
+
+
+
+
+The External Secrets Operator (ESO) component runs in each control plane's host
+namespace as `external-secrets-controller`. It synchronizes secrets from
+external APIs into Kubernetes secrets. Shared secrets allow you to manage
+credentials outside your Kubernetes cluster while making them available to your
+applications.
+
+
+
+
+
+The External Secrets Operator (ESO) component runs in each control plane's host
+namespace as `external-secrets-controller`. It synchronizes secrets from
+external APIs into Kubernetes secrets. Shared secrets allow you to manage
+credentials outside your Kubernetes cluster while making them available to your
+applications.
+
+
+
+## Configuration
+
+
+
+Upbound supports workload-identity configurations in AWS with IAM Roles for
+Service Accounts or EKS pod identity association.
+
+#### IAM Roles for Service Accounts (IRSA)
+
+With IRSA, you can associate a Kubernetes service account in an EKS cluster with
+an AWS IAM role. Upbound authenticates workloads with that service account as
+the IAM role using temporary credentials instead of static role credentials.
+IRSA relies on the AWS STS `AssumeRoleWithWebIdentity` call to exchange OIDC ID tokens for
+the IAM role's temporary credentials. IRSA uses the `eks.amazonaws.com/role-arn`
+annotation to link the service account and the IAM role.
+
+**Create an IAM role and trust policy**
+
+First, create an IAM role with appropriate permissions to access AWS Secrets Manager:
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Action": [
+        "secretsmanager:GetSecretValue",
+        "secretsmanager:DescribeSecret",
+        "ssm:GetParameter"
+      ],
+      "Resource": [
+        "arn:aws:secretsmanager:${YOUR_REGION}:${YOUR_AWS_ACCOUNT_ID}:secret:${YOUR_SECRET_PREFIX}*",
+        "arn:aws:ssm:${YOUR_REGION}:${YOUR_AWS_ACCOUNT_ID}:parameter/${YOUR_PARAMETER_PREFIX}*"
+      ]
+    }
+  ]
+}
+```
+
+You must configure the IAM role trust policy with the exact match for each
+provisioned control plane.
+An example of a trust policy for a single control plane is below:
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Principal": {
+        "Federated": "arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:oidc-provider/${YOUR_OIDC_PROVIDER}"
+      },
+      "Action": "sts:AssumeRoleWithWebIdentity",
+      "Condition": {
+        "StringEquals": {
+          "${YOUR_OIDC_PROVIDER}:aud": "sts.amazonaws.com"
+        },
+        "StringLike": {
+          "${YOUR_OIDC_PROVIDER}:sub": "system:serviceaccount:*:external-secrets-controller"
+        }
+      }
+    }
+  ]
+}
+```
+
+**Configure the EKS OIDC provider**
+
+Next, ensure your EKS cluster has an OIDC identity provider:
+
+```shell
+eksctl utils associate-iam-oidc-provider --cluster ${YOUR_CLUSTER_NAME} --approve
+```
+
+**Apply the IAM role**
+
+When you install or upgrade your Space with Helm, pass the `--set` flag with the
+Spaces Helm chart parameters for the shared secrets component:
+
+```shell
+--set controlPlanes.sharedSecrets.serviceAccount.customAnnotations."eks\.amazonaws\.com/role-arn"="arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:role/${YOUR_ESO_ROLE_NAME}"
+```
+
+This setting allows the shared secrets component to authenticate with your
+dedicated IAM role in your EKS cluster environment.
+
+#### EKS pod identities
+
+Upbound also supports EKS Pod Identity configuration. EKS Pod Identities allow
+you to create a pod identity association between your Kubernetes namespace, a
+service account, and an IAM role, letting the EKS control plane handle the
+credential exchange automatically.
+
+**Create an IAM role**
+
+First, create an IAM role with appropriate permissions to access AWS Secrets
+Manager. The role's trust policy must also allow the `pods.eks.amazonaws.com`
+service principal to assume the role:
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Action": [
+        "secretsmanager:GetSecretValue",
+        "secretsmanager:DescribeSecret",
+        "ssm:GetParameter"
+      ],
+      "Resource": [
+        "arn:aws:secretsmanager:${YOUR_AWS_REGION}:${YOUR_AWS_ACCOUNT_ID}:secret:${YOUR_SECRET_PREFIX}*",
+        "arn:aws:ssm:${YOUR_AWS_REGION}:${YOUR_AWS_ACCOUNT_ID}:parameter/${YOUR_PARAMETER_PREFIX}*"
+      ]
+    }
+  ]
+}
+```
+
+**Configure your Space with Helm**
+
+When you install or upgrade your Space with Helm, add the shared secrets value:
+
+```shell
+helm upgrade spaces spaces-helm-chart \
+  --set "sharedSecrets.enabled=true"
+```
+
+**Create a Pod Identity Association**
+
+After Upbound provisions your control plane, create a Pod Identity Association
+with the `aws` CLI:
+
+```shell
+aws eks create-pod-identity-association \
+  --cluster-name ${YOUR_CLUSTER_NAME} \
+  --namespace ${YOUR_CONTROL_PLANE_NAMESPACE} \
+  --service-account external-secrets-controller \
+  --role-arn arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:role/${YOUR_ROLE_NAME}
+```
+
+
+Upbound supports workload-identity configurations in Azure with Azure's built-in
+workload identity feature.
+
+First, enable the OIDC issuer and workload identity in your AKS cluster:
+
+```shell
+az aks update --resource-group ${YOUR_RESOURCE_GROUP} --name ${YOUR_AKS_CLUSTER_NAME} --enable-oidc-issuer --enable-workload-identity
+```
+
+Next, find and store the OIDC issuer URL as an environment variable:
+
+```shell
+export AKS_OIDC_ISSUER="$(az aks show --name ${YOUR_AKS_CLUSTER_NAME} --resource-group ${YOUR_RESOURCE_GROUP} --query "oidcIssuerProfile.issuerUrl" --output tsv)"
+```
+
+Create a new managed identity to associate with the shared secrets component:
+
+```shell
+az identity create --name secrets-identity --resource-group ${YOUR_RESOURCE_GROUP} --location ${YOUR_LOCATION}
+```
+
+Retrieve the client ID and store it as an environment variable:
+
+```shell
+export USER_ASSIGNED_CLIENT_ID="$(az identity show --name secrets-identity --resource-group ${YOUR_RESOURCE_GROUP} --query clientId -otsv)"
+```
+
+Grant the managed identity you created access to your Azure Key Vault:
+
+```shell
+az keyvault set-policy --name ${YOUR_KEY_VAULT_NAME} \
+  --resource-group ${YOUR_RESOURCE_GROUP} \
+  --object-id $(az identity show --name secrets-identity --resource-group ${YOUR_RESOURCE_GROUP} --query principalId -otsv) \
+  --secret-permissions get list
+```
+
+When you install or upgrade your Space with Helm, pass the `--set` flag with the
+Spaces Helm chart parameters for the shared secrets component:
+
+```shell
+--set controlPlanes.sharedSecrets.serviceAccount.customAnnotations."azure\.workload\.identity/client-id"="${USER_ASSIGNED_CLIENT_ID}"
+--set controlPlanes.sharedSecrets.pod.customLabels."azure\.workload\.identity/use"="true"
+```
+
+Next, create a federated credential to establish trust between the managed identity
+and your AKS OIDC provider:
+
+```shell
+az identity federated-credential create \
+  --name secrets-federated-identity \
+  --identity-name secrets-identity \
+  --resource-group ${YOUR_RESOURCE_GROUP} \
+  --issuer ${AKS_OIDC_ISSUER} \
+  --subject system:serviceaccount:${YOUR_CONTROL_PLANE_NAMESPACE}:external-secrets-controller
+```
+
+
+Upbound supports workload-identity configurations in GCP with IAM principal
+identifiers or service account impersonation.
+
+#### IAM principal identifiers
+
+IAM principal identifiers allow you to grant permissions directly to
+Kubernetes service accounts without additional annotations. Upbound recommends
+this approach for its ease of use and flexibility.
+
+First, enable Workload Identity Federation on your GKE cluster:
+
+```shell
+gcloud container clusters update ${YOUR_CLUSTER_NAME} \
+  --workload-pool=${YOUR_PROJECT_ID}.svc.id.goog \
+  --region=${YOUR_REGION}
+```
+
+Next, grant the necessary permissions to your Kubernetes service account:
+
+```shell
+gcloud projects add-iam-policy-binding ${YOUR_PROJECT_ID} \
+  --member="principalSet://iam.googleapis.com/projects/${YOUR_PROJECT_NUMBER}/locations/global/workloadIdentityPools/${YOUR_PROJECT_ID}.svc.id.goog/attribute.kubernetes_namespace/${YOUR_CONTROL_PLANE_NAMESPACE}/attribute.kubernetes_service_account/external-secrets-controller" \
+  --role="roles/secretmanager.secretAccessor"
+```
+
+#### Service account impersonation
+
+Service account impersonation allows you to link a Kubernetes service account to
+a GCP service account. The Kubernetes service account assumes the permissions of
+the GCP service account you specify.
+
+Enable Workload Identity Federation on your GKE cluster:
+
+```shell
+gcloud container clusters update ${YOUR_CLUSTER_NAME} \
+  --workload-pool=${YOUR_PROJECT_ID}.svc.id.goog \
+  --region=${YOUR_REGION}
+```
+
+Next, create a dedicated service account for your secrets operations:
+
+```shell
+gcloud iam service-accounts create secrets-sa \
+  --project=${YOUR_PROJECT_ID}
+```
+
+Grant secret access permissions to the service account you created:
+
+```shell
+gcloud projects add-iam-policy-binding ${YOUR_PROJECT_ID} \
+  --member="serviceAccount:secrets-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com" \
+  --role="roles/secretmanager.secretAccessor"
+```
+
+Link the Kubernetes service account to the GCP service account:
+
+```shell
+gcloud iam service-accounts add-iam-policy-binding \
+  secrets-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com \
+  --role="roles/iam.workloadIdentityUser" \
+  --member="serviceAccount:${YOUR_PROJECT_ID}.svc.id.goog[${YOUR_CONTROL_PLANE_NAMESPACE}/external-secrets-controller]"
+```
+
+When you install or upgrade your Space with Helm, pass the `--set` flag with the
+Spaces Helm chart parameters for the shared secrets component:
+
+```shell
+--set controlPlanes.sharedSecrets.serviceAccount.customAnnotations."iam\.gke\.io/gcp-service-account"="secrets-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com"
+```
+
+
+## Verify your configuration
+
+After you apply the configuration, use `kubectl` to verify the service account
+has the correct annotation:
+
+```shell
+kubectl get serviceaccount external-secrets-controller -n ${YOUR_CONTROL_PLANE_NAMESPACE} -o yaml
+```
+
+
+Verify the `external-secrets` pod is running correctly:
+
+```shell
+kubectl get pods -n ${YOUR_CONTROL_PLANE_NAMESPACE} | grep external-secrets
+```
+
+
+Verify the External Secrets Operator pod is running correctly:
+
+```shell
+kubectl get pods -n ${YOUR_CONTROL_PLANE_NAMESPACE} | grep external-secrets
+```
+
+
+Verify the `external-secrets` pod is running correctly:
+
+```shell
+kubectl get pods -n ${YOUR_CONTROL_PLANE_NAMESPACE} | grep external-secrets
+```
+
+
+## Restart workload
+
+
+You must manually restart a workload's pod when you add the
+`eks.amazonaws.com/role-arn` annotation to the running pod's service
+account.
+
+This restart enables the EKS pod identity webhook to inject the necessary
+environment for using IRSA.
+
+
+You must manually restart a workload's pod when you add the workload identity
+annotations to the running pod's service account.
+
+This restart enables the workload identity webhook to inject the necessary
+environment for using Azure workload identity.
+
+
+GCP workload identity doesn't require pod restarts after configuration changes.
+If you do need to restart the workload, use the `kubectl` command to force the
+component restart:
+
+```shell
+kubectl rollout restart deployment external-secrets
+```
+
+## Use cases
+
+
+Shared secrets with workload identity eliminates the need for static credentials
+in your cluster. These benefits are particularly helpful in:
+
+* Secure application credentials management
+* Database connection string storage
+* API token management
+* Compliance with secret rotation security standards
+* Multi-environment configuration with centralized secret management
+
+
+Using workload identity authentication for shared secrets eliminates the need for static
+credentials in your cluster as well as the overhead of credential rotation.
+These benefits are particularly helpful in:
+
+* Secure application credentials management
+* Database connection string storage
+* API token management
+* Compliance with secret rotation security standards
+
+
+Configuring the external secrets operator with workload identity eliminates the need for
+static credentials in your cluster and the overhead of credential rotation.
+These benefits are particularly helpful in:
+
+* Secure application credentials management
+* Database connection string storage
+* API token management
+* Compliance with secret rotation security standards
+
+
+## Next steps
+
+Now that you have workload identity configured for the shared secrets component, visit
+the [Shared Secrets][eso-guide] guide for more information.
+
+Other workload identity guides are:
+* [Backup and restore][backuprestore]
+* [Billing][billing]
+
+[eso-guide]: /spaces/howtos/secrets-management
+[backuprestore]: /spaces/howtos/self-hosted/workload-id/backup-restore-config
+[billing]: /spaces/howtos/self-hosted/workload-id/billing-config
diff --git a/spaces_versioned_docs/version-v1.15/howtos/simulations.md b/spaces_versioned_docs/version-v1.15/howtos/simulations.md
new file mode 100644
index 000000000..26cb0e657
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.15/howtos/simulations.md
@@ -0,0 +1,110 @@
+---
+title: Simulate changes to your Control Plane Projects
+sidebar_position: 100
+description: Use the Up CLI to mock operations before deploying to your environments.
+---
+
+:::info API Version Information
+This guide covers Simulations, available in v1.10+ (GA since v1.13). For
+version-specific availability and features, see the .
+
+For API specifications on the Simulations CRD, see the [Spaces API Reference](../../../reference/apis/spaces-api/).
+:::
+
+:::important
+The Simulations feature is in private preview. For more information, [reach out
+to Upbound][reach-out-to-upbound].
+:::
+
+Control plane simulations allow you to preview changes to your resources before
+applying them to your control planes. Like a plan or dry-run operation,
+simulations expose the impact of updates to compositions or claims without
+changing your actual resources.
+
+A control plane simulation creates a temporary copy of your control plane and
+returns a preview of the desired changes. The simulation change plan helps you
+reduce the risk of unexpected behavior based on your changes.
+
+## Simulation benefits
+
+Control planes are dynamic systems that automatically reconcile resources to
+match your desired state. Simulations provide visibility into this
+reconciliation process by showing:
+
+* New resources to create
+* Existing resources to change
+* Existing resources to delete
+* How configuration changes propagate through the system
+
+These insights are crucial when planning complex changes or upgrading Crossplane
+packages.
+
+## Requirements
+
+Simulations are available to select customers on Upbound Cloud with Team
+Tier or higher. For more information, [reach out to Upbound][reach-out-to-upbound-1].
+
+## How to simulate your control planes
+
+Before you start a simulation, build your project and use the `up
+project run` command to run your control plane.
+
+Use the `up project simulate` command with your control plane name to start the
+simulation:
+
+```shell {copy-lines="all"}
+up project simulate --complete-after=60s --terminate-on-finish
+```
+
+The `complete-after` flag determines how long to run the simulation before it
+completes and calculates the results.
+Depending on the change, a simulation may not complete within your defined
+interval, leaving unaffected resources marked as `unchanged`.
+
+The `terminate-on-finish` flag terminates the simulation after the time
+you set, deleting the control plane that ran the simulation.
+
+At the end of your simulation, your CLI returns:
+* A summary of the resources created, modified, or deleted
+* Diffs for each resource affected
+
+## View your simulation in the Upbound Console
+
+You can also view your simulation results in the Upbound Console:
+
+1. Navigate to your base control plane in the Upbound Console
+2. Select the "Simulations" tab in the menu
+3. Select a simulation object to see a change list of all
+   affected resources.
+
+The Console provides visual indications of changes:
+
+- Created Resources: Marked with green
+- Modified Resources: Marked with yellow
+- Deleted Resources: Marked with red
+- Unchanged Resources: Displayed in gray
+
+![Upbound Console Simulation](/img/simulations.png)
+
+## Considerations
+
+The Simulations feature is in **private preview**.
+
+Be aware of the following limitations:
+
+- Simulations can't predict the exact behavior of external systems due to the
+  complexity and non-deterministic reconciliation pattern in Crossplane.
+
+- The only completion criterion for a simulation is time. Your simulation may not
+  receive a conclusive result within that interval. Upbound recommends the
+  default `60s` value.
+
+- Providers don't run in simulations. Simulations can't compose resources that
+  rely on the status of Managed Resources.
+
+The Upbound team is working to address these limitations. Your feedback is
+always appreciated.
+
+## Next steps
+
+For more information, follow the [tutorial][tutorial] on Simulations.
+
+[tutorial]: /manuals/cli/howtos/simulations
+[reach-out-to-upbound]: https://www.upbound.io/contact-us
+[reach-out-to-upbound-1]: https://www.upbound.io/contact-us
diff --git a/spaces_versioned_docs/version-v1.15/overview/_category_.json b/spaces_versioned_docs/version-v1.15/overview/_category_.json
new file mode 100644
index 000000000..54bb16430
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.15/overview/_category_.json
@@ -0,0 +1,4 @@
+{
+  "label": "Overview",
+  "position": 0
+}
diff --git a/spaces_versioned_docs/version-v1.15/overview/index.md b/spaces_versioned_docs/version-v1.15/overview/index.md
new file mode 100644
index 000000000..b199ea0b2
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.15/overview/index.md
@@ -0,0 +1,14 @@
+---
+title: Spaces Overview
+sidebar_position: 0
+---
+
+# Upbound Spaces
+
+Welcome to the Upbound Spaces documentation. This section contains comprehensive
+documentation for the Spaces API and Spaces operations across all supported
+versions (v1.9 through v1.15).
+
+## Get Started
+
+- **[Concepts](/spaces/concepts/control-planes/)** - Core concepts for Spaces
+- **[How-To Guides](/spaces/howtos/auto-upgrade/)** - Step-by-step guides for operating Spaces
+- **[API Reference](/spaces/reference/)** - API specifications and resources
diff --git a/spaces_versioned_docs/version-v1.15/reference/_category_.json b/spaces_versioned_docs/version-v1.15/reference/_category_.json
new file mode 100644
index 000000000..4a6a139c4
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.15/reference/_category_.json
@@ -0,0 +1,5 @@
+{
+  "label": "Spaces API",
+  "position": 1,
+  "collapsed": true
+}
diff --git a/spaces_versioned_docs/version-v1.15/reference/index.md b/spaces_versioned_docs/version-v1.15/reference/index.md
new file mode 100644
index 000000000..5e68b0768
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.15/reference/index.md
@@ -0,0 +1,72 @@
+---
+title: Spaces API Reference
+description: Documentation for the Spaces API resources (v1.15 - Latest)
+sidebar_position: 1
+---
+import CrdDocViewer from '@site/src/components/CrdViewer';
+
+
+This page documents the Custom Resource Definitions (CRDs) for the Spaces API.
+
+
+## Control Planes
+### Control Planes
+
+
+## Observability
+### Shared Telemetry Configs
+
+
+## `pkg`
+### Controller Revisions
+
+
+### Controller Runtime Configs
+
+
+### Controllers
+
+
+### Remote Configuration Revisions
+
+
+### Remote Configurations
+
+
+## Policy
+### Shared Upbound Policies
+
+
+## References
+### Referenced Objects
+
+
+## Scheduling
+### Environments
+
+
+## Secrets
+### Shared External Secrets
+
+
+### Shared Secret Stores
+
+
+## Simulations
+
+
+## Spaces Backups
+### Backups
+
+
+### Backup Schedules
+
+
+### Shared Backup Configs
+
+
+### Shared Backups
+
+
+### Shared Backup Schedules
+
diff --git a/spaces_versioned_docs/version-v1.9/concepts/_category_.json b/spaces_versioned_docs/version-v1.9/concepts/_category_.json
new file mode 100644
index 000000000..4b8667e29
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.9/concepts/_category_.json
@@ -0,0 +1,7 @@
+{
+  "label": "Concepts",
+  "position": 2,
+  "collapsed": true
+}
+
+
diff --git a/spaces_versioned_docs/version-v1.9/concepts/control-planes.md b/spaces_versioned_docs/version-v1.9/concepts/control-planes.md
new file mode 100644
index 000000000..7066343de
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.9/concepts/control-planes.md
@@ -0,0 +1,227 @@
+---
+title: Control Planes
+weight: 1
+description: An overview of control planes in Upbound
+---
+
+
+Control planes in Upbound are fully isolated Crossplane control plane instances
+that Upbound manages for you. This means Upbound manages:
+
+- the underlying lifecycle of infrastructure (compute, memory, and storage) required to power your instance.
+- scaling of the infrastructure.
+- the maintenance of the core Crossplane components that make up a control plane.
+
+This lets users focus on building their APIs and operating their control planes,
+while Upbound handles the rest. Each control plane has its own dedicated API
+server connecting users to their control plane.
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+).
+
+For detailed information about Control Plane API specifications and CRD fields,
+see the [Spaces API Reference](../../../reference/apis/spaces-api/). For version
+compatibility details, see the .
+:::
+
+## Control plane architecture
+
+![Managed Control Plane Architecture](/img/mcp.png)
+
+Along with underlying infrastructure, Upbound manages the Crossplane system
+components. You don't need to manage the Crossplane API server or core resource
+controllers because Upbound manages your control plane lifecycle from creation
+to deletion.
+
+### Crossplane API
+
+Each control plane offers a unified endpoint. You interact with your control
+plane through Kubernetes and Crossplane API calls. Each control plane runs a
+Kubernetes API server to handle API requests. You can make API calls in the
+following ways:
+
+- Direct calls: HTTP/gRPC
+- Indirect calls: the up CLI, Kubernetes clients such as kubectl, or the Upbound Console.
+
+Like in Kubernetes, the API server is the hub for all communication for the
+control plane. All internal components such as system processes and provider
+controllers act as clients of the API server.
+
+Your API requests tell Crossplane your desired state for the resources your
+control plane manages. Crossplane attempts to constantly maintain that state.
+Crossplane lets you configure objects in the API either imperatively or
+declaratively.
+
+### Crossplane versions and features
+
+Upbound automatically upgrades Crossplane system components on control planes to
+new Crossplane versions for updated features and improvements in the open source
+project. With [automatic upgrades][automatic-upgrades], you choose the cadence
+at which Upbound automatically upgrades the system components in your control
+plane. You can also choose to manually upgrade your control plane to a different
+Crossplane version.
+
+For detailed information on versions and upgrades, refer to the [release
+notes][release-notes] and the automatic upgrade documentation. If you don't
+enroll a control plane in a release channel, Upbound doesn't apply automatic
+upgrades.
+
+Features considered "alpha" in Crossplane aren't supported in a control plane by
+default unless otherwise specified.
+
+### Hosting environments
+
+Every control plane in Upbound belongs to a [control plane
+group][control-plane-group]. Control plane groups are a logical grouping of one
+or more control planes with shared objects (such as secrets or backup
+configuration). Every group resides in a [Space][space] in Upbound; Spaces are
+hosting environments for control planes.
+
+Think of a Space as being conceptually the same as an AWS, Azure, or GCP region.
+Regardless of the Space type you run a control plane in, the core experience is
+identical.
+
+## Management
+
+### Create a control plane
+
+You can create a new control plane from the Upbound Console, [up CLI][up-cli],
+or with Kubernetes clients such as `kubectl`.
+
+
+To use the CLI, run the following:
+
+```shell
+up ctp create
+```
+
+To learn more about control plane-related commands in `up`, go to the [CLI
+reference][cli-reference] documentation.
+
+
+You can create and manage control planes declaratively in Upbound. Before you
+begin, ensure you're logged into Upbound and set the correct context:
+
+```bash
+up login
+# Example: acmeco/upbound-gcp-us-west-1/default
+up ctx ${yourOrganization}/${yourSpace}/${yourGroup}
+```
+
+```yaml
+#controlplane-a.yaml
+apiVersion: spaces.upbound.io/v1beta1
+kind: ControlPlane
+metadata:
+  name: controlplane-a
+spec:
+  crossplane:
+    autoUpgrade:
+      channel: Rapid
+```
+
+```bash
+kubectl apply -f controlplane-a.yaml
+```
+
+
+### Connect directly to your control plane
+
+Each control plane offers a unified endpoint.
+You interact with your control plane through Kubernetes and Crossplane API
+calls. Each control plane runs a Kubernetes API server to handle API requests.
+
+You can connect to a control plane's API server directly via the up CLI. Use the
+[`up ctx`][up-ctx] command to set your kubeconfig's current context to a control
+plane:
+
+```shell
+# Example: acmeco/upbound-gcp-us-west-1/default/ctp1
+up ctx ${yourOrganization}/${yourSpace}/${yourGroup}/${yourControlPlane}
+```
+
+To disconnect from your control plane and revert your kubeconfig's current
+context to the previous entry, run the following:
+
+```shell
+up ctx ..
+```
+
+You can also generate a `kubeconfig` file for a control plane with [`up ctx -f`][up-ctx-f].
+
+```shell
+up ctx ${yourOrganization}/${yourSpace}/${yourGroup}/${yourControlPlane} -f - > ctp-kubeconfig.yaml
+```
+
+:::tip
+To learn more about how to use `up ctx` to navigate different contexts in
+Upbound, read the [CLI documentation][cli-documentation].
+:::
+
+## Configuration
+
+When you create a new control plane, Upbound provides you with a fully isolated
+instance of Crossplane. Configure your control plane by installing packages that
+extend its capabilities, such as creating and managing the lifecycle of new
+types of infrastructure resources.
+
+You're encouraged to install any available Crossplane package type (Providers,
+Configurations, Functions) available in the [Upbound
+Marketplace][upbound-marketplace] on your control planes.
+
+### Install packages
+
+Below are a couple of ways to install Crossplane packages on your control plane.
+
+
+Use the `up` CLI to install Crossplane packages from the [Upbound
+Marketplace][upbound-marketplace-1] on your control planes. Connect directly to
+your control plane via `up ctx`. Then, to install a provider:
+
+```shell
+up ctp provider install xpkg.upbound.io/upbound/provider-family-aws
+```
+
+To install a Configuration:
+
+```shell
+up ctp configuration install xpkg.upbound.io/upbound/platform-ref-aws
+```
+
+To install a Function:
+
+```shell
+up ctp function install xpkg.upbound.io/crossplane-contrib/function-kcl
+```
+
+
+You can use kubectl to directly apply any Crossplane manifest. Below is an
+example for installing a Crossplane provider:
+
+```shell
+cat <<EOF | kubectl apply -f -
+apiVersion: pkg.crossplane.io/v1
+kind: Provider
+metadata:
+  name: provider-family-aws
+spec:
+  package: xpkg.upbound.io/upbound/provider-family-aws
+EOF
+```
+
+This example installs the same AWS provider family as the CLI example above.
+
+
+For production-grade scenarios, it's recommended you configure your control
+plane declaratively via Git plus a Continuous Delivery (CD) engine such as Argo.
+For guidance on this topic, read [GitOps with control
+planes][gitops-with-control-planes].
+
+
+### Configure Crossplane ProviderConfigs
+
+#### ProviderConfigs with OpenID Connect
+
+Use OpenID Connect (`OIDC`) to authenticate to Upbound control planes without
+credentials. OIDC lets your control plane exchange short-lived tokens directly
+with your cloud provider. Read how to [connect control planes to external
+services][connect-control-planes-to-external-services] to learn more.
+
+#### Generic ProviderConfigs
+
+The Upbound Console doesn't allow direct editing of ProviderConfigs that don't
+support `Upbound` authentication. To edit these ProviderConfigs on your control
+plane, connect to the control plane directly by following the instructions in
+the previous section and using `kubectl`.
+
+### Configure secrets
+
+Upbound gives users the ability to configure the synchronization of secrets from
+external stores into control planes. Configure this capability at the
+group level, explained in the [Spaces documentation][spaces-documentation].
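+
+As a quick illustration, a group-scoped secret store could look like the
+following minimal sketch. The field names in the `provider` block are
+assumptions modeled on the External Secrets Operator's `SecretStore` schema;
+refer to the Spaces documentation linked above for the authoritative fields:
+
+```yaml
+# Hypothetical sketch of a group-scoped secret store. The provider block
+# mirrors the ESO SecretStore schema and is illustrative, not authoritative.
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: aws-secrets
+  namespace: default # the control plane group
+spec:
+  # Project the store into control planes with a matching label
+  controlPlaneSelector:
+    labelSelectors:
+      - matchLabels:
+          environment: production
+  provider:
+    aws:
+      service: SecretsManager
+      region: us-east-1
+```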
+
+### Configure backups
+
+Upbound gives users the ability to configure backup schedules, take impromptu
+backups, and conduct self-service restore operations. Configure this capability
+at the group level, explained in the [Spaces documentation][spaces-documentation-1].
+
+### Configure telemetry
+
+
+Upbound gives users the ability to configure the collection of telemetry (logs,
+metrics, and traces) in their control planes. Using Upbound's built-in
+[OTEL][otel] support, you can stream this data out to your preferred
+observability solution. Configure this capability at the group level, explained
+in the [Spaces documentation][spaces-documentation-2].
+
+
+[automatic-upgrades]: /spaces/howtos/auto-upgrade
+[release-notes]: https://github.com/upbound/universal-crossplane/releases
+[control-plane-group]: /spaces/concepts/groups
+[space]: /spaces/overview
+[up-cli]: /reference/cli-reference
+[cli-reference]: /reference/cli-reference
+[up-ctx]: /reference/cli-reference
+[up-ctx-f]: /reference/cli-reference
+[cli-documentation]: /manuals/cli/concepts/contexts
+[upbound-marketplace]: https://marketplace.upbound.io
+[upbound-marketplace-1]: https://marketplace.upbound.io
+[gitops-with-control-planes]: /spaces/howtos/cloud-spaces/gitops
+[connect-control-planes-to-external-services]: /manuals/platform/howtos/oidc
+[spaces-documentation]: /spaces/howtos/secrets-management
+[spaces-documentation-1]: /spaces/howtos/backup-and-restore
+[otel]: https://opentelemetry.io
+[spaces-documentation-2]: /spaces/howtos/observability
diff --git a/spaces_versioned_docs/version-v1.9/concepts/deployment-modes.md b/spaces_versioned_docs/version-v1.9/concepts/deployment-modes.md
new file mode 100644
index 000000000..f5e718f88
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.9/concepts/deployment-modes.md
@@ -0,0 +1,53 @@
+---
+title: Deployment Modes
+sidebar_position: 10
+description: An overview of deployment modes for Spaces
+---
+
+Upbound Spaces can be deployed and used in a variety of modes:
+
+- **Cloud Spaces:** Multi-tenant Upbound-hosted, Upbound-managed Space environment. Cloud Spaces provide a typical SaaS experience.
+- **[Dedicated Spaces][dedicated-spaces]:** Single-tenant Upbound-hosted, Upbound-managed Space environment. Dedicated Spaces provide a SaaS experience, with additional isolation guarantees that your workloads run in a fully isolated context.
+- **[Managed Spaces][managed-spaces]:** Single-tenant customer-hosted, Upbound-managed Space environment. Managed Spaces provide a SaaS-like experience, with additional guarantees of all hosting infrastructure being served from your own cloud account.
+- **[Self-Hosted Spaces][self-hosted-spaces]:** Single-tenant customer-hosted, customer-managed Space environment. This is a fully self-hosted, self-managed software experience for using Spaces. Upbound delivers the Spaces software and you run it yourself.
+
+The Upbound platform uses a federated model to connect each Space back to a
+central service called the [Upbound Console][console], which is deployed and
+managed by Upbound.
+
+By default, customers have access to a set of Cloud Spaces.
+
+## Supported clouds
+
+You can host Upbound Spaces on Amazon Web Services (AWS), Microsoft Azure,
+and Google Cloud Platform (GCP). Regardless of the hosting platform, you can use
+Spaces to deploy control planes that manage the lifecycle of your resources.
+
+## Supported regions
+
+These tables list the cloud service provider regions supported by Upbound.
+
+### GCP
+
+| Region | Location |
+| --- | --- |
+| `us-west-1` | Western US (Oregon) |
+| `us-central-1` | Central US (Iowa) |
+| `eu-west-3` | Western Europe (Frankfurt) |
+
+### AWS
+
+| Region | Location |
+| --- | --- |
+| `us-east-1` | Eastern US (Northern Virginia) |
+
+### Azure
+
+| Region | Location |
+| --- | --- |
+| `us-east-1` | Eastern US (Iowa) |
+
+[dedicated-spaces]: /spaces/howtos/cloud-spaces/dedicated-spaces-deployment
+[managed-spaces]: /spaces/howtos/self-hosted/managed-spaces-deployment
+[self-hosted-spaces]: /spaces/howtos/self-hosted/self-hosted-spaces-deployment
+[console]: /manuals/console/upbound-console/
diff --git a/spaces_versioned_docs/version-v1.9/concepts/groups.md b/spaces_versioned_docs/version-v1.9/concepts/groups.md
new file mode 100644
index 000000000..d2ccacdb3
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.9/concepts/groups.md
@@ -0,0 +1,115 @@
+---
+title: Control Plane Groups
+sidebar_position: 2
+description: An introduction to the Control Plane Groups in Upbound
+plan: "enterprise"
+---
+
+
+In Upbound, Control Plane Groups (or just 'groups') are a logical grouping of
+one or more control planes with shared resources like [secrets][secrets] or
+[backups][backups]. It's a mechanism for isolating these groups of resources
+within a single [Space][space]. All role-based access control in Upbound happens
+at the control plane group level.
+
+## When to use multiple groups
+
+You should use groups in environments where there's a need to have Crossplane
+manage infrastructure across multiple cloud accounts or projects. If you only
+need to deploy and manage resources in a couple of cloud accounts, you shouldn't
+need to think about groups at all.
+
+Groups are a way to divide access in Upbound between multiple teams. Think of a
+group as being analogous to a Kubernetes _namespace_.
+
+## The 'default' group
+
+Every Cloud Space in Upbound has a group named _default_ available.
+
+## Working with groups
+
+### View groups
+
+You can list groups in a Space using:
+
+```shell
+up group list
+```
+
+If you're operating in a single-tenant Space and have access to the underlying
+cluster, you can list namespaces that have the group label:
+
+```shell
+kubectl get namespaces -l spaces.upbound.io/group=true
+```
+
+### Set the group for a request
+
+Several commands in _up_ have a group context. To set the group for a request,
+use the `--group` flag:
+
+```shell
+up ctp list --group=team1
+```
+```shell
+up ctp create new-ctp --group=team2
+```
+
+### Set the group preference
+
+The _up_ CLI operates on a single [Upbound context][upbound-context]. The
+context you set is then used as the preference for other commands. An Upbound
+context can point at a variety of altitudes:
+
+1. A Space in Upbound
+2. A group within a Space
+3. A control plane within a group
+
+To set the group preference, use `up ctx` to choose a group as your preferred
+Upbound context.
+For example:
+
+```shell
+# This sets the context for the up CLI to the default group in an Upbound-managed Cloud Space (gcp-us-west-1) for an organization called 'acmeco'
+up ctx acmeco/upbound-gcp-us-west-1/default/
+```
+
+### Create a group
+
+To create a group, log in to Upbound and set your context to your desired Space:
+
+```shell
+up login
+up ctx '/'
+# Example: up ctx acmeco/upbound-gcp-us-west-1
+```
+
+Create a group:
+
+```shell
+up group create my-new-group
+```
+
+### Delete a group
+
+To delete a group, log in to Upbound and set your context to your desired Space:
+
+```shell
+up login
+up ctx '/'
+# Example: up ctx acmeco/upbound-gcp-us-west-1
+```
+
+Delete a group:
+
+```shell
+up group delete my-new-group
+```
+
+### Protected groups
+
+Once a control plane gets created in a group, Upbound enforces a protection
+policy on the group to prevent accidental deletion. To delete a group that has
+control planes in it, first delete all control planes in the group.
+
+## Groups in the context of single-tenant Spaces
+
+Upbound offers a variety of deployment models to use the product. If you deploy
+your own single-tenant Upbound Space (whether connected or disconnected), you're
+self-hosting Upbound software in a Kubernetes cluster. In these environments, a
+control plane group maps to a corresponding namespace in the cluster which hosts
+the Space.
+
+Most Kubernetes clusters come with some set of predefined namespaces. Because a
+group maps to a corresponding Kubernetes namespace, whenever a group gets
+created, a matching Kubernetes namespace must exist. When the Spaces software is
+newly installed, no groups exist. You _can_ elevate a Kubernetes namespace to
+become a group by doing any of the following:
+
+1. Creating a group with the same name as a preexisting Kubernetes namespace
+2. Creating a control plane in a preexisting Kubernetes namespace
+3. Labeling a Kubernetes namespace with the label `spaces.upbound.io/group=true`
+
+
+[secrets]: /spaces/howtos/secrets-management
+[backups]: /spaces/howtos/self-hosted/workload-id/backup-restore-config/
+[space]: /spaces/overview
+[upbound-context]: /manuals/cli/concepts/contexts
diff --git a/spaces_versioned_docs/version-v1.9/howtos/_category_.json b/spaces_versioned_docs/version-v1.9/howtos/_category_.json
new file mode 100644
index 000000000..d3a8547aa
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.9/howtos/_category_.json
@@ -0,0 +1,7 @@
+{
+  "label": "How-tos",
+  "position": 3,
+  "collapsed": true
+}
+
+
diff --git a/spaces_versioned_docs/version-v1.9/howtos/api-connector.md b/spaces_versioned_docs/version-v1.9/howtos/api-connector.md
new file mode 100644
index 000000000..a14468f52
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.9/howtos/api-connector.md
@@ -0,0 +1,413 @@
+---
+title: API Connector
+weight: 90
+description: Connect Kubernetes clusters to remote Crossplane control planes for resource synchronization
+aliases:
+  - /api-connector
+  - /concepts/api-connector
+---
+:::info API Version Information
+This guide covers API Connector, a preview feature available in all supported
+versions (v1.9-v1.14+).
+
+For related API specifications and available resources, see the [Spaces API
+Reference](../../../reference/apis/spaces-api/). For version compatibility
+details, see the .
+:::
+
+:::warning
+API Connector is currently in **Preview**. The feature is under active
+development and subject to breaking changes. Use for testing and evaluation
+purposes only.
+:::
+
+API Connector enables seamless integration between Kubernetes application
+clusters consuming APIs and remote Crossplane control planes providing and
+reconciling APIs.
+
+You can use the API Connector to decouple where Crossplane is running (for
+example, in an Upbound control plane) and where APIs are consumed
+(for example, in an existing Kubernetes cluster). This gives you flexibility and
+consistency in your control plane operations.
+
+
+Unlike the [Control Plane Connector](ctp-connector.md), which offers only
+coarse-grained connectivity between app clusters and a control plane, API
+Connector offers fine-grained configuration of which APIs get offered, along
+with multi-cluster connectivity.
+
+## Architecture overview
+
+![API Connector Architecture](/img/api-connector.png)
+
+API Connector uses a **provider-consumer** model:
+
+- **Provider control plane**: The Upbound control plane that provides APIs and manages infrastructure.
+- **Consumer cluster**: Any Kubernetes cluster where users want to use APIs provided by the provider control plane, without having to run Crossplane. API Connector gets installed in the consumer cluster and bidirectionally syncs API objects to the provider.
+
+### Key components
+
+**Custom Resource Definitions (CRDs)**:
+
+- `ClusterConnection`: Establishes a connection from the consumer to the provider cluster. Pulls bindable CRD APIs from the provider into the consumer cluster for use.
+- `ClusterAPIBinding`: Instructs API Connector to sync all API objects cluster-wide with a given API group to a given provider cluster.
+- `APIBinding`: Namespaced version of `ClusterAPIBinding`. Instructs API Connector to sync API objects within a given namespace and with a given API group to a given provider cluster.
+
+
+## Prerequisites
+
+Before using API Connector, ensure:
+
+1. **Consumer cluster** has network access to the provider control plane
+1. You have a license to use API Connector. If you are unsure, [contact Upbound][contact] or your sales representative.
+
+This guide walks through how to automate connecting your cluster to an Upbound
+control plane. You can also manually configure the API Connector.
+
+## Publishing APIs in the provider cluster
+
+
+First, log in to your provider control plane and choose which CRD APIs you want
+to make accessible to the consumer clusters. API Connector only syncs
+these "bindable" CRDs.
+
+
+Use the `up` CLI to login:
+
+```bash
+up login
+```
+
+Connect to your control plane:
+
+```bash
+up ctx
+```
+
+Check what CRDs are available:
+
+```bash
+kubectl get crds
+```
+
+Label all CRDs you want to publish with the bindable label:
+
+```bash
+kubectl label crd 'connect.upbound.io/bindable'='true' --overwrite
+```
+
+
+Change context to the provider cluster:
+
+```bash
+kubectl config set-context
+```
+
+Check what CRDs are available:
+
+```bash
+kubectl get crds
+```
+
+Label all CRDs you want to publish with the bindable label:
+
+```bash
+kubectl label crd 'connect.upbound.io/bindable'='true' --overwrite
+```
+
+
+## Installation
+
+
+The up CLI provides the simplest installation method with automatic
+configuration.
+
+Make sure the current kubeconfig context is set to the **provider control plane**:
+
+```bash
+up ctx
+
+up controlplane api-connector install --consumer-kubeconfig [OPTIONS]
+```
+
+The command:
+1. Creates a Robot account (named ``) in the Upbound Cloud organization ``,
+1. Gives the created robot account `admin` permissions to the provider control plane ``
+1. Generates a JWT token for the robot account, and stores it in a Kubernetes Secret in the consumer cluster.
+1. Installs the API Connector Helm chart in the consumer cluster.
+1. Creates a `ClusterConnection` object in the consumer cluster, referring to the newly generated Secret, so that API Connector can authenticate successfully to the provider control plane.
+1. API Connector pulls all published CRDs from the previous step into the consumer cluster.
+
+**Example**:
+```bash
+up controlplane api-connector install \
+  --consumer-kubeconfig ~/.kube/config \
+  --consumer-context my-cluster \
+  --upbound-token
+```
+
+This command uses the provided token to authenticate with the **provider control
+plane** and create a `ClusterConnection` resource in the **consumer cluster** to
+connect to the **provider control plane**.
+
+**Key Options**:
+- `--consumer-kubeconfig`: Path to consumer cluster kubeconfig (required)
+- `--consumer-context`: Context name for consumer cluster (required)
+- `--name`: Custom name for connection resources (optional)
+- `--upbound-token`: API token for authentication (optional)
+- `--upgrade`: Upgrade existing installation (optional)
+- `--version`: Specific version to install (optional)
+
+
+For manual installation or custom configurations:
+
+```bash
+helm upgrade --install api-connector oci://xpkg.upbound.io/spaces-artifacts/api-connector \
+  --namespace upbound-system \
+  --create-namespace \
+  --version \
+  --set consumerClusterDisplayName=
+```
+
+### Authentication methods
+
+API Connector supports two authentication methods:
+
+
+For Upbound Spaces integration:
+
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  name: spaces-secret
+  namespace: upbound-system
+type: Opaque
+stringData:
+  token:
+  organization:
+  spacesBaseURL:
+  controlPlaneGroupName:
+  controlPlaneName:
+```
+
+
+For direct cluster access:
+
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  name: provider-kubeconfig
+  namespace: upbound-system
+type: Opaque
+data:
+  kubeconfig:
+```
+
+
+### Connection setup
+
+Create a `ClusterConnection` to establish connectivity:
+
+
+```yaml
+apiVersion: connect.upbound.io/v1alpha1
+kind: ClusterConnection
+metadata:
+  name: spaces-connection
+  namespace: upbound-system
+spec:
+  secretRef:
+    kind: UpboundRobotToken
+    name: spaces-secret
+    namespace: upbound-system
+  crdManagement:
+    pullBehavior: Pull
+```
+
+
+```yaml
+apiVersion: connect.upbound.io/v1alpha1
+kind: ClusterConnection
+metadata:
+  name: provider-connection
+  namespace: upbound-system
+spec:
+  secretRef:
+    kind: KubeConfig
+    name: provider-kubeconfig
+    namespace: upbound-system
+  crdManagement:
+    pullBehavior: Pull
+```
+
+
+### Configuration
+
+Bind APIs to make them available in your consumer cluster:
+
+```yaml
+apiVersion: connect.upbound.io/v1alpha1
+kind: ClusterAPIBinding
+metadata:
+  name:
+spec:
+  connectionRef:
+    kind: ClusterConnection
+    name: # Or --name value
+```
+
+
+The `ClusterAPIBinding` name must match the **Resource.Group** (the name of the
+CustomResourceDefinition, for example `nopresources.nop.example.org`) of the CRD
+you want to bind.
+
+
+## Usage example
+
+After configuration, you can create API objects (in the consumer cluster) that
+will be synchronized to the provider cluster:
+
+```yaml
+apiVersion: nop.example.org/v1alpha1
+kind: NopResource
+metadata:
+  name: my-resource
+  namespace: default
+spec:
+  coolField: "Synchronized resource"
+  compositeDeletePolicy: Foreground
+```
+
+Verify the resource status:
+
+```bash
+kubectl get nopresource my-resource -o yaml
+```
+
+When the `APIBound=True` condition is present, it means that the API object has
+been synced to the provider cluster, and is being reconciled there. Whenever the
+API object in the provider cluster gets status updates (for example
+`Ready=True`), that status is synced back to the consumer cluster.
+
+Switch contexts to the provider cluster to see the API object being created:
+
+```bash
+up ctx
+# or kubectl config set-context
+```
+
+```bash
+kubectl get nopresource my-resource -o yaml
+```
+
+Note that in the provider cluster, the API object is labeled with information on
+where the API object originates from, and `connect.upbound.io/managed=true`.
+
+## Monitoring and troubleshooting
+
+### Check connection status
+
+```bash
+kubectl get clusterconnection
+```
+
+Expected output:
+```
+NAME                STATUS   MESSAGE
+spaces-connection   Ready    Provider controlplane is available
+```
+
+### View available APIs
+
+```bash
+kubectl get clusterconnection spaces-connection -o jsonpath='{.status.offeredAPIs[*].name}'
+```
+
+### Check API binding status
+
+```bash
+kubectl get clusterapibinding
+```
+
+### Debug resource synchronization
+
+```bash
+kubectl describe
+```
+
+## Removal
+
+### Using the up CLI
+
+```bash
+up controlplane api-connector uninstall \
+  --consumer-kubeconfig ~/.kube/config \
+  --all
+```
+
+The `--all` flag removes all resources, including connections and secrets.
+Without the flag, only the runtime resources are removed and the connections and
+secrets are kept.
+
+:::note
+Uninstall doesn't remove any API objects in the provider control plane. If you
+want to clean up all API objects there, delete all API objects from the consumer
+cluster before API Connector uninstallation, and wait for the objects to get
+deleted.
+:::
+
+
+### Using Helm
+
+```bash
+helm uninstall api-connector -n upbound-system
+```
+
+## Limitations
+
+- **Preview feature**: Subject to breaking changes. Not yet production grade.
+- **CRD updates**: CRDs are pulled once but not automatically updated. If multiple Crossplane clusters offer the same CRD API, API changes must be synchronized out of band, for example using a [Crossplane Configuration](https://docs.crossplane.io/latest/packages/).
+- **Network requirements**: The consumer cluster must have direct network access to the provider cluster.
+- **Wide permissions needed in consumer cluster**: Because the API Connector doesn't know up front the names of the APIs it needs to reconcile, it currently runs with full "root" privileges in the consumer cluster.
+- **Connector polling**: API Connector checks for drift between the consumer and provider cluster periodically through polling. The poll interval can be changed with the `pollInterval` Helm value.
+
+
+## Advanced configuration
+
+### Multiple connections
+
+You can connect to multiple provider clusters simultaneously by creating
+multiple `ClusterConnection` resources with different names and configurations,
+as shown in the sketch below.
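+
+A minimal sketch of a second connection, reusing the `ClusterConnection` schema
+from earlier in this guide. The connection and Secret names are hypothetical,
+and the referenced kubeconfig Secret must already exist in `upbound-system`:
+
+```yaml
+# Hypothetical second connection; the referenced Secret must already exist.
+apiVersion: connect.upbound.io/v1alpha1
+kind: ClusterConnection
+metadata:
+  name: provider-b-connection
+  namespace: upbound-system
+spec:
+  secretRef:
+    kind: KubeConfig
+    name: provider-b-kubeconfig
+    namespace: upbound-system
+  crdManagement:
+    pullBehavior: Pull
+```
+
+Each `ClusterAPIBinding` or `APIBinding` then selects which connection to sync
+through via its `connectionRef.name`.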
+
+[contact]: https://www.upbound.io/contact-us
diff --git a/spaces_versioned_docs/version-v1.9/howtos/auto-upgrade.md b/spaces_versioned_docs/version-v1.9/howtos/auto-upgrade.md
new file mode 100644
index 000000000..249056fb4
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.9/howtos/auto-upgrade.md
@@ -0,0 +1,131 @@
+---
+title: Automatically upgrade control planes
+sidebar_position: 50
+description: How to configure automatic upgrades of Crossplane in a control plane
+plan: "standard"
+---
+
+
+Upbound Spaces can automatically upgrade the version of Upbound Crossplane in
+managed control plane instances. You can edit the `spec.crossplane.autoUpgrade`
+field in your `ControlPlane` specification with the available release channels
+below.
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+). The
+`spec.crossplane.autoUpgrade` field has been available since v1.9.
+
+For ControlPlane API specifications, see the [Spaces API
+Reference](../../../reference/apis/spaces-api/). For version compatibility
+details, see the .
+:::
+
+| Channel | Description | Example |
+|------------|-----------------------------------|---------|
+| **None** | Disables auto upgrades. | _Uses version specified in `spec.crossplane.version`._ |
+| **Patch** | Upgrades to the latest supported patch release. | _Control plane version 1.12.2-up.2 auto upgrades to 1.12.3-up.1 upon release._ |
+| **Stable** | Default setting. Upgrades to the latest supported patch release on minor version _N-1_, where _N_ is the latest supported minor version. | _If the latest supported minor version is 1.14, auto upgrades to the latest 1.13 patch, for example 1.13.2-up.3._ |
+| **Rapid** | Upgrades to the latest supported patch release on the latest supported minor version. | _If the latest supported minor version is 1.14, auto upgrades to the latest patch of that minor version, for example 1.14.5-up.1._ |
+
+
+:::warning
+
+The `Rapid` channel is only recommended for users willing to accept the risk of
+new features and potentially breaking changes.
+
+:::
+
+## Examples
+
+The specs below are examples of how to edit the `autoUpgrade` channel in your
+`ControlPlane` specification.
+
+To run a control plane with the `Rapid` auto upgrade channel, your spec should
+look like this:
+
+```yaml
+apiVersion: spaces.upbound.io/v1beta1
+kind: ControlPlane
+metadata:
+  name: example-ctp
+spec:
+  crossplane:
+    autoUpgrade:
+      channel: Rapid
+  writeConnectionSecretToRef:
+    name: kubeconfig-example-ctp
+```
+
+To run a control plane with a pinned version of Crossplane, specify it in the
+`version` field:
+
+```yaml
+apiVersion: spaces.upbound.io/v1beta1
+kind: ControlPlane
+metadata:
+  name: example-ctp
+spec:
+  crossplane:
+    version: 1.14.3-up.1
+    autoUpgrade:
+      channel: None
+  writeConnectionSecretToRef:
+    name: kubeconfig-example-ctp
+```
+
+## Supported Crossplane versions
+
+Spaces supports the three most recent minor versions: the last supported minor
+version and the two [preceding minor versions][preceding-minor-versions]. For
+example, if the last supported minor version is `1.14`, minor versions `1.13`
+and `1.12` are also supported. Versions older than the three most recent minor
+versions aren't supported. Only supported Crossplane versions are valid
+specifications for new control planes.
+
+Current Crossplane version support by Spaces version:
+
+| Spaces Version | Crossplane Version Min | Crossplane Version Max |
+|:--------------:|:----------------------:|:----------------------:|
+| 1.2 | 1.13 | 1.15 |
+| 1.3 | 1.13 | 1.15 |
+| 1.4 | 1.14 | 1.16 |
+| 1.5 | 1.14 | 1.16 |
+| 1.6 | 1.14 | 1.16 |
+| 1.7 | 1.14 | 1.16 |
+| 1.8 | 1.15 | 1.17 |
+| 1.9 | 1.16 | 1.18 |
+| 1.10 | 1.16 | 1.18 |
+| 1.11 | 1.16 | 1.18 |
+| 1.12 | 1.17 | 1.19 |
+
+
+Upbound offers extended support for all installed Crossplane versions released
+within a 12-month window since the last Spaces release. Contact your Upbound
+sales representative for more information on version support.
+
+
+:::warning
+
+If the auto upgrade channel is `Stable` or `Rapid`, the Crossplane version will
+always stay within the support window after auto upgrade. If set to `Patch` or
+`None`, the minor version may be outside the support window. You are responsible
+for upgrading to a supported version.
+
+:::
+
+To view the support status of a control plane instance, use `kubectl get ctp`.
+
+```bash
+kubectl get ctp
+NAME          CROSSPLANE VERSION   SUPPORTED   READY   MESSAGE   AGE
+example-ctp   1.13.2-up.3          True        True              31m
+```
+
+Unsupported versions return `SUPPORTED: False`.
+
+```bash
+kubectl get ctp
+NAME          CROSSPLANE VERSION   SUPPORTED   READY   MESSAGE   AGE
+example-ctp   1.11.5-up.1          False       True              31m
+```
+
+For more information, use the `-o yaml` flag to return the full status.
+
+```bash
+kubectl get controlplanes.spaces.upbound.io example-ctp -o yaml
+status:
+  conditions:
+  ...
+  - lastTransitionTime: "2024-01-23T06:36:10Z"
+    message: Crossplane version 1.11.5-up.1 is outside of the support window.
+      Oldest supported minor version is 1.12.
+    reason: UnsupportedCrossplaneVersion
+    status: "False"
+    type: Supported
+```
+
+
+[preceding-minor-versions]: /reference/usage/lifecycle/#maintenance-and-updates
diff --git a/spaces_versioned_docs/version-v1.9/howtos/automation-and-gitops/_category_.json b/spaces_versioned_docs/version-v1.9/howtos/automation-and-gitops/_category_.json
new file mode 100644
index 000000000..b65481af6
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.9/howtos/automation-and-gitops/_category_.json
@@ -0,0 +1,8 @@
+{
+  "label": "Automation & GitOps",
+  "position": 11,
+  "collapsed": true,
+  "customProps": {
+    "plan": "business"
+  }
+}
diff --git a/spaces_versioned_docs/version-v1.9/howtos/automation-and-gitops/overview.md b/spaces_versioned_docs/version-v1.9/howtos/automation-and-gitops/overview.md
new file mode 100644
index 000000000..57eeb15fc
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.9/howtos/automation-and-gitops/overview.md
@@ -0,0 +1,138 @@
+---
+title: Automation and GitOps Overview
+sidebar_label: Overview
+sidebar_position: 1
+description: Guide to automating control plane deployments with GitOps and Argo CD
+plan: "business"
+---
+
+Automating control plane deployments with GitOps enables declarative,
+version-controlled infrastructure management. This section covers integrating
+GitOps workflows with Upbound control planes using Argo CD and related tools.
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+). GitOps patterns are
+compatible across all versions of Upbound Spaces.
+
+For details on deployment models and their differences, see the [Deployment
+Modes](../../concepts/deployment-modes.md) concept guide. For version-specific
+features, see the .
+:::
+
+## What is GitOps?
+
+GitOps is an approach for managing infrastructure by:
+- **Declaratively describing** desired system state in Git
+- **Using controllers** to continuously reconcile actual state with desired state
+- **Treating Git as the source of truth** for all configuration and deployments
+
+Upbound control planes are fully compatible with GitOps patterns, and we
+strongly recommend integrating GitOps into the platforms you build on Upbound.
+
+## Key Concepts
+
+### Argo CD
+[Argo CD](https://argo-cd.readthedocs.io/) is a popular Kubernetes-native GitOps
+controller. It continuously monitors Git repositories and automatically applies
+changes to your infrastructure when commits are detected.
+
+### Deployment Models
+
+The way you configure GitOps depends on your deployment model:
+
+| Aspect | Cloud Spaces | Self-Hosted Spaces |
+|--------|--------------|-------------------|
+| **Access Method** | Upbound API with tokens | Kubernetes native (secrets/kubeconfig) |
+| **Configuration** | Kubeconfig via `up` CLI | Control plane connection secrets |
+| **Setup Complexity** | More involved (API integration) | Simpler (native Kubernetes) |
+| **Typical Use Case** | Managing Upbound resources | Managing workloads on control planes |
+
+## Getting Started
+
+**Choose your path based on your deployment model:**
+
+### Cloud Spaces
+If you're using Upbound Cloud Spaces (Dedicated or Managed):
+1. Start with [GitOps with Upbound Control Planes](../cloud-spaces/gitops-on-upbound.md)
+2. Learn how to integrate Argo CD with Cloud Spaces
+3. Manage both control plane infrastructure and Upbound resources declaratively
+
+### Self-Hosted Spaces
+If you're running self-hosted Spaces:
+1. Start with [GitOps with ArgoCD in Self-Hosted Spaces](../self-hosted/gitops-with-argocd.md)
+2. Learn how to configure control plane connection secrets
+3. Manage workloads deployed to your control planes
+
+## Common Workflows
+
+### Workflow 1: Managing Control Planes with GitOps
+Create and manage control planes themselves declaratively using
+provider-kubernetes:
+
+```yaml
+apiVersion: kubernetes.crossplane.io/v1alpha2
+kind: Object
+metadata:
+  name: my-controlplane
+spec:
+  forProvider:
+    manifest:
+      apiVersion: spaces.upbound.io/v1beta1
+      kind: ControlPlane
+      # ... control plane configuration
+```
+
+### Workflow 2: Managing Workloads on Control Planes
+Deploy applications and resources to control planes using standard Kubernetes
+GitOps patterns:
+
+```yaml
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: my-app
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: my-app
+  namespace: my-app
+# ... deployment configuration
+```
+
+### Workflow 3: Managing Upbound Resources
+Use provider-upbound to manage Upbound IAM and repository resources:
+
+- Teams
+- Robots and their team memberships
+- Repositories and permissions
+
+## Advanced Topics
+
+### Argo CD Plugin for Upbound
+Learn more in the [ArgoCD Plugin guide](../self-hosted/use-argo.md) for enhanced
+integration with self-hosted Spaces.
+
+### Declarative Control Plane Creation
+See [Declaratively create control planes](../self-hosted/declarative-ctps.md)
+for advanced automation patterns.
+
+### Consuming Control Plane APIs
+Understand how to [consume control plane APIs in your app cluster](../mcp-connector-guide.md)
+with Argo CD.
+
+## Prerequisites
+
+Before implementing GitOps with control planes, ensure you have:
+
+**For Cloud Spaces:**
+- Access to Upbound Cloud Spaces
+- `up` CLI installed and configured
+- API token with appropriate permissions
+- Argo CD or similar GitOps controller running
+- Familiarity with Kubernetes RBAC
+
+**For Self-Hosted Spaces:**
+- Self-hosted Spaces deployed and running
+- Argo CD deployed in your infrastructure
+- Kubectl access to the cluster hosting Spaces
+- Understanding of control plane architecture
+
+## Next Steps
+
+1. **Choose your deployment model** above
+2. **Review the relevant getting started guide**
+3. **Set up your GitOps controller** (Argo CD)
+4. **Deploy your first automated control plane**
+5. **Explore advanced topics** as needed
+
+:::tip
+Start with simple deployments to test your GitOps workflow before moving to
+production. Use [simulations](../simulations.md) to preview changes before
+applying them.
+:::
diff --git a/spaces_versioned_docs/version-v1.9/howtos/backup-and-restore.md b/spaces_versioned_docs/version-v1.9/howtos/backup-and-restore.md
new file mode 100644
index 000000000..3b8d026cb
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.9/howtos/backup-and-restore.md
@@ -0,0 +1,530 @@
+---
+title: Backup and restore
+sidebar_position: 13
+description: Configure and manage backups in your Upbound Space.
+plan: "enterprise"
+---
+
+
+Upbound's _Shared Backups_ is a built-in backup and restore feature. Shared
+Backups lets you configure automatic schedules for taking snapshots of your
+control planes. You can restore data from these backups by creating new control
+planes. This guide explains how to use Shared Backups for disaster recovery or
+upgrade scenarios.
+
+:::info API Version Information & Available Versions
+This guide applies to **all supported versions** (v1.9-v1.15+).
+
+**Select your API version**:
+- [Spaces API v1.15 (Latest)](../../../next/reference/apis/spaces-api/)
+- [Spaces API v1.14](../../../1.14/reference/apis/spaces-api/)
+- [Spaces API v1.13](../../../1.13/reference/apis/spaces-api/)
+- [Spaces API v1.12](../../../1.12/reference/apis/spaces-api/)
+- [Spaces API v1.11](../../../1.11/reference/apis/spaces-api/)
+- [Spaces API v1.10](../../../1.10/reference/apis/spaces-api/)
+- [Spaces API v1.9](../../../1.9/reference/apis/spaces-api/)
+
+For the version support policy, see . For version compatibility details, see
+the .
+:::
+
+## Benefits
+
+The Shared Backups feature provides the following benefits:
+
+* Automatic backups for control planes without any operational overhead
+* Backup schedules for multiple control planes in a group
+* Shared Backups are available across all hosting environments of Upbound (Disconnected, Connected, or Cloud Spaces)
+
+
+## Configure a Shared Backup Config
+
+
+[SharedBackupConfig][sharedbackupconfig] is a [group-scoped][group-scoped]
+resource. Create it in a group containing one or more control planes. This
+resource configures the storage details and provider. Whenever a backup executes
+(either by schedule or manually initiated), it references a SharedBackupConfig
+to tell it where to store the snapshot.
+
+
+### Backup config provider
+
+
+The `spec.objectStorage.provider` and `spec.objectStorage.config` fields
+configure:
+
+* The object storage provider
+* The path to the provider
+* The credentials needed to communicate with the provider
+
+You can only set one provider. Upbound currently supports AWS, Azure, and GCP as
+providers.
+
+
+`spec.objectStorage.config` is a freeform map of configuration options for the object storage provider. See [Thanos object storage][thanos-object-storage] for more information on the formats for each supported cloud provider. `spec.objectStorage.bucket` and `spec.objectStorage.provider` override the corresponding required values in the config.
+
+
+
+#### AWS as a storage provider
+
+:::important
+For Cloud Spaces, static credentials are currently the only supported auth method.
+:::
+
+This example demonstrates how to use AWS as a storage provider for your backups:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackupConfig
+metadata:
+  name: default
+  namespace: default
+spec:
+  objectStorage:
+    provider: AWS
+    bucket: spaces-backup-bucket
+    config:
+      endpoint: s3.eu-west-2.amazonaws.com
+      region: eu-west-2
+    credentials:
+      source: Secret
+      secretRef:
+        name: bucket-creds
+        key: creds
+```
+
+
+This example assumes you've already created an S3 bucket called "spaces-backup-bucket" in the AWS `eu-west-2` region. The account credentials to access the bucket should exist in a secret in the same namespace as the Shared Backup Config.
+
+#### Azure as a storage provider
+
+:::important
+For Cloud Spaces, static credentials are currently the only supported auth method.
+:::
+
+This example demonstrates how to use Azure as a storage provider for your backups:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackupConfig
+metadata:
+  name: default
+  namespace: default
+spec:
+  objectStorage:
+    provider: Azure
+    bucket: upbound-backups
+    config:
+      storage_account: upbackupstore
+      container: upbound-backups
+      endpoint: blob.core.windows.net
+    credentials:
+      source: Secret
+      secretRef:
+        name: bucket-creds
+        key: creds
+```
+
+
+This example assumes you've already created an Azure storage account called `upbackupstore` and a blob container called `upbound-backups`. The storage account key to access the container should exist in a secret in the same namespace as the Shared Backup Config.
+
+
+#### GCP as a storage provider
+
+:::important
+For Cloud Spaces, static credentials are currently the only supported auth method.
+:::
+
+This example demonstrates how to use Google Cloud Storage as a storage provider for your backups:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackupConfig
+metadata:
+  name: default
+  namespace: default
+spec:
+  objectStorage:
+    provider: GCP
+    bucket: spaces-backup-bucket
+    credentials:
+      source: Secret
+      secretRef:
+        name: bucket-creds
+        key: creds
+```
+
+
+This example assumes you've already created a Cloud Storage bucket called "spaces-backup-bucket" and a service account with access to this bucket. The service account key file should exist in a secret in the same namespace as the Shared Backup Config.
+
+
+## Configure a Shared Backup Schedule
+
+
+[SharedBackupSchedule][sharedbackupschedule] is a [group-scoped][group-scoped-1] resource. You should create these in a group containing one or more control planes. This resource defines a backup schedule for control planes within its corresponding group.
+
+Below is an example of a Shared Backup Schedule that takes backups every day of all control planes having `environment: production` labels:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackupSchedule
+metadata:
+  name: daily-schedule
+  namespace: default
+spec:
+  schedule: "@daily"
+  configRef:
+    kind: SharedBackupConfig
+    name: default
+  controlPlaneSelector:
+    labelSelectors:
+      - matchLabels:
+          environment: production
+```
+
+### Define a schedule
+
+The `spec.schedule` field is a [Cron-formatted][cron-formatted] string. Some common examples are below:
+
+
+| Entry | Description |
+| ----------------- | ------------------------------------------------------------------------------------------------- |
+| `@hourly` | Run once an hour. |
+| `@daily` | Run once a day. |
+| `@weekly` | Run once a week. |
+| `0 0/4 * * *` | Run every 4 hours. |
+| `0/15 * * * 1-5` | Run every fifteenth minute on Monday through Friday. |
+| `@every 1h30m10s` | Run every 1 hour, 30 minutes, and 10 seconds. Hour is the largest measurement of time for @every. |
+
+
+### Exclude resources from the backup
+
+The `spec.excludedResources` field is an array of resource names to exclude from each backup.
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackupSchedule
+metadata:
+  name: daily-schedule
+spec:
+  excludedResources:
+    - "xclusters.aws.platformref.upbound.io"
+    - "xdatabase.aws.platformref.upbound.io"
+    - "xrolepolicyattachment.iam.aws.crossplane.io"
+```
+
+:::warning
+You must specify resource names in lowercase "resource.group" format (for example, `xclusters.aws.platformref.upbound.io`). Using only the resource kind (for example, `XCluster`) isn't supported.
+:::
+
+### Suspend a schedule
+
+Use the `spec.suspend` field to suspend the schedule. While suspended, the schedule creates no new backups, but allows running backups to complete.
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackupSchedule
+metadata:
+  name: daily-schedule
+spec:
+  suspend: true
+```
+
+### Set the time to live
+
+Set the `spec.ttl` field to define the time to live for the backup. After this time, the backup is eligible for garbage collection. If this field isn't set, the backup isn't garbage collected. The time to live is a duration, for example, `168h` for 7 days.
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackupSchedule
+metadata:
+  name: daily-schedule
+spec:
+  ttl: 168h # Backup is garbage collected after 7 days
+```
+:::tip
+By default, this setting doesn't delete uploaded files. Review the next section to define
+the deletion policy.
+:::
+
+### Define the deletion policy
+
+Set the `spec.deletionPolicy` to define backup deletion actions, including the
+deletion of the backup file from the bucket. The `deletionPolicy` value defaults
+to `Orphan`. Set it to `Delete` to remove uploaded files from the bucket. For more
+information on the backup and restore process, review the [Spaces API
+documentation][spaces-api-documentation].
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackupSchedule
+metadata:
+  name: daily-schedule
+spec:
+  ttl: 168h # Backup is garbage collected after 7 days
+  deletionPolicy: Delete # Defaults to Orphan
+```
+
+### Garbage collect backups when the schedule gets deleted
+
+Set the `spec.useOwnerReferencesInBackup` field to define whether associated backups are garbage collected when the shared schedule gets deleted. If set to `true`, deleting the schedule also deletes its backups.
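+
+For illustration, a minimal sketch of this field on the `daily-schedule` example from above (the schedule name is reused for continuity):
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackupSchedule
+metadata:
+  name: daily-schedule
+spec:
+  schedule: "@daily"
+  useOwnerReferencesInBackup: true # deleting this schedule also garbage collects its backups
+```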
+
+### Control plane selection
+
+To configure which control planes in a group you want to create a backup schedule for, use the `spec.controlPlaneSelector` field. You can either use `labelSelectors` or the `names` of control planes directly. A control plane matches if any of the label selectors match.
+
+This example matches all control planes in the group that have `environment: production` as a label:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackupSchedule
+metadata:
+  name: my-backup-schedule
+spec:
+  controlPlaneSelector:
+    labelSelectors:
+      - matchLabels:
+          environment: production
+```
+
+You can use the more complex `matchExpressions` to match labels based on an expression. This example matches control planes that have the label `environment: production` or `environment: staging`:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackupSchedule
+metadata:
+  name: my-backup-schedule
+spec:
+  controlPlaneSelector:
+    labelSelectors:
+      - matchExpressions:
+          - { key: environment, operator: In, values: [production,staging] }
+```
+
+You can also specify the names of control planes directly:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackupSchedule
+metadata:
+  name: my-backup-schedule
+spec:
+  controlPlaneSelector:
+    names:
+      - controlplane-dev
+      - controlplane-staging
+      - controlplane-prod
+```
+
+
+## Configure a Shared Backup
+
+
+
+[SharedBackup][sharedbackup] is a [group-scoped][group-scoped-2] resource. You should create these in a group containing one or more control planes. This resource causes backups to occur for control planes within its corresponding group.
+
+Below is an example of a Shared Backup that takes a backup of all control planes having `environment: production` labels:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackup
+metadata:
+  name: my-backup
+  namespace: default
+spec:
+  configRef:
+    kind: SharedBackupConfig
+    name: default
+  controlPlaneSelector:
+    labelSelectors:
+      - matchLabels:
+          environment: production
+```
+
+### Exclude resources from the backup
+
+The `spec.excludedResources` field is an array of resource names to exclude from each backup.
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackup
+metadata:
+  name: my-backup
+spec:
+  excludedResources:
+    - "xclusters.aws.platformref.upbound.io"
+    - "xdatabase.aws.platformref.upbound.io"
+    - "xrolepolicyattachment.iam.aws.crossplane.io"
+```
+
+:::warning
+You must specify resource names in lowercase "resource.group" format (for example, `xclusters.aws.platformref.upbound.io`). Using only the resource kind (for example, `XCluster`) isn't supported.
+:::
+
+### Set the time to live
+
+Set the `spec.ttl` field to define the time to live for the backup. After this time, the backup is eligible for garbage collection. If this field isn't set, the backup isn't garbage collected. The time to live is a duration, for example, `168h` for 7 days.
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackup
+metadata:
+  name: my-backup
+spec:
+  ttl: 168h # Backup is garbage collected after 7 days
+```
+
+
+### Garbage collect backups on Shared Backup deletion
+
+
+
+Set the `spec.useOwnerReferencesInBackup` field to define whether to garbage collect associated backups when a shared backup gets deleted. If set to `true`, deleting the shared backup also deletes its backups.
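+
+A similar minimal sketch, reusing the `my-backup` example from above:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackup
+metadata:
+  name: my-backup
+spec:
+  useOwnerReferencesInBackup: true # deleting this SharedBackup also garbage collects its backups
+```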
+
+### Control plane selection
+
+To configure which control planes in a group you want to create a backup for, use the `spec.controlPlaneSelector` field. You can either use `labelSelectors` or the `names` of control planes directly. A control plane matches if any of the label selectors match.
+
+This example matches all control planes in the group that have `environment: production` as a label:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackup
+metadata:
+  name: my-backup
+spec:
+  controlPlaneSelector:
+    labelSelectors:
+      - matchLabels:
+          environment: production
+```
+
+You can use the more complex `matchExpressions` to match labels based on an expression. This example matches control planes that have the label `environment: production` or `environment: staging`:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackup
+metadata:
+  name: my-backup
+spec:
+  controlPlaneSelector:
+    labelSelectors:
+      - matchExpressions:
+          - { key: environment, operator: In, values: [production,staging] }
+```
+
+You can also specify the names of control planes directly:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedBackup
+metadata:
+  name: my-backup
+spec:
+  controlPlaneSelector:
+    names:
+      - controlplane-dev
+      - controlplane-staging
+      - controlplane-prod
+```
+
+## Create a manual backup
+
+[Backup][backup] is a [group-scoped][group-scoped-3] resource that causes a single backup to occur for a control plane in its corresponding group.
+
+Below is an example of a manual Backup of a control plane:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: Backup
+metadata:
+  name: my-backup
+  namespace: default
+spec:
+  configRef:
+    kind: SharedBackupConfig
+    name: default
+  controlPlane: my-awesome-ctp
+  deletionPolicy: Delete
+```
+
+The backup's `spec.deletionPolicy` defines backup deletion actions,
+including the deletion of the backup file from the bucket. The `deletionPolicy`
+value defaults to `Orphan`. Set it to `Delete` to remove uploaded files
+from the bucket.
+For more information on the backup and restore process, review the [Spaces API documentation][spaces-api-documentation-1].
+
+
+### Choose a control plane to back up
+
+The `spec.controlPlane` field defines which control plane to execute a backup against.
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: Backup
+metadata:
+  name: my-backup
+  namespace: default
+spec:
+  controlPlane: my-awesome-ctp
+```
+
+If the control plane doesn't exist, the backup fails after multiple retry attempts.
+
+### Exclude resources from the backup
+
+The `spec.excludedResources` field is an array of resource names to exclude from the manual backup.
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: Backup
+metadata:
+  name: my-backup
+spec:
+  excludedResources:
+    - "xclusters.aws.platformref.upbound.io"
+    - "xdatabase.aws.platformref.upbound.io"
+    - "xrolepolicyattachment.iam.aws.crossplane.io"
+```
+
+:::warning
+You must specify resource names in lowercase "resource.group" format (for example, `xclusters.aws.platformref.upbound.io`). Using only the resource kind (for example, `XCluster`) isn't supported.
+:::
+
+### Set the time to live
+
+Set the `spec.ttl` field to define the time to live for the backup. After this time, the backup is eligible for garbage collection. If this field isn't set, the backup isn't garbage collected. The time to live is a duration, for example, `168h` for 7 days.
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: Backup
+metadata:
+  name: my-backup
+spec:
+  ttl: 168h # Backup is garbage collected after 7 days
+```
+
+## Restore a control plane from a backup
+
+You can restore a control plane's state from a backup. Below is an example of creating a new control plane from a previous backup called `restore-me`:
+
+
+```yaml
+apiVersion: spaces.upbound.io/v1beta1
+kind: ControlPlane
+metadata:
+  name: my-awesome-restored-ctp
+  namespace: default
+spec:
+  restore:
+    source:
+      kind: Backup
+      name: restore-me
+```
+
+
+[group-scoped]: /spaces/concepts/groups
+[group-scoped-1]: /spaces/concepts/groups
+[group-scoped-2]: /spaces/concepts/groups
+[group-scoped-3]: /spaces/concepts/groups
+[sharedbackupconfig]: /reference/apis/spaces-api/latest
+[thanos-object-storage]: https://thanos.io/tip/thanos/storage.md/
+[sharedbackupschedule]: /reference/apis/spaces-api/latest
+[cron-formatted]: https://en.wikipedia.org/wiki/Cron
+[spaces-api-documentation]: /reference/apis/spaces-api/v1_9
+[sharedbackup]: /reference/apis/spaces-api/latest
+[backup]: /reference/apis/spaces-api/latest
+[spaces-api-documentation-1]: /reference/apis/spaces-api/v1_9
+
+
+
diff --git a/spaces_versioned_docs/version-v1.9/howtos/cloud-spaces/_category_.json b/spaces_versioned_docs/version-v1.9/howtos/cloud-spaces/_category_.json
new file mode 100644
index 000000000..1e1869a38
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.9/howtos/cloud-spaces/_category_.json
@@ -0,0 +1,10 @@
+{
+  "label": "Cloud Spaces",
+  "position": 1,
+  "collapsed": true,
+  "customProps": {
+    "plan": "standard"
+  }
+}
+
+
diff --git a/spaces_versioned_docs/version-v1.9/howtos/cloud-spaces/dedicated-spaces-deployment.md b/spaces_versioned_docs/version-v1.9/howtos/cloud-spaces/dedicated-spaces-deployment.md
new file mode 100644
index 000000000..ebad9493e
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.9/howtos/cloud-spaces/dedicated-spaces-deployment.md
@@ -0,0 +1,33 @@
+---
+title: Dedicated Spaces
+sidebar_position: 4
+description: A guide to Upbound Dedicated Spaces
+plan: business
+---
+
+
+## Benefits
+
+Dedicated Spaces offer the following benefits:
+
+- **Single-tenancy.** A control plane space where Upbound guarantees you're the only tenant operating in the environment.
+- **Connectivity to your private network.** Establish secure network connections between your Dedicated Cloud Space running in Upbound and your own resources behind your private network.
+- **Reduced overhead.** Offload day-to-day operational burdens to Upbound while focusing on your job of building your platform.
+
+## Architecture
+
+A Dedicated Space is a deployment of the Upbound Spaces software inside an
+Upbound-controlled cloud account and network. The control planes you run
+there are dedicated to your organization alone.
+
+The diagram below illustrates the high-level architecture of Upbound Dedicated Spaces:
+
+![Upbound Dedicated Spaces arch](/img/managed-arch-gcp.png)
+
+## How to get access to Dedicated Spaces
+
+If you're interested in Upbound Dedicated Spaces, contact
+[Upbound][contact-us] to discuss your
+requirements and see if Dedicated Spaces are a good fit for you.
+
+[contact-us]: https://www.upbound.io/contact-us
+[managed-space]: /spaces/howtos/self-hosted/managed-spaces-deployment
diff --git a/spaces_versioned_docs/version-v1.9/howtos/cloud-spaces/gitops-on-upbound.md b/spaces_versioned_docs/version-v1.9/howtos/cloud-spaces/gitops-on-upbound.md
new file mode 100644
index 000000000..fa59a8dce
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.9/howtos/cloud-spaces/gitops-on-upbound.md
@@ -0,0 +1,318 @@
+---
+title: GitOps with Upbound Control Planes
+sidebar_position: 80
+description: An introduction to doing GitOps with control planes on Upbound Cloud Spaces
+tier: "business"
+---
+
+:::info Deployment Model
+This guide applies to **Upbound Cloud Spaces** (Dedicated and Managed Spaces). For self-hosted Spaces deployments, see [GitOps with ArgoCD in Self-Hosted Spaces](/spaces/howtos/self-hosted/gitops-with-argocd/).
+:::
+
+GitOps is an approach for managing a system by declaratively describing desired resources' configurations in Git and using controllers to realize the desired state. Upbound's control planes are compatible with this pattern and it's strongly recommended you integrate GitOps in the platforms you build on Upbound.
+
+
+## Integrate with Argo CD
+
+
+[Argo CD][argo-cd] is a project in the Kubernetes ecosystem commonly used for GitOps. You can use it in tandem with Upbound control planes to achieve GitOps flows. The sections below explain how to integrate these tools with Upbound.
+
+### Generate a kubeconfig for your control plane
+
+Use the up CLI to [generate a kubeconfig][generate-a-kubeconfig] for your control plane.
+
+```bash
+up ctx <org>/<space>/<group>/<control-plane> -f - > context.yaml
+```
+
+### Create an API token
+
+
+You need a personal access token (PAT). You create PATs on a per-user basis in the Upbound Console. Go to [My Account - API tokens][my-account-api-tokens] and select Create New Token. Give the token a name and save the secret value somewhere safe.
+
+
+### Add the up CLI init container to Argo
+
+Create a new file called `up-plugin-values.yaml` and paste the following YAML:
+
+```yaml
+controller:
+  volumes:
+    - name: up-plugin
+      emptyDir: {}
+    - name: up-home
+      emptyDir: {}
+
+  volumeMounts:
+    - name: up-plugin
+      mountPath: /usr/local/bin/up
+      subPath: up
+    - name: up-home
+      mountPath: /home/argocd/.up
+
+  initContainers:
+    - name: up-plugin
+      image: xpkg.upbound.io/upbound/up-cli:v0.39.0
+      command: ["cp"]
+      args:
+        - /usr/local/bin/up
+        - /plugin/up
+      volumeMounts:
+        - name: up-plugin
+          mountPath: /plugin
+
+server:
+  volumes:
+    - name: up-plugin
+      emptyDir: {}
+    - name: up-home
+      emptyDir: {}
+
+  volumeMounts:
+    - name: up-plugin
+      mountPath: /usr/local/bin/up
+      subPath: up
+    - name: up-home
+      mountPath: /home/argocd/.up
+
+  initContainers:
+    - name: up-plugin
+      image: xpkg.upbound.io/upbound/up-cli:v0.39.0
+      command: ["cp"]
+      args:
+        - /usr/local/bin/up
+        - /plugin/up
+      volumeMounts:
+        - name: up-plugin
+          mountPath: /plugin
+```
+
+### Install or upgrade Argo using the values file
+
+Install or upgrade Argo via Helm, including the values from the `up-plugin-values.yaml` file:
+
+```bash
+helm upgrade --install -n argocd -f up-plugin-values.yaml --reuse-values argocd argo/argo-cd
+```
+
+
+### Configure Argo CD
+
+
+To configure Argo CD for annotation resource tracking, edit the Argo CD ConfigMap in the Argo CD namespace.
+Add `application.resourceTrackingMethod: annotation` to the data section as below.
+This configuration turns off Argo CD auto pruning, preventing the deletion of Crossplane resources.
+
+Next, configure the [auto respect RBAC for the Argo CD controller][auto-respect-rbac-for-the-argo-cd-controller].
+By default, Argo CD attempts to discover some Kubernetes resource types that don't exist in a control plane.
+You must configure Argo CD to respect the cluster's RBAC rules so that Argo CD can sync.
+Add `resource.respectRBAC: normal` to the data section as below.
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: argocd-cm
+data:
+  ...
+  application.resourceTrackingMethod: annotation
+  resource.respectRBAC: normal
+```
+
+:::tip
+The `resource.respectRBAC` configuration above tells Argo to respect RBAC for _all_ cluster contexts. If you're using an Argo CD instance to manage more than only control planes, you should consider changing the `clusters` string match for the configuration to apply only to control planes. For example, if every control plane context name followed the convention of being named `controlplane-`, you could set the string match to be `controlplane-*`.
+:::
+
+
+### Create a cluster context definition
+
+
+Replace the variables and apply the following manifest to configure a new Argo cluster context definition.
+
+To configure Argo for a control plane in a Connected Space, replace `stringData.server` with the ingress URL of the control plane. This URL is what's output when using `up ctx`.
+
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  name: my-control-plane
+  namespace: argocd
+  labels:
+    argocd.argoproj.io/secret-type: cluster
+type: Opaque
+stringData:
+  name: my-control-plane-context
+  server: https://<space-host>.spaces.upbound.io/apis/spaces.upbound.io/v1beta1/namespaces/<group>/controlplanes/<control-plane>/k8s
+  config: |
+    {
+      "execProviderConfig": {
+        "apiVersion": "client.authentication.k8s.io/v1",
+        "command": "up",
+        "args": [ "org", "token" ],
+        "env": {
+          "ORGANIZATION": "",
+          "UP_TOKEN": ""
+        }
+      },
+      "tlsClientConfig": {
+        "insecure": false,
+        "caData": ""
+      }
+    }
+```
+
+
+## GitOps for Upbound resources
+
+
+Like any other cloud service, you can drive the lifecycle of Upbound Cloud resources with Crossplane. This lets you establish GitOps flows to declaratively create and manage:
+
+- [control plane groups][control-plane-groups]
+- [control planes][control-planes]
+- [Upbound IAM resources][upbound-iam-resources]
+
+Use a control plane installed with [provider-upbound][provider-upbound] and [provider-kubernetes][provider-kubernetes] to achieve this.
+
+### Provider-upbound
+
+[Provider-upbound][provider-upbound-2] is a Crossplane provider built by Upbound to interact with Upbound resources. Use _provider-upbound_ to declaratively create and manage the lifecycle of IAM resources and repositories:
+
+- [Robots][robots] and their membership to teams
+- [Teams][teams]
+- [Repositories][repositories] and [permissions][permissions] on those repositories.
+
+:::tip
+This provider defines managed resources for control planes, their auth, and permissions. These resources are only applicable to customers who run in Upbound's **Legacy Spaces** control plane hosting environments. Other customers should use provider-kubernetes, explained below, to manage the lifecycle of control planes with Crossplane.
+:::
+
+### Provider-kubernetes
+
+[Provider-kubernetes][provider-kubernetes-3] is a Crossplane provider that defines an [Object][object] resource. Use _Objects_ as general-purpose resources to wrap _any_ Kubernetes resource for Crossplane to manage.
+
+Upbound [Space APIs][space-apis] are Kube-like APIs and have implemented support for most Kubernetes-style API concepts.
You can use kubectl or any other Kubernetes-compatible tooling to interact with the API. This means you can use _provider-kubernetes_ to drive interactions with Space APIs.
+
+:::warning
+When interacting with a Cloud Space's API, the Kubernetes [watch][watch] feature **isn't implemented.** Argo CD requires _watch_ support to function as expected, meaning you can't point Argo directly at a Cloud Space until it's implemented.
+:::
+
+Use _provider-kubernetes_ to declaratively drive interactions with all [Space APIs][space-apis-1]. Wrap the desired API resource in an _Object_. See the example below for a control plane:
+
+```yaml
+apiVersion: kubernetes.crossplane.io/v1alpha2
+kind: Object
+metadata:
+  name: my-controlplane
+spec:
+  forProvider:
+    manifest:
+      apiVersion: spaces.upbound.io/v1beta1
+      kind: ControlPlane
+      metadata:
+        name: my-controlplane
+        namespace: default
+      spec:
+        crossplane:
+          autoUpgrade:
+            channel: Rapid
+```
+
+[Control plane groups][control-plane-groups-2] are a special case because they technically map to an underlying Kubernetes namespace. You should create a `kind: Namespace` object with the `spaces.upbound.io/group` label to create a control plane group in a Space. See the example below:
+
+```yaml
+apiVersion: kubernetes.crossplane.io/v1alpha2
+kind: Object
+metadata:
+  name: group1
+spec:
+  forProvider:
+    manifest:
+      apiVersion: v1
+      kind: Namespace
+      metadata:
+        name: group1
+        labels:
+          spaces.upbound.io/group: "true"
+      spec: {}
+```
+
+### Configure auth for provider-kubernetes
+
+Like any other Crossplane provider, _provider-kubernetes_ requires a valid [ProviderConfig][providerconfig] to authenticate with Upbound before interacting with its APIs. Follow the steps below to configure auth for a ProviderConfig on a control plane that you want to use to interact with Upbound resources.
+
+1. Define an environment variable for the name of your Upbound org account. Use `up org list` to retrieve this value.
+```shell
+export UPBOUND_ACCOUNT=""
+```
+
+2. Create a [personal access token][personal-access-token] and store it as an environment variable.
+```shell
+export UPBOUND_TOKEN=""
+```
+
+3. Log on to Upbound.
+```shell
+up login
+```
+
+4. Create a kubeconfig for the desired Cloud Space instance you want to interact with.
+```shell
+export CONTROLPLANE_CONFIG=/tmp/controlplane-kubeconfig
+KUBECONFIG=$CONTROLPLANE_CONFIG up ctx $UPBOUND_ACCOUNT/upbound-gcp-us-west-1 # Replace this path with whichever Cloud Space you want to communicate with.
+```
+
+5. On the control plane you want to use to interact with Upbound resources, create a secret containing the credentials:
+```shell
+kubectl -n crossplane-system create secret generic cluster-config --from-file=kubeconfig=$CONTROLPLANE_CONFIG
+kubectl -n crossplane-system create secret generic upbound-credentials --from-literal=token=$UPBOUND_TOKEN
+```
+
+6. Create a ProviderConfig that references the credentials created in the prior step. Create this resource in your control plane:
+```yaml
+apiVersion: kubernetes.crossplane.io/v1alpha1
+kind: ProviderConfig
+metadata:
+  name: default
+spec:
+  credentials:
+    source: Secret
+    secretRef:
+      namespace: crossplane-system
+      name: cluster-config
+      key: kubeconfig
+  identity:
+    type: UpboundTokens
+    source: Secret
+    secretRef:
+      name: upbound-credentials
+      namespace: crossplane-system
+      key: token
+```
+
+You can now create _Objects_ in the control plane which wrap Space APIs.
+
+[generate-a-kubeconfig]: /manuals/cli/concepts/contexts
+[control-plane-groups]: /spaces/concepts/groups
+[control-planes]: /spaces/concepts/control-planes
+[upbound-iam-resources]: /manuals/platform/concepts/identity-management
+[space-apis]: /reference/apis/spaces-api/v1_9
+[space-apis-1]: /reference/apis/spaces-api/v1_9
+[control-plane-groups-2]: /spaces/concepts/groups
+
+
+[argo-cd]: https://argo-cd.readthedocs.io/en/stable/
+[my-account-api-tokens]: https://accounts.upbound.io/settings/tokens
+[auto-respect-rbac-for-the-argo-cd-controller]: https://argo-cd.readthedocs.io/en/stable/operator-manual/declarative-setup/#auto-respect-rbac-for-controller
+[spec-writeconnectionsecrettoref]: /reference/apis/spaces-api/latest
+[auto-respect-rbac-for-the-argo-cd-controller-1]: https://argo-cd.readthedocs.io/en/stable/operator-manual/declarative-setup/#auto-respect-rbac-for-controller
+[provider-upbound]: https://marketplace.upbound.io/providers/upbound/provider-upbound
+[provider-kubernetes]: https://marketplace.upbound.io/providers/upbound/provider-kubernetes
+[provider-upbound-2]: https://marketplace.upbound.io/providers/upbound/provider-upbound
+[robots]: https://marketplace.upbound.io/providers/upbound/provider-upbound/v0.8.0/resources/iam.upbound.io/Robot/v1alpha1
+[teams]: https://marketplace.upbound.io/providers/upbound/provider-upbound/v0.8.0/resources/iam.upbound.io/Team/v1alpha1
+[repositories]: https://marketplace.upbound.io/providers/upbound/provider-upbound/v0.8.0/resources/repository.upbound.io/Repository/v1alpha1
+[permissions]: https://marketplace.upbound.io/providers/upbound/provider-upbound/v0.8.0/resources/repository.upbound.io/Permission/v1alpha1
+[provider-kubernetes-3]: https://marketplace.upbound.io/providers/upbound/provider-kubernetes
+[object]: https://marketplace.upbound.io/providers/upbound/provider-kubernetes/v0.17.0/resources/kubernetes.crossplane.io/Object/v1alpha2
+[watch]: https://kubernetes.io/docs/reference/using-api/api-concepts/#watch-bookmarks
+[providerconfig]: https://marketplace.upbound.io/providers/upbound/provider-kubernetes/v0.17.0/resources/kubernetes.crossplane.io/ProviderConfig/v1alpha1
+[personal-access-token]: https://accounts.upbound.io/settings/tokens
diff --git a/spaces_versioned_docs/version-v1.9/howtos/control-plane-topologies.md b/spaces_versioned_docs/version-v1.9/howtos/control-plane-topologies.md
new file mode 100644
index 000000000..9020e5a41
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.9/howtos/control-plane-topologies.md
@@ -0,0 +1,566 @@
+---
+title: Control Plane Topologies
+sidebar_position: 15
+description: Configure scheduling of composites to remote control planes
+---
+
+:::info API Version Information
+This guide is for the Control Plane Topology feature, which is in **private preview**. For interested customers with access to this feature, it applies to v1.12+.
+
+For related API specifications, see the [Spaces API Reference](../../../reference/apis/spaces-api/).
+:::
+
+:::important
+This feature is in private preview for select customers in Upbound Spaces. If you're interested in this feature, please [contact us](https://www.upbound.io/support/contact).
+:::
+
+Upbound's _Control Plane Topology_ feature lets you build and deploy a platform
+of multiple control planes. These control planes work together for a unified platform
+experience.
+
+
+With the _Topology_ feature, you can install resource APIs that are
+reconciled by other control planes and configure the routing that occurs between
+control planes. You can also build compositions that reference other resources
+running on your control plane or elsewhere in Upbound.
+
+This guide explains how to use Control Plane Topology APIs to install and configure
+remote APIs, and to build powerful compositions that reference other resources.
+
+## Benefits
+
+The Control Plane Topology feature provides the following benefits:
+
+* Decouple your platform architecture into independent offerings to improve your platform's software development lifecycle.
+* Install composite APIs from Configurations as CRDs which are fulfilled and reconciled by other control planes.
+* Route APIs to other control planes by configuring an _Environment_ resource, which defines a set of routable dimensions.
+
+## How it works
+
+
+Imagine the scenario where you want to let a user reference a subnet when creating a database instance. To your control plane, the `kind: database` and `kind: subnet` are independent resources. To you as the composition author, these resources have an important relationship. It may be that:
+
+- you don't want your user to ever be able to create a database without specifying a subnet.
+- you want to let them create a subnet when they create the database, if it doesn't exist.
+- you want to allow them to reuse a subnet that got created elsewhere or gets shared by another user.
+
+In each of these scenarios, you must resort to writing complex composition logic
+to handle each case. The problem is compounded when the resource exists in a
+context separate from the current control plane's context. Imagine a scenario
+where one control plane manages Database resources and a second control plane
+manages networking resources. With the _Topology_ feature, you can offload these
+concerns to Upbound machinery.
+
+
+![Control Plane Topology feature arch](/img/topology-arch.png)
+
+## Prerequisites
+
+Enable the Control Plane Topology feature in the Space you plan to run your control plane in:
+
+- Cloud Spaces: Not available yet
+- Connected Spaces: Space administrator must enable this feature
+- Disconnected Spaces: Space administrator must enable this feature
+
+
+
+## Compose resources with _ReferencedObjects_
+
+
+
+_ReferencedObject_ is a resource type available in an Upbound control plane that lets you reference other Kubernetes resources in Upbound.
+
+:::tip
+This feature is useful for composing resources that exist in a
+remote context, like another control plane. You can also use
+_ReferencedObjects_ to resolve references to any other Kubernetes object
+in the current control plane context. This could be a secret, another Crossplane
+resource, or more.
+:::
+
+### Declare the resource reference in your XRD
+
+To compose a _ReferencedObject_, you should start by adding a resource reference
+in your Composite Resource Definition (XRD). The convention for the resource
+reference follows the shape shown below:
+
+```yaml
+<kind>Ref:
+  type: object
+  properties:
+    apiVersion:
+      type: string
+      default: ""
+      enum: [ "" ]
+    kind:
+      type: string
+      default: ""
+      enum: [ "" ]
+    grants:
+      type: array
+      default: [ "Observe" ]
+      items:
+        type: string
+        enum: [ "Observe", "Create", "Update", "Delete", "*" ]
+    name:
+      type: string
+    namespace:
+      type: string
+  required:
+    - name
+```
+
+The name of the `<kind>Ref` field should reflect the kind of resource you want to reference (for example, `networkRef`).
The `apiVersion` and `kind` should be the associated API version and kind of the resource you want to reference.
+
+The `name` and `namespace` strings are inputs that let your users specify the resource instance.
+
+#### Grants
+
+The `grants` field is a special array that lets you give users the power to influence the behavior of the referenced resource. You can configure which of the available grants you let your user select and which one it defaults to. Similar in behavior to [Crossplane management policies][crossplane-management-policies], each grant value does the following:
+
+- **Observe:** The composite may observe the state of the referenced resource.
+- **Create:** The composite may create the referenced resource if it doesn't exist.
+- **Update:** The composite may update the referenced resource.
+- **Delete:** The composite may delete the referenced resource.
+- **\*:** The composite has full control over the referenced resource.
+
+Here are some examples that show how it looks in practice:
+
+ +Show example for defining the reference to another composite resource + +```yaml +apiVersion: apiextensions.crossplane.io/v1 +kind: CompositeResourceDefinition +metadata: + name: xsqlinstances.database.platform.upbound.io +spec: + type: object + properties: + parameters: + type: object + properties: + networkRef: + type: object + properties: + apiVersion: + type: string + default: "networking.platform.upbound.io" + enum: [ "networking.platform.upbound.io" ] + grants: + type: array + default: [ "Observe" ] + items: + type: string + enum: [ "Observe" ] + kind: + type: string + default: "Network" + enum: [ "Network" ] + name: + type: string + namespace: + type: string + required: + - name +``` + +
+ + +
+Show example for defining the reference to a secret +```yaml +apiVersion: apiextensions.crossplane.io/v1 +kind: CompositeResourceDefinition +metadata: + name: xsqlinstances.database.platform.upbound.io +spec: + type: object + properties: + parameters: + type: object + properties: + secretRef: + type: object + properties: + apiVersion: + type: string + default: "v1" + enum: [ "v1" ] + grants: + type: array + default: [ "Observe" ] + items: + type: string + enum: [ "Observe", "Create", "Update", "Delete", "*" ] + kind: + type: string + default: "Secret" + enum: [ "Secret" ] + name: + type: string + namespace: + type: string + required: + - name +``` +
+ +### Manually add the jsonPath + +:::important +This step is a known limitation of the preview. We're working on tooling that +removes the need for authors to do this step. +::: + +During the preview timeframe of this feature, you must add an annotation by hand +to the XRD. In your XRD's `metadata.annotations`, set the +`references.upbound.io/schema` annotation. It should be a JSON string in the +following format: + +```json +{ + "apiVersion": "references.upbound.io/v1alpha1", + "kind": "ReferenceSchema", + "references": [ + { + "jsonPath": ".spec.parameters.secretRef", + "kinds": [ + { + "apiVersion": "v1", + "kind": "Secret" + } + ] + } + ] +} +``` + +Flatten this JSON into a string and set the annotation on your XRD. View the +example below for an illustration: + +
+Show example setting the references.upbound.io/schema annotation +```yaml +apiVersion: apiextensions.crossplane.io/v1 +kind: CompositeResourceDefinition +metadata: + name: xthings.networking.acme.com + annotations: + references.upbound.io/schema: '{"apiVersion":"references.upbound.io/v1alpha1","kind":"ReferenceSchema","references":[{"jsonPath":".spec.secretRef","kinds":[{"apiVersion":"v1","kind":"Secret"}]},{"jsonPath":".spec.configMapRef","kinds":[{"apiVersion":"v1","kind":"ConfigMap"}]}]}' +``` +
+ +
+Show example for setting multiple references in the references.upbound.io/schema annotation
+```yaml
+apiVersion: apiextensions.crossplane.io/v1
+kind: CompositeResourceDefinition
+metadata:
+  name: xthings.networking.acme.com
+  annotations:
+    references.upbound.io/schema: '{"apiVersion":"references.upbound.io/v1alpha1","kind":"ReferenceSchema","references":[{"jsonPath":".spec.parameters.secretRef","kinds":[{"apiVersion":"v1","kind":"Secret"}]},{"jsonPath":".spec.parameters.configMapRef","kinds":[{"apiVersion":"v1","kind":"ConfigMap"}]}]}'
+```
+ + +You can use a VSCode extension like [vscode-pretty-json][vscode-pretty-json] to make this task easier. + + +### Compose a _ReferencedObject_ + +To pair with the resource reference declared in your XRD, you must compose the referenced resource. Use the _ReferencedObject_ resource type to bring the resource into your composition. _ReferencedObject_ has the following schema: + +```yaml +apiVersion: references.upbound.io/v1alpha1 +kind: ReferencedObject +spec: + managementPolicies: + - Observe + deletionPolicy: Orphan + composite: + apiVersion: + kind: + name: + jsonPath: .spec.parameters.secretRef +``` + +The `spec.composite.apiVersion` and `spec.composite.kind` should match the API version and kind of the `compositeTypeRef` declared in your composition. The `spec.composite.name` should be the name of the composite resource instance. + +The `spec.composite.jsonPath` should be the path to the root of the resource ref you declared in your XRD. + +
+Show example for composing a resource reference to a secret + +```yaml +apiVersion: apiextensions.crossplane.io/v1 +kind: Composition +metadata: + name: demo-composition +spec: + compositeTypeRef: + apiVersion: networking.acme.com/v1alpha1 + kind: XThing + mode: Pipeline + pipeline: + - step: patch-and-transform + functionRef: + name: crossplane-contrib-function-patch-and-transform + input: + apiVersion: pt.fn.crossplane.io/v1beta1 + kind: Resources + resources: + - name: secret-ref-object + base: + apiVersion: references.upbound.io/v1alpha1 + kind: ReferencedObject + spec: + managementPolicies: + - Observe + deletionPolicy: Orphan + composite: + apiVersion: networking.acme.com/v1alpha1 + kind: XThing + name: TO_BE_PATCHED + jsonPath: .spec.parameters.secretRef + patches: + - type: FromCompositeFieldPath + fromFieldPath: metadata.name + toFieldPath: spec.composite.name +``` +
+
+When you declare a resource reference in your XRD, Upbound handles resolution of the desired resource.
+
+## Deploy APIs
+
+To configure routing resource requests between control planes, you need to deploy APIs in at least two control planes.
+
+### Deploy into a service-level control plane
+
+Package the APIs you build into a Configuration package and deploy it on a
+control plane in an Upbound Space. In Upbound, it's common to refer to the
+control plane where the Configuration package is deployed as a **service-level
+control plane**. This control plane runs the controllers that process the API
+requests and provision underlying resources. In a later section, you learn how
+you can use _Topology_ features to [configure routing][configure-routing].
+
+### Deploy as Remote APIs on a platform control plane
+
+You should use the same package source as deployed in the **service-level
+control planes**, but this time deploy the Configuration in a separate control
+plane as a _RemoteConfiguration_. The _RemoteConfiguration_ installs Kubernetes
+CustomResourceDefinitions for the APIs defined in the Configuration package, but
+no controllers get deployed.
+
+### Install a _RemoteConfiguration_
+
+_RemoteConfiguration_ is a resource type available in Upbound managed control
+planes that acts like a Crossplane [Configuration][configuration]
+package. Unlike standard Crossplane Configurations, which install XRDs,
+compositions, and functions into a desired control plane, _RemoteConfigurations_
+install only the CRDs for claimable composite resource types.
+
+#### Install directly
+
+Install a _RemoteConfiguration_ by defining the following and applying it to
+your control plane:
+
+```yaml
+apiVersion: pkg.upbound.io/v1alpha1
+kind: RemoteConfiguration
+metadata:
+  name:
+spec:
+  package:
+```
+
+#### Declare as a project dependency
+
+You can declare _RemoteConfigurations_ as dependencies in your control plane's
+[project file][project-file]. Use the up CLI to add the dependency, providing
+the `--remote` flag:
+
+```bash
+up dep add xpkg.upbound.io/upbound/remote-configuration --remote
+```
+
+This command adds a declaration in the `spec.apiDependencies` stanza of your
+project's `upbound.yaml` as demonstrated below:
+
+```yaml
+apiVersion: meta.dev.upbound.io/v1alpha1
+kind: Project
+metadata:
+  name: service-controlplane
+spec:
+  apiDependencies:
+    - configuration: xpkg.upbound.io/upbound/remote-configuration
+      version: '>=v0.0.0'
+      dependsOn:
+        - provider: xpkg.upbound.io/upbound/provider-kubernetes
+          version: '>=v0.0.0'
+```
+
+Like a Configuration, a _RemoteConfigurationRevision_ gets created when the
+package gets installed on a control plane. Unlike Configurations, XRDs and
+compositions **don't** get installed by a _RemoteConfiguration_. Only the CRDs
+for claimable composite types get installed and Crossplane thereafter manages
+their lifecycle. You can tell when a CRD gets installed by a
+_RemoteConfiguration_ because it has the `internal.scheduling.upbound.io/remote:
+true` label:
+
+```yaml
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  name: things.networking.acme.com
+  labels:
+    internal.scheduling.upbound.io/remote: "true"
+```
+
+## Use an _Environment_ to route resources
+
+_Environment_ is a resource type available in Upbound control planes that works
+in tandem with resources installed by _RemoteConfigurations_. _Environment_ is a
+namespace-scoped resource that lets you configure how to route remote resources
+to other control planes by a set of user-defined dimensions.
+
+### Define a routing dimension
+
+To establish a routing dimension between two control planes, you must do two
+things:
+
+1. Annotate the service control plane with the name and value of a dimension.
+2. Configure an environment on another control plane with a dimension matching the field and value of the service control plane.
+
+The example below demonstrates the creation of a service control plane with a
+`region` dimension:
+
+```yaml
+apiVersion: spaces.upbound.io/v1beta1
+kind: ControlPlane
+metadata:
+  labels:
+    dimension.scheduling.upbound.io/region: "us-east-1"
+  name: prod-1
+  namespace: default
+spec:
+```
+
+Upbound's Spaces controller keeps an inventory of all declared dimensions and
+listens for control planes to route to them.
+
+### Create an _Environment_
+
+Next, create an _Environment_ on a separate control plane, referencing the
+dimension from before. The example below demonstrates routing all remote
+resource requests in the `default` namespace of the control plane based on a
+single `region` dimension:
+
+```yaml
+apiVersion: scheduling.upbound.io/v1alpha1
+kind: Environment
+metadata:
+  name: default
+  namespace: default
+spec:
+  dimensions:
+    region: us-east-1
+```
+
+You can specify whichever dimensions you want. The example below demonstrates
+multiple dimensions:
+
+```yaml
+apiVersion: scheduling.upbound.io/v1alpha1
+kind: Environment
+metadata:
+  name: default
+  namespace: default
+spec:
+  dimensions:
+    region: us-east-1
+    env: prod
+    offering: databases
+```
+
+In order for the routing controller to match, _all_ dimensions must match for a
+given service control plane.
+
+You can specify dimension overrides on a per-resource group basis. This lets you
+configure default routing rules for a given _Environment_ and override routing
+on a per-offering basis.
+
+```yaml
+apiVersion: scheduling.upbound.io/v1alpha1
+kind: Environment
+metadata:
+  name: default
+  namespace: default
+spec:
+  dimensions:
+    region: us-east-1
+  resourceGroups:
+    - name: database.platform.upbound.io # database
+      dimensions:
+        region: "us-east-1"
+        env: "prod"
+        offering: "databases"
+    - name: networking.platform.upbound.io # networks
+      dimensions:
+        region: "us-east-1"
+        env: "prod"
+        offering: "networks"
+```
+
+### Confirm the configured route
+
+After you create an _Environment_ on a control plane, the selected routes get
+reported in the _Environment's_ `.status.resourceGroups`. This is illustrated
+below:
+
+```yaml
+apiVersion: scheduling.upbound.io/v1alpha1
+kind: Environment
+metadata:
+  name: default
+...
+status:
+  resourceGroups:
+    - name: database.platform.upbound.io # database
+      proposed:
+        controlPlane: ctp-1
+        group: default
+        space: upbound-gcp-us-central1
+        dimensions:
+          region: "us-east-1"
+          env: "prod"
+          offering: "databases"
+```
+
+If you don't see a response in the `.status.resourceGroups`, this indicates that
+no match was found or that an error occurred while establishing the route.
+
+:::tip
+There's no limit to the number of control planes you can route to. You can also
+stack routing and form your own topology of control planes, with multiple layers
+of routing.
+:::
+
+### Limitations
+
+
+Routing from one control plane to another is currently scoped to control planes
+that exist in a single Space. You can't route resource requests to control
+planes across a Space boundary.
+
+
+[project-file]: /manuals/cli/howtos/project
+[contact-us]: https://www.upbound.io/support/contact
+[crossplane-management-policies]: https://docs.crossplane.io/latest/managed-resources/managed-resources/#managementpolicies
+[vscode-pretty-json]: https://marketplace.visualstudio.com/items?itemName=chrismeyers.vscode-pretty-json
+[configure-routing]: #use-an-environment-to-route-resources
+[configuration]: https://docs.crossplane.io/latest/packages/providers
diff --git a/spaces_versioned_docs/version-v1.9/howtos/ctp-connector.md b/spaces_versioned_docs/version-v1.9/howtos/ctp-connector.md
new file mode 100644
index 000000000..b2cc48c49
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.9/howtos/ctp-connector.md
@@ -0,0 +1,508 @@
+---
+title: Control Plane Connector
+sidebar_position: 80
+description: A guide for how to connect a Kubernetes app cluster to a control plane in Upbound using the Control Plane connector feature
+plan: "standard"
+---
+
+
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+). Control Plane Connector functionality is consistent across versions.
+
+For related API specifications and resources, see the [Spaces API Reference](../../../reference/apis/spaces-api/).
+:::
+
+Control Plane Connector connects arbitrary Kubernetes application clusters outside of
+Upbound Spaces to your control planes running in Upbound Spaces.
+This lets you interact with your control plane's API from the app cluster. The claim APIs and the namespaced XR APIs
+you define via CompositeResourceDefinitions (XRDs) in the control plane are available in
+your app cluster alongside Kubernetes workload APIs like Pod. Control Plane Connector
+enables the same experience as a locally installed Crossplane.
+
+![control plane connector operations flow](/img/ConnectorFlow.png)
+
+### Control Plane Connector operations
+
+Control Plane Connector leverages the [Kubernetes API AggregationLayer][kubernetes-api-aggregationlayer]
+to create an extension API server and serve the claim APIs and the namespaced XR APIs in the control plane. It
+discovers the claim APIs and the namespaced XR APIs available in the control plane and registers corresponding
+APIService resources on the app cluster. Those APIService resources refer to the
+extension API server of Control Plane Connector.
+
+The claim APIs and the namespaced XR APIs are available in your Kubernetes cluster, just like all native
+Kubernetes APIs.
+
+The Control Plane Connector processes every request targeting the claim APIs and the namespaced XR APIs and makes the
+relevant requests to the connected control plane.
+
+Only the connected control plane stores and processes all claims and namespaced XRs created in the app
+cluster, eliminating any storage use at the application cluster. The Control Plane
+Connector provisions a target namespace at the control plane for the app cluster and stores
+all claims and namespaced XRs in this target namespace.
+
+For managing the claims and namespaced XRs, the Control Plane Connector creates a unique identifier for a
+resource by combining input parameters from claims, including:
+- `metadata.name`
+- `metadata.namespace`
+- your cluster name
+
+
+It employs SHA-256 hashing to generate a hash value and then extracts the first
+16 characters of that hash. This ensures the resulting identifier remains within
+the 64-character limit in Kubernetes.
+
+
+For instance, if a claim named `my-bucket` exists in the `test` namespace in
+`cluster-dev`, the system calculates the SHA-256 hash from
+`my-bucket-x-test-x-00000000-0000-0000-0000-000000000000` and takes the first 16
+characters. The control plane side then names the claim `claim-c603e518969b413e`.
+
+For namespaced XRs, the process is similar, only the prefix is different.
+The name becomes `nxr-c603e518969b413e`.
+
+
+### Installation
+
+
+
+
+
+Log in with the up CLI:
+
+```bash
+up login
+```
+
+Connect your app cluster to a namespace in an Upbound control plane with `up controlplane connector install <control-plane> <namespace>`. This command creates a user token and installs the Control Plane Connector to your cluster. It's recommended you create a values file called `connector-values.yaml` and provide the values below. Select the tab according to which environment your control plane is running in.
+
+
+
+
+
+
+```yaml
+upbound:
+  # This is your org account in Upbound e.g. the name displayed after executing `up org list`
+  account:
+  # This is a personal access token generated in the Upbound Console
+  token:
+
+spaces:
+  # Use this host if your control plane is running in Upbound's GCP Cloud Space; otherwise use upbound-aws-us-east-1.spaces.upbound.io
+  host: "upbound-gcp-us-west-1.spaces.upbound.io"
+  insecureSkipTLSVerify: true
+  controlPlane:
+    # The name of the control plane you want the Connector to attach to
+    name:
+    # The control plane group the control plane resides in
+    group:
+    # The namespace within the control plane to sync claims from the app cluster to. NOTE: This must be created before you install the connector.
+    claimNamespace:
+```
+
+
+
+
+1. Create a [kubeconfig][kubeconfig] for the control plane. Update your Upbound context to the path for your desired control plane.
+```shell
+up login
+up ctx <org>/upbound-gcp-us-central-1/default/your-control-plane
+up ctx . -f - > context.yaml
+```
+
+2. Write it to a secret in the cluster where you plan to
+install the Control Plane Connector.
+```shell
+kubectl create secret generic my-controlplane-kubeconfig --from-file=context.yaml
+```
+
+3. Reference this secret in the
+`spaces.controlPlane.kubeconfigSecret` field below.
+
+```yaml
+spaces:
+  controlPlane:
+    # The namespace within the control plane to sync claims from the app cluster to. NOTE: This must be created before you install the connector.
+    claimNamespace:
+    kubeconfigSecret:
+      name: my-controlplane-kubeconfig
+      key: kubeconfig
+```
+
+
+
+
+
+
+Provide the values file above when you run the CLI command:
+
+
+```bash {copy-lines="3"}
+up controlplane connector install my-control-plane my-app-ns-1 --file=connector-values.yaml
+```
+
+The claim APIs and the namespaced XR APIs from your control plane are now visible in the cluster.
+You can verify this with `kubectl api-resources`.
+
+```bash
+kubectl api-resources
+```
+
+### Uninstall
+
+Disconnect an app cluster that you previously installed the Control Plane Connector on by
+running the following:
+
+```bash
+up ctp connector uninstall
+```
+
+This command uninstalls the Helm chart for the Control Plane Connector from an app
+cluster. It moves any claims in the app cluster into the control plane
+at the specified namespace.
+
+:::tip
+Make sure your kubeconfig's current context is pointed at the app cluster where
+you want to uninstall Control Plane Connector from.
+:::
+
+
+
+
+It's recommended you create a values file called `connector-values.yaml` and
+provide the values below. 
Select the tab according to which environment your
+control plane is running in.
+
+
+
+
+
+
+```yaml
+upbound:
+  # This is your org account in Upbound e.g. the name displayed after executing `up org list`
+  account:
+  # This is a personal access token generated in the Upbound Console
+  token:
+
+spaces:
+  # Upbound GCP US-West-1     upbound-gcp-us-west-1.spaces.upbound.io
+  # Upbound AWS US-East-1     upbound-aws-us-east-1.spaces.upbound.io
+  # Upbound GCP US-Central-1  upbound-gcp-us-central-1.spaces.upbound.io
+  host: ""
+  insecureSkipTLSVerify: true
+  controlPlane:
+    # The name of the control plane you want the Connector to attach to
+    name:
+    # The control plane group the control plane resides in
+    group:
+    # The namespace within the control plane to sync claims from the app cluster to.
+    # NOTE: This must be created before you install the connector.
+    claimNamespace:
+```
+
+
+
+
+Create a [kubeconfig][kubeconfig-1] for the
+control plane. Write it to a secret in the cluster where you plan to
+install the Control Plane Connector. Reference this secret in the
+`spaces.controlPlane.kubeconfigSecret` field below.
+
+```yaml
+spaces:
+  controlPlane:
+    # The namespace within the control plane to sync claims from the app cluster to. NOTE: This must be created before you install the connector.
+    claimNamespace:
+    kubeconfigSecret:
+      name: my-controlplane-kubeconfig
+      key: kubeconfig
+```
+
+
+
+
+
+
+Provide the values file above when you `helm install` the Control Plane Connector:
+
+
+```bash
+helm install --wait mcp-connector oci://xpkg.upbound.io/spaces-artifacts/mcp-connector -n kube-system -f connector-values.yaml
+```
+:::tip
+Create an API token from the Upbound user account settings page in the console by following [these instructions][these-instructions].
+:::
+
+### Uninstall
+
+You can uninstall Control Plane Connector with Helm by running the following:
+
+```bash
+helm uninstall mcp-connector
+```
+
+
+
+
+
+### Example usage
+
+This example creates a control plane using [Configuration
+EKS][configuration-eks]. `KubernetesCluster` is
+available as a claim API in your control plane. The following is [an
+example][an-example]
+object you can create in your control plane.
+
+```yaml
+apiVersion: k8s.starter.org/v1alpha1
+kind: KubernetesCluster
+metadata:
+  name: my-cluster
+  namespace: default
+spec:
+  id: my-cluster
+  parameters:
+    nodes:
+      count: 3
+      size: small
+    services:
+      operators:
+        prometheus:
+          version: "34.5.1"
+  writeConnectionSecretToRef:
+    name: my-cluster-kubeconfig
+```
+
+After connecting your Kubernetes app cluster to the control plane, you
+can create the `KubernetesCluster` object in your app cluster. Although the
+object exists in your local cluster, the actual resource lives in your managed control
+plane inside Upbound.
+
+```bash {copy-lines="3"}
+# Applying the claim YAML above.
+# kubectl is set up to talk with your Kubernetes cluster.
+kubectl apply -f claim.yaml
+
+
+kubectl get claim -A
+NAME         SYNCED   READY   CONNECTION-SECRET       AGE
+my-cluster   True     True    my-cluster-kubeconfig   2m
+```
+
+Once Kubernetes creates the object, view the Upbound Console to see your object.
+
+![Claim by connector in console](/img/ClaimInConsole.png)
+
+You can interact with the object through your cluster just as if it
+lived in your cluster.
+
+### Migration to control planes
+
+This guide details the migration of a Crossplane installation to Upbound-managed
+control planes using the Control Plane Connector to manage claims on an application
+cluster.
+
+![migration flow application cluster to control plane](/img/ConnectorMigration.png)
+
+#### Export all resources
+
+Before proceeding, ensure that you have set the correct kubecontext for your application
+cluster.
+
+```bash
+up controlplane migration export --pause-before-export --output=my-export.tar.gz --yes
+```
+
+This command performs the following:
+- Pauses all claim, composite, and managed resources before export.
+- Scans the control plane for resource types.
+- Exports Crossplane and native resources.
+- Archives the exported state into `my-export.tar.gz`.
+
+Example output:
+```bash
+Exporting control plane state...
+   ✓ Pausing all claim resources before export... 1 resources paused! ⏸️
+   ✓ Pausing all composite resources before export... 7 resources paused! ⏸️
+   ✓ Pausing all managed resources before export... 34 resources paused! ⏸️
+   ✓ Scanning control plane for types to export... 231 types found! 👀
+   ✓ Exporting 231 Crossplane resources...125 resources exported! 📤
+   ✓ Exporting 3 native resources...19 resources exported! 📤
+   ✓ Archiving exported state... archived to "my-export.tar.gz"! 📦
+
+Successfully exported control plane state!
+```
+
+#### Import all resources
+
+You restore the exported resources into a target control plane, which serves
+as the destination for the Control Plane Connector.
+
+
+Log into Upbound and select the correct context:
+
+```bash
+up login
+up ctx
+up ctp create ctp-a
+```
+
+Output:
+```bash
+ctp-a created
+```
+
+Verify that the core Crossplane version on the new managed control plane
+matches the version running on the application cluster.
+
+Use the following command to import the resources:
+```bash
+up controlplane migration import -i my-export.tar.gz \
+  --unpause-after-import \
+  --mcp-connector-cluster-id=my-appcluster \
+  --mcp-connector-claim-namespace=my-appcluster
+```
+
+This command:
+- Restores base resources
+- Waits for XRDs and packages to establish
+- Imports Claims and XRs
+- Finalizes the import and resumes managed resources
+
+Note: `--mcp-connector-cluster-id` needs to be unique per application cluster,
+and `--mcp-connector-claim-namespace` is the namespace the system creates
+during the import.
+
+Example output:
+```bash
+Importing control plane state...
+   ✓ Reading state from the archive... Done! 👀
+   ✓ Importing base resources... 56 resources imported!📥
+   ✓ Waiting for XRDs... Established! ⏳
+   ✓ Waiting for Packages... Installed and Healthy! ⏳
+   ✓ Importing remaining resources... 88 resources imported! 📥
+   ✓ Finalizing import... Done! 🎉
+   ✓ Unpausing managed resources ... Done! ▶️
+
+Successfully imported control plane state!
+```
+
+#### Verify imported claims
+
+
+The Control Plane Connector renames all claims and adds additional labels to them.
+
+```bash
+kubectl get claim -A
+```
+
+Example output:
+```bash
+NAMESPACE       NAME                                                         SYNCED   READY   CONNECTION-SECRET             AGE
+my-appcluster   cluster.aws.platformref.upbound.io/claim-e708ff592b974f51   True     True    platform-ref-aws-kubeconfig   3m17s
+```
+
+Inspect the labels:
+```bash
+kubectl get -n my-appcluster cluster.aws.platformref.upbound.io/claim-e708ff592b974f51 -o yaml | yq .metadata.labels
+```
+
+Example output:
+```bash
+mcp-connector.upbound.io/app-cluster: my-appcluster
+mcp-connector.upbound.io/app-namespace: default
+mcp-connector.upbound.io/app-resource-name: example
+```
+
+#### Cleanup the app cluster
+
+Remove all Crossplane-related resources from the application cluster, including:
+
+- Managed Resources
+- Claims
+- Compositions
+- XRDs
+- Packages (Functions, Configurations, Providers)
+- Crossplane and all associated CRDs
+
+
+#### Install Control Plane Connector
+
+
+Follow the preceding installation guide and configure the `connector-values.yaml`:
+
+```yaml
+# NOTE: clusterID needs to match --mcp-connector-cluster-id used in the import on the managed control plane
+clusterID: my-appcluster
+upbound:
+  account:
+  token:
+
+spaces:
+  host: ""
+  insecureSkipTLSVerify: true
+  controlPlane:
+    name:
+    group:
+    # NOTE: This is the --mcp-connector-claim-namespace used during the import to the control plane
+    claimNamespace:
+```
+Once the Control Plane Connector installs, verify that the control plane's APIs
+exist in the application cluster:
+
+```bash
+kubectl api-resources | grep platform
+```
+
+Example output:
+```bash
+awslbcontrollers   aws.platform.upbound.io/v1alpha1       true   AWSLBController
+podidentities      aws.platform.upbound.io/v1alpha1       true   PodIdentity
+sqlinstances       aws.platform.upbound.io/v1alpha1       true   SQLInstance
+clusters           aws.platformref.upbound.io/v1alpha1    true   Cluster
+osss               observe.platform.upbound.io/v1alpha1   true   Oss
+apps               platform.upbound.io/v1alpha1           true   App
+```
+
+Verify the claims restored from the control plane appear in the application cluster:
+
+```bash
+kubectl get claim -A
+```
+
+Example output:
+```bash
+NAMESPACE   NAME                                         SYNCED   READY   CONNECTION-SECRET             AGE
+default     cluster.aws.platformref.upbound.io/example   True     True    platform-ref-aws-kubeconfig   127m
+```
+
+With this guide, you migrated your Crossplane installation to Upbound-managed
+control planes. This ensures seamless integration with your
+application cluster using the Control Plane Connector.
+
+### Connect multiple app clusters to a control plane
+
+Claims are stored in a unique namespace in the Upbound control plane.
+Each connected cluster gets its own control plane namespace.
+
+![Multi-cluster architecture with control plane connector](/img/ConnectorMulticlusterArch.png)
+
+There's no limit on the number of clusters connected to a single control plane.
+Control plane operators can see all their infrastructure in a central control
+plane.
+
+Without using control planes and Control Plane Connector, users have to install
+Crossplane and providers on each cluster. Each cluster requires configuration for
+providers with necessary credentials. With a single control plane to which multiple
+clusters connect through Upbound tokens, you don't need to give out any cloud
+credentials to the clusters.
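+
+If you need to map a synced claim back to its hashed name on the control
+plane, you can reproduce the hash locally. A minimal sketch, assuming the
+input string follows the `<claim>-x-<namespace>-x-<cluster-id>` format
+described at the start of this guide (`sha256sum` may be `shasum -a 256` on
+macOS):
+
+```bash
+# First 16 hex characters of the SHA-256 hash of the documented input string
+echo -n "my-bucket-x-test-x-00000000-0000-0000-0000-000000000000" \
+  | sha256sum | cut -c1-16
+
+# Prefix the result with "claim-" (or "nxr-" for namespaced XRs) to get the
+# name used on the control plane side, e.g. claim-c603e518969b413e.
+```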
+
+
+[kubeconfig]: /manuals/cli/howtos/context-config/#generate-a-kubeconfig-for-a-control-plane-in-a-group
+[kubeconfig-1]:/spaces/concepts/control-planes/#connect-directly-to-your-control-plane
+[these-instructions]:/manuals/console/#create-a-personal-access-token
+[kubernetes-api-aggregationlayer]: https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation/
+[configuration-eks]: https://github.com/upbound/configuration-eks
+[an-example]: https://github.com/upbound/configuration-eks/blob/9f86b6d/.up/examples/cluster.yaml
diff --git a/spaces_versioned_docs/version-v1.9/howtos/debugging-a-ctp.md b/spaces_versioned_docs/version-v1.9/howtos/debugging-a-ctp.md
new file mode 100644
index 000000000..521271e40
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.9/howtos/debugging-a-ctp.md
@@ -0,0 +1,128 @@
+---
+title: Debugging issues on a control plane
+sidebar_position: 70
+description: A guide for how to debug resources on a control plane running in Upbound.
+---
+
+This guide explains how to identify and fix issues on a control plane.
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+). Debugging methods are consistent across versions.
+
+For related control plane specifications, see the [Spaces API Reference](../../../reference/apis/spaces-api/).
+:::
+
+## Start from Upbound Console
+
+
+The Upbound [Console][console] has a built-in control plane explorer experience
+that surfaces status and events for the resources on your control plane. The
+explorer is claim-based. Resources in this view exist only if they exist in the
+reference chain originating from a claim. This view is a helpful starting point
+if you are attempting to debug an issue originating from a claim.
+
+:::tip
+If you directly create Crossplane Managed Resources (`MR`s) or Composite
+Resources (`XR`s), they won't render in the explorer.
+:::
+
+### Example
+
+The example below uses the control plane explorer view to inspect why a claim for an EKS Cluster isn't healthy.
+
+#### Check the health status of claims
+
+From the API type card, two claims branch from it: one shows a healthy green icon, while the other shows an unhealthy red icon.
+
+![Use control plane explorer view to see status of claims](/img/debug-overview.png)
+
+Select `More details` on the unhealthy claim card and Upbound shows details for the claim.
+
+![Use control plane explorer view to see details of claims](/img/debug-claim-more-details.png)
+
+Looking at the three events for this claim:
+
+- **ConfigureCompositeResource**: this event indicates Upbound created the claimed Composite Resource (`XR`).
+
+- **BindCompositeResource**: this indicates the Composite Resource (`XR`) that's being "claimed" isn't ready yet. A claim doesn't show `HEALTHY` until the XR it references is ready.
+
+- **ConfigureCompositeResource**: the error saying, `cannot apply composite resource...the object has been modified; please apply your changes to the latest version and try again` is a generic event from Crossplane resources. It's safe to ignore this error.
+
+Next, look at the `status` field of the rendered YAML for the resource.
+
+![Use control plane explorer view to see status details of claims](/img/debug-claim-status.png)
+
+The status reports a similar message as the event stream: this claim is waiting for a Composite Resource to be ready. Based on this, investigate the Composite Resource referenced by this claim next.
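+
+For reference, the `status` of a claim stuck in this state looks roughly like
+the following (an illustrative sketch; exact reasons and messages vary by
+Crossplane version):
+
+```yaml
+status:
+  conditions:
+    - type: Synced
+      status: "True"
+      reason: ReconcileSuccess
+    - type: Ready
+      status: "False"
+      reason: Unavailable
+      message: Claim is waiting for composite resource to become ready
+```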
+
+#### Check the health status of the Composite Resource
+
+
+The control plane explorer only shows the claim cards by default. Selecting the claim card renders the rest of the Crossplane resource tree associated with the selected claim.
+
+
+The previous claim expands into this screenshot:
+
+![Use control plane explorer view to expand tree of claim](/img/debug-claim-expansion.png)
+
+This renders the XR referenced by the claim (along with all its references). You can see the XR is showing the same unhealthy status icon in its card. Notice the XR itself has two nested XRs. One of the nested XRs shows a healthy green icon on its card, while the other shows an unhealthy red icon. Like the claim, a Composite Resource doesn't show healthy until all referenced resources also show healthy.
+
+#### Inspecting Managed Resources
+
+Selecting `more details` to inspect one of the unhealthy Managed Resources shows the following:
+
+![Use control plane explorer view to view events for an MR](/img/debug-mr-event.png)
+
+This event reveals it's unhealthy because it's waiting on a reference to another Managed Resource. Searching the rendered YAML of the MR for this resource shows the following:
+
+![Use control plane explorer view to view status for an MR](/img/debug-mr-status.png)
+
+The rendered YAML shows this MR is referencing a sibling MR that shares the same controller. The same parent XR created both of these managed resources. Inspect the sibling MR to see what its status is.
+
+![Use control plane explorer view to view status for a sibling MR](/img/debug-mr-dependency-status.png)
+
+The sibling MR event stream shows the Provider processed the resource create request. Ignore the `CannotInitalizeManagedResrouce` event. EKS clusters can take 15 minutes or more to provision in AWS. The root cause is benign: all the resources are still provisioning. Waiting longer and then looking at the control plane explorer again shows all resources are healthy. For reference, below is an example status field for a resource that's healthy and provisioned.
+
+```yaml
+...
+status:
+  atProvider:
+    id: team-b-app-cluster-bhwfb-hwtgs-20230403135452772300000008
+  conditions:
+    - lastTransitionTime: '2023-04-03T13:56:35Z'
+      reason: Available
+      status: 'True'
+      type: Ready
+    - lastTransitionTime: '2023-04-03T13:54:02Z'
+      reason: ReconcileSuccess
+      status: 'True'
+      type: Synced
+    - lastTransitionTime: '2023-04-03T13:54:53Z'
+      reason: Success
+      status: 'True'
+      type: LastAsyncOperation
+    - lastTransitionTime: '2023-04-03T13:54:53Z'
+      reason: Finished
+      status: 'True'
+      type: AsyncOperation
+```
+
+### Control plane explorer limitations
+
+The control plane explorer view is currently designed around claims. The control plane explorer doesn't inspect other Crossplane resources. To inspect other Crossplane resources, use the `up` CLI.
+
+Some examples of Crossplane resources that require the `up` CLI:
+
+- Managed Resources that aren't associated with a claim
+- Composite Resources that aren't associated with a claim
+- The status of _deleting_ resources
+- ProviderConfigs
+- Provider events
+
+## Use direct CLI access
+
+If your preference is to use a terminal instead of a GUI, Upbound supports direct access to the API server of the control plane. Use [`up ctx`][up-ctx] to connect directly to your control plane.
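+
+For example, a minimal CLI debugging session might look like the following
+(the context path and resource group are illustrative, not from this guide):
+
+```bash
+# Point kubectl at the control plane through the Spaces API
+up ctx "my-org/my-space/default/my-control-plane"
+
+# Inspect resources the explorer doesn't show
+kubectl get managed                          # all managed resources, claimed or not
+kubectl get providerconfigs.aws.upbound.io   # assumes the AWS provider family is installed
+kubectl get events --field-selector type=Warning
+```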
+
+
+[console]: /manuals/console/upbound-console
+[up-ctx]: /reference/cli-reference
diff --git a/spaces_versioned_docs/version-v1.9/howtos/managed-service.md b/spaces_versioned_docs/version-v1.9/howtos/managed-service.md
new file mode 100644
index 000000000..40b983a76
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.9/howtos/managed-service.md
@@ -0,0 +1,23 @@
+---
+title: Managed Upbound control planes
+description: "Learn about the managed service capabilities of a Space"
+sidebar_position: 10
+---
+
+Control planes in Upbound are fully isolated [Upbound Crossplane][uxp] instances
+that Upbound manages for you. This means Upbound handles:
+
+- the lifecycle of the underlying infrastructure (compute, memory, and storage) required to power your instance.
+- scaling of the infrastructure.
+- the maintenance of the core Upbound Crossplane components that make up a control plane.
+
+This lets users focus on building their APIs and operating their control planes,
+while Upbound handles the rest. Each control plane has its own dedicated API
+server connecting users to their control plane.
+
+## Learn about Upbound control planes
+
+Read the [concept][ctp-concept] documentation to learn about Upbound control planes.
+
+[uxp]: /manuals/uxp/overview
+[ctp-concept]: /spaces/concepts/control-planes
\ No newline at end of file
diff --git a/spaces_versioned_docs/version-v1.9/howtos/mcp-connector-guide.md b/spaces_versioned_docs/version-v1.9/howtos/mcp-connector-guide.md
new file mode 100644
index 000000000..8a3866d07
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.9/howtos/mcp-connector-guide.md
@@ -0,0 +1,169 @@
+---
+title: Consume control plane APIs in an app cluster with control plane connector
+sidebar_position: 99
+description: A tutorial to configure a Kubernetes app cluster to consume the APIs
+  of a control plane in a self-hosted Space
+---
+
+In this tutorial, you learn how to configure a Kubernetes app cluster to communicate with a control plane in an Upbound self-hosted Space.
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Control plane connector functionality is consistent across versions.
+
+For related API specifications and resources, see the [Spaces API Reference](../../../../../reference/apis/spaces-api/).
+:::
+
+The [control plane connector][control-plane-connector] bridges your Kubernetes application clusters, running outside of Upbound, to your control planes running in Upbound. This allows you to interact with your control plane's API right from the app cluster. The claim APIs you define via `CompositeResourceDefinitions` are available alongside Kubernetes workload APIs like `Pod`. In effect, control plane connector provides the same experience as a locally installed Crossplane.
+
+## Prerequisites
+
+To complete this tutorial, you need the following:
+
+- Have already deployed an Upbound Space.
+- Have already deployed a Kubernetes cluster (referred to as `app cluster`).
+
+## Create a control plane
+
+Create a new control plane in your self-hosted Space. Run the following command in a terminal:
+
+```bash
+up ctp create my-control-plane
+```
+
+Once the control plane is ready, connect to it.
+
+```bash
+up ctp connect my-control-plane
+```
+
+For convenience, install an Upbound [platform reference Configuration][platform-reference-configuration] from the Marketplace. For production scenarios, replace this with your own Crossplane Configurations or compositions.
+
+```bash
+up ctp configuration install xpkg.upbound.io/upbound/platform-ref-aws:v1.4.0
+```
+
+## Fetch the control plane's connection details
+
+Run the following command in a terminal:
+
+```shell
+kubectl get secret kubeconfig-my-control-plane -n default -o jsonpath='{.data.kubeconfig}' | base64 -d > kubeconfig-my-control-plane.yaml
+```
+
+This command saves the kubeconfig for the control plane to a file in your working directory.
+
+## Install control plane connector in your app cluster
+
+Switch contexts to your Kubernetes app cluster. To install the control plane connector in your app cluster, you must first provide a secret containing your control plane's kubeconfig at install-time. Run the following command in a terminal:
+
+:::important
+Make sure the following commands are executed against your **app cluster**, not your control plane.
+:::
+
+```bash
+kubectl create secret generic kubeconfig-my-control-plane -n kube-system --from-file=kubeconfig=./kubeconfig-my-control-plane.yaml
+```
+
+Set the environment variable below to configure which namespace _in your control plane_ you wish to sync the app cluster's claims to.
+
+```shell
+export CONNECTOR_CTP_NAMESPACE=app-cluster-1
+```
+
+Install the Control Plane Connector in the app cluster and point it to your control plane.
+
+```bash
+up ctp connector install my-control-plane $CONNECTOR_CTP_NAMESPACE --control-plane-secret=kubeconfig-my-control-plane
+```
+
+## Inspect your app cluster
+
+After you install Control Plane Connector in the app cluster, you can see APIs which live on the control plane. You can confirm this by running the following command on your app cluster:
+
+```bash {copy-lines="1"}
+kubectl api-resources | grep upbound
+
+# The output should look like this:
+sqlinstances   aws.platform.upbound.io/v1alpha1       true   SQLInstance
+clusters       aws.platformref.upbound.io/v1alpha1    true   Cluster
+osss           observe.platform.upbound.io/v1alpha1   true   Oss
+apps           platform.upbound.io/v1alpha1           true   App
+```
+
+## Claim a database instance on your app cluster
+
+Create a database claim against the `SQLInstance` API and observe your control plane create the resources. Apply the following resources to your app cluster:
+
+```yaml
+cat < --output
+   ```
+
+   The command exports your existing Crossplane control plane configuration/state into an archive file.
+
+:::note
+By default, the export command doesn't make any changes to your existing Crossplane control plane state, leaving it intact. Use the `--pause-before-export` flag to pause the reconciliation on managed resources before exporting the archive file.
+
+This safety mechanism ensures the control plane you migrate state to doesn't assume ownership of resources before you're ready.
+:::
+
+2. Use the control plane [create command][create-command] to create a managed
+control plane in Upbound:
+
+   ```bash
+   up controlplane create my-controlplane
+   ```
+
+3. Use [`up ctx`][up-ctx] to connect to the control plane created in the previous step:
+
+   ```bash
+   up ctx "///my-controlplane"
+   ```
+
+   The command configures your local `kubeconfig` to connect to the control plane.
+
+4. Run the following command to import the archive file into the control plane:
+
+   ```bash
+   up controlplane migration import --input 
+   ```
+
+:::note
+By default, the import command leaves the control plane in an inactive state by pausing the reconciliation on managed
+resources. This pause gives you an opportunity to review the imported configuration/state before activating the control plane.
+Use the `--unpause-after-import` flag to change the default behavior and activate the control plane immediately after
+importing the archive file.
+:::
+
+
+
+5. Review and validate the imported configuration/state. When you are ready, activate your managed
+   control plane by running the following command:
+
+   ```bash
+   kubectl annotate managed --all crossplane.io/paused-
+   ```
+
+   At this point, you can delete the source Crossplane control plane.
+
+## CLI options
+
+### Filtering
+
+The migration tool captures the state of a control plane. The only supported
+filtering is by Kubernetes namespace and Kubernetes resource type.
+
+You can exclude namespaces using the `--exclude-namespaces` CLI option. This prevents the CLI from including unwanted resources in the export.
+
+```bash
+--exclude-namespaces=kube-system,kube-public,kube-node-lease,local-path-storage,...
+
+# A list of specific namespaces to exclude from the export. Defaults to 'kube-system', 'kube-public','kube-node-lease', and 'local-path-storage'.
+```
+
+You can exclude Kubernetes resource types by using the `--exclude-resources` CLI option:
+
+```bash
+--exclude-resources=EXCLUDE-RESOURCES,...
+
+# A list of resource types to exclude from the export in "resource.group" format. No resources are excluded by default.
+```
+
+For example, to exclude the CRDs installed by Crossplane functions (since they're not needed):
+
+```bash
+up controlplane migration export \
+  --exclude-resources=gotemplates.gotemplating.fn.crossplane.io,kclinputs.template.fn.crossplane.io
+```
+
+:::warning
+You must specify resource names in lowercase "resource.group" format (for example, `gotemplates.gotemplating.fn.crossplane.io`). Using only the resource kind (for example, `GoTemplate`) isn't supported.
+:::
+
+
+:::tip Function Input CRDs
+
+Exclude function input CRDs (`inputs.template.fn.crossplane.io`, `resources.pt.fn.crossplane.io`, `gotemplates.gotemplating.fn.crossplane.io`, `kclinputs.template.fn.crossplane.io`) from migration exports. Upbound automatically recreates these resources during import. Function input CRDs typically have owner references to function packages and may have restricted RBAC access. Upbound installs these CRDs during the import when function packages are restored.
+
+:::
+
+
+After export, you can also edit the archive file to include only the necessary resources.
+
+### Export non-Crossplane resources
+
+Use the `--include-extra-resources=` CLI option to select other CRD types to include in the export.
+
+### Set the kubecontext
+
+Currently `--context` isn't supported in the migration CLI. You should be able to use the `--kubeconfig` CLI option to use a file that's set to the correct context. For example:
+
+```bash
+up controlplane migration export --kubeconfig 
+```
+
+Use this in tandem with `up ctx` to export a control plane's kubeconfig:
+
+```bash
+up ctx --kubeconfig ~/.kube/config
+
+# To list the current context
+up ctx . --kubeconfig ~/.kube/config
+```
+
+## Export archive
+
+The migration CLI exports an archive upon successful completion. Below is an example export of a control plane that excludes several CRD types and skips the confirmation prompt. A file gets written to the working directory, unless you select another output file:
+
+
+ +View the example export + +```bash +$ up controlplane migration export --exclude-resources=gotemplates.gotemplating.fn.crossplane.io,kclinputs.template.fn.crossplane.io --yes +Exporting control plane state... +✓ Scanning control plane for types to export... 121 types found! 👀 +✓ Exporting 121 Crossplane resources...60 resources exported! 📤 +✓ Exporting 3 native resources...8 resources exported! 📤 +✓ Archiving exported state... archived to "xp-state.tar.gz"! 📦 +``` + +
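+
+To confirm what an export like the one above captured, you can list the
+archive contents before restoring anything. A minimal sketch, assuming the
+default `xp-state.tar.gz` output name:
+
+```bash
+# List the archived files without unpacking
+tar -tzf xp-state.tar.gz | head -20
+
+# Unpack into a scratch directory for closer inspection
+mkdir -p xp-state && tar -xzf xp-state.tar.gz -C xp-state
+```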
+
+
+When an export occurs, a file named `xp-state.tar.gz` gets created in the working directory by default. You can unzip the file; the contents of the export are all plain-text YAML files.
+
+- Each CRD (for example `vpcs.ec2.aws.upbound.io`) gets its own directory,
+which contains:
+  - A `metadata.yaml` file that contains the Kubernetes Object Metadata and a
+  list of Kubernetes Categories the resource belongs to
+  - A `cluster` directory that contains YAML manifests for all resources provisioned
+  using the CRD.
+
+Sample contents for a Cluster with a single `XNetwork` Composite from
+[configuration-aws-network][configuration-aws-network] is shown below:
+
+
+
+ +View the example cluster content + +```bash +├── compositionrevisions.apiextensions.crossplane.io +│ ├── cluster +│ │ ├── kcl.xnetworks.aws.platform.upbound.io-4ca6a8a.yaml +│ │ └── xnetworks.aws.platform.upbound.io-9859a34.yaml +│ └── metadata.yaml +├── configurations.pkg.crossplane.io +│ ├── cluster +│ │ └── configuration-aws-network.yaml +│ └── metadata.yaml +├── deploymentruntimeconfigs.pkg.crossplane.io +│ ├── cluster +│ │ └── default.yaml +│ └── metadata.yaml +├── export.yaml +├── functions.pkg.crossplane.io +│ ├── cluster +│ │ ├── crossplane-contrib-function-auto-ready.yaml +│ │ ├── crossplane-contrib-function-go-templating.yaml +│ │ └── crossplane-contrib-function-kcl.yaml +│ └── metadata.yaml +├── internetgateways.ec2.aws.upbound.io +│ ├── cluster +│ │ └── borrelli-backup-test-xgl4q.yaml +│ └── metadata.yaml +├── mainroutetableassociations.ec2.aws.upbound.io +│ ├── cluster +│ │ └── borrelli-backup-test-t2qh7.yaml +│ └── metadata.yaml +├── namespaces +│ └── cluster +│ ├── crossplane-system.yaml +│ ├── default.yaml +│ └── upbound-system.yaml +├── providerconfigs.aws.upbound.io +│ ├── cluster +│ │ └── default.yaml +│ └── metadata.yaml +├── providerconfigusages.aws.upbound.io +│ ├── cluster +│ │ ├── 0a2a3ec6-ef13-45f9-9cf0-63af7f4a6b6b.yaml +...redacted +│ │ └── f7092b0f-3a78-4bfe-82c8-57e5085a9b11.yaml +│ └── metadata.yaml +├── providers.pkg.crossplane.io +│ ├── cluster +│ │ ├── upbound-provider-aws-ec2.yaml +│ │ └── upbound-provider-family-aws.yaml +│ └── metadata.yaml +├── routes.ec2.aws.upbound.io +│ ├── cluster +│ │ └── borrelli-backup-test-dt9cj.yaml +│ └── metadata.yaml +├── routetableassociations.ec2.aws.upbound.io +│ ├── cluster +│ │ ├── borrelli-backup-test-mr2sd.yaml +│ │ ├── borrelli-backup-test-ngq5h.yaml +│ │ ├── borrelli-backup-test-nrkgg.yaml +│ │ └── borrelli-backup-test-wq752.yaml +│ └── metadata.yaml +├── routetables.ec2.aws.upbound.io +│ ├── cluster +│ │ └── borrelli-backup-test-dv4mb.yaml +│ └── metadata.yaml +├── secrets +│ └── namespaces +│ ├── crossplane-system +│ │ ├── cert-token-signing-gateway-pub.yaml +│ │ ├── mxp-hostcluster-certs.yaml +│ │ ├── package-pull-secret.yaml +│ │ └── xgql-tls.yaml +│ └── upbound-system +│ └── aws-creds.yaml +├── securitygrouprules.ec2.aws.upbound.io +│ ├── cluster +│ │ ├── borrelli-backup-test-472f4.yaml +│ │ └── borrelli-backup-test-qftmw.yaml +│ └── metadata.yaml +├── securitygroups.ec2.aws.upbound.io +│ ├── cluster +│ │ └── borrelli-backup-test-w5jch.yaml +│ └── metadata.yaml +├── storeconfigs.secrets.crossplane.io +│ ├── cluster +│ │ └── default.yaml +│ └── metadata.yaml +├── subnets.ec2.aws.upbound.io +│ ├── cluster +│ │ ├── borrelli-backup-test-8btj6.yaml +│ │ ├── borrelli-backup-test-gbmrm.yaml +│ │ ├── borrelli-backup-test-m7kh7.yaml +│ │ └── borrelli-backup-test-nttt5.yaml +│ └── metadata.yaml +├── vpcs.ec2.aws.upbound.io +│ ├── cluster +│ │ └── borrelli-backup-test-7hwgh.yaml +│ └── metadata.yaml +└── xnetworks.aws.platform.upbound.io +├── cluster +│ └── borrelli-backup-test.yaml +└── metadata.yaml +43 directories, 87 files +``` + +
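+
+To summarize an unpacked archive laid out like this, you can count the
+exported manifests per CRD directory. A rough sketch, run from the archive
+root:
+
+```bash
+# Count YAML manifests under each type's cluster/ directory
+find . -path '*/cluster/*.yaml' | cut -d/ -f2 | sort | uniq -c | sort -rn
+```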
+ + +The `export.yaml` file contains metadata about the export, including the configuration of the export, Crossplane information, and what's included in the export bundle. + +
+ +View the export + +```yaml +version: v1alpha1 +exportedAt: 2025-01-06T17:39:53.173222Z +options: + excludedNamespaces: + - kube-system + - kube-public + - kube-node-lease + - local-path-storage + includedResources: + - namespaces + - configmaps + - secrets + excludedResources: + - gotemplates.gotemplating.fn.crossplane.io + - kclinputs.template.fn.crossplane.io +crossplane: + distribution: universal-crossplane + namespace: crossplane-system + version: 1.17.3-up.1 + featureFlags: + - --enable-provider-identity + - --enable-environment-configs + - --enable-composition-functions + - --enable-usages +stats: + total: 68 + nativeResources: + configmaps: 0 + namespaces: 3 + secrets: 5 + customResources: + amicopies.ec2.aws.upbound.io: 0 + amilaunchpermissions.ec2.aws.upbound.io: 0 + amis.ec2.aws.upbound.io: 0 + availabilityzonegroups.ec2.aws.upbound.io: 0 + capacityreservations.ec2.aws.upbound.io: 0 + carriergateways.ec2.aws.upbound.io: 0 + compositeresourcedefinitions.apiextensions.crossplane.io: 0 + compositionrevisions.apiextensions.crossplane.io: 2 + compositions.apiextensions.crossplane.io: 0 + configurationrevisions.pkg.crossplane.io: 0 + configurations.pkg.crossplane.io: 1 +...redacted +``` + +
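+
+Because `export.yaml` records these stats, you can check what a backup
+contains without restoring it. A sketch, assuming `yq` is installed and
+`export.yaml` sits at the archive root:
+
+```bash
+# Pull just the metadata file out of the archive and read the totals
+tar -xzf xp-state.tar.gz export.yaml
+yq '.crossplane.version, .stats.total' export.yaml
+```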
+
+### Skipped resources
+
+Along with the resources excluded via CLI options, the following resources aren't
+included in the backup:
+
+- The `kube-root-ca.crt` ConfigMap, since this is cluster-specific
+- Resources directly managed via Helm (resources applied via ArgoCD's Helm implementation, which templates
+Helm resources and then applies them, are included in the backup). The migration creates the exclusion list by looking for:
+  - Any Resource with the label `"app.kubernetes.io/managed-by" == "Helm"`
+  - Kubernetes Secrets with the label prefix `helm.sh/release`. For example, `helm.sh/release.v1`
+- Resources installed via a Crossplane package. These have an `ownerReference` with
+a prefix `pkg.crossplane.io`. The expectation is that during import, the Crossplane Package Manager bears responsibility for installing the resources.
+- Crossplane Locks: Any `Lock.pkg.crossplane.io` resource isn't included in the
+export.
+
+## Restore
+
+The following is an example of a successful import run. At the end of the import, all Managed Resources are in a paused state.
+
+
+ +View the migration import + +```bash +$ up controlplane migration import +Importing control plane state... +✓ Reading state from the archive... Done! 👀 +✓ Importing base resources... 18 resources imported! 📥 +✓ Waiting for XRDs... Established! ⏳ +✓ Waiting for Packages... Installed and Healthy! ⏳ +✓ Importing remaining resources... 50 resources imported! 📥 +✓ Finalizing import... Done! 🎉 +``` + +
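+
+To confirm which resources the import left paused, a quick sketch using
+`kubectl` and `jq`:
+
+```bash
+# List managed resources still carrying the paused annotation
+kubectl get managed -o json \
+  | jq -r '.items[] | select(.metadata.annotations["crossplane.io/paused"] == "true") | .metadata.name'
+```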
+
+Your scenario may involve migrating resources which already exist through other automation on the platform. When executing an import in these circumstances, the importer applies the new manifests to the cluster. If a resource already exists, the restore overwrites its fields with the values from the backup.
+
+The importer restores all resources in the export archive. Managed Resources get imported with the `crossplane.io/paused: "true"` annotation set. Use the `--unpause-after-import` CLI argument to automatically un-pause resources that got
+paused during backup, or remove the annotation manually.
+
+### Restore order
+
+The importer restores resources based on their Kubernetes types. The restore order doesn't account for parent/child relationships.
+
+Because Crossplane Composites create new Managed Resources if not present on the cluster, all
+Claims, Composites and Managed Resources get imported in a paused state. You can un-pause them after the restore completes.
+
+The first step of import is installing Base Resources into the cluster. These resources (such as
+packages and XRDs) must be ready before proceeding with the import.
+Base Resources are:
+
+- Kubernetes Resources
+  - ConfigMaps
+  - Namespaces
+  - Secrets
+- Crossplane Resources
+  - ControllerConfigs: `controllerconfigs.pkg.crossplane.io`
+  - DeploymentRuntimeConfigs: `deploymentruntimeconfigs.pkg.crossplane.io`
+  - StoreConfigs: `storeconfigs.secrets.crossplane.io`
+- Crossplane Packages
+  - Providers: `providers.pkg.crossplane.io`
+  - Functions: `functions.pkg.crossplane.io`
+  - Configurations: `configurations.pkg.crossplane.io`
+
+Restore waits for the base resources to be `Ready` before moving on to the next step. Next, restore walks through the archive and restores all the manifests present.
+
+During import, the `crossplane.io/paused` annotation gets added to Managed Resources, Claims
+and Composites.
+
+To manually un-pause managed resources after an import, remove the annotation by running:
+
+```bash
+kubectl annotate managed --all crossplane.io/paused-
+```
+
+You can also run import again with the `--unpause-after-import` flag to remove the annotations.
+
+```bash
+up controlplane migration import --unpause-after-import
+```
+
+### Restoring resource status
+
+The importer applies the status of all resources during import. It uses the stored CRD to determine whether the resource's CRD version defines a status field.
+
+
+[cli-command]: /reference/cli-reference
+[up-cli]: /reference/cli-reference
+[up-cli-1]: /manuals/cli/overview
+[create-command]: /reference/cli-reference
+[up-ctx]: /reference/cli-reference
+[configuration-aws-network]: https://marketplace.upbound.io/configurations/upbound/configuration-aws-network
diff --git a/spaces_versioned_docs/version-v1.9/howtos/observability.md b/spaces_versioned_docs/version-v1.9/howtos/observability.md
new file mode 100644
index 000000000..8fc5c3278
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.9/howtos/observability.md
@@ -0,0 +1,395 @@
+---
+title: Observability
+sidebar_position: 50
+description: A guide for how to use the integrated observability pipeline feature
+  in a Space.
+plan: "enterprise"
+---
+
+
+
+This guide explains how to configure observability in Upbound Spaces. Upbound
+provides integrated observability features built on
+[OpenTelemetry][opentelemetry] to collect, process, and export logs, metrics,
+and traces.
+
+Upbound Spaces offers two levels of observability:
+
+1. **Space-level observability** - Observes the cluster infrastructure where Spaces software is installed (Self-Hosted only)
+2. **Control plane observability** - Observes workloads running within individual control planes
+
+
+
+
+
+:::info API Version Information & Version Selector
+This guide applies to all supported versions (v1.9-v1.15+). Observability features evolved:
+
+- **v1.9-v1.12**: SharedTelemetryConfig available (alpha until v1.11)
+- **v1.11+**: Observability promoted to stable with logs export support
+- **v1.14+**: Both space-level and control-plane observability GA
+
+**View API Reference for Your Version**:
+| Version | Status | Link |
+|---------|--------|------|
+| v1.15 | Latest | [Spaces API v1.15](../../../next/reference/apis/spaces-api/) |
+| v1.14 | Supported | [Spaces API v1.14](../../../1.14/reference/apis/spaces-api/) |
+| v1.13 | Supported | [Spaces API v1.13](../../../1.13/reference/apis/spaces-api/) |
+| v1.12 | Supported | [Spaces API v1.12](../../../1.12/reference/apis/spaces-api/) |
+| v1.11 | Supported | [Spaces API v1.11](../../../1.11/reference/apis/spaces-api/) |
+| v1.10 | Limited | [Spaces API v1.10](../../../1.10/reference/apis/spaces-api/) |
+| v1.9 | EOL | [Spaces API v1.9](../../../1.9/reference/apis/spaces-api/) |
+:::
+
+:::important
+**Space-level observability** (available since v1.6.0, GA in v1.14.0):
+- Disabled by default
+- Requires manual enablement and configuration
+- Self-Hosted Spaces only
+
+**Control plane observability** (available since v1.13.0, GA in v1.14.0):
+- Enabled by default
+- No additional configuration required
+:::
+
+
+
+
+## Prerequisites
+
+
+**Control plane observability** is enabled by default. No additional setup is
+required.
+
+
+
+### Self-hosted Spaces
+
+1. **Enable the observability feature** when installing Spaces:
+   ```bash
+   up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
+     ...
+     --set "observability.enabled=true"
+   ```
+
+Set `features.alpha.observability.enabled=true` instead if using a Spaces version
+before `v1.14.0`.
+
+2. **Install OpenTelemetry Operator** (required for Space-level observability):
+   ```bash
+   kubectl apply -f https://github.com/open-telemetry/opentelemetry-operator/releases/download/v0.116.0/opentelemetry-operator.yaml
+   ```
+
+   :::important
+   If running Spaces `v1.11` or later, use OpenTelemetry Operator `v0.110.0` or later due to breaking changes.
+   :::
+
+
+## Space-level observability
+
+Space-level observability is only available for self-hosted Spaces and allows
+administrators to observe the cluster infrastructure.
+
+### Configuration
+
+Configure Space-level observability using the `spacesCollector` value in your
+Spaces Helm chart:
+
+```yaml
+observability:
+  spacesCollector:
+    config:
+      exporters:
+        otlphttp:
+          endpoint: ""
+          headers:
+            api-key: YOUR_API_KEY
+      exportPipeline:
+        logs:
+          - otlphttp
+        metrics:
+          - otlphttp
+```
+
+This configuration exports metrics and logs from:
+
+- Crossplane installation
+- Spaces infrastructure (controller, API, router, etc.)
+
+### Router metrics
+
+The Spaces router uses Envoy as a reverse proxy and automatically exposes
+metrics when you enable Space-level observability.
+These metrics provide
+visibility into:
+
+- Traffic routing to control planes and services
+- Request status codes, timeouts, and retries
+- Circuit breaker state preventing cascading failures
+- Client connection patterns and request volume
+- Request latency (P50, P95, P99)
+
+For more information about available metrics, example queries, and how to enable
+this feature, see the [Space-level observability guide][space-level-o11y].
+
+## Control plane observability
+
+Control plane observability collects telemetry data from workloads running
+within individual control planes using `SharedTelemetryConfig` resources.
+
+The pipeline deploys [OpenTelemetry Collectors][opentelemetry-collectors] per
+control plane, defined by a `SharedTelemetryConfig` at the group level.
+Collectors pass data to external observability backends.
+
+:::important
+From Spaces `v1.13` and beyond, telemetry only includes user-facing control
+plane workloads (Crossplane, providers, functions).
+
+Self-hosted users can include system workloads (`api-server`, `etcd`) by setting
+`observability.collectors.includeSystemTelemetry=true` in Helm.
+:::
+
+:::important
+Spaces validates `SharedTelemetryConfig` resources before applying them by
+sending telemetry to the configured exporters. For self-hosted Spaces, ensure that
+`spaces-controller` can reach the exporter endpoints.
+:::
+
+### `SharedTelemetryConfig`
+
+`SharedTelemetryConfig` is a group-scoped custom resource that defines telemetry
+configuration for control planes.
+
+#### New Relic example
+
+```yaml
+apiVersion: observability.spaces.upbound.io/v1alpha1
+kind: SharedTelemetryConfig
+metadata:
+  name: newrelic
+  namespace: default
+spec:
+  controlPlaneSelector:
+    labelSelectors:
+      - matchLabels:
+          org: foo
+  exporters:
+    otlphttp:
+      endpoint: https://otlp.nr-data.net
+      headers:
+        api-key: YOUR_API_KEY
+  exportPipeline:
+    metrics: [otlphttp]
+    traces: [otlphttp]
+    logs: [otlphttp]
+```
+
+#### Datadog example
+
+```yaml
+apiVersion: observability.spaces.upbound.io/v1alpha1
+kind: SharedTelemetryConfig
+metadata:
+  name: datadog
+  namespace: default
+spec:
+  controlPlaneSelector:
+    labelSelectors:
+      - matchLabels:
+          org: foo
+  exporters:
+    datadog:
+      api:
+        site: ${DATADOG_SITE}
+        key: ${DATADOG_API_KEY}
+  exportPipeline:
+    metrics: [datadog]
+    traces: [datadog]
+    logs: [datadog]
+```
+
+### Control plane selection
+
+Use `spec.controlPlaneSelector` to specify which control planes should use the
+telemetry configuration.
+
+#### Label-based selection
+
+```yaml
+spec:
+  controlPlaneSelector:
+    labelSelectors:
+      - matchLabels:
+          environment: production
+```
+
+#### Expression-based selection
+
+```yaml
+spec:
+  controlPlaneSelector:
+    labelSelectors:
+      - matchExpressions:
+          - { key: environment, operator: In, values: [production,staging] }
+```
+
+#### Name-based selection
+
+```yaml
+spec:
+  controlPlaneSelector:
+    names:
+      - controlplane-dev
+      - controlplane-staging
+      - controlplane-prod
+```
+
+### Manage sensitive data
+
+:::important
+Available from Spaces `v1.10`
+:::
+
+Store sensitive data in Kubernetes secrets and reference them in your
+`SharedTelemetryConfig`:
+
+1. **Create the secret:**
+   ```bash
+   kubectl create secret generic sensitive -n  \
+     --from-literal=apiKey='YOUR_API_KEY'
+   ```
+
+2. **Reference in SharedTelemetryConfig:**
+   ```yaml
+   apiVersion: observability.spaces.upbound.io/v1alpha1
+   kind: SharedTelemetryConfig
+   metadata:
+     name: newrelic
+   spec:
+     configPatchSecretRefs:
+       - name: sensitive
+         key: apiKey
+         path: exporters.otlphttp.headers.api-key
+     controlPlaneSelector:
+       labelSelectors:
+         - matchLabels:
+             org: foo
+     exporters:
+       otlphttp:
+         endpoint: https://otlp.nr-data.net
+         headers:
+           api-key: dummy # Replaced by secret value
+     exportPipeline:
+       metrics: [otlphttp]
+       traces: [otlphttp]
+       logs: [otlphttp]
+   ```
+
+### Telemetry processing
+
+:::important
+Available from Spaces `v1.11`
+:::
+
+Configure processing pipelines to transform telemetry data using the [transform
+processor][transform-processor].
+
+#### Add labels to metrics
+
+```yaml
+spec:
+  processors:
+    transform:
+      error_mode: ignore
+      metric_statements:
+        - context: datapoint
+          statements:
+            - set(attributes["newLabel"], "someLabel")
+  processorPipeline:
+    metrics: [transform]
+```
+
+#### Remove labels
+
+From metrics:
+```yaml
+processors:
+  transform:
+    metric_statements:
+      - context: datapoint
+        statements:
+          - delete_key(attributes, "kubernetes_namespace")
+```
+
+From logs:
+```yaml
+processors:
+  transform:
+    log_statements:
+      - context: log
+        statements:
+          - delete_key(attributes, "log.file.name")
+```
+
+#### Modify log messages
+
+```yaml
+processors:
+  transform:
+    log_statements:
+      - context: log
+        statements:
+          - set(attributes["original"], body)
+          - set(body, Concat(["log message:", body], " "))
+```
+
+### Monitor status
+
+Check the status of your `SharedTelemetryConfig`:
+
+```bash
+kubectl get stc
+NAME      SELECTED   FAILED   PROVISIONED   AGE
+datadog   1          0        1             63s
+```
+
+- `SELECTED`: Number of control planes selected
+- `FAILED`: Number of control planes that failed provisioning
+- `PROVISIONED`: Number of successfully running collectors
+
+For detailed status information:
+
+```bash
+kubectl describe stc 
+```
+
+## Supported exporters
+
+Both Space-level and control plane observability support:
+
+- `datadog` - Datadog integration
+- `otlphttp` - General-purpose exporter (used by New Relic, among others)
+- `debug` - For troubleshooting
+
+## Considerations
+
+- **Control plane conflicts**: Each control plane can only use one `SharedTelemetryConfig`. Multiple configs selecting the same control plane conflict.
+- **Custom collector image**: Both Space-level and control plane observability use the same custom OpenTelemetry Collector image with supported exporters.
+- **Resource scope**: `SharedTelemetryConfig` resources are group-scoped, allowing different telemetry configurations per group.
+
+For more advanced configuration options, review the [Helm chart
+reference][helm-chart-reference] and [OpenTelemetry Transformation Language
+documentation][opentelemetry-transformation-language].
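+
+As a quick way to validate a pipeline end to end, you can route all three
+signals to the `debug` exporter listed above before wiring up a real backend.
+A minimal sketch; the config name and selected control plane are illustrative:
+
+```yaml
+apiVersion: observability.spaces.upbound.io/v1alpha1
+kind: SharedTelemetryConfig
+metadata:
+  name: debug-pipeline
+  namespace: default
+spec:
+  controlPlaneSelector:
+    names:
+      - controlplane-dev
+  exporters:
+    # The debug exporter prints telemetry to the collector logs
+    debug: {}
+  exportPipeline:
+    metrics: [debug]
+    traces: [debug]
+    logs: [debug]
+```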
+
+
+[opentelemetry]: https://opentelemetry.io/
+[opentelemetry-collectors]: https://opentelemetry.io/docs/collector/
+[opentelemetry-collector-configuration]: https://opentelemetry.io/docs/collector/configuration/#exporters
+[opentelemetry-operator]: https://opentelemetry.io/docs/kubernetes/operator/
+[transform-processor]: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/transformprocessor/README.md
+[opentelemetry-transformation-language]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl
+[space-level-o11y]: /spaces/howtos/self-hosted/space-observability
+[helm-chart-reference]: /reference/helm-reference
+[opentelemetry-transformation-language-functions]: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/ottl/ottlfuncs/README.md
+[opentelemetry-transformation-language-contexts]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl/contexts
+[guide-on-ottl]: https://betterstack.com/community/guides/observability/ottl/#a-brief-overview-of-the-ottl-grammar
diff --git a/spaces_versioned_docs/version-v1.9/howtos/query-api.md b/spaces_versioned_docs/version-v1.9/howtos/query-api.md
new file mode 100644
index 000000000..78163de2f
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.9/howtos/query-api.md
@@ -0,0 +1,320 @@
+---
+title: Query API
+sidebar_position: 40
+description: Use the `up` CLI to query objects and resources
+---
+
+
+
+
+Upbound's Query API allows users to inspect objects and resources within their control planes. The read-only `up alpha query` and `up alpha get` CLI commands let you gather information about your control planes quickly and efficiently. These commands follow the [`kubectl` conventions][kubectl-conventions] for filtering, sorting, and retrieving information from your Space.
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+). Query API is available in Cloud Spaces since v1.6 and Self-Hosted since v1.8.
+
+For detailed deployment procedures, see [Deploy Query API infrastructure - Self-Hosted](./self-hosted/query-api.md).
+:::
+
+
+
+## Using the Query API
+
+
+The Query API allows you to retrieve control plane information faster than traditional `kubectl` commands. This feature lets you debug your Crossplane resources with the CLI or within the Upbound Console's enhanced management views.
+
+### Query within a single control plane
+
+Use the `up alpha get` command to retrieve information about objects within the current control plane context. This command uses the **Query** endpoint and targets the current control plane.
+
+To switch between control plane groups, use the [`up ctx`][up-ctx] command and change to your desired context with an interactive prompt, or specify your control plane path:
+
+```shell
+up ctx ///
+```
+
+You can query within a single control plane with the [`up alpha get` command][up-alpha-get-command] to return more information about a given object within the current kubeconfig context.
+
+The `up alpha get` command can query resource types and aliases to return objects in your control plane.
+
+```shell
+up alpha get managed
+NAME                             READY   SYNCED   AGE
+custom-account1-5bv5j-sa         True    True     15m
+custom-cluster1-bq6dk-net        True    True     15m
+custom-account1-5bv5j-subnet     True    True     15m
+custom-cluster1-bq6dk-nodepool   True    True     15m
+custom-cluster1-bq6dk-cluster    True    True     15m
+custom-account1-5bv5j-net        True    True     15m
+custom-cluster1-bq6dk-subnet     True    True     15m
+custom-cluster1-bq6dk-sa         True    True     15m
+```
+
+The [`-A` flag][a-flag] queries for objects across all namespaces.
+
+```shell
+up alpha get configmaps -A
+NAMESPACE           NAME                                                   AGE
+crossplane-system   uxp-versions-config                                    18m
+crossplane-system   universal-crossplane-config                            18m
+crossplane-system   kube-root-ca.crt                                       18m
+upbound-system      kube-root-ca.crt                                       18m
+kube-system         kube-root-ca.crt                                       18m
+kube-system         coredns                                                18m
+default             kube-root-ca.crt                                       18m
+kube-node-lease     kube-root-ca.crt                                       18m
+kube-public         kube-root-ca.crt                                       18m
+kube-system         kube-apiserver-legacy-service-account-token-tracking   18m
+kube-system         extension-apiserver-authentication                     18m
+```
+
+To query for [multiple resource types][multiple-resource-types], you can add the name or alias for the resource as a comma separated string.
+
+```shell
+up alpha get providers,providerrevisions
+
+NAME                                                                              HEALTHY   REVISION   IMAGE                                                    STATE    DEP-FOUND   DEP-INSTALLED   AGE
+providerrevision.pkg.crossplane.io/crossplane-contrib-provider-nop-ecc25c121431   True      1          xpkg.upbound.io/crossplane-contrib/provider-nop:v0.2.1   Active                               18m
+
+NAME                                                          INSTALLED   HEALTHY   PACKAGE                                                  AGE
+provider.pkg.crossplane.io/crossplane-contrib-provider-nop    True        True      xpkg.upbound.io/crossplane-contrib/provider-nop:v0.2.1   18m
+```
+
+### Query multiple control planes
+
+The [`up alpha query` command][up-alpha-query-command] returns a list of objects of any kind within all the control planes in your Space. This command uses either the **SpaceQuery** or **GroupQuery** endpoints depending on your query scope. The `-A` flag switches the query context from the group level to the entire Space.
+
+The `up alpha query` command accepts resources and aliases to return objects across your group or Space.
+
+```shell
+up alpha query crossplane
+
+NAME                                                                                          ESTABLISHED   OFFERED   AGE
+compositeresourcedefinition.apiextensions.crossplane.io/xnetworks.platform.acme.co            True          True      20m
+compositeresourcedefinition.apiextensions.crossplane.io/xaccountscaffolds.platform.acme.co    True          True      20m
+
+
+NAME                                                                          XR-KIND            XR-APIVERSION               AGE
+composition.apiextensions.crossplane.io/xaccountscaffolds.platform.acme.co   XAccountScaffold   platform.acme.co/v1alpha1   20m
+composition.apiextensions.crossplane.io/xnetworks.platform.acme.co           XNetwork           platform.acme.co/v1alpha1   20m
+
+
+NAME                                                                                  REVISION   XR-KIND            XR-APIVERSION               AGE
+compositionrevision.apiextensions.crossplane.io/xaccountscaffolds.platform.acme.co-5ae9da5   1    XAccountScaffold   platform.acme.co/v1alpha1   20m
+compositionrevision.apiextensions.crossplane.io/xnetworks.platform.acme.co-414ce80           1    XNetwork           platform.acme.co/v1alpha1   20m
+
+NAME                                                           READY   SYNCED   AGE
+nopresource.nop.crossplane.io/custom-cluster1-bq6dk-subnet     True    True     19m
+nopresource.nop.crossplane.io/custom-account1-5bv5j-net        True    True     19m
+
+## Output truncated...
+
+```
+
+
+The [`--sort-by` flag][sort-by-flag] lets you control the order of the returned information. You can specify the sort order as a JSONPath expression that resolves to a string or integer.
+ + +```shell +up alpha query crossplane -A --sort-by="{.metadata.name}" + +CONTROLPLANE NAME AGE +default/test deploymentruntimeconfig.pkg.crossplane.io/default 10m + +CONTROLPLANE NAME AGE TYPE DEFAULT-SCOPE +default/test storeconfig.secrets.crossplane.io/default 10m Kubernetes crossplane-system +``` + +To query for multiple resource types, you can add the name or alias for the resource as a comma separated string. + +```shell +up alpha query namespaces,configmaps -A + +CONTROLPLANE NAME AGE +default/test namespace/upbound-system 15m +default/test namespace/crossplane-system 15m +default/test namespace/kube-system 16m +default/test namespace/default 16m + +CONTROLPLANE NAMESPACE NAME AGE +default/test crossplane-system configmap/uxp-versions-config 15m +default/test crossplane-system configmap/universal-crossplane-config 15m +default/test crossplane-system configmap/kube-root-ca.crt 15m +default/test upbound-system configmap/kube-root-ca.crt 15m +default/test kube-system configmap/coredns 16m +default/test default configmap/kube-root-ca.crt 16m + +## Output truncated... + +``` + +The Query API also allows you to return resource types with specific [label columns][label-columns]. + +```shell +up alpha query composite -A --label-columns=crossplane.io/claim-namespace + +CONTROLPLANE NAME SYNCED READY COMPOSITION AGE CLAIM-NAMESPACE +query-api-test/test xeks.argo.discover.upbound.io/test-k7xbk False xeks.argo.discover.upbound.io 51d default + +CONTROLPLANE NAME EXTERNALDNS SYNCED READY COMPOSITION AGE CLAIM-NAMESPACE +spaces-clusters/controlplane-query-api-test-spaces-playground xexternaldns.externaldns.platform.upbound.io/spaces-cluster-0-xd8v2-lhnl7 6.34.2 True True xexternaldns.externaldns.platform.upbound.io 19d default +default/query-api-test xexternaldns.externaldns.platform.upbound.io/space-awg-kine-f7dxq-nkk2q 6.34.2 True True xexternaldns.externaldns.platform.upbound.io 55d default + +## Output truncated... + +``` + +### Query API request format + +The CLI can also return a version of your query request with the [`--debug` flag][debug-flag]. This flag returns the API spec request for your query. + +```shell +up alpha query composite -A -d + +apiVersion: query.spaces.upbound.io/v1alpha1 +kind: SpaceQuery +metadata: + creationTimestamp: null +spec: + cursor: true + filter: + categories: + - composite + controlPlane: {} + limit: 500 + objects: + controlPlane: true + table: {} + page: {} +``` + +For more complex queries, you can interact with the Query API like a Kubernetes-style API by creating a query and applying it with `kubectl`. + +The example below is a query for `claim` resources in every control plane from oldest to newest and returns specific information about those claims. + + +```yaml +apiVersion: query.spaces.upbound.io/v1alpha1 +kind: SpaceQuery +spec: + filter: + categories: + - claim + order: + - creationTimestamp: Asc + cursor: true + count: true + objects: + id: true + controlPlane: true + object: + kind: true + apiVersion: true + metadata: + name: true + uid: true + spec: + containers: + image: true +``` + + +The Query API is served by the Spaces API endpoint. You can use `up ctx` to +switch the kubectl context to the Spaces API ingress. After that, you can use +`kubectl create` and receive the `response` for your query parameters. 
+ + +```shell +kubectl create -f spaces-query.yaml -o yaml +``` + +Your `response` should look similar to this example: + +```yaml {copy-lines="none"} +apiVersion: query.spaces.upbound.io/v1alpha1 +kind: SpaceQuery +metadata: + creationTimestamp: "2024-08-08T14:41:46Z" + name: default +response: + count: 3 + cursor: + next: "" + page: 0 + pageSize: 100 + position: 0 + objects: + - controlPlane: + name: query-api-test + namespace: default + id: default/query-api-test/823b2781-7e70-4d91-a6f0-ee8f455d67dc + object: + apiVersion: spaces.platform.upbound.io/v1alpha1 + kind: Space + metadata: + name: space-awg-kine + resourceVersion: "803868" + uid: 823b2781-7e70-4d91-a6f0-ee8f455d67dc + spec: {} + - controlPlane: + name: test-1 + namespace: test + id: test/test-1/08a573dd-851a-42cc-a600-b6f6ed37ee8d + object: + apiVersion: argo.discover.upbound.io/v1alpha1 + kind: EKS + metadata: + name: test-1 + resourceVersion: "4270320" + uid: 08a573dd-851a-42cc-a600-b6f6ed37ee8d + spec: {} + - controlPlane: + name: controlplane-query-api-test-spaces-playground + namespace: spaces-clusters + id: spaces-clusters/controlplane-query-api-test-spaces-playground/b5a6770f-1f85-4d09-8990-997c84bd4159 + object: + apiVersion: spaces.platform.upbound.io/v1alpha1 + kind: Space + metadata: + name: spaces-cluster-0 + resourceVersion: "1408337" + uid: b5a6770f-1f85-4d09-8990-997c84bd4159 + spec: {} +``` + + +## Query API Explorer + + + +import CrdDocViewer from '@site/src/components/CrdViewer'; + +### Query + +The Query resource allows you to query objects in a single control plane. + + + +### GroupQuery + +The GroupQuery resource allows you to query objects across a group of control planes. + + + +### SpaceQuery + +The SpaceQuery resource allows you to query objects across all control planes in a space. + + + + + + +[documentation]: /spaces/howtos/self-hosted/query-api +[up-ctx]: /reference/cli-reference +[up-alpha-get-command]: /reference/cli-reference +[a-flag]: /reference/cli-reference +[multiple-resource-types]: /reference/cli-reference +[up-alpha-query-command]: /reference/cli-reference +[sort-by-flag]: /reference/cli-reference +[label-columns]: /reference/cli-reference +[debug-flag]: /reference/cli-reference +[kubectl-conventions]: https://kubernetes.io/docs/reference/kubectl/generated/kubectl_get/ diff --git a/spaces_versioned_docs/version-v1.9/howtos/secrets-management.md b/spaces_versioned_docs/version-v1.9/howtos/secrets-management.md new file mode 100644 index 000000000..88e730ae5 --- /dev/null +++ b/spaces_versioned_docs/version-v1.9/howtos/secrets-management.md @@ -0,0 +1,719 @@ +--- +title: Secrets Management +sidebar_position: 20 +description: A guide for how to configure synchronizing external secrets into control + planes in a Space. +--- + +Upbound's _Shared Secrets_ is a built in secrets management feature that +provides an integrated way to manage secrets across your platform. It allows you +to store sensitive data like passwords and certificates for your managed control +planes as secrets in an external secret store. + +This feature is a wrapper around the [External Secrets Operator (ESO)][external-secrets-operator-eso] that pulls secrets from external vaults and distributes them across your platform. + +:::info API Version Information +This guide applies to all supported versions (v1.9-v1.14+). The SharedSecretStore and SharedExternalSecret CRDs have been stable since v1.9. + +For API specifications and configuration details, see the [Spaces API Reference](../../../reference/apis/spaces-api/). 
+:::
+
+## Benefits
+
+The Shared Secrets feature allows you to:
+
+* Access secrets from a variety of external secret stores without operational overhead
+* Configure synchronization for multiple control planes in a group
+* Store and manage all your secrets centrally
+* Use Shared Secrets across all Upbound environments (Cloud and Disconnected Spaces)
+* Synchronize secrets across groups of control planes while maintaining clear security boundaries
+* Manage secrets at scale programmatically while ensuring proper isolation and access control
+
+## Understanding the architecture
+
+The Shared Secrets feature uses a hierarchical approach to centrally manage
+secrets and effectively control their distribution.
+
+![Shared Secrets workflow diagram](/img/shared-secrets-workflow.png)
+
+1. The flow begins at the group level, where you define your secret sources and distribution rules
+2. These rules automatically create corresponding resources in your control planes
+3. In each control plane, specific namespaces receive the secrets
+4. Changes at the group level automatically propagate through this chain
+
+## Component configuration
+
+Upbound Shared Secrets consists of two components:
+
+1. **SharedSecretStore**: Defines connections to external secret providers
+2. **SharedExternalSecret**: Specifies which secrets to synchronize and where
+
+
+### Connect to an external vault
+
+
+The `SharedSecretStore` component is the connection point to your external
+secret vaults. It provisions ClusterSecretStore resources into control planes
+within the group.
+
+
+#### AWS Secrets Manager
+
+
+
+In this example, you'll create a `SharedSecretStore` to connect to AWS
+Secrets Manager in `us-west-2`. You'll then grant access to all control planes labeled with
+`environment: production`, and make these secrets available in the `default` and
+`crossplane-system` namespaces.
+
+
+You can configure access to AWS Secrets Manager using static credentials or
+workload identity.
+
+:::important
+While the underlying ESO API supports more auth methods, static credentials are currently the only supported auth method in Cloud Spaces.
+:::
+
+##### Static credentials
+
+1. Use the AWS CLI to create access credentials.
+
+
+2. Write your access credentials to a text file:
+```bash
+# Create a text file with AWS credentials
+cat > aws-credentials.txt << EOF
+[default]
+aws_access_key_id = 
+aws_secret_access_key = 
+EOF
+```
+
+3. Next, store the access credentials in a secret in the namespace you want to have access to the `SharedSecretStore`.
+```shell
+kubectl create secret \
+  generic aws-credentials \
+  -n default \
+  --from-file=creds=./aws-credentials.txt
+```
+
+4. Create a `SharedSecretStore` custom resource file called `secretstore.yaml`.
+   Paste the following configuration:
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: aws-secrets
+spec:
+  # Define which control planes should receive this configuration
+  controlPlaneSelector:
+    labelSelectors:
+    - matchLabels:
+        environment: production
+
+  # Define which namespaces within those control planes can access secrets
+  namespaceSelector:
+    names:
+    - default
+    - crossplane-system
+
+  # Configure the connection to AWS Secrets Manager
+  provider:
+    aws:
+      service: SecretsManager
+      region: us-west-2
+      auth:
+        secretRef:
+          accessKeyIDSecretRef:
+            name: aws-credentials
+            key: access-key-id
+          secretAccessKeySecretRef:
+            name: aws-credentials
+            key: secret-access-key
+```
+
+
+
+##### Workload Identity with IRSA
+
+
+
+You can also use AWS IAM Roles for Service Accounts (IRSA) depending on your
+organization's needs:
+
+1. Ensure you have deployed the Spaces software into an IRSA-enabled EKS cluster.
+2. Follow the AWS instructions to create an IAM OIDC provider with your EKS OIDC
+   provider URL.
+3. Determine the Spaces-generated `controlPlaneID` of your control plane:
+```shell
+kubectl get controlplane <controlplane-name> -o jsonpath='{.status.controlPlaneID}'
+```
+
+4. Create an IAM trust policy in your AWS account to match the control plane.
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Principal": {
+        "Federated": "arn:aws:iam::<account-id>:oidc-provider/<oidc-provider>"
+      },
+      "Action": "sts:AssumeRoleWithWebIdentity",
+      "Condition": {
+        "StringEquals": {
+          "<oidc-provider>:aud": "sts.amazonaws.com",
+          "<oidc-provider>:sub": [
+            "system:serviceaccount:mxp-<controlplane-id>-system:external-secrets-controller"]
+        }
+      }
+    }
+  ]
+}
+```
+
+5. Update your Spaces deployment to annotate the SharedSecrets service account
+   with the role ARN.
+```shell
+up space upgrade ... \
+  --set controlPlanes.sharedSecrets.serviceAccount.customAnnotations."eks\.amazonaws\.com/role-arn"="<role-arn>"
+```
+
+6. Create a SharedSecretStore and reference the SharedSecrets service account:
+```yaml {copy-lines="all"}
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: aws-sm
+  namespace: default
+spec:
+  provider:
+    aws:
+      service: SecretsManager
+      region: <region>
+      auth:
+        jwt:
+          serviceAccountRef:
+            name: external-secrets-controller
+  controlPlaneSelector:
+    names:
+    - <controlplane-name>
+  namespaceSelector:
+    names:
+    - default
+```
+
+When you create a `SharedSecretStore`, the underlying mechanism:
+
+1. Applies at the group level
+2. Determines which control planes should receive this configuration based on the `controlPlaneSelector`
+3. Automatically creates a ClusterSecretStore inside each identified control plane
+4. Maintains a connection in each control plane with the ClusterSecretStore
+   credentials and configuration from the parent SharedSecretStore
+
+Upbound automatically generates a ClusterSecretStore in each matching control
+plane when you create a SharedSecretStore.
+
+```yaml {copy-lines="none"}
+# Automatically created in each matching control plane
+apiVersion: external-secrets.io/v1beta1
+kind: ClusterSecretStore
+metadata:
+  name: aws-secrets # Name matches the parent SharedSecretStore
+spec:
+  provider:
+    upboundspaces:
+      storeRef:
+        name: aws-secrets
+```
+
+When you create the SharedSecretStore, the controller replaces the provider with
+a special provider called `upboundspaces`. This provider references the
+SharedSecretStore object in the Spaces API. This avoids copying the actual cloud
+credentials from Spaces to each control plane.
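+
+To confirm the projection, you can switch into one of the matching control
+planes and list the generated store. A minimal check, assuming a control plane
+that the selector above matches (the context path is an example):
+
+```shell
+# Point your kubecontext at a matching control plane.
+up ctx ./default/my-controlplane
+
+# The generated ClusterSecretStore carries the parent SharedSecretStore's name.
+kubectl get clustersecretstore aws-secrets
+```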
+
+This workflow lets you configure the store connection once at the group level
+and automatically propagates it to each control plane. Individual control
+planes can use the store without exposure to the group-level configuration, and
+updates to the parent propagate to all child ClusterSecretStores.
+
+
+#### Azure Key Vault
+
+
+:::important
+While the underlying ESO API supports more auth methods, static credentials are currently the only supported auth method in Cloud Spaces.
+:::
+
+##### Static credentials
+
+1. Use the Azure CLI to create a service principal and authentication file.
+2. Create a service principal and save the credentials in a file called `azure-credentials.json`:
+```json
+{
+  "appId": "myAppId",
+  "displayName": "myServicePrincipalName",
+  "password": "myServicePrincipalPassword",
+  "tenant": "myTenantId"
+}
+```
+
+3. Store the credentials as a Kubernetes secret:
+```shell
+kubectl create secret \
+  generic azure-secret-sp \
+  -n default \
+  --from-file=creds=./azure-credentials.json
+```
+
+4. Create a SharedSecretStore referencing these credentials:
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: azure-kv
+spec:
+  provider:
+    azurekv:
+      tenantId: "<tenant-id>"
+      vaultUrl: "<vault-url>"
+      authSecretRef:
+        clientId:
+          name: azure-secret-sp
+          key: ClientID
+        clientSecret:
+          name: azure-secret-sp
+          key: ClientSecret
+  controlPlaneSelector:
+    names:
+    - <controlplane-name>
+  namespaceSelector:
+    names:
+    - default
+```
+
+##### Workload Identity
+
+
+You can also use Entra Workload Identity Federation to access Azure Key Vault
+without needing to manage secrets.
+
+To use Entra Workload ID with AKS:
+
+
+1. Deploy the Spaces software into a [workload identity-enabled AKS cluster][workload-identity-enabled-aks-cluster].
+2. Retrieve the OIDC issuer URL of the AKS cluster:
+```shell
+az aks show --name "<aks-cluster-name>" \
+  --resource-group "<resource-group>" \
+  --query "oidcIssuerProfile.issuerUrl" \
+  --output tsv
+```
+
+3. Use the Azure CLI to make a managed identity:
+```shell
+az identity create \
+  --name "<identity-name>" \
+  --resource-group "<resource-group>" \
+  --location "<location>" \
+  --subscription "<subscription-id>"
+```
+
+4. Look up the managed identity's client ID:
+```shell
+az identity show \
+  --resource-group "<resource-group>" \
+  --name "<identity-name>" \
+  --query 'clientId' \
+  --output tsv
+```
+
+5. Update your Spaces deployment to annotate the SharedSecrets service account with the associated Entra application client ID from the previous step:
+```shell
+up space upgrade ... \
+  --set controlPlanes.sharedSecrets.serviceAccount.customAnnotations."azure\.workload\.identity/client-id"="<identity-client-id>" \
+  --set-string controlPlanes.sharedSecrets.pod.customLabels."azure\.workload\.identity/use"="true"
+```
+
+6. Determine the Spaces-generated `controlPlaneID` of your control plane. When you deploy a `kind: controlplane` in a Space, the Spaces software deploys a set of pods in a new namespace following the format `mxp-<controlPlaneID>-system`.
+```shell
+kubectl get controlplane <controlplane-name> -o jsonpath='{.status.controlPlaneID}'
+```
+
+7. Create a federated identity credential.
+```shell
+FEDERATED_IDENTITY_CREDENTIAL_NAME=<federated-credential-name>
+USER_ASSIGNED_IDENTITY_NAME=<identity-name>
+RESOURCE_GROUP=<resource-group>
+AKS_OIDC_ISSUER=<oidc-issuer-url>
+CONTROLPLANE_ID=<controlplane-id>
+az identity federated-credential create --name ${FEDERATED_IDENTITY_CREDENTIAL_NAME} --identity-name "${USER_ASSIGNED_IDENTITY_NAME}" --resource-group "${RESOURCE_GROUP}" --issuer "${AKS_OIDC_ISSUER}" --subject system:serviceaccount:"mxp-${CONTROLPLANE_ID}-system:external-secrets-controller" --audience api://AzureADTokenExchange
+```
+
+8. Assign the `Key Vault Secrets User` role to the user-assigned managed identity that you created earlier.
This step gives the managed identity permission to read secrets from the key vault:
+```shell
+az role assignment create \
+  --assignee-object-id "${IDENTITY_PRINCIPAL_ID}" \
+  --role "Key Vault Secrets User" \
+  --scope "${KEYVAULT_RESOURCE_ID}" \
+  --assignee-principal-type ServicePrincipal
+```
+
+:::important
+You must manually restart a workload's pod when you add the annotation to the running pod's service account. The Entra workload identity mutating admission webhook requires a restart to inject the necessary environment.
+:::
+
+9. Create a `SharedSecretStore`. Replace `vaultUrl` with the URL of your Azure Key Vault instance. Replace `identityId` with the client ID of the managed identity created earlier:
+```yaml {copy-lines="all"}
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: azure-kv
+spec:
+  provider:
+    azurekv:
+      authType: WorkloadIdentity
+      vaultUrl: "<vault-url>"
+      identityId: "<identity-client-id>"
+  controlPlaneSelector:
+    names:
+    - <controlplane-name>
+  namespaceSelector:
+    names:
+    - default
+```
+
+
+
+
+#### Google Cloud Secret Manager
+
+
+
+You can configure access to Google Cloud Secret Manager using static credentials or workload identity. Below are instructions for configuring either. See the [ESO provider API][eso-provider-api] for more information.
+
+:::important
+While the underlying ESO API supports more auth methods, static credentials are currently the only supported auth method in Cloud Spaces.
+:::
+
+##### Static credentials
+
+1. Use the [GCP CLI][gcp-cli] to create access credentials.
+2. Save the output in a file called `gcp-credentials.json`.
+3. Store the access credentials in a secret in the same namespace as the `SharedSecretStore`.
+   ```shell {label="kube-create-secret",copy-lines="all"}
+   kubectl create secret \
+     generic gcpsm-secret \
+     -n default \
+     --from-file=creds=./gcp-credentials.json
+   ```
+
+4. Create a `SharedSecretStore`, referencing the secret created earlier. Replace `projectID` with your GCP Project ID:
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: gcp-sm
+spec:
+  provider:
+    gcpsm:
+      auth:
+        secretRef:
+          secretAccessKeySecretRef:
+            name: gcpsm-secret
+            key: creds
+      projectID: <project-id>
+  controlPlaneSelector:
+    names:
+    - <controlplane-name>
+  namespaceSelector:
+    names:
+    - default
+```
+
+:::tip
+The example above maps a Shared Secret Store into a single namespace of a single control plane. Read [control plane selection][control-plane-selection] and [namespace selection][namespace-selection] to learn how to map into one or more namespaces of one or more control planes.
+:::
+
+
+##### Workload identity with Service Accounts to IAM Roles
+
+
+To configure, grant the `roles/iam.workloadIdentityUser` role to the Kubernetes
+service account in the control plane namespace to impersonate the IAM service
+account.
+
+1. Ensure you've deployed Spaces on a [Workload Identity Federation-enabled][workload-identity-federation-enabled] GKE cluster.
+2. Determine the Spaces-generated `controlPlaneID` of your control plane. When you deploy a `kind: controlplane` in a Space, the Spaces software deploys a set of pods in a new namespace following the format `mxp-<controlPlaneID>-system`.
+```shell
+kubectl get controlplane <controlplane-name> -o jsonpath='{.status.controlPlaneID}'
+```
+
+3. Create a GCP IAM service account with the [GCP CLI][gcp-cli-1]:
+```shell
+gcloud iam service-accounts create <service-account-name> \
+  --project=<project-id>
+```
+
+4. Grant the IAM service account the role to access GCP Secret Manager:
+```shell
+SA_NAME=<service-account-name>
+IAM_SA_PROJECT_ID=<project-id>
+gcloud projects add-iam-policy-binding "${IAM_SA_PROJECT_ID}" \
+  --member "serviceAccount:${SA_NAME}@${IAM_SA_PROJECT_ID}.iam.gserviceaccount.com" \
+  --role roles/secretmanager.secretAccessor
+```
+
+5. When you enable the Shared Secrets feature, a service account gets created in each control plane for the External Secrets Operator. Apply a [GCP IAM policy binding][gcp-iam-policy-binding] to associate this service account with the desired GCP IAM role.
+```shell
+PROJECT_ID=<project-id>
+PROJECT_NUMBER=<project-number>
+CONTROLPLANE_ID=<controlplane-id>
+gcloud projects add-iam-policy-binding projects/${PROJECT_ID} \
+  --role "roles/iam.workloadIdentityUser" \
+  --member=principal://iam.googleapis.com/projects/${PROJECT_NUMBER}/locations/global/workloadIdentityPools/${PROJECT_ID}.svc.id.goog/subject/ns/mxp-${CONTROLPLANE_ID}-system/sa/external-secrets-controller
+```
+
+6. Update your Spaces deployment to annotate the SharedSecrets service account with the GCP IAM service account's identifier:
+```shell
+up space upgrade ... \
+  --set controlPlanes.sharedSecrets.serviceAccount.customAnnotations."iam\.gke\.io/gcp-service-account"="<iam-sa-email>"
+```
+
+7. Create a `SharedSecretStore`. Replace `projectID` with your GCP Project ID:
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: gcp-sm
+spec:
+  provider:
+    gcpsm:
+      projectID: <project-id>
+  controlPlaneSelector:
+    names:
+    - <controlplane-name>
+  namespaceSelector:
+    names:
+    - default
+```
+
+:::tip
+The example above maps a Shared Secret Store into a single namespace of a single control plane. Read [control plane selection][control-plane-selection-1] and [namespace selection][namespace-selection-2] to learn how to map into one or more namespaces of one or more control planes.
+:::
+
+### Manage your secret distribution
+
+After you create your SharedSecretStore, you can define which secrets to
+distribute using SharedExternalSecret:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedExternalSecret
+metadata:
+  name: database-credentials
+  namespace: default
+spec:
+  # Select the same control planes as your SharedSecretStore
+  controlPlaneSelector:
+    labelSelectors:
+    - matchLabels:
+        environment: production
+
+  externalSecretSpec:
+    refreshInterval: 1h
+    secretStoreRef:
+      name: aws-secrets # References the SharedSecretStore name
+      kind: ClusterSecretStore
+    target:
+      name: db-credentials
+    data:
+    - secretKey: username
+      remoteRef:
+        key: prod/database/credentials
+        property: username
+    - secretKey: password
+      remoteRef:
+        key: prod/database/credentials
+        property: password
+```
+
+This configuration:
+
+* Pulls database credentials from your external secret provider
+* Creates secrets in all production control planes
+* Refreshes the secrets every hour
+* Creates a secret called `db-credentials` in each control plane
+
+When you create a SharedExternalSecret at the group level, Upbound's system
+creates a template for the corresponding ClusterExternalSecrets in each selected
+control plane.
+
+The example below illustrates the ClusterExternalSecret that Upbound creates:
+
+```yaml
+# Inside each matching control plane:
+apiVersion: external-secrets.io/v1beta1
+kind: ClusterExternalSecret
+metadata:
+  name: database-credentials
+spec:
+  refreshInterval: 1h
+  secretStoreRef:
+    name: aws-secrets
+    kind: ClusterSecretStore
+  data:
+  - secretKey: username
+    remoteRef:
+      key: prod/database/credentials
+      property: username
+```
+
+The hierarchy in this configuration is:
+
+1. SharedExternalSecret (group level) defines what secrets to distribute
+2. ClusterExternalSecret (control plane level) manages the distribution within
+   each control plane
+3. Kubernetes Secrets (namespace level) are created in specified namespaces
+
+
+#### Control plane selection
+
+To configure which control planes in a group you want to project a SecretStore into, use the `spec.controlPlaneSelector` field. You can either use `labelSelectors` or the `names` of a control plane directly. A control plane matches if any of the label selectors match.
+
+This example matches all control planes in the group that have `environment: production` as a label:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: my-secret-store
+spec:
+  controlPlaneSelector:
+    labelSelectors:
+    - matchLabels:
+        environment: production
+```
+
+You can use the more complex `matchExpressions` to match labels based on an expression. This example matches control planes that have label `environment: production` or `environment: staging`:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: my-secret-store
+spec:
+  controlPlaneSelector:
+    labelSelectors:
+    - matchExpressions:
+      - { key: environment, operator: In, values: [production,staging] }
+```
+
+You can also specify the names of control planes directly:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: my-secret-store
+spec:
+  controlPlaneSelector:
+    names:
+    - controlplane-dev
+    - controlplane-staging
+    - controlplane-prod
+```
+
+
+#### Namespace selection
+
+To configure which namespaces **within each matched control plane** to project the secret store into, use the `spec.namespaceSelector` field. The projected secret store only appears in the namespaces matching the provided selector. You can either use `labelSelectors` or the `names` of namespaces directly. A namespace matches if any of the label selectors match.
+
+**For all control planes matched by** `spec.controlPlaneSelector`, this example matches all namespaces in each selected control plane that have `team: team1` as a label:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: my-secret-store
+spec:
+  namespaceSelector:
+    labelSelectors:
+    - matchLabels:
+        team: team1
+```
+
+You can use the more complex `matchExpressions` to match labels based on an expression. This example matches namespaces that have label `team: team1` or `team: team2`:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: my-secret-store
+spec:
+  namespaceSelector:
+    labelSelectors:
+    - matchExpressions:
+      - { key: team, operator: In, values: [team1,team2] }
+```
+
+You can also specify the names of namespaces directly:
+
+```yaml
+apiVersion: spaces.upbound.io/v1alpha1
+kind: SharedSecretStore
+metadata:
+  name: my-secret-store
+spec:
+  namespaceSelector:
+    names:
+    - team1-namespace
+    - team2-namespace
+```
+
+## Configure secrets directly in a control plane
+
+
+The sections above explain how to use group-scoped resources to project secrets into multiple control planes. You can also use ESO API types directly in a control plane as you would in standalone Crossplane or Kubernetes.
+
+
+See the [ESO documentation][eso-documentation] for a full guide on using the API types.
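+
+For example, a plain ExternalSecret applied inside one control plane can pull a
+single entry from a store that a SharedSecretStore projected there. A minimal
+sketch, assuming the `aws-secrets` store from earlier and a hypothetical
+`prod/api/token` entry in AWS Secrets Manager:
+
+```yaml
+apiVersion: external-secrets.io/v1beta1
+kind: ExternalSecret
+metadata:
+  name: api-token
+  namespace: default
+spec:
+  refreshInterval: 1h
+  secretStoreRef:
+    name: aws-secrets # ClusterSecretStore projected by the SharedSecretStore
+    kind: ClusterSecretStore
+  target:
+    name: api-token # Kubernetes Secret created in this namespace
+  data:
+  - secretKey: token
+    remoteRef:
+      key: prod/api/token # hypothetical Secrets Manager entry
+```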
+
+## Best practices
+
+When you configure secrets management in your Upbound environment, keep the
+following best practices in mind:
+
+**Use consistent labeling schemes** across your control planes for predictable
+and manageable secret distribution.
+
+**Organize your secrets** in your external provider using a hierarchical
+structure that mirrors your control plane organization.
+
+**Set appropriate refresh intervals** based on your security requirements and the
+nature of the secrets.
+
+**Use namespace selection sparingly** to limit secret distribution to only the
+namespaces that need them.
+
+**Use separate tokens for each environment,** and keep them in distinct
+SharedSecretStores. Users could bypass SharedExternalSecret selectors by
+creating ClusterExternalSecrets directly in control planes, which grants access
+to all secrets available to that token.
+
+**Document your secret management architecture**, including which control planes
+should receive which secrets.
+
+[control-plane-selection]: #control-plane-selection
+[namespace-selection]: #namespace-selection
+[control-plane-selection-1]: #control-plane-selection
+[namespace-selection-2]: #namespace-selection
+
+[external-secrets-operator-eso]: https://external-secrets.io
+[workload-identity-enabled-aks-cluster]: https://learn.microsoft.com/en-us/azure/aks/workload-identity-deploy-cluster
+[eso-provider-api]: https://external-secrets.io/latest/provider/google-secrets-manager/
+[gcp-cli]: https://cloud.google.com/iam/docs/creating-managing-service-account-keys
+[workload-identity-federation-enabled]: https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity#enable_on_clusters_and_node_pools
+[gcp-cli-1]: https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity#kubernetes-sa-to-iam
+[gcp-iam-policy-binding]: https://cloud.google.com/sdk/gcloud/reference/iam/service-accounts/add-iam-policy-binding
+[eso-documentation]: https://external-secrets.io/latest/introduction/getting-started/
diff --git a/spaces_versioned_docs/version-v1.9/howtos/self-hosted/_category_.json b/spaces_versioned_docs/version-v1.9/howtos/self-hosted/_category_.json
new file mode 100644
index 000000000..5bf23bb0a
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.9/howtos/self-hosted/_category_.json
@@ -0,0 +1,11 @@
+{
+  "label": "Self-Hosted Spaces",
+  "position": 2,
+  "collapsed": true,
+  "customProps": {
+    "plan": "business"
+  }
+
+}
+
+
diff --git a/spaces_versioned_docs/version-v1.9/howtos/self-hosted/administer-features.md b/spaces_versioned_docs/version-v1.9/howtos/self-hosted/administer-features.md
new file mode 100644
index 000000000..ce878014e
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.9/howtos/self-hosted/administer-features.md
@@ -0,0 +1,121 @@
+---
+title: Administer features
+sidebar_position: 12
+description: Enable and disable features in Spaces
+---
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Feature flags and their availability may vary by version.
+
+For detailed feature availability across versions, see the versioned Spaces documentation.
+:::
+
+This guide shows how to enable or disable features in your self-hosted Space.
+
+## Shared secrets
+
+**Status:** Preview
+
+This feature is enabled by default in Cloud Spaces.
+
+To enable this feature in a self-hosted Space, set
+`features.alpha.sharedSecrets.enabled=true` when installing the Space:
+
+```bash
+up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
+  ...
+  --set "features.alpha.sharedSecrets.enabled=true" \
+```
+
+
+## Observability
+
+**Status:** GA
+**Available from:** Spaces v1.13+
+
+This feature is enabled by default in Cloud Spaces.
+
+
+
+To enable this feature in a self-hosted Space, set
+`observability.enabled=true` (`features.alpha.observability.enabled=true` before `v1.14.0`) when installing the Space:
+
+```bash
+up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
+  ...
+  --set "observability.enabled=true" \
+```
+
+The observability feature collects telemetry data from user-facing control
+plane workloads like:
+
+* Crossplane
+* Providers
+* Functions
+
+Self-hosted Spaces users can add control plane system workloads such as the
+`api-server` and `etcd` by setting the
+`observability.collectors.includeSystemTelemetry` Helm flag to true.
+
+### Sensitive data
+
+To avoid exposing sensitive data in the `SharedTelemetryConfig` resource, use
+Kubernetes secrets to store the sensitive data and reference the secret in the
+`SharedTelemetryConfig` resource.
+
+Create the secret in the same namespace/group as the `SharedTelemetryConfig`
+resource. The example below uses `kubectl create secret` to create a new secret:
+
+```bash
+kubectl create secret generic sensitive -n <namespace> \
+  --from-literal=apiKey='YOUR_API_KEY'
+```
+
+Next, reference the secret in the `SharedTelemetryConfig` resource:
+
+```yaml
+apiVersion: observability.spaces.upbound.io/v1alpha1
+kind: SharedTelemetryConfig
+metadata:
+  name: newrelic
+spec:
+  configPatchSecretRefs:
+  - name: sensitive
+    key: apiKey
+    path: exporters.otlphttp.headers.api-key
+  controlPlaneSelector:
+    labelSelectors:
+    - matchLabels:
+        org: foo
+  exporters:
+    otlphttp:
+      endpoint: https://otlp.nr-data.net
+      headers:
+        api-key: dummy # This value is replaced by the secret value and can be omitted
+  exportPipeline:
+    metrics: [otlphttp]
+    traces: [otlphttp]
+    logs: [otlphttp]
+```
+
+The `configPatchSecretRefs` field in the `spec` specifies the secret `name`,
+`key`, and `path` values to inject the secret value in the
+`SharedTelemetryConfig` resource.
+
+## Shared backups
+
+As of Spaces `v1.12.0`, this feature is enabled by default.
+
+To disable it in a self-hosted Space, pass `features.alpha.sharedBackup.enabled=false` as a Helm chart value:
+`--set "features.alpha.sharedBackup.enabled=false"`
+
+## Query API
+
+**Status:** Preview
+
+The Query API is available in the Cloud Space offering and enabled by default.
+
+Query API is required for self-hosted deployments with connected Spaces. See the
+related [documentation][documentation]
+to enable this feature.
+
+[documentation]: /spaces/howtos/query-api/
diff --git a/spaces_versioned_docs/version-v1.9/howtos/self-hosted/attach-detach.md b/spaces_versioned_docs/version-v1.9/howtos/self-hosted/attach-detach.md
new file mode 100644
index 000000000..1465921cf
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.9/howtos/self-hosted/attach-detach.md
@@ -0,0 +1,198 @@
+---
+title: Connect or disconnect a Space
+sidebar_position: 12
+description: Enable and connect self-hosted Spaces to the Upbound console
+---
+:::info API Version Information
+This guide applies to **Spaces v1.8.0 and later** (Self-Hosted only). Connection to the Upbound console requires the Query API and Upbound RBAC to be enabled.
+
+For version-specific features, requirements, and Query API setup details, see [Deploy Query API infrastructure](./query-api.md).
+:::
+
+:::important
+This feature is in preview.
Starting in Spaces `v1.8.0` and later, you must +deploy and [enable the Query API][enable-the-query-api] and [enable Upbound +RBAC][enable-upbound-rbac] to connect a Space to Upbound. +::: + +[Upbound][upbound] allows you to connect self-hosted Spaces and enables a streamlined operations and debugging experience in your Console. + +## Usage + +### Connect + +Before you begin, make sure you have: + +- An existing Upbound [organization][organization] in Upbound SaaS. +- The `up` CLI installed and logged into your organization +- `kubectl` installed with the kubecontext of your self-hosted Space cluster. +- A `token.json` license, provided by your Upbound account representative. +- You enabled the [Query API][query-api] in the self-hosted Space. + +Create a new `UPBOUND_SPACE_NAME`. If you don't create a name, `up` automatically generates one for you: + +```ini +export UPBOUND_SPACE_NAME=your-self-hosted-space +``` + +#### With up CLI + +:::tip +The command tries to connect the Space to the org account context pointed at by your `up` CLI profile. Make sure you've logged into Upbound SaaS with `up login -a ` before trying to connect the Space. +::: + +Connect the Space to the Console: + +```bash +up space connect "${UPBOUND_SPACE_NAME}" +``` + +This command installs a Connect agent, creates a service account, and configures permissions in your Upbound cloud organization in the `upbound-system` namespace of your Space. + +#### With Helm + +Export your Upbound org account name to an environment variable called `UPBOUND_ORG_NAME`. You can see this value by running `up org list` after logging on to Upbound. + +```ini +export UPBOUND_ORG_NAME=your-org-name +``` + +Create a new robot token and export it to an environment variable called `UPBOUND_TOKEN`: + +```bash +up robot create "${UPBOUND_SPACE_NAME}" --description="Robot used for authenticating Space '${UPBOUND_SPACE_NAME}' with Upbound Connect" +export UPBOUND_TOKEN=$(up robot token create "$UPBOUND_SPACE_NAME" "$UPBOUND_SPACE_NAME" --file - | jq -r '.token') +``` + +:::note +Follow the [`jq` installation guide][jq-install] if your machine doesn't include +it by default. +::: + +Create a secret containing the robot token: + +```bash +kubectl create secret -n upbound-system generic connect-token --from-literal=token=${UPBOUND_TOKEN} +``` + +Specify your username and password for the helm OCI registry: + +```bash +jq -r .token $SPACES_TOKEN_PATH | helm registry login xpkg.upbound.io -u $(jq -r .accessId $SPACES_TOKEN_PATH) --password-stdin +``` + +In the same cluster where you installed the Spaces software, install the Upbound connect agent with your token secret. + +```bash +helm -n upbound-system upgrade --install agent \ + oci://xpkg.upbound.io/spaces-artifacts/agent \ + --version "0.0.0-441.g68777b9" \ + --set "image.repository=xpkg.upbound.io/spaces-artifacts/agent" \ + --set "registration.image.repository=xpkg.upbound.io/spaces-artifacts/register-init" \ + --set "imagePullSecrets[0].name=upbound-pull-secret" \ + --set "registration.enabled=true" \ + --set "space=${UPBOUND_SPACE_NAME}" \ + --set "organization=${UPBOUND_ORG_NAME}" \ + --set "tokenSecret=connect-token" \ + --wait +``` + + +#### View your Space in the Console + + +Go to the [Upbound Console][upbound-console], log in, and choose the newly connected Space from the Space selector dropdown. + +![A screenshot of the Upbound Console space selector dropdown](/img/attached-space.png) + +:::note +You can only connect a self-hosted Space to a single organization at a time. 
+:::
+
+### Disconnect
+
+#### With up CLI
+
+To disconnect a self-hosted Space, including one whose cluster no longer exists, run the following command:
+
+```bash
+up space disconnect "${UPBOUND_SPACE_NAME}"
+```
+
+If the Space still exists, this command uninstalls the Connect agent and deletes the associated service account and permissions.
+
+#### With Helm
+
+To disconnect a self-hosted Space, including one whose cluster no longer exists, run the following command:
+
+```bash
+helm delete -n upbound-system agent
+```
+
+Clean up the robot token you created for this self-hosted Space:
+
+```bash
+up robot delete "${UPBOUND_SPACE_NAME}" --force
+```
+
+## Security model
+
+### Architecture
+
+![An architectural diagram of a self-hosted Space attached to Upbound](/img/console-attach-architecture.jpg)
+
+:::note
+This diagram illustrates a self-hosted Space running in AWS connected to the global Upbound Console. The same model applies to a Space running in AKS, GKE, or other Kubernetes environments.
+:::
+
+### Data path
+
+Upbound uses a Pub/Sub model over TLS to communicate between Upbound's global
+console and your self-hosted Space. A self-hosted Space establishes a secure
+connection with `connect.upbound.io`, `api.upbound.io`, and `auth.upbound.io` and subscribes to an
+endpoint.
+
+:::important
+Add `connect.upbound.io`, `api.upbound.io`, and `auth.upbound.io` to your organization's list of
+allowed endpoints.
+:::
+
+The Upbound Console communicates with the Space through that endpoint. The data flow
+is:
+
+1. Users sign in to the Upbound Console, redirecting to authenticate with an organization's configured Identity Provider via SSO.
+2. Once authenticated, actions in the Console, like listing control planes or specific resource types from a control plane, generate requests. These requests post as messages to the Upbound Connect service.
+3. A user's self-hosted Space polls the Upbound Connect service periodically for new messages, verifies the authenticity of each message, and fulfills the request it contains.
+4. A user's self-hosted Space returns the results of the request to the Upbound Connect service and the Console renders the results in the user's browser session.
+
+**Upbound never stores data originating from a self-hosted Space.** The data is transient and only exposed in the user's browser session. The Console needs this data to render your resources and control planes in the UI.
+
+### Data transmitted
+
+Users interact with the Upbound Console to generate request queries to the Upbound Connect Service while exploring, managing, or debugging a self-hosted Space. These requests send data back to the user's browser session in the Console, including:
+
+* Metadata for the Space
+* Metadata for control planes in the Space
+* Configuration manifests for various resource types within your Space: Crossplane managed resources, composite resources, composite resource claims, Upbound shared secrets, Upbound shared backups, Crossplane providers, ProviderConfigs, Configurations, and Crossplane Composite Functions.
+
+:::important
+This data only concerns resource configuration. The data _inside_ the managed
+resources in your Space isn't visible at any point.
+:::
+
+**Upbound can't see your data.** Upbound doesn't have access to session-based data rendered for your users in the Upbound Console. Upbound has no information about your self-hosted Space, other than that you've connected a self-hosted Space.
+
+### Threat vectors
+
+Only users with editor or administrative permissions can make changes using the Console, such as creating or deleting control planes or groups.
+
+
+[enable-the-query-api]: /spaces/howtos/self-hosted/query-api
+[enable-upbound-rbac]: /manuals/platform/concepts/authorization/upbound-rbac
+[upbound]: /manuals/console/upbound-console
+[organization]: /manuals/platform/concepts/identity-management/organizations
+[query-api]: /spaces/howtos/self-hosted/query-api
+[jq-install]: https://jqlang.org/download/
+
+[upbound-console]: https://console.upbound.io
diff --git a/spaces_versioned_docs/version-v1.9/howtos/self-hosted/billing.md b/spaces_versioned_docs/version-v1.9/howtos/self-hosted/billing.md
new file mode 100644
index 000000000..145ff9f03
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.9/howtos/self-hosted/billing.md
@@ -0,0 +1,307 @@
+---
+title: Self-Hosted Space Billing
+sidebar_position: 50
+description: A guide for how billing works in an Upbound Space
+---
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Billing models may evolve between versions; see Capacity Licensing for alternative models.
+
+For version-specific features, capacity-based licensing details, and reference specifications, see [Capacity Licensing](/spaces/howtos/self-hosted/capacity-licensing).
+:::
+
+Spaces are a self-hosting feature of Upbound's [flagship product][flagship-product] for platform teams to deploy control planes in their self-managed environments. You can install Spaces into any Kubernetes cluster in your own cloud account, on-premises data center, or on the edge. The pricing is usage-based and requires an Upbound account and subscription. The billing unit is a `Loop`.
+
+
+:::info
+This guide describes the traditional usage-based billing model using object storage. For disconnected or air-gapped environments, consider [Capacity Licensing](/spaces/howtos/self-hosted/capacity-licensing), which provides a simpler fixed-capacity model with local usage tracking.
+:::
+
+## Billing details
+
+Spaces **aren't connected** to Upbound's global service. To enable proper billing, the Spaces software ships a controller whose responsibility is to collect billing data from your Spaces deployment. The collection and storage of your billing data happens entirely within your environment; no data is automatically emitted back to Upbound's global service. This data gets written to object storage of your choice. AWS, Azure, and GCP are currently supported. The Spaces software exports billing usage data every ~15 seconds.
+
+Spaces customers must periodically provide the billing data to Upbound. Contact your Upbound sales representative to learn more.
+
+
+
+## AWS S3
+
+
+
+Configure billing to write to an S3 bucket by providing the following values at install-time. Create an S3 bucket if you don't already have one.
+
+### IAM policy
+
+You must create an IAM policy and attach it to the IAM user (for static credentials) or IAM role (for assumed
+roles).
+ +The policy example below enables the necessary S3 permissions: + +```json +{ + "Sid":"EnableS3Permissions", + "Effect":"Allow", + "Action": [ + "s3:PutObject", + "s3:GetObject", + "s3:ListBucket", + "s3:DeleteObject" + ], + "Resource": [ + "arn:aws:s3:::your-bucket-name/*", + "arn:aws:s3:::your-bucket-name" + ] +}, +{ + "Sid": "ListBuckets", + "Effect": "Allow", + "Action": "s3:ListAllMyBuckets", + "Resource": "*" +} +``` + +### Authentication with static credentials + +In your Spaces install cluster, create a secret in the `upbound-system` +namespace. This secret must contain keys `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`. + +```bash +kubectl create secret generic billing-credentials -n upbound-system \ + --from-literal=AWS_ACCESS_KEY_ID= \ + --from-literal=AWS_SECRET_ACCESS_KEY= +``` + +Install the Space software, providing the billing details to the other required values. + + + + + + +```bash {hl_lines="2-6"} +helm -n upbound-system upgrade --install spaces ... \ + --set "billing.enabled=true" \ + --set "billing.storage.provider=aws" \ + --set "billing.storage.aws.region=" \ + --set "billing.storage.aws.bucket=" \ + --set "billing.storage.secretRef.name=billing-credentials" + ... +``` + + + + + +```bash {hl_lines="2-6"} +up space init ... \ + --set "billing.enabled=true" \ + --set "billing.storage.provider=aws" \ + --set "billing.storage.aws.region=" \ + --set "billing.storage.aws.bucket=" \ + --set "billing.storage.secretRef.name=billing-credentials" + ... +``` + + + + + + + +### Authentication with an IAM role + + +To use short-lived credentials with an assumed IAM role, create an IAM role with +established trust to the `vector`-serviceaccount in all `mxp-*-system` +namespaces. + +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "Federated": "arn:aws:iam::12345678912:oidc-provider/oidc.eks.eu-west-2.amazonaws.com/id/YOUROIDCPROVIDERID" + }, + "Action": "sts:AssumeRoleWithWebIdentity", + "Condition": { + "StringLike": { + "oidc.eks.eu-west-2.amazonaws.com/id/YOUROIDCPROVIDERID:sub": "system:serviceaccount:mxp-*-system:vector" + } + } + } + ] +} +``` + +For more information about workload identities, review the [Workload-identity +Configuration documentation][workload-identity-configuration-documentation] + + + + + + +```bash {hl_lines="2-7"} +helm -n upbound-system upgrade --install spaces ... \ + --set "billing.enabled=true" \ + --set "billing.storage.provider=aws" \ + --set "billing.storage.aws.region=" \ + --set "billing.storage.aws.bucket=" \ + --set "billing.storage.secretRef.name=" \ + --set "controlPlanes.vector.serviceAccount.customAnnotations[eks.amazonaws.com/role-arn]=" + ... +``` + + + + + +```bash {hl_lines="2-7"} +up space init ... \ + --set "billing.enabled=true" \ + --set "billing.storage.provider=aws" \ + --set "billing.storage.aws.region=" \ + --set "billing.storage.aws.bucket=" \ + --set "billing.storage.secretRef.name=" \ + --set "controlPlanes.vector.serviceAccount.customAnnotations[eks.amazonaws.com/role-arn]=" + ... +``` + + + + + + +*Note*: You must set `billing.storage.secretRef.name` to an empty string when using an assumed role. + + +## Azure blob storage + +Configure billing to write to a blob in Azure by providing the following values at install-time. Create a storage account and container if you don't already have one. + +Then, on the cluster where you installed the Spaces software, create a secret in `upbound-system`. 
This secret must contain keys `AZURE_TENANT_ID`, `AZURE_CLIENT_ID`, and `AZURE_CLIENT_SECRET`. Make sure to replace the values with details generated from your Azure account. + +```bash +kubectl create secret generic billing-credentials -n upbound-system \ + --from-literal=AZURE_TENANT_ID= \ + --from-literal=AZURE_CLIENT_ID= \ + --from-literal=AZURE_CLIENT_SECRET= +``` + +Install the Space software, providing the billing details to the other required values. + + + + + + +```bash {hl_lines="2-6"} +helm -n upbound-system upgrade --install spaces ... \ + --set "billing.enabled=true" \ + --set "billing.storage.provider=azure" \ + --set "billing.storage.azure.storageAccount=" \ + --set "billing.storage.azure.container=" \ + --set "billing.storage.secretRef.name=billing-credentials" + ... +``` + + + + + +```bash {hl_lines="2-6"} +up space init ... \ + --set "billing.enabled=true" \ + --set "billing.storage.provider=azure" \ + --set "billing.storage.azure.storageAccount=" \ + --set "billing.storage.azure.container=" \ + --set "billing.storage.secretRef.name=billing-credentials" + ... +``` + + + + + + + +## GCP Cloud Storage Buckets + + +Configure billing to write to a Cloud Storage bucket in GCP by providing the following values at install-time. Create a bucket if you don't already have one. + +Then, on the cluster where you installed the Spaces software, create a secret in `upbound-system`. This secret must contain the key `google_application_credentials`. Make sure to replace the value with a GCP service account key JSON generated from your GCP account. + +```bash +kubectl create secret generic billing-credentials -n upbound-system \ + --from-literal=google_application_credentials= +``` + +Install the Space software, providing the billing details to the other required values. + + + + + + +```bash {hl_lines="2-5"} +helm -n upbound-system upgrade --install spaces ... \ + --set "billing.enabled=true" \ + --set "billing.storage.provider=gcp" \ + --set "billing.storage.gcp.bucket=" \ + --set "billing.storage.secretRef.name=billing-credentials" + ... +``` + + + + + +```bash {hl_lines="2-5"} +up space init ... \ + --set "billing.enabled=true" \ + --set "billing.storage.provider=gcp" \ + --set "billing.storage.gcp.bucket=" \ + --set "billing.storage.secretRef.name=billing-credentials" + ... +``` + + + + + +## Export billing data to send to Upbound + +To prepare the billing data to send to Upbound, do the following: + +Ensure the current context of your kubeconfig points at the Spaces cluster. Then, run the [export][export] command. + + +:::important +Your current CLI must have read access to the bucket to run this command. +::: + + +The example below exports billing data stored in AWS: + +```bash +up space billing export --provider=aws \ + --bucket=spaces-billing-bucket \ + --account=your-upbound-org \ + --billing-month=2024-07 \ + --force-incomplete +``` + +The command creates a billing report that's zipped up in your current working directory. Send the output to your Upbound sales representative. + + +You can find full instructions and command options in the up [CLI reference][cli-reference] docs. 
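+
+Before exporting, it can help to confirm that usage data is landing in your
+bucket at all. A quick spot check for the AWS case, reusing the example bucket
+name from above:
+
+```bash
+# List the most recently written billing objects.
+aws s3 ls s3://spaces-billing-bucket --recursive --human-readable | tail
+```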
+ + +[export]: /reference/cli-reference +[cli-reference]: /reference/cli-reference +[flagship-product]: https://www.upbound.io/platform +[workload-identity-configuration-documentation]: https://docs.upbound.io/operate/accounts/authentication/oidc-configuration diff --git a/spaces_versioned_docs/version-v1.9/howtos/self-hosted/capacity-licensing.md b/spaces_versioned_docs/version-v1.9/howtos/self-hosted/capacity-licensing.md new file mode 100644 index 000000000..a1dc6c101 --- /dev/null +++ b/spaces_versioned_docs/version-v1.9/howtos/self-hosted/capacity-licensing.md @@ -0,0 +1,591 @@ +--- +title: Capacity Licensing +sidebar_position: 60 +description: A guide for capacity-based licensing in self-hosted Spaces +plan: "enterprise" +--- + + + + + +This guide explains how to configure and monitor capacity-based licensing in +self-hosted Upbound Spaces. Capacity licensing provides a simplified billing +model for disconnected or air-gapped environments where automated usage +reporting isn't possible. + +:::info +Spaces `v1.15` and later support Capacity Licensing as an +alternative to the traditional usage-based billing model described in the +[Self-Hosted Space Billing][space-billing] guide. +::: + +## Overview + +Capacity licensing allows organizations to purchase a fixed capacity of +resources upfront. The Spaces software tracks usage locally and provides +visibility into consumption against your purchased capacity, all without +requiring external connectivity to Upbound's services. + +### Key concepts + +- **Resource Hours**: The primary billing unit representing all resources + managed by Crossplane over time. This includes managed resources, + composites (XRs), claims (XRCs), and all composed resources - essentially + everything Crossplane manages. The system aggregates resource counts over each + hour using trapezoidal integration to accurately account for changes in + resource count throughout the hour. +- **Operations**: The number of Operations invoked by Crossplane. +- **License Capacity**: The total amount of resource hours and operations included in your license. +- **Usage Tracking**: Continuous monitoring of consumption with real-time utilization percentages. + +### How it works + +1. Upbound provides you with a license file containing your purchased capacity +2. You configure a `SpaceLicense` in your Spaces cluster +3. The metering system automatically: + - Collects measurements from all control planes every minute + - Aggregates usage data into hourly intervals + - Stores usage data in a local PostgreSQL database + - Updates the `SpaceLicense` status with current consumption + +## Prerequisites + +### PostgreSQL database + +Capacity licensing requires a PostgreSQL database to store usage measurements. You can use: + +- An existing PostgreSQL instance +- A managed PostgreSQL service (AWS RDS, Azure Database, Google Cloud SQL) +- A PostgreSQL instance deployed in your cluster + +The database must be: + +- Accessible from the Spaces cluster +- Configured with a dedicated database and credentials + +#### Example: Deploy PostgreSQL with CloudNativePG + +If you don't have an existing PostgreSQL instance, you can deploy one in your +cluster using [CloudNativePG] (CNPG). CNPG is a Kubernetes operator that +manages PostgreSQL clusters. + +1. Install the CloudNativePG operator: + +```bash +kubectl apply -f https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/release-1.24/releases/cnpg-1.24.1.yaml +``` + +2. 
Create a PostgreSQL cluster for metering: + +```yaml +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: metering-postgres + namespace: upbound-system +spec: + instances: 1 + imageName: ghcr.io/cloudnative-pg/postgresql:16 + bootstrap: + initdb: + database: metering + owner: metering + postInitApplicationSQL: + - ALTER ROLE "metering" CREATEROLE; + storage: + size: 5Gi + # Optional: Configure resources for production use + # resources: + # requests: + # memory: "512Mi" + # cpu: "500m" + # limits: + # memory: "1Gi" + # cpu: "1000m" +--- +apiVersion: v1 +kind: Secret +metadata: + name: metering-postgres-app + namespace: upbound-system + labels: + cnpg.io/reload: "true" +stringData: + username: metering + password: "your-secure-password-here" +type: kubernetes.io/basic-auth +``` + +```bash +kubectl apply -f metering-postgres.yaml +``` + +3. Wait for the cluster to be ready: + +```bash +kubectl wait --for=condition=ready cluster/metering-postgres -n upbound-system --timeout=5m +``` + +4. You can access the PostgreSQL cluster at `metering-postgres-rw.upbound-system.svc.cluster.local:5432`. + +:::tip +For production deployments, consider: +- Increasing `instances` to 3 for high availability +- Configuring [backups] to object storage +- Setting appropriate resource requests and limits +- Using a dedicated storage class with good I/O performance +::: + +### License file + +Contact your Upbound sales representative to obtain a license file for your organization. The license file contains: +- Your unique license ID +- Purchased capacity (resource hours and operations) +- License validity period +- Any usage restrictions (such as cluster UUID pinning) + +## Configuration + +### Step 1: Create database credentials secret + +Create a Kubernetes secret containing your PostgreSQL password using the pgpass format: + +```bash +# Create a pgpass file with format: hostname:port:database:username:password +# Note: The database name and username must be 'metering' +# For CNPG clusters, use the read-write service endpoint: -rw..svc.cluster.local +echo "metering-postgres-rw.upbound-system.svc.cluster.local:5432:metering:metering:your-secure-password-here" > pgpass + +# Create the secret +kubectl create secret generic metering-postgres-credentials \ + -n upbound-system \ + --from-file=pgpass=pgpass + +# Clean up the pgpass file +rm pgpass +``` + +The secret must contain a single key: +- **`pgpass`**: PostgreSQL password file in the format `hostname:port:metering:metering:password` + +:::note +The database name and username are fixed as `metering`. Ensure your PostgreSQL instance has a database named `metering` with a user `metering` that has appropriate permissions. + +If you deployed PostgreSQL using CNPG as shown in the example above, the password should match what you set in the `metering-postgres-app` secret. +::: + +:::tip +For production environments, consider using external secret management solutions: +- [External Secrets Operator][eso] +- Cloud-specific secret managers (AWS Secrets Manager, Azure Key Vault, GCP Secret Manager) +::: + +### Step 2: Enable metering in Spaces + +Enable the metering feature when installing or upgrading Spaces: + + + + + +```bash {hl_lines="2-7"} +helm -n upbound-system upgrade --install spaces ... 
\ + --set "metering.enabled=true" \ + --set "metering.storage.postgres.connection.url=metering-postgres-rw.upbound-system.svc.cluster.local:5432" \ + --set "metering.storage.postgres.connection.credentials.secret.name=metering-postgres-credentials" \ + --set "metering.interval=1m" \ + --set "metering.workerCount=10" \ + --set "metering.aggregationInterval=1h" \ + --set "metering.measurementRetentionDays=30" + ... +``` + + + + + +```bash {hl_lines="2-7"} +up space init ... \ + --set "metering.enabled=true" \ + --set "metering.storage.postgres.connection.url=metering-postgres-rw.upbound-system.svc.cluster.local:5432" \ + --set "metering.storage.postgres.connection.credentials.secret.name=metering-postgres-credentials" \ + --set "metering.interval=1m" \ + --set "metering.workerCount=10" \ + --set "metering.aggregationInterval=1h" \ + --set "metering.measurementRetentionDays=30" + ... +``` + + + + + +#### Configuration options + +| Option | Default | Description | +|--------|---------|-------------| +| `metering.enabled` | `false` | Enable the metering feature | +| `metering.storage.postgres.connection.url` | - | PostgreSQL host and port (format: `host:port`, required) | +| `metering.storage.postgres.connection.credentials.secret.name` | - | Name of the secret containing PostgreSQL credentials (required) | +| `metering.storage.postgres.connection.sslmode` | `require` | SSL mode for PostgreSQL connection (`disable`, `allow`, `prefer`, `require`, `verify-ca`, `verify-full`) | +| `metering.storage.postgres.connection.ca.name` | - | Name of the secret containing CA certificate for TLS connections (optional) | +| `metering.interval` | `1m` | How often to collect measurements from control planes | +| `metering.workerCount` | `10` | Number of parallel workers for measurement collection | +| `metering.aggregationInterval` | `1h` | How often to aggregate measurements into hourly usage data | +| `metering.measurementRetentionDays` | `30` | Days to retain raw measurements (0 = indefinite) | + + +#### Database sizing and retention + +The metering system uses two PostgreSQL tables to track usage: + +**Raw measurements table** (`measurements`): +- Stores point-in-time snapshots collected every measurement interval (default: 1 minute) +- One row per control plane per interval +- Affected by the `measurementRetentionDays` setting +- Used for detailed auditing and troubleshooting + +**Aggregated usage table** (`hourly_usage`): +- Stores hourly aggregated resource hours and operations per license +- One row per hour per license +- Never deleted (required for accurate license tracking) +- Grows much slower than raw measurements + +##### Storage sizing guidelines + +Estimate your PostgreSQL storage needs based on these factors: + + +| Deployment Size | Control Planes | Measurement Interval | Retention Days | Raw Measurements | Indexes & Overhead | Total Storage | +|----------------|----------------|---------------------|----------------|------------------|-------------------|---------------| +| Small | 10 | 1m | 30 | ~85 MB | ~40 MB | **~125 MB** | +| Medium | 50 | 1m | 30 | ~430 MB | ~215 MB | **~645 MB** | +| Large | 200 | 1m | 30 | ~1.7 GB | ~850 MB | **~2.5 GB** | +| Large (90-day retention) | 200 | 1m | 90 | ~5.2 GB | ~2.6 GB | **~7.8 GB** | + +The aggregated hourly usage table adds minimal overhead (~50 KB per year per license). 
+ +**Formula for custom calculations**: +``` +Daily measurements per control plane = (24 * 60) / interval_minutes +Total rows = control_planes × daily_measurements × retention_days +Storage (MB) ≈ (total_rows × 200 bytes) / 1,048,576 × 1.5 (with indexes) +``` + +##### Retention behavior + +The `measurementRetentionDays` setting controls retention of raw measurement data: + +- **Default: 30 days** - Balances audit capabilities with storage efficiency +- **Set to 0**: Disables cleanup, retains all raw measurements indefinitely +- **Cleanup runs**: Every aggregation interval (default: hourly) +- **What's kept forever**: Aggregated hourly usage data (needed for license tracking) +- **What's cleaned up**: Raw point-in-time measurements older than retention period + +**Recommendations**: +- **30 days**: For most troubleshooting and short-term auditing +- **60 to 90 days**: For environments requiring extended audit trails +- **Unlimited (0)**: Only for environments with ample storage or specific compliance requirements + +:::note +Increasing retention period linearly increases storage requirements for raw measurements. The aggregated hourly data is always retained regardless of this setting. +::: + +### Step 3: Apply your license + +Use the `up` CLI to apply your license file: + +```bash +up space license apply /path/to/license.json +``` + +This command automatically: +- Creates a secret containing your license file in the `upbound-system` namespace +- Creates the `SpaceLicense` resource configured to use that secret + +:::tip +You can specify a different namespace for the license secret using the `--namespace` flag: +```bash +up space license apply /path/to/license.json --namespace my-namespace +``` +::: + +
+Alternative: Manual kubectl approach + +If you prefer not to use the `up` CLI, you can manually create the resources: + +1. Create the license secret: + +```bash +kubectl create secret generic space-license \ + -n upbound-system \ + --from-file=license.json=/path/to/license.json +``` + +2. Create the SpaceLicense resource: + +```yaml +apiVersion: admin.spaces.upbound.io/v1alpha1 +kind: SpaceLicense +metadata: + name: space +spec: + secretRef: + name: space-license + namespace: upbound-system + key: license.json +``` + +```bash +kubectl apply -f spacelicense.yaml +``` + +:::important +You **must** name the `SpaceLicense` resource `space`. This resource is a singleton and only one can exist in the cluster. +::: + +
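+
+Whichever approach you use, you can confirm the license was accepted before
+moving on. A quick check with kubectl:
+
+```bash
+# The SpaceLicense singleton is named "space"; LicenseValid should report True.
+kubectl get spacelicense space \
+  -o jsonpath='{.status.conditions[?(@.type=="LicenseValid")].status}'
+```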
+ +## Monitoring usage + +### Check license status + +Use the `up` CLI to view your license details and current usage: + +```bash +up space license show +``` + +Example output: + +``` +Spaces License Status: Valid (License is valid) + +Created: 2024-01-01T00:00:00Z +Expires: 2025-01-01T00:00:00Z + +Plan: enterprise + +Resource Hour Limit: 1000000 +Operation Limit: 500000 + +Enabled Features: +- spaces +- query-api +- backup-restore +``` + +The output shows: +- License validity status and any validation messages +- Creation and expiration dates +- Your commercial plan tier +- Capacity limits for resource hours and operations +- Enabled features in your license +- Any restrictions (such as cluster UUID pinning) + +
+Alternative: View detailed status with kubectl + +For detailed information including usage statistics, use kubectl: + +```bash +kubectl get spacelicense space -o yaml +``` + +Example output showing usage data: + +```yaml +apiVersion: admin.spaces.upbound.io/v1alpha1 +kind: SpaceLicense +metadata: + name: space +spec: + secretRef: + name: space-license + namespace: upbound-system +status: + conditions: + - type: LicenseValid + status: "True" + reason: Valid + message: "License is valid" + id: "lic_abc123xyz" + plan: "enterprise" + capacity: + resourceHours: 1000000 + operations: 500000 + usage: + resourceHours: 245680 + operations: 12543 + resourceHoursUtilization: "24.57%" + operationsUtilization: "2.51%" + firstMeasurement: "2024-01-15T10:00:00Z" + lastMeasurement: "2024-02-10T14:30:00Z" + createdAt: "2024-01-01T00:00:00Z" + expiresAt: "2025-01-01T00:00:00Z" + enabledFeatures: + - "spaces" + - "query-api" + - "backup-restore" +``` + +
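+
+The usage figures are also easy to extract for scripts or alerts. For example,
+to print just the utilization percentages:
+
+```bash
+# Print resource-hour and operation utilization from the license status.
+kubectl get spacelicense space \
+  -o jsonpath='{.status.usage.resourceHoursUtilization}{"\n"}{.status.usage.operationsUtilization}{"\n"}'
+```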
+
+### Understanding the status fields
+
+| Field | Description |
+|-------|-------------|
+| `status.id` | Unique license identifier |
+| `status.plan` | Your commercial plan (community, standard, enterprise) |
+| `status.capacity` | Total capacity included in your license |
+| `status.usage.resourceHours` | Total resource hours consumed |
+| `status.usage.operations` | Total operations performed |
+| `status.usage.resourceHoursUtilization` | Percentage of resource hours capacity used |
+| `status.usage.operationsUtilization` | Percentage of operations capacity used |
+| `status.usage.firstMeasurement` | When usage tracking began |
+| `status.usage.lastMeasurement` | Most recent usage update |
+| `status.expiresAt` | License expiration date |
+
+### Monitor with kubectl
+
+Watch your license utilization in real-time:
+
+```bash
+kubectl get spacelicense space -w
+```
+
+Short output format:
+
+```
+NAME    PLAN         VALID   REASON   AGE
+space   enterprise   True    Valid    45d
+```
+
+## Managing licenses
+
+### Updating your license
+
+To update your license with a new license file (for example, when renewing or upgrading capacity), apply the new license:
+
+```bash
+up space license apply /path/to/new-license.json
+```
+
+This command replaces the existing license secret and updates the SpaceLicense resource.
+
+### Removing a license
+
+To remove a license:
+
+```bash
+up space license remove
+```
+
+This command:
+- Prompts for confirmation before proceeding
+- Removes the license secret
+
+To skip the confirmation prompt, use the `--force` flag:
+
+```bash
+up space license remove --force
+```
+
+## Troubleshooting
+
+### License not updating
+
+If the license status doesn't update with usage data:
+
+1. **Check metering controller logs**:
+   ```bash
+   kubectl logs -n upbound-system deployment/spaces-controller -c metering
+   ```
+
+2. **Check if the system captures your measurements**:
+   ```bash
+   # Connect to PostgreSQL and query the measurements table
+   kubectl exec -it <postgres-pod> -- psql -U metering -d metering \
+     -c "SELECT COUNT(*) FROM measurements WHERE timestamp > NOW() - INTERVAL '1 hour';"
+   ```
+
+### High utilization warnings
+
+If you're approaching your capacity limits:
+
+1. **Review resource usage** by control plane to identify high consumers
+2. **Contact your Upbound sales representative** to discuss capacity expansion
+3. **Optimize managed resources** by cleaning up unused resources
+
+### License validation failures
+
+If your license shows as invalid:
+
+1. **Check expiration date**: `kubectl get spacelicense space -o jsonpath='{.status.expiresAt}'`
+2. **Verify license file integrity**: Ensure the secret contains valid JSON
+3. **Check for cluster UUID restrictions**: Upbound pins some licenses to
+   specific clusters
+4. **Review controller logs** for detailed error messages
+
+## Differences from traditional billing
+
+### Capacity licensing
+
+- ✅ Works in disconnected environments
+- ✅ Provides real-time usage visibility
+- ✅ No manual data export required
+- ✅ Requires PostgreSQL database
+- ✅ Fixed capacity model
+
+### Traditional billing (object storage)
+
+
+- ❌ Requires periodic manual export
+- ❌ Delayed visibility into usage
+- ✅ Works with S3/Azure Blob/GCS
+- ❌ Requires cloud storage access
+- ✅ Pay-as-you-go model
+
+## Best practices
+
+### Database management
+
+1. **Regular backups**: Back up your metering database regularly to preserve usage history
+2. **Monitor database size**: Set appropriate retention periods to manage storage growth
+3. **Use managed databases**: Consider managed PostgreSQL services for production
4. **Connection pooling**: Use connection pooling for better performance at scale

### License management

1. **Monitor utilization**: Set up alerts before reaching 80% capacity
2. **Plan renewals early**: Start renewal discussions 60 days before expiration
3. **Track grace periods**: Note the `gracePeriodEndsAt` date for planning
4. **Secure license files**: Treat license files as sensitive credentials

### Operational monitoring

1. **Set up dashboards**: Create Grafana dashboards for usage trends
2. **Enable alerting**: Configure alerts for high utilization and expiration
3. **Regular audits**: Periodically review usage patterns across control planes
4. **Capacity planning**: Use historical data to predict future capacity needs

## Next steps

- Learn about [Observability] to monitor your Spaces deployment
- Explore [Backup and Restore][backup-restore] to protect your control plane data
- Review [Self-Hosted Space Billing][space-billing] for the traditional billing model
- Contact [Upbound Sales][sales] to discuss capacity licensing options

[space-billing]: /spaces/howtos/self-hosted/billing
[CloudNativePG]: https://cloudnative-pg.io/
[backups]: https://cloudnative-pg.io/documentation/current/backup_recovery/
[backup-restore]: /spaces/howtos/backup-and-restore
[sales]: https://www.upbound.io/contact
[eso]: https://external-secrets.io/
[Observability]: /spaces/howtos/observability

diff --git a/spaces_versioned_docs/version-v1.9/howtos/self-hosted/certs.md b/spaces_versioned_docs/version-v1.9/howtos/self-hosted/certs.md
new file mode 100644
index 000000000..e517c250e
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.9/howtos/self-hosted/certs.md
@@ -0,0 +1,274 @@
---
title: Istio Ingress Gateway With Custom Certificates
sidebar_position: 20
description: Install self-hosted Spaces using an Istio Ingress Gateway in a kind cluster
---

:::important
Prerequisites

- Spaces token available in a file
- `docker login xpkg.upbound.io -u <access-id> -p <token>`
- [`istioctl`][istioctl] installation
- `jq` installation
:::

This document describes the installation of a self-hosted Space on an example `kind`
cluster along with an Istio Ingress Gateway and certificates. The service mesh and
certificate installation is transferable to self-hosted Spaces in arbitrary clouds.

## Create a kind cluster

Create a cluster with the node labeled for ingress and ports 80 and 443 mapped to the
host. The configuration below is a sketch based on the standard kind ingress setup;
adjust it to your environment:

```shell
cat <<EOF | kind create cluster --config=-
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
  kubeadmConfigPatches:
  - |
    kind: InitConfiguration
    nodeRegistration:
      kubeletExtraArgs:
        node-labels: "ingress-ready=true"
  extraPortMappings:
  - containerPort: 80
    hostPort: 80
    protocol: TCP
  - containerPort: 443
    hostPort: 443
    protocol: TCP
EOF
```

## Install Istio

:::important
This is an example and not recommended for use in production.
:::

1. Create the `istio-values.yaml` file

```shell
cat > istio-values.yaml << 'EOF'
apiVersion: install.istio.io/v1alpha1
kind: IstioOperator
spec:
  hub: gcr.io/istio-release
  components:
    ingressGateways:
    - enabled: true
      name: istio-ingressgateway
      k8s:
        nodeSelector:
          ingress-ready: "true"
        overlays:
        - apiVersion: apps/v1
          kind: Deployment
          name: istio-ingressgateway
          patches:
          - path: spec.template.spec.containers.[name:istio-proxy].ports
            value:
            - containerPort: 8080
              hostPort: 80
            - containerPort: 8443
              hostPort: 443
EOF
```

2. Install Istio via `istioctl`

```shell
istioctl install -f istio-values.yaml
```

## Create a self-signed Certificate via cert-manager

:::important
This Certificate manifest creates a self-signed certificate for a proof of concept
environment and isn't recommended for production use cases.
:::

1. Create the `upbound-system` namespace

```shell
kubectl create namespace upbound-system
```

2. Create a self-signed certificate

The manifest below is a sketch of a self-signed `Issuer` and `Certificate` pair;
the resource names are examples, and the DNS name must match your Spaces ingress
hostname:

```shell
cat <<EOF | kubectl apply -f -
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
  name: selfsigned-issuer
  namespace: upbound-system
spec:
  selfSigned: {}
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: example-tls
  namespace: upbound-system
spec:
  # The Spaces values file below references this secret.
  secretName: example-tls-secret
  dnsNames:
  - "proxy.upbound-127.0.0.1.nip.io"
  issuerRef:
    name: selfsigned-issuer
    kind: Issuer
EOF
```

## Create an Istio Gateway and VirtualService

Configure an Istio Gateway and VirtualService to use TLS passthrough. The manifest
below is a sketch; verify the `spaces-router` service name and port in your
installation before applying it:

```shell
cat <<EOF | kubectl apply -f -
apiVersion: networking.istio.io/v1beta1
kind: Gateway
metadata:
  name: spaces-gateway
  namespace: istio-system
spec:
  selector:
    istio: ingressgateway
  servers:
  - port:
      number: 443
      name: tls
      protocol: TLS
    tls:
      mode: PASSTHROUGH
    hosts:
    - "proxy.upbound-127.0.0.1.nip.io"
---
apiVersion: networking.istio.io/v1beta1
kind: VirtualService
metadata:
  name: spaces-router
  namespace: istio-system
spec:
  hosts:
  - "proxy.upbound-127.0.0.1.nip.io"
  gateways:
  - spaces-gateway
  tls:
  - match:
    - port: 443
      sniHosts:
      - "proxy.upbound-127.0.0.1.nip.io"
    route:
    - destination:
        # Verify this service name and port against your Spaces install.
        host: spaces-router.upbound-system.svc.cluster.local
        port:
          number: 8443
EOF
```

## Install Upbound Spaces

1. Create the `spaces-values.yaml` file

```shell
cat > spaces-values.yaml << 'EOF'
# Configure spaces-router to use the TLS secret created by cert-manager.
externalTLS:
  tlsSecret:
    name: example-tls-secret
  caBundleSecret:
    name: example-tls-secret
    key: ca.crt
ingress:
  provision: false
  # Allow Istio Ingress Gateway to communicate to the spaces-router
  namespaceLabels:
    kubernetes.io/metadata.name: istio-system
  podLabels:
    app: istio-ingressgateway
    istio: ingressgateway
EOF
```

2. Set the required environment variables

```shell
# Update these according to your account/token file
export SPACES_TOKEN_PATH=<path-to-spaces-token-file>
export UPBOUND_ACCOUNT=<your-upbound-account>
# Replace SPACES_ROUTER_HOST with your Spaces ingress hostname
export SPACES_ROUTER_HOST="proxy.upbound-127.0.0.1.nip.io"
export SPACES_VERSION="1.14.1"
```

3. Create an image pull secret for Spaces

```shell
kubectl -n upbound-system create secret docker-registry upbound-pull-secret \
  --docker-server=https://xpkg.upbound.io \
  --docker-username="$(jq -r .accessId $SPACES_TOKEN_PATH)" \
  --docker-password="$(jq -r .token $SPACES_TOKEN_PATH)"
```

4. Install the Spaces helm chart

```shell
# Login to xpkg.upbound.io
jq -r .token $SPACES_TOKEN_PATH | helm registry login xpkg.upbound.io -u $(jq -r .accessId $SPACES_TOKEN_PATH) --password-stdin

# Install spaces helm chart
helm -n upbound-system upgrade --install spaces \
  oci://xpkg.upbound.io/spaces-artifacts/spaces \
  --version "${SPACES_VERSION}" \
  --set "ingress.host=${SPACES_ROUTER_HOST}" \
  --set "account=${UPBOUND_ACCOUNT}" \
  --set "authentication.hubIdentities=true" \
  --set "authorization.hubRBAC=true" \
  --wait -f spaces-values.yaml
```

## Validate the installation

Successful access of the `up` command to interact with your self-hosted Space validates the
certificate installation.

- `up ctx .`

You can also issue control plane creation, list, and deletion commands.

- `up ctp create cert-test`
- `up ctp list`
- `up ctx disconnected/kind-kind/default/cert-test && kubectl get namespace`
- `up ctp delete cert-test`

:::note
If `up` can't connect to your control plane, follow [this guide to create a new profile][up-profile].
:::

## Troubleshooting

Examine your certificate with `openssl`:

```shell
openssl s_client -connect proxy.upbound-127.0.0.1.nip.io:443 -showcerts
```

[istioctl]: https://istio.io/latest/docs/ops/diagnostic-tools/istioctl/
[up-profile]: /manuals/cli/howtos/profile-config/

diff --git a/spaces_versioned_docs/version-v1.9/howtos/self-hosted/configure-ha.md b/spaces_versioned_docs/version-v1.9/howtos/self-hosted/configure-ha.md
new file mode 100644
index 000000000..ddf36c55e
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.9/howtos/self-hosted/configure-ha.md
@@ -0,0 +1,450 @@
---
title: Production Scaling and High Availability
description: Configure your Self-Hosted Space for production
sidebar_position: 5
---

This guide explains how to configure an existing Upbound Space deployment for
production operation at scale.

Use this guide when you're ready to deploy production scaling, high availability,
and monitoring in your Space.

:::info API Version Information
This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments.

For API specifications on ControlPlane resources and configurations, as well as version compatibility details, see the [Spaces API Reference](../../../../../reference/apis/spaces-api/).
:::

## Prerequisites

Before you begin scaling your Spaces deployment, make sure you have:

* A working Space deployment
* Cluster administrator access
* An understanding of load patterns and growth in your organization
* A familiarity with node affinity, tainting, and Horizontal Pod Autoscaling
  (HPA)

## Production scaling strategy

In this guide, you will:

* Create dedicated node pools for different component types
* Configure high availability to ensure there are no single points of failure
* Set dynamic scaling for variable workloads
* Optimize your storage and component operations
* Monitor your deployment health and performance

## Spaces architecture

The basic Spaces workflow follows the pattern below:

![Spaces workflow][spaces-workflow]

## Node architecture

You can mitigate resource contention and improve reliability by separating system
components into dedicated node pools.

### `etcd` dedicated nodes

`etcd` performance directly impacts your entire Space, so isolate it for
consistent performance.

1. Create a dedicated `etcd` node pool

   **Requirements:**
   - **Minimum**: 3 nodes for HA
   - **Instance type**: General purpose with high network throughput/low latency
   - **Storage**: High performance storage (`etcd` is I/O sensitive)

2. Taint `etcd` nodes to reserve them

   ```bash
   kubectl taint nodes <node-name> target=etcd:NoSchedule
   ```

3. Configure `etcd` storage

   `etcd` is sensitive to storage I/O performance. Review the [`etcd` scaling
   documentation][scaling] for specific storage guidance.

### API server dedicated nodes

API servers handle all control plane requests and should run on dedicated
infrastructure.

1. Create dedicated API server nodes

   **Requirements:**
   - **Minimum**: 2 nodes for HA
   - **Instance type**: Compute-optimized, memory-optimized, or general-purpose
   - **Scaling**: Scale vertically based on API server load patterns

2. Taint API server nodes

   ```bash
   kubectl taint nodes <node-name> target=apiserver:NoSchedule
   ```

### Configure cluster autoscaling

Enable cluster autoscaling for all node pools.

For AWS EKS clusters, Upbound recommends using [`Karpenter`][karpenter] for
improved bin-packing and instance type selection.

For GCP GKE clusters, follow the [GKE autoscaling][gke-autoscaling] guide.

For Azure AKS clusters, follow the [AKS autoscaling][aks-autoscaling] guide.

## Configure high availability

Ensure control plane components can survive node and zone failures.

### Enable high availability mode

1. Configure control planes for high availability

   ```yaml
   controlPlanes:
     ha:
       enabled: true
   ```

   This configures control plane pods to run with multiple replicas and
   associated pod disruption budgets.

### Configure component distribution

1. Set up API server pod distribution

   ```yaml
   controlPlanes:
     vcluster:
       affinity:
         nodeAffinity:
           requiredDuringSchedulingIgnoredDuringExecution:
             nodeSelectorTerms:
             - matchExpressions:
               - key: target
                 operator: In
                 values:
                 - apiserver
         podAntiAffinity:
           requiredDuringSchedulingIgnoredDuringExecution:
           - labelSelector:
               matchExpressions:
               - key: app
                 operator: In
                 values:
                 - vcluster
             topologyKey: "kubernetes.io/hostname"
           preferredDuringSchedulingIgnoredDuringExecution:
           - podAffinityTerm:
               labelSelector:
                 matchExpressions:
                 - key: app
                   operator: In
                   values:
                   - vcluster
               topologyKey: topology.kubernetes.io/zone
             weight: 100
   ```

2. Configure `etcd` pod distribution

   ```yaml
   controlPlanes:
     etcd:
       affinity:
         nodeAffinity:
           requiredDuringSchedulingIgnoredDuringExecution:
             nodeSelectorTerms:
             - matchExpressions:
               - key: target
                 operator: In
                 values:
                 - etcd
         podAntiAffinity:
           requiredDuringSchedulingIgnoredDuringExecution:
           - labelSelector:
               matchExpressions:
               - key: app
                 operator: In
                 values:
                 - vcluster-etcd
             topologyKey: "kubernetes.io/hostname"
           preferredDuringSchedulingIgnoredDuringExecution:
           - podAffinityTerm:
               labelSelector:
                 matchExpressions:
                 - key: app
                   operator: In
                   values:
                   - vcluster-etcd
               topologyKey: topology.kubernetes.io/zone
             weight: 100
   ```

### Configure tolerations

Allow control plane pods to schedule on the tainted dedicated nodes (available
in Spaces v1.14+).

1. Add tolerations for `etcd` pods

   ```yaml
   controlPlanes:
     etcd:
       tolerations:
       - key: "target"
         operator: "Equal"
         value: "etcd"
         effect: "NoSchedule"
   ```

2. Add tolerations for API server pods

   ```yaml
   controlPlanes:
     vcluster:
       tolerations:
       - key: "target"
         operator: "Equal"
         value: "apiserver"
         effect: "NoSchedule"
   ```

## Configure autoscaling for Spaces components

Set up the Spaces system components to handle variable load automatically.

### Scale API and `apollo` services

1. Configure minimum replicas for availability

   ```yaml
   api:
     replicaCount: 2

   features:
     alpha:
       apollo:
         enabled: true
         replicaCount: 2
   ```

   Both services support horizontal and vertical scaling based on load patterns.

### Configure router autoscaling

The `spaces-router` is the entry point for all traffic and needs intelligent
scaling.

1. Enable Horizontal Pod Autoscaler

   ```yaml
   router:
     hpa:
       enabled: true
       minReplicas: 2
       maxReplicas: 8
       targetCPUUtilizationPercentage: 80
       targetMemoryUtilizationPercentage: 80
   ```

2. Monitor scaling factors

   **Router scaling behavior:**
   - **Vertical scaling**: Scales based on number of control planes
   - **Horizontal scaling**: Scales based on request volume
   - **Resource monitoring**: Monitor CPU and memory usage

### Configure controller scaling

The `spaces-controller` manages Space-level resources and requires vertical
scaling.

1. Configure adequate resources with headroom

   ```yaml
   controller:
     resources:
       requests:
         cpu: "500m"
         memory: "1Gi"
       limits:
         cpu: "2000m"
         memory: "4Gi"
   ```

   **Important**: The controller can spike when reconciling large numbers of
   control planes, so provide adequate headroom for resource spikes.

## Set up production storage

### Configure Query API database

1. Use a managed PostgreSQL database

   **Recommended services:**
   - [AWS RDS][rds]
   - [Google Cloud SQL][gke-sql]
   - [Azure Database for PostgreSQL][aks-sql]

   **Requirements:**
   - Minimum 400 IOPS performance

## Monitoring

Monitor key metrics to ensure healthy scaling and identify issues quickly.

### Control plane health

Track these `spaces-controller` metrics:

1. **Total control planes**

   ```
   spaces_control_plane_exists
   ```

   Tracks the total number of control planes in the system.

2. **Degraded control planes**

   ```
   spaces_control_plane_degraded
   ```

   Returns control planes that don't have a `Synced`, `Ready`, and
   `Healthy` state.

3. **Stuck control planes**

   ```
   spaces_control_plane_stuck
   ```

   Control planes stuck in a provisioning state.

4. **Deletion issues**

   ```
   spaces_control_plane_deletion_stuck
   ```

   Control planes stuck during deletion.

### Alerting

Configure alerts for critical scaling and health metrics:

- **High error rates**: Alert when 4xx/5xx response rates exceed thresholds
- **Control plane health**: Alert when degraded or stuck control planes exceed acceptable counts

## Architecture overview

**Spaces System Components:**

- **`spaces-router`**: Entry point for all endpoints, dynamically builds routes to control plane API servers
- **`spaces-controller`**: Reconciles Space-level resources, serves webhooks, works with `mxp-controller` for provisioning
- **`spaces-api`**: API for managing groups, control planes, shared secrets, and telemetry objects (accessed only through spaces-router)
- **`spaces-apollo`**: Hosts the Query API, connects to PostgreSQL database populated by `apollo-syncer` pods

**Control Plane Components (per control plane):**

- **`mxp-controller`**: Handles provisioning tasks, serves webhooks, installs UXP and `XGQL`
- **`XGQL`**: GraphQL API powering console views
- **`kube-state-metrics`**: Collects usage metrics for billing (updated by `mxp-controller` when CRDs change)
- **`vector`**: Works with `kube-state-metrics` to send usage data to external storage for billing
- **`apollo syncer`**: Syncs `etcd` data into PostgreSQL for the Query API

### `up ctx` workflow

![up ctx workflow diagram][up-ctx-workflow]

### Access a control plane API server via kubectl

![kubectl workflow diagram][kubectl]

### Query API/Apollo

![query API workflow diagram][query-api]

## See also

* [Upbound Spaces deployment requirements][deployment]
* [Upbound `etcd` scaling resources][scaling]

[up-ctx-workflow]: /img/up-ctx-workflow.png
[kubectl]: /img/kubectl-workflow.png
[query-api]: /img/query-api-workflow.png
[spaces-workflow]: /img/up-basic-flow.png
[rds]: https://aws.amazon.com/rds/postgresql/
[gke-sql]: https://cloud.google.com/kubernetes-engine/docs/tutorials/stateful-workloads/postgresql
[aks-sql]: https://learn.microsoft.com/en-us/azure/aks/deploy-postgresql-ha?tabs=azuredisk
[deployment]: https://docs.upbound.io/spaces/howtos/self-hosted/deployment-reqs/
[karpenter]: https://docs.aws.amazon.com/eks/latest/best-practices/karpenter.html
[gke-autoscaling]: https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-autoscaler
[aks-autoscaling]: https://learn.microsoft.com/en-us/azure/aks/cluster-autoscaler-overview
[scaling]: https://docs.upbound.io/deploy/self-hosted-spaces/scaling-resources#scaling-etcd-storage

diff --git a/spaces_versioned_docs/version-v1.9/howtos/self-hosted/controllers.md 
b/spaces_versioned_docs/version-v1.9/howtos/self-hosted/controllers.md
new file mode 100644
index 000000000..692740638
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.9/howtos/self-hosted/controllers.md
@@ -0,0 +1,389 @@
---
title: Controllers
weight: 250
description: A guide to how to wrap and deploy an Upbound controller into control planes on Upbound.
---

:::important
This feature is in private preview for select customers in Upbound Spaces. If you're interested in this feature, please [contact us](https://www.upbound.io/contact-us).
:::

Upbound's _Controllers_ feature lets you build and deploy control plane software from the Kubernetes ecosystem. With the _Controllers_ feature, you're not limited to managing only resource types defined by Crossplane. You can create resources from _CustomResourceDefinitions_ defined by other Kubernetes ecosystem tooling.

This guide explains how to bundle and deploy control plane software from the Kubernetes ecosystem on a control plane in Upbound.

## Benefits

The Controllers feature provides the following benefits:

* Deploy control plane software from the Kubernetes ecosystem.
* Use your control plane's package manager to handle the lifecycle of the control plane software and define dependencies between packages.
* Build powerful compositions that combine both Crossplane and Kubernetes _CustomResources_.

## How it works

A _Controller_ is a package type that bundles control plane software from the Kubernetes ecosystem. Examples of such software include:

- Kubernetes policy engines
- CI/CD tooling
- Your own private custom controllers defined by your organization

You build a _Controller_ package by wrapping a Helm chart along with its requisite _CustomResourceDefinitions_. Your _Controller_ package gets pushed to an OCI registry, and from there you can apply it to a control plane like you would any other Crossplane package. Your control plane's package manager is responsible for managing the lifecycle of the software once applied.

## Prerequisites

Enable the Controllers feature in the Space you plan to run your control plane in:

- Cloud Spaces: Not available yet
- Connected Spaces: Space administrator must enable this feature
- Disconnected Spaces: Space administrator must enable this feature

Packaging a _Controller_ requires [up CLI][cli] `v0.39.0` or later.

## Build a _Controller_ package

_Controllers_ are a package type that get administered by your control plane's package manager.

### Prepare the package

To define a _Controller_, you need a Helm chart. This guide assumes the control plane software you want to build into a _Controller_ already has a Helm chart available.

Start by making a working directory to assemble the necessary parts:

```ini
mkdir controller-package
cd controller-package
```

Inside the working directory, pull the Helm chart:

```shell
export CHART_REPOSITORY=<chart-repository-url>
export CHART_NAME=<chart-name>
export CHART_VERSION=<chart-version>

helm pull $CHART_NAME --repo $CHART_REPOSITORY --version $CHART_VERSION
```

Be sure to update the Helm chart repository, name, and version with your own.
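For example, to pull the Argo CD chart used in the packaging example later in this guide (the repository URL and chart version here are assumptions drawn from that example):

```shell
export CHART_REPOSITORY=https://argoproj.github.io/argo-helm
export CHART_NAME=argo-cd
export CHART_VERSION=7.8.8

helm pull $CHART_NAME --repo $CHART_REPOSITORY --version $CHART_VERSION
```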

Move the Helm chart into its own folder:

```ini
mkdir helm
mv $CHART_NAME-$CHART_VERSION.tgz helm/chart.tgz
```

Unpack the CRDs from the Helm chart into their own directory:

```shell
export RELEASE_NAME=<release-name>
export RELEASE_NAMESPACE=<release-namespace>

mkdir crds
helm template $RELEASE_NAME helm/chart.tgz -n $RELEASE_NAMESPACE --include-crds | \
  yq e 'select(.kind == "CustomResourceDefinition")' - | \
  yq -s '("crds/" + .metadata.name + ".yaml")' -
```

Be sure to update the Helm release name and namespace with your own.

:::info
The instructions above assume your CRDs get deployed as part of your Helm chart. If they're deployed another way, you need to manually copy your CRDs instead.
:::

Create a `crossplane.yaml` with your controller metadata:

```yaml
cat <<EOF > crossplane.yaml
apiVersion: meta.pkg.upbound.io/v1alpha1
kind: Controller
metadata:
  annotations:
    friendly-name.meta.crossplane.io: Controller <name>
    meta.crossplane.io/description: |
      A brief description of what the controller does.
    meta.crossplane.io/license: Apache-2.0
    meta.crossplane.io/maintainer: <maintainer>
    meta.crossplane.io/readme: |
      An explanation of your controller.
    meta.crossplane.io/source: <source-repository>
  name: <controller-name>
spec:
  packagingType: Helm
  helm:
    releaseName: <release-name>
    releaseNamespace: <release-namespace>
    # Value overrides for the helm release can be provided below.
    # values:
    #   foo: bar
EOF
```

Your controller's file structure should look like this:

```ini
.
├── crds
│   ├── your-crd.yaml
│   ├── second-crd.yaml
│   └── another-crd.yaml
├── crossplane.yaml
└── helm
    └── chart.tgz
```

### Package and push the _Controller_

At the root of your controller's working directory, build the contents into an xpkg:

```ini
up xpkg build
```

This causes an xpkg to get saved to your current directory with a name like `controller-f7091386b4c0.xpkg`.

Push the package to your desired OCI registry:

```shell
export UPBOUND_ACCOUNT=<your-upbound-account>
export CONTROLLER_NAME=<controller-name>
export CONTROLLER_VERSION=<controller-version>
export XPKG_FILENAME=<xpkg-filename>

up xpkg push xpkg.upbound.io/$UPBOUND_ACCOUNT/$CONTROLLER_NAME:$CONTROLLER_VERSION -f $XPKG_FILENAME
```

## Deploy a _Controller_ package

:::important
_Controllers_ are only installable on control planes running Crossplane `v1.19.0` or later.
:::

Set your kubecontext to the desired control plane in Upbound. Change the package path to the OCI registry you pushed it to. Then, deploy the _Controller_ directly. The manifest below is a sketch that assumes the runtime `Controller` package API mirrors the `meta.pkg.upbound.io` type above; check your Space's API reference for the exact group and version:

```shell
export CONTROLLER_NAME=<controller-name>
export CONTROLLER_VERSION=<controller-version>

cat <<EOF | kubectl apply -f -
# Assumed runtime package API group; verify against your Space's API reference.
apiVersion: pkg.upbound.io/v1alpha1
kind: Controller
metadata:
  name: ${CONTROLLER_NAME}
spec:
  package: xpkg.upbound.io/${UPBOUND_ACCOUNT}/${CONTROLLER_NAME}:${CONTROLLER_VERSION}
EOF
```

## Example: Package Argo CD as a _Controller_

The following example packages the Argo CD Helm chart as a _Controller_. Create a `crossplane.yaml` with the Argo CD controller metadata:

```yaml
cat <<EOF > crossplane.yaml
apiVersion: meta.pkg.upbound.io/v1alpha1
kind: Controller
metadata:
  annotations:
    friendly-name.meta.crossplane.io: Controller ArgoCD
    meta.crossplane.io/description: |
      The ArgoCD Controller enables continuous delivery and declarative configuration
      management for Kubernetes applications using GitOps principles.
    meta.crossplane.io/license: Apache-2.0
    meta.crossplane.io/maintainer: Upbound Maintainers
    meta.crossplane.io/readme: |
      ArgoCD is a declarative GitOps continuous delivery tool for Kubernetes that
      follows the GitOps methodology to manage infrastructure and application
      configurations.
    meta.crossplane.io/source: https://github.com/argoproj/argo-cd
  name: argocd
spec:
  packagingType: Helm
  helm:
    releaseName: argo-cd
    releaseNamespace: argo-system
    # values:
    #   foo: bar
EOF
```

Your controller's file structure should look like this:

```ini
.
├── crds
│   ├── applications.argoproj.io.yaml
│   ├── applicationsets.argoproj.io.yaml
│   └── appprojects.argoproj.io.yaml
├── crossplane.yaml
└── helm
    └── chart.tgz
```

### Package and push controller-argocd

At the root of your controller's working directory, build the contents into an xpkg:

```ini
up xpkg build
```

This causes an xpkg to get saved to your current directory with a name like `argocd-f7091386b4c0.xpkg`.

Push the package to your desired OCI registry:

```shell
export UPBOUND_ACCOUNT=<your-upbound-account>
export CONTROLLER_NAME=controller-argocd
export CONTROLLER_VERSION=v7.8.8
export XPKG_FILENAME=<xpkg-filename>

up xpkg push --create xpkg.upbound.io/$UPBOUND_ACCOUNT/$CONTROLLER_NAME:$CONTROLLER_VERSION -f $XPKG_FILENAME
```

### Deploy controller-argocd to a control plane

Set your kubecontext to the desired control plane in Upbound. Change the package path to the OCI registry you pushed it to. Then, deploy the _Controller_ directly. As above, the runtime API group shown is an assumption:

```ini
cat <<EOF | kubectl apply -f -
# Assumed runtime package API group; verify against your Space's API reference.
apiVersion: pkg.upbound.io/v1alpha1
kind: Controller
metadata:
  name: argocd
spec:
  package: xpkg.upbound.io/$UPBOUND_ACCOUNT/controller-argocd:v7.8.8
EOF
```

## Frequently asked questions

<details>
<summary>Can I package any software or are there any prerequisites to be a Controller?</summary>

We define a *Controller* as software that has at least one Custom Resource Definition (CRD) and a Kubernetes controller for that CRD. This is the minimum requirement to be a *Controller*. We have some checks to enforce this at packaging time.
</details>
+ +

<details>
<summary>How can I package my software as a Controller?</summary>

Currently, we support Helm charts as the underlying package format for *Controllers*. As long as you have a Helm chart, you can package it as a *Controller*.

If you don't have a Helm chart, you can't package the software as a *Controller* today. We may extend this to support other packaging formats like Kustomize in the future.
</details>
+ +

<details>
<summary>Can I package Crossplane XRDs/Compositions as a Helm chart to deploy as a Controller?</summary>

This isn't recommended. For packaging Crossplane XRDs and Compositions, we recommend using the `Configuration` package format. A Helm chart containing only Crossplane XRDs/Compositions doesn't qualify as a *Controller*.
</details>
+ +

<details>
<summary>How can I override the Helm values when deploying a Controller?</summary>

Overriding the Helm values is possible at two levels:

- During packaging time, in the package manifest file, as shown in the sketch below.
- At runtime, using a `ControllerRuntimeConfig` resource (similar to Crossplane `DeploymentRuntimeConfig`).
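
A sketch of the packaging-time option, set through `spec.helm.values` in the `crossplane.yaml`; the values shown are illustrative Argo CD chart settings, not required ones:

```yaml
spec:
  packagingType: Helm
  helm:
    releaseName: argo-cd
    releaseNamespace: argo-system
    values:
      # Illustrative override: run two Argo CD server replicas.
      server:
        replicas: 2
```
</details>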
+ +

<details>
<summary>How can I configure the helm release name and namespace for the controller?</summary>

Right now, it's not possible to configure this at runtime. The package author configures the release name and namespace during packaging, so they're hardcoded inside the package. Unlike a regular application deployed by a Helm chart, a *Controller* can only be deployed once in a given control plane, so relying on predefined release names and namespaces should be acceptable. We may consider exposing these in `ControllerRuntimeConfig` later, but we'd like to keep this opinionated unless there are strong reasons to do otherwise.
</details>
+ +

<details>
<summary>Can I deploy more than one instance of a Controller package?</summary>

No, this isn't possible. Remember, a *Controller* package introduces CRDs, which are cluster-scoped objects. Just as you can't deploy more than one instance of the same Crossplane Provider package today, it's not possible to deploy more than one instance of a *Controller*.
</details>
+ +

<details>
<summary>Do I need a specific Crossplane version to run Controllers?</summary>

Yes, you need Crossplane v1.19.0 or later to use *Controllers*. This is because of changes in the Crossplane codebase to support third-party package formats in dependencies.

Spaces `v1.12.0` supports Crossplane `v1.19` in the *Rapid* release channel.
</details>
+ +

<details>
<summary>Can I deploy Controllers outside of an Upbound control plane? With UXP?</summary>

No, *Controllers* are a proprietary package format and are only available for control planes running in Spaces hosting environments in Upbound.
</details>

[cli]: /manuals/uxp/overview

diff --git a/spaces_versioned_docs/version-v1.9/howtos/self-hosted/ctp-audit-logs.md b/spaces_versioned_docs/version-v1.9/howtos/self-hosted/ctp-audit-logs.md
new file mode 100644
index 000000000..52f52c776
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.9/howtos/self-hosted/ctp-audit-logs.md
@@ -0,0 +1,549 @@
---
title: Control plane audit logging
---

This guide explains how to enable and configure audit logging for control planes
in Self-Hosted Upbound Spaces.

Starting in Spaces `v1.14.0`, each control plane contains an API server that
supports audit log collection. You can use audit logging to track creation,
updates, and deletions of Crossplane resources. Control plane audit logs
use observability features to collect audit logs with `SharedTelemetryConfig` and
send logs to an OpenTelemetry (`OTEL`) collector.

:::info API Version Information
This guide applies to **Spaces v1.14.0 and later** (Self-Hosted only). Audit logging isn't available in earlier versions.

For API specifications on observability resources, as well as details on how observability evolved across versions, see the [Spaces API Reference - v1.14](../../../../../1.14/reference/apis/spaces-api/).
:::

## Prerequisites

Before you begin, make sure you have:

* Spaces `v1.14.0` or greater
* Admin access to your Spaces host cluster
* `kubectl` configured to access the host cluster
* `helm` installed
* `yq` installed
* `up` CLI installed and logged in to your organization

## Enable observability

Observability graduated to General Availability in `v1.14.0` but is disabled by
default.

### Before `v1.14`

To enable the GA Observability feature, upgrade your Spaces installation to `v1.14.0`
or later and update your installation setting to the new flag:

```diff
helm upgrade spaces upbound/spaces -n upbound-system \
- --set "features.alpha.observability.enabled=true"
+ --set "observability.enabled=true"
```

### After `v1.14`

To enable the GA Observability feature for `v1.14.0` and later, pass the feature
flag:

```sh
helm upgrade spaces upbound/spaces -n upbound-system \
  --set "observability.enabled=true"
```

To confirm Observability is enabled, run the `helm get values` command:

```shell
helm get values --namespace upbound-system spaces | yq .observability
```

Your output should return:

```shell-noCopy
enabled: true
```

## Install an observability backend

:::note
If you already have an observability backend in your environment, skip to the
next section.
:::

For this guide, you'll use Grafana's `docker-otel-lgtm` bundle to validate audit log
generation. In production environments, configure a dedicated observability
backend like Datadog, Splunk, or an enterprise-grade Grafana stack.

First, make sure your `kubectl` context points to your Spaces host cluster:

```shell
kubectl config current-context
```

The output should return your cluster name.

Next, install `docker-otel-lgtm` as a deployment using port-forwarding to
connect to Grafana.
Create a manifest file and paste the
following configuration:

```yaml title="otel-lgtm.yaml"
apiVersion: v1
kind: Namespace
metadata:
  name: observability
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: otel-lgtm
  name: otel-lgtm
  namespace: observability
spec:
  ports:
  - name: grpc
    port: 4317
    protocol: TCP
    targetPort: 4317
  - name: http
    port: 4318
    protocol: TCP
    targetPort: 4318
  - name: grafana
    port: 3000
    protocol: TCP
    targetPort: 3000
  selector:
    app: otel-lgtm
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: otel-lgtm
  labels:
    app: otel-lgtm
  namespace: observability
spec:
  replicas: 1
  selector:
    matchLabels:
      app: otel-lgtm
  template:
    metadata:
      labels:
        app: otel-lgtm
    spec:
      containers:
      - name: otel-lgtm
        image: grafana/otel-lgtm
        ports:
        - containerPort: 4317
        - containerPort: 4318
        - containerPort: 3000
```

Next, apply the manifest:

```shell
kubectl apply --filename otel-lgtm.yaml
```

Your output should return the resources:

```shell
namespace/observability created
service/otel-lgtm created
deployment.apps/otel-lgtm created
```

To verify your resources deployed, use `kubectl get` to display resources with
an `ACTIVE` or `READY` status.

Next, forward the Grafana port:

```shell
kubectl port-forward svc/otel-lgtm --namespace observability 3000:3000
```

Now you can access the Grafana UI at http://localhost:3000.

## Create an audit-enabled control plane

To enable audit logging for a control plane, you need to label it so the
`SharedTelemetryConfig` can identify and apply audit settings. This section
creates a new control plane with the `audit-enabled: "true"` label, which marks
it for audit logging. The `SharedTelemetryConfig` (created in the next section)
finds control planes with this label and enables audit logging on them.

Create a new manifest file and paste the configuration below:

```yaml title="ctp-audit.yaml"
apiVersion: v1
kind: Namespace
metadata:
  name: audit-test
---
apiVersion: spaces.upbound.io/v1beta1
kind: ControlPlane
metadata:
  labels:
    audit-enabled: "true"
  name: ctp1
  namespace: audit-test
spec:
  writeConnectionSecretToRef:
    name: kubeconfig-ctp1
    namespace: audit-test
```

The `metadata.labels` section contains the `audit-enabled` setting.

Apply the manifest:

```shell
kubectl apply --filename ctp-audit.yaml
```

Confirm your control plane reaches the `READY` status:

```shell
kubectl get --filename ctp-audit.yaml
```

## Create a `SharedTelemetryConfig`

The `SharedTelemetryConfig` applies to all control plane objects in a namespace;
it enables audit logging and routes logs to your `OTEL` endpoint.

Create a `SharedTelemetryConfig` manifest file and paste the configuration
below:

```yaml title="sharedtelemetryconfig.yaml"
apiVersion: observability.spaces.upbound.io/v1alpha1
kind: SharedTelemetryConfig
metadata:
  name: apiserver-audit
  namespace: audit-test
spec:
  apiServer:
    audit:
      enabled: true
  exporters:
    otlphttp:
      endpoint: http://otel-lgtm.observability:4318
  exportPipeline:
    logs: [otlphttp]
  controlPlaneSelector:
    labelSelectors:
    - matchLabels:
        audit-enabled: "true"
```

This configuration:

* Sets `apiServer.audit.enabled` to `true`
* Configures the `otlphttp` exporter to point to the `docker-otel-lgtm` service
* Uses `controlPlaneSelector` to match any control plane in the namespace with the `audit-enabled` label set to `true`

:::note
You can configure the `SharedTelemetryConfig` to select control planes in
several ways. For more information on control plane selection, see the [control
plane selection][ctp-selection] documentation.
:::

Apply the `SharedTelemetryConfig`:

```shell
kubectl apply --filename sharedtelemetryconfig.yaml
```

Confirm the configuration selected the control plane:

```shell
kubectl get --filename sharedtelemetryconfig.yaml
```

The output should return `SELECTED` as `1` and `VALIDATED` as `TRUE`.

For more detailed status information, use `kubectl get`:

```shell
kubectl get --filename sharedtelemetryconfig.yaml --output yaml | yq .status
```

## Generate and monitor audit events

You enabled telemetry on your new control plane and can now generate events to
test the audit logging. This guide uses the `nop-provider` to simulate resource
operations.

Switch your `up` context to the new control plane:

```shell
up ctx <organization>/<space>/<group>/<control-plane>
```

Create a new Provider manifest:

```yaml title="provider-nop.yaml"
apiVersion: pkg.crossplane.io/v1
kind: Provider
metadata:
  name: crossplane-contrib-provider-nop
spec:
  package: xpkg.upbound.io/crossplane-contrib/provider-nop:v0.4.0
```

Apply the provider manifest:

```shell
kubectl apply --filename provider-nop.yaml
```

Verify the provider installed and returns a `HEALTHY` status of `TRUE`.

Apply an example resource to kick off event generation:

```shell
kubectl apply --filename https://raw.githubusercontent.com/crossplane-contrib/provider-nop/refs/heads/main/examples/nopresource.yaml
```

In your Grafana dashboard, navigate to **Drilldown** > **Logs** under the
Grafana menu.

Filter for `controlplane-audit` log messages.

Create a query to find `create` events on `nopresources` by filtering:

* The `verb` field for `create` events
* The `objectRef_resource` field to match the Kind `nopresources`

Review the audit log results. The log stream displays:

* The client applying the create operation
* The resource kind
* Client details
* The response code

Expand the example below for an audit log entry:

<details>
<summary>Audit log entry</summary>

```json
{
  "level": "Metadata",
  "auditID": "51bbe609-14ad-4874-be78-1289c10d506a",
  "stage": "ResponseComplete",
  "requestURI": "/apis/nop.crossplane.io/v1alpha1/nopresources?fieldManager=kubectl-client-side-apply&fieldValidation=Strict",
  "verb": "create",
  "user": {
    "username": "kubernetes-admin",
    "groups": ["system:masters", "system:authenticated"]
  },
  "impersonatedUser": {
    "username": "upbound:spaces:host:masterclient",
    "groups": [
      "system:authenticated",
      "upbound:controlplane:admin",
      "upbound:spaces:host:system:masters"
    ]
  },
  "sourceIPs": ["10.244.0.135", "127.0.0.1"],
  "userAgent": "kubectl/v1.32.2 (darwin/arm64) kubernetes/67a30c0",
  "objectRef": {
    "resource": "nopresources",
    "name": "example",
    "apiGroup": "nop.crossplane.io",
    "apiVersion": "v1alpha1"
  },
  "responseStatus": { "metadata": {}, "code": 201 },
  "requestReceivedTimestamp": "2025-09-19T23:03:24.540067Z",
  "stageTimestamp": "2025-09-19T23:03:24.557583Z",
  "annotations": {
    "authorization.k8s.io/decision": "allow",
    "authorization.k8s.io/reason": "RBAC: allowed by ClusterRoleBinding \"controlplane-admin\" of ClusterRole \"controlplane-admin\" to Group \"upbound:controlplane:admin\""
  }
}
```
</details>
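If you use the Loki instance bundled with `docker-otel-lgtm`, a LogQL query along these lines reproduces that filter. The stream selector label and parsed field names are assumptions based on the log fields shown above; adjust them to match your labels:

```
{service_name="controlplane-audit"} | json | verb="create" | objectRef_resource="nopresources"
```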

## Customize the audit policy

Spaces `v1.14.0` includes a default audit policy. You can customize this policy
by creating a configuration file and passing the values to
`observability.controlPlanes.apiServer.auditPolicy` in the helm values file.

An example custom audit policy:

```yaml
observability:
  controlPlanes:
    apiServer:
      auditPolicy: |
        apiVersion: audit.k8s.io/v1
        kind: Policy
        rules:
          # ============================================================================
          # RULE 1: Exclude health check and version endpoints
          # ============================================================================
          - level: None
            nonResourceURLs:
              - '/healthz*'
              - '/readyz*'
              - /version
          # ============================================================================
          # RULE 2: ConfigMaps - Write operations only
          # ============================================================================
          - level: Metadata
            resources:
              - group: ""
                resources:
                  - configmaps
            verbs:
              - create
              - update
              - patch
              - delete
            omitStages:
              - RequestReceived
              - ResponseStarted
          # ============================================================================
          # RULE 3: Secrets - ALL operations
          # ============================================================================
          - level: Metadata
            resources:
              - group: ""
                resources:
                  - secrets
            verbs:
              - get
              - list
              - watch
              - create
              - update
              - patch
              - delete
            omitStages:
              - RequestReceived
              - ResponseStarted
          # ============================================================================
          # RULE 4: Global exclusion of read-only operations
          # ============================================================================
          - level: None
            verbs:
              - get
              - list
              - watch
          # ==========================================================================
          # RULE 5: Exclude standard Kubernetes resources from write operation logging
          # ==========================================================================
          - level: None
            resources:
              - group: ""
              - group: "apps"
              - group: "networking.k8s.io"
              - group: "policy"
              - group: "rbac.authorization.k8s.io"
              - group: "storage.k8s.io"
              - group: "batch"
              - group: "autoscaling"
              - group: "metrics.k8s.io"
              - group: "node.k8s.io"
              - group: "scheduling.k8s.io"
              - group: "coordination.k8s.io"
              - group: "discovery.k8s.io"
              - group: "events.k8s.io"
              - group: "flowcontrol.apiserver.k8s.io"
              - group: "internal.apiserver.k8s.io"
              - group: "authentication.k8s.io"
              - group: "authorization.k8s.io"
              - group: "admissionregistration.k8s.io"
            verbs:
              - create
              - update
              - patch
              - delete
          # ============================================================================
          # RULE 6: Catch-all for ALL custom resources and any missed resources
          # ============================================================================
          - level: Metadata
            verbs:
              - create
              - update
              - patch
              - delete
            omitStages:
              - RequestReceived
              - ResponseStarted
          # ============================================================================
          # RULE 7: Final catch-all - exclude everything else
          # ============================================================================
          - level: None
            omitStages:
              - RequestReceived
              - ResponseStarted
```

You can apply this policy during Spaces installation or upgrade using the helm values file.

Audit policies use rules evaluated in order from top to bottom where the first
matching rule applies.
Control plane audit policies follow Kubernetes conventions and use the
following logging levels:

* **None** - Don't log events matching this rule
* **Metadata** - Log request metadata (user, timestamp, resource, verb) but not request or response bodies
* **Request** - Log metadata and request body but not response body
* **RequestResponse** - Log metadata, request body, and response body

For more information, review the Kubernetes [Auditing] documentation.

## Disable audit logging

You can disable audit logging on a control plane by removing it from the
`SharedTelemetryConfig` selector or by deleting the `SharedTelemetryConfig`.

### Disable for specific control planes

Remove the `audit-enabled` label from control planes that should stop sending audit logs:

```bash
kubectl label controlplane <control-plane-name> --namespace <namespace> audit-enabled-
```

The `SharedTelemetryConfig` no longer selects this control plane, and audit log collection stops.

### Disable for all control planes

Delete the `SharedTelemetryConfig` to stop audit logging for all control planes it manages:

```bash
kubectl delete sharedtelemetryconfig <name> --namespace <namespace>
```

[ctp-selection]: /spaces/howtos/observability/#control-plane-selection
[Auditing]: https://kubernetes.io/docs/tasks/debug/debug-cluster/audit/

diff --git a/spaces_versioned_docs/version-v1.9/howtos/self-hosted/declarative-ctps.md b/spaces_versioned_docs/version-v1.9/howtos/self-hosted/declarative-ctps.md
new file mode 100644
index 000000000..2c3e5331b
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.9/howtos/self-hosted/declarative-ctps.md
@@ -0,0 +1,110 @@
---
title: Declaratively create control planes
sidebar_position: 99
description: A tutorial to configure a Space with Argo to declaratively create and
  manage control planes
---

In this tutorial, you learn how to configure [Argo CD][argo-cd] to communicate with a self-hosted Space. This flow allows you to declaratively create and manage control planes from Git. Argo CD is a continuous delivery tool for Kubernetes that you can use to drive GitOps flows for your control plane infrastructure.

:::info API Version Information
This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments.

For API specifications on ControlPlane resources and their declarative creation, as well as version compatibility details, see the [Spaces API Reference](../../../../../reference/apis/spaces-api/).
:::

## Prerequisites

To complete this tutorial, you need the following:

- Have already deployed an Upbound Space.
- Have already deployed an instance of Argo CD on a Kubernetes cluster.

## Connect your Space to Argo CD

Fetch the kubeconfig for the Space cluster, the Kubernetes cluster where you installed the Upbound Spaces software. You must add the Space cluster as a context to Argo.

```ini
export SPACES_CLUSTER_SERVER="https://url"
export SPACES_CLUSTER_NAME="cluster"
```

Switch contexts to the Kubernetes cluster where you've installed Argo. Create a secret on the Argo cluster whose data contains the connection details of the Space cluster.

:::important
Make sure the following commands are executed against your **Argo** cluster, not your Space cluster.
:::

Run the following command in a terminal. The secret below follows Argo CD's declarative cluster-secret format; the secret name, namespace, and `config` credentials are a sketch, so populate them from your Space cluster's kubeconfig:

```yaml
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Secret
metadata:
  name: spaces-cluster
  namespace: argocd
  labels:
    argocd.argoproj.io/secret-type: cluster
stringData:
  name: ${SPACES_CLUSTER_NAME}
  server: ${SPACES_CLUSTER_SERVER}
  config: |
    {
      "tlsClientConfig": {
        "caData": "<base64-encoded-ca-certificate>",
        "certData": "<base64-encoded-client-certificate>",
        "keyData": "<base64-encoded-client-key>"
      }
    }
EOF
```

When you install a Crossplane provider on a control plane, memory gets consumed
according to the number of custom resources it defines.
Upbound [Official Provider families][official-provider-families] give platform
teams finer-grained control to install providers for only the resources they need,
reducing the bloat of needlessly installing unused custom resources. Still, you
must factor provider memory usage into your calculations to ensure you've
rightsized the memory available in your Spaces cluster.

:::important
Be careful not to conflate `managed resource` with `custom resource definition`.
The former is an "instance" of an external resource in Crossplane, while the
latter defines the API schema of that resource.
:::

It's estimated that each custom resource definition consumes ~3 MB of memory.
The calculation is:

```bash
number_of_managed_resources_defined_in_provider x 3 MB = memory_required
```

For example, if you plan to use [provider-aws-ec2][provider-aws-ec2], [provider-aws-s3][provider-aws-s3], and [provider-aws-iam][provider-aws-iam], the resulting calculation is:

```bash
provider-aws-ec2: 98 x 3 MB = 294 MB
provider-aws-s3:  23 x 3 MB = 69 MB
provider-aws-iam: 22 x 3 MB = 66 MB
---
total memory: 429 MB
```

In this scenario, you should budget ~430 MB of memory for provider usage on this control plane.

:::tip
Do this calculation for each provider you plan to install on your control plane.
Then do this calculation for each control plane you plan to run in your Space.
:::

#### Total memory usage

Add the memory usage from the previous sections. Given the preceding examples,
they result in a recommendation to budget ~1 GB memory for each control plane
you plan to run in the Space.

:::important
The 1 GB recommendation is an example.
You should input your own provider requirements to arrive at a final number for
your own deployment.
:::

### CPU considerations

#### Managed resource CPU usage

The number of managed resources under management by a control plane is the largest contributing factor for CPU usage in a Space. CPU usage scales linearly according to the number of managed resources under management by your control plane. In Upbound's testing, CPU usage requirements _do_ vary from provider to provider. Using the Upbound Official Provider families as a baseline:

| Provider | MR create operation (CPU core seconds) | MR update or reconciliation operation (CPU core seconds) |
| ---- | ---- | ---- |
| provider-family-aws | 10 | 2 to 3 |
| provider-family-gcp | 7 | 1.5 |
| provider-family-azure | 7 to 10 | 1.5 to 3 |

When resources are in a non-ready state, Crossplane providers reconcile often (as fast as every 15 seconds). Once a resource reaches `READY`, each Crossplane provider defaults to a 10 minute poll interval. Given this, a 16-core machine has `16x10x60 = 9600` CPU core seconds available. Interpreting this table:

- A single control plane that needs to create 100 AWS MRs concurrently would consume 1000 CPU core seconds, or about 1.5 cores.
- A single control plane that continuously reconciles 100 AWS MRs once they've reached a `READY` state would consume 300 CPU core seconds, or a little under half a core.

Since `provider-family-aws` has the highest recorded numbers for CPU time required, you can use that as an upper limit in your calculations.

Using these calculations and extrapolating values, given a 16 core machine, it's recommended you don't exceed a single control plane managing 1000 MRs. Suppose you plan to run 10 control planes, each managing 1000 MRs.
You want to make sure your node pool has capacity for 160 cores. If you're using a machine type that has 16 cores per machine, that means a node pool of size 10. If you're using a machine type that has 32 cores per machine, that means a node pool of size 5.

#### Cloud API latency

Oftentimes, you use Crossplane providers to talk to external cloud APIs. Those external cloud APIs often have global API rate limits (examples: [Azure limits][azure-limits], [AWS EC2 limits][aws-ec2-limits]).

For Crossplane providers built on [Upjet][upjet] (such as Upbound Official Provider families), these providers use Terraform under the covers. They expose some knobs (such as `--max-reconcile-rate`) you can use to tweak reconciliation rates.

### Resource buffers

The guidance in the preceding sections explains how to calculate CPU and memory usage requirements for:

- a set of control planes in a Space
- tuned to the number of providers you plan to use
- according to the number of managed resource instances you plan to have managed by your control planes

Upbound recommends budgeting an extra buffer of 20% on top of your resource capacity calculations. The numbers shared in the preceding sections don't account for peaks or surges since they're based on average measurements; the buffer covers these spikes.

## Deploying more than one Space

You're welcome to deploy more than one Space. You just need to make sure you have a 1:1 mapping of Spaces to Kubernetes clusters. Spaces are by their nature constrained to a single Kubernetes cluster, which is a regional entity. If you want to offer control planes in multiple cloud environments or multiple public clouds entirely, those are justifications for deploying more than one Space.

## Cert-manager

A Spaces deployment uses the [Certificate Custom Resource] from cert-manager to
provision certificates within the Space. This establishes a clean API boundary
between what your platform may need and the certificate requirements of a
Space.

If you'd like more control over the issuing Certificate Authority for your
deployment or the deployment of cert-manager itself, this guide is for you.

### Deploying

An Upbound Space deployment doesn't have any special requirements for the
cert-manager deployment itself. The only expectation is that cert-manager and
the corresponding Custom Resources exist in the cluster.

You should be free to install cert-manager in the cluster in any way that makes
sense for your organization. You can find some [installation ideas] in the
cert-manager docs.

### Issuers

A default Upbound Space install includes a [ClusterIssuer]. This `ClusterIssuer`
is a `selfSigned` issuer that other certificates are minted from. You have a
couple of options available for changing the default deployment of the
Issuer:
1. Changing the issuer name.
2. Providing your own ClusterIssuer.

#### Changing the issuer name

The `ClusterIssuer` name is controlled by the `certificates.space.clusterIssuer`
Helm property. You can adjust this during installation by providing the
following parameter (assuming your new name is 'SpaceClusterIssuer'):
```shell
--set "certificates.space.clusterIssuer=SpaceClusterIssuer"
```

#### Providing your own ClusterIssuer

To provide your own `ClusterIssuer`, you need to first set up your own
`ClusterIssuer` in the cluster.
The cert-manager docs have a variety of options
for providing your own. See the [Issuer Configuration] docs for more details.

Once you have your own `ClusterIssuer` set up in the cluster, you need to turn
off the deployment of the `ClusterIssuer` included in the Spaces deployment.
To do that, provide the following parameter during installation:
```shell
--set "certificates.provision=false"
```

##### Considerations

If your `ClusterIssuer` has a name that's different from the default name that
the Spaces installation expects ('spaces-selfsigned'), you need to also specify
your `ClusterIssuer` name during install using:
```shell
--set "certificates.space.clusterIssuer=<your-clusterissuer-name>"
```

## Ingress

To route requests from an external client (kubectl, ArgoCD, etc.) to a
control plane, a Spaces deployment includes a default [Ingress] manifest. In
order to ease getting started scenarios, the current `Ingress` includes
configurations (properties and annotations) that assume that you installed the
commonly used [ingress-nginx ingress controller] in the cluster. This section
walks you through using a different `Ingress`, if that's something that your
organization needs.

### Default manifest

An example of the `Ingress` manifest included in a Spaces install is below:

```yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: mxe-router-ingress
  namespace: upbound-system
  annotations:
    nginx.ingress.kubernetes.io/use-regex: "true"
    nginx.ingress.kubernetes.io/ssl-redirect: "false"
    nginx.ingress.kubernetes.io/proxy-send-timeout: "3600"
    nginx.ingress.kubernetes.io/proxy-read-timeout: "3600"
    nginx.ingress.kubernetes.io/proxy-request-buffering: "off"
    nginx.ingress.kubernetes.io/proxy-body-size: "0"
    nginx.ingress.kubernetes.io/proxy-http-version: "1.1"
    nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
    nginx.ingress.kubernetes.io/proxy-ssl-verify: "on"
    nginx.ingress.kubernetes.io/proxy-ssl-secret: "upbound-system/mxp-hostcluster-certs"
    nginx.ingress.kubernetes.io/proxy-ssl-name: spaces-router
    nginx.ingress.kubernetes.io/configuration-snippet: |
      more_set_headers "X-Request-Id: $req_id";
      more_set_headers "Request-Id: $req_id";
      more_set_headers "Audit-Id: $req_id";
spec:
  ingressClassName: nginx
  tls:
  - hosts:
    - {{ .Values.ingress.host }}
    secretName: mxe-router-tls
  rules:
  - host: {{ .Values.ingress.host }}
    http:
      paths:
      - path: "/v1/controlPlanes"
        pathType: Prefix
        backend:
          service:
            name: spaces-router
            port:
              name: http
```

The notable pieces are:

1. Namespace

This property represents the namespace that the spaces-router is deployed to.
In most cases this is `upbound-system`.

2. proxy-ssl-* annotations

The spaces-router pod terminates TLS using certificates located in the
mxp-hostcluster-certs `Secret` located in the `upbound-system` `Namespace`.

3. proxy-* annotations

Requests coming into the ingress-controller can be variable depending on what
the client is requesting. For example, `kubectl get crds` has different
requirements for the connection compared to a 'watch', for example
`kubectl get pods -w`. The ingress-controller is configured to account for
either scenario.

4. configuration-snippets

These commands add headers to the incoming requests that help with telemetry
and diagnosing problems within the system.

5. Rules

Requests coming into the control planes use a `/v1/controlPlanes` prefix and
need to be routed to the spaces-router.

### Using a different ingress manifest

Operators can choose to use an `Ingress` manifest and ingress controller that
makes the most sense for their organization. To turn off deploying the default
`Ingress` manifest, provide the following parameter during installation:
```shell
--set "ingress.provision=false"
```

#### Considerations

Operators need to take into account the following considerations when
disabling the default `Ingress` deployment.

1. Ensure the custom `Ingress` manifest is placed in the same namespace as the
`spaces-router` pod.
2. Ensure that the ingress is configured to use the `spaces-router` as a secure
backend and that the secret used is the mxp-hostcluster-certs secret.
3. Ensure that the ingress is configured to handle long-lived connections.
4. Ensure that the routing rule sends requests prefixed with
`/v1/controlPlanes` to the `spaces-router` using the `http` port.

[cert-manager]: https://cert-manager.io/
[Certificate Custom Resource]: https://cert-manager.io/docs/usage/certificate/
[ClusterIssuer]: https://cert-manager.io/docs/concepts/issuer/
[ingress-nginx ingress controller]: https://kubernetes.github.io/ingress-nginx/deploy/
[installation ideas]: https://cert-manager.io/docs/installation/
[Ingress]: https://kubernetes.io/docs/concepts/services-networking/ingress/
[Issuer Configuration]: https://cert-manager.io/docs/configuration/
[official-provider-families]: /manuals/packages/providers/provider-families
[aws-eks]: https://aws.amazon.com/eks/
[google-cloud-gke]: https://cloud.google.com/kubernetes-engine
[microsoft-aks]: https://azure.microsoft.com/en-us/products/kubernetes-service
[upbound-account]: https://www.upbound.io/register/?utm_source=docs&utm_medium=cta&utm_campaign=docs_spaces
[provider-aws-ec2]: https://marketplace.upbound.io/providers/upbound/provider-aws-ec2
[provider-aws-s3]: https://marketplace.upbound.io/providers/upbound/provider-aws-s3
[provider-aws-iam]: https://marketplace.upbound.io/providers/upbound/provider-aws-iam
[azure-limits]: https://learn.microsoft.com/en-us/azure/azure-resource-manager/management/request-limits-and-throttling
[aws-ec2-limits]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/throttling.html#throttling-limits-rate-based
[upjet]: https://github.com/upbound/upjet

diff --git a/spaces_versioned_docs/version-v1.9/howtos/self-hosted/dr.md b/spaces_versioned_docs/version-v1.9/howtos/self-hosted/dr.md
new file mode 100644
index 000000000..67ecbfecf
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.9/howtos/self-hosted/dr.md
@@ -0,0 +1,412 @@
---
title: Disaster Recovery
sidebar_position: 13
description: Configure Space-wide backups for disaster recovery.
---

:::info API Version Information
This guide applies to **Spaces v1.9.0 and later** (Self-Hosted only). Spaces enables Space Backups by default starting in v1.14.0.

- **v1.9.0-v1.13.x**: Alpha feature (requires manual enablement)
- **v1.14.0+**: GA (enabled by default)

For control plane backups, see [Backup and Restore](../backup-and-restore.md).
:::

:::important
For Connected and Disconnected Spaces, this feature requires Spaces `v1.9.0` and, starting with `v1.14.0`, Spaces enables it by default.
+
+To enable it on versions earlier than `v1.14.0`, set `features.alpha.spaceBackup.enabled=true` when you install Spaces.
+
+```bash
+up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
+  ...
+  --set "features.alpha.spaceBackup.enabled=true"
+```
+:::
+
+Upbound's _Space Backups_ is a built-in Space-wide backup and restore feature. This guide explains how to configure Space Backups and how to restore from a backup for disaster recovery.
+
+This feature is meant for Space administrators. Group or control plane users can leverage [Shared Backups][shared-backups] to back up and restore their ControlPlanes.
+
+## Benefits
+The Space Backups feature provides the following benefits:
+
+* Automatic backups for all resources in a Space and all resources in control planes, without any operational overhead.
+* Backup schedules.
+* Selectors to specify resources to back up.
+
+## Prerequisites
+
+The Space Backups feature must be enabled in the Space:
+
+- Cloud Spaces: Not accessible to users.
+- Connected Spaces: A Space administrator must enable this feature.
+- Disconnected Spaces: A Space administrator must enable this feature.
+
+## Configure a Space Backup Config
+
+[SpaceBackupConfig][spacebackupconfig] is a cluster-scoped resource. This resource configures the storage details and provider. Whenever a backup executes (either by schedule or manually initiated), it references a SpaceBackupConfig to tell it where to store the snapshot.
+
+
+### Backup config provider
+
+
+The `spec.objectStorage.provider` and `spec.objectStorage.config` fields configure:
+
+* The object storage provider
+* The path to the provider
+* The credentials needed to communicate with the provider
+
+You can only set one provider. Upbound currently supports AWS, Azure, and GCP as providers.
+
+
+`spec.objectStorage.config` is a freeform map of configuration options for the object storage provider. See [Thanos object storage][thanos-object-storage] for more information on the formats for each supported cloud provider. `spec.objectStorage.bucket` and `spec.objectStorage.provider` override the corresponding required values in the config.
+
+
+#### AWS as a storage provider
+
+This example demonstrates how to use AWS as a storage provider for your backups:
+
+```yaml
+apiVersion: admin.spaces.upbound.io/v1alpha1
+kind: SpaceBackupConfig
+metadata:
+  name: default
+spec:
+  objectStorage:
+    provider: AWS
+    bucket: spaces-backup-bucket
+    config:
+      endpoint: s3.eu-west-2.amazonaws.com
+      region: eu-west-2
+    credentials:
+      source: Secret
+      secretRef:
+        name: bucket-creds
+        namespace: upbound-system
+        key: creds
+```
+
+This example assumes you've already created an S3 bucket called
+`spaces-backup-bucket` in the `eu-west-2` AWS region. To access the bucket,
+define the account credentials as a Secret in the specified Namespace
+(`upbound-system` in this example).
+
+#### Azure as a storage provider
+
+This example demonstrates how to use Azure as a storage provider for your backups:
+
+```yaml
+apiVersion: admin.spaces.upbound.io/v1alpha1
+kind: SpaceBackupConfig
+metadata:
+  name: default
+  namespace: default
+spec:
+  objectStorage:
+    provider: Azure
+    bucket: upbound-backups
+    config:
+      storage_account: upbackupstore
+      container: upbound-backups
+      endpoint: blob.core.windows.net
+    credentials:
+      source: Secret
+      secretRef:
+        name: bucket-creds
+        namespace: upbound-system
+        key: creds
+```
+
+
+This example assumes you've already created an Azure storage account called
+`upbackupstore` and blob `upbound-backups`. 
To access the blob, +define the account credentials as a Secret in the specified Namespace +(`upbound-system` in this example). + + +#### GCP as a storage provider + +This example demonstrates how to use Google Cloud Storage as a storage provider for your backups: + +```yaml +apiVersion: admin.spaces.upbound.io/v1alpha1 +kind: SpaceBackupConfig +metadata: + name: default + namespace: default +spec: + objectStorage: + provider: GCP + bucket: spaces-backup-bucket + credentials: + source: Secret + secretRef: + name: bucket-creds + namespace: upbound-system + key: creds +``` + + +This example assumes you've already created a Cloud bucket called +"spaces-backup-bucket" and a service account with access to this bucket. Define the key file as a Secret in the specified Namespace +(`upbound-system` in this example). + + +## Configure a Space Backup Schedule + + +[SpaceBackupSchedule][spacebackupschedule] is a cluster-scoped resource. This resource defines a backup schedule for the whole Space. + +Below is an example of a Space Backup Schedule running every day. It backs up all groups having `environment: production` labels and all control planes in those groups having `backup: please` labels. + +```yaml +apiVersion: admin.spaces.upbound.io/v1alpha1 +kind: SpaceBackupSchedule +metadata: + name: daily-schedule +spec: + schedule: "@daily" + configRef: + kind: SpaceBackupConfig + name: default + match: + groups: + labelSelectors: + - matchLabels: + environment: production + controlPlanes: + labelSelectors: + - matchLabels: + backup: please +``` + +### Define a schedule + +The `spec.schedule` field is a [Cron-formatted][cron-formatted] string. Some common examples are below: + +| Entry | Description | +| ----------------- | ------------------------------------------------------------------------------------------------- | +| `@hourly` | Run once an hour. | +| `@daily` | Run once a day. | +| `@weekly` | Run once a week. | +| `0 0/4 * * *` | Run every 4 hours. | +| `0/15 * * * 1-5` | Run every fifteenth minute on Monday through Friday. | +| `@every 1h30m10s` | Run every 1 hour, 30 minutes, and 10 seconds. Hour is the largest measurement of time for @every. | + +### Suspend a schedule + +Use `spec.suspend` field to suspend the schedule. It creates no new backups, but allows running backups to complete. + +```yaml +apiVersion: admin.spaces.upbound.io/v1alpha1 +kind: SpaceBackupSchedule +metadata: + name: daily-schedule +spec: + suspend: true +... +``` + +### Garbage collect backups when the schedule gets deleted + +Set the `spec.useOwnerReferencesInBackup` to garbage collect associated `SpaceBackup` when a `SpaceBackupSchedule` gets deleted. If set to true, backups are garbage collected when the schedule gets deleted. + +### Set the time to live + +Set the `spec.ttl` field to define the time to live for the backup. After this time, the backup is eligible for garbage collection. If this field isn't set, the backup isn't garbage collected. + +The time to live is a duration, for example, `168h` for 7 days. + +```yaml +apiVersion: admin.spaces.upbound.io/v1alpha1 +kind: SpaceBackupSchedule +metadata: + name: daily-schedule +spec: + ttl: 168h # Backup is garbage collected after 7 days +... +``` + +## Selecting space resources to backup + +By default, a SpaceBackup selects all groups and, for each of them, all control planes, secrets, and any other group-scoped resources. + +By setting `spec.match`, you can include only specific groups, control planes, secrets, or other Space resources in the backup. 
+ +By setting `spec.exclude`, you can filter out some matched Space API resources from the backup. + +### Including space resources in a backup + +Different fields are available to include resources based on labels or names: +- `spec.match.groups` to include only some groups in the backup. +- `spec.match.controlPlanes` to include only some control planes in the backup. +- `spec.match.secrets` to include only some secrets in the backup. +- `spec.match.extras` to include only some extra resources in the backup. + +```yaml +apiVersion: admin.spaces.upbound.io/v1alpha1 +kind: SpaceBackup +metadata: + name: my-backup +spec: + configRef: + kind: SpaceBackupConfig + name: default + match: + groups: + labelSelectors: + - matchLabels: + environment: production + controlPlanes: + labelSelectors: + - matchLabels: + backup: please + secrets: + names: + - my-secret + extras: + - apiGroup: "spaces.upbound.io" + kind: "SharedBackupConfig" + names: + - my-shared-backup +``` + +### Excluding Space resources from the backup + +Use the `spec.exclude` field to exclude matched Space API resources from the backup. + +Different fields are available to exclude resources based on labels or names: +- `spec.exclude.groups` to exclude some groups from the backup. +- `spec.exclude.controlPlanes` to exclude some control planes from the backup. +- `spec.exclude.secrets` to exclude some secrets from the backup. +- `spec.exclude.extras` to exclude some extra resources from the backup. + +```yaml +apiVersion: admin.spaces.upbound.io/v1alpha1 +kind: SpaceBackup +metadata: + name: my-backup +spec: + ttl: 168h # Backup is garbage collected after 7 days + configRef: + kind: SpaceBackupConfig + name: default + match: + groups: + labelSelectors: + - matchLabels: + environment: production + exclude: + groups: + names: + - not-this-one-please +``` + +### Exclude resources in control planes' backups + +By default, it backs up all resources in a selected control plane. + +Use the `spec.controlPlaneBackups.excludedResources` field to exclude resources from control planes' backups. + +```yaml +apiVersion: admin.spaces.upbound.io/v1alpha1 +kind: SpaceBackup +metadata: + name: my-backup +spec: + ttl: 168h # Backup is garbage collected after 7 days + configRef: + kind: SpaceBackupConfig + name: default + controlPlaneBackups: + excludedResources: + - secrets + - buckets.s3.aws.upbound.io +``` + +## Create a manual backup + +[SpaceBackup][spacebackup] is a cluster-scoped resource that causes a single backup to occur for the whole Space. + +Below is an example of a manual SpaceBackup: + +```yaml +apiVersion: admin.spaces.upbound.io/v1alpha1 +kind: SpaceBackup +metadata: + name: my-backup +spec: + configRef: + kind: SpaceBackupConfig + name: default + deletionPolicy: Delete +``` + + +The backup specification `DeletionPolicy` defines backup deletion actions, +including the deletion of the backup file from the bucket. The `Deletion Policy` +value defaults to `Orphan`. Set it to `Delete` to remove uploaded files +in the bucket. +For more information on the backup and restore process, review the [Spaces API documentation][spaces-api-documentation]. + +### Set the time to live + +Set the `spec.ttl` field to define the time to live for the backup. After this time, the backup is eligible for garbage collection. If this field isn't set, the backup isn't garbage collected. The time to live is a duration, for example, `168h` for 7 days. 
+
+```yaml
+apiVersion: admin.spaces.upbound.io/v1alpha1
+kind: SpaceBackup
+metadata:
+  name: my-backup
+spec:
+  ttl: 168h # Backup is garbage collected after 7 days
+...
+```
+
+## Restore from a space backup
+
+Space Backup and Restore focuses only on disaster recovery. The restore procedure assumes a new Space installation with no existing resources. The restore procedure is idempotent, so you can run it multiple times without any side effects in case of failures.
+
+To restore a Space from an existing Space Backup, follow these steps:
+
+1. Install Spaces from scratch as needed.
+2. Create a `SpaceBackupConfig` as needed to access the SpaceBackup from the object storage, for example named `my-backup-config`.
+3. Select the backup you want to restore from, for example `my-backup`.
+4. Run the following command to restore the Space:
+
+```shell
+export SPACE_BACKUP_CONFIG=my-backup-config
+export SPACE_BACKUP=my-backup
+kubectl exec -ti -n upbound-system deployments/spaces-controller -c spaces -- hyperspace restore $SPACE_BACKUP $SPACE_BACKUP_CONFIG
+```
+
+### Restore specific control planes
+
+:::important
+This feature is available from Spaces v1.11.
+:::
+
+Instead of restoring the whole Space, you can choose to restore specific control planes
+from a backup using the `--controlplanes` flag. You can also use
+the `--skip-space-restore` flag to skip restoring Space objects.
+This allows Spaces admins to restore individual control planes without
+needing to restore the entire Space.
+
+```shell
+export SPACE_BACKUP_CONFIG=my-backup-config
+export SPACE_BACKUP=my-backup
+kubectl exec -ti -n upbound-system deployments/spaces-controller -c spaces \
+  -- hyperspace restore $SPACE_BACKUP $SPACE_BACKUP_CONFIG --controlplanes default/ctp1,default/ctp2 --skip-space-restore
+```
+
+
+[shared-backups]: /spaces/howtos/self-hosted/workload-id/backup-restore-config/
+[spacebackupconfig]: /reference/apis/spaces-api/v1_9
+[thanos-object-storage]: https://thanos.io/tip/thanos/storage.md/
+[spacebackupschedule]: /reference/apis/spaces-api/v1_9
+[cron-formatted]: https://en.wikipedia.org/wiki/Cron
+[spacebackup]: /reference/apis/spaces-api/v1_9
+[spaces-api-documentation]: /reference/apis/spaces-api/v1_9
+
diff --git a/spaces_versioned_docs/version-v1.9/howtos/self-hosted/gitops-with-argocd.md b/spaces_versioned_docs/version-v1.9/howtos/self-hosted/gitops-with-argocd.md
new file mode 100644
index 000000000..004247a10
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.9/howtos/self-hosted/gitops-with-argocd.md
@@ -0,0 +1,142 @@
+---
+title: GitOps with ArgoCD in Self-Hosted Spaces
+sidebar_position: 80
+description: Set up GitOps workflows with Argo CD in self-hosted Spaces
+plan: "business"
+---
+
+:::info Deployment Model
+This guide applies to **self-hosted Spaces** deployments. For Upbound Cloud Spaces, see [GitOps with Upbound Control Planes](/spaces/howtos/cloud-spaces/gitops-on-upbound/).
+:::
+
+GitOps is an approach for managing a system by declaratively describing desired resources' configurations in Git and using controllers to realize the desired state. Upbound's control planes are compatible with this pattern, and Upbound strongly recommends integrating GitOps into the platforms you build on Upbound.
+
+
+## Integrate with Argo CD
+
+
+[Argo CD][argo-cd] is a project in the Kubernetes ecosystem commonly used for
+GitOps. You can use it in tandem with Upbound control planes to achieve GitOps
+flows. The sections below explain how to integrate these tools with Upbound.
+
+### Configure connection secrets for control planes
+
+You can configure control planes to write their connection details to a secret.
+Do this by setting the
+[`spec.writeConnectionSecretToRef`][spec-writeconnectionsecrettoref] field in a
+control plane manifest. For example:
+
+```yaml
+apiVersion: spaces.upbound.io/v1beta1
+kind: ControlPlane
+metadata:
+  name: ctp1
+  namespace: default
+spec:
+  writeConnectionSecretToRef:
+    name: kubeconfig-ctp1
+    namespace: default
+```
+
+
+### Configure Argo CD
+
+
+To configure Argo CD for Annotation resource tracking, edit the Argo CD
+ConfigMap in the Argo CD namespace. Add `application.resourceTrackingMethod:
+annotation` to the data section as below.
+
+Next, configure the [auto respect RBAC for the Argo CD
+controller][auto-respect-rbac-for-the-argo-cd-controller-1]. By default, Argo CD
+attempts to discover some Kubernetes resource types that don't exist in a
+control plane. You must configure Argo CD to respect the cluster's RBAC rules so
+that Argo CD can sync. Add `resource.respectRBAC: normal` to the data section as
+below.
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: argocd-cm
+data:
+  ...
+  application.resourceTrackingMethod: annotation
+  resource.respectRBAC: normal
+```
+
+:::tip
+The `resource.respectRBAC` configuration above tells Argo to respect RBAC for
+_all_ cluster contexts. If you're using an Argo CD instance to manage more than
+only control planes, you should consider changing the `clusters` string match
+for the configuration to apply only to control planes. For example, if every
+control plane context name followed the convention of being named
+`controlplane-`, you could set the string match to be `controlplane-*`.
+:::
+
+
+### Create a cluster context definition
+
+
+Once the control plane is ready, extract the following values from the secret
+containing the kubeconfig:
+
+```bash
+kubeconfig_content=$(kubectl get secrets kubeconfig-ctp1 -n default -o jsonpath='{.data.kubeconfig}' | base64 -d)
+server=$(echo "$kubeconfig_content" | grep 'server:' | awk '{print $2}')
+bearer_token=$(echo "$kubeconfig_content" | grep 'token:' | awk '{print $2}')
+ca_data=$(echo "$kubeconfig_content" | grep 'certificate-authority-data:' | awk '{print $2}')
+```
+
+Generate a new secret in the cluster where you installed Argo, using the prior
+values extracted:
+
+```yaml
+cat <
+
+import GlobalLanguageSelector, { CodeBlock } from '@site/src/components/GlobalLanguageSelector';
+
+
+:::important
+This feature is only available for select Business Critical customers. You can't
+set up your own Managed Space without the assistance of Upbound. If you're
+interested in this deployment mode, please [contact us][contact].
+:::
+
+
+A Managed Space deployed on AWS is a single-tenant deployment of a control plane
+space in your AWS organization in an isolated sub-account. With Managed Spaces,
+you can use the same API, CLI, and Console that Upbound offers, with the benefit
+of running entirely in a cloud account that you own and Upbound manages for you.
+
+The following guide walks you through setting up a Managed Space in your AWS
+organization. If you have any questions while working through this guide,
+contact your Upbound Account Representative for help.
+
+
+A Managed Space deployed on GCP is a single-tenant deployment of a control plane
+space in your GCP organization in an isolated project. 
With Managed Spaces, you
+can use the same API, CLI, and Console that Upbound offers, with the benefit of
+running entirely in a cloud account that you own and Upbound manages for you.
+
+The following guide walks you through setting up a Managed Space in your GCP
+organization. If you have any questions while working through this guide,
+contact your Upbound Account Representative for help.
+
+
+## Managed Space on your cloud architecture
+
+
+A Managed Space is a deployment of the Upbound Spaces software inside an
+Upbound-controlled sub-account in your AWS cloud environment. The Spaces
+software runs in this sub-account, orchestrated by Kubernetes. Backups and
+billing data get stored inside bucket or blob storage in the same sub-account.
+The control planes deployed and controlled by the Spaces software run on the
+Kubernetes cluster that gets deployed into the sub-account.
+
+The diagram below illustrates the high-level architecture of Upbound Managed Spaces:
+
+![Upbound Managed Spaces arch](/img/managed-arch-aws.png)
+
+The Spaces software gets deployed on an EKS cluster in the region of your
+choice. This EKS cluster is where your control planes are ultimately run.
+Upbound also deploys two buckets: one for collecting billing data and one for
+control plane backups.
+
+Upbound doesn't have access to other sub-accounts or your organization-level
+settings in your cloud environment. Outside of your cloud organization, Upbound
+runs the Upbound Console, which includes the Upbound API and web application,
+including the dashboard you see at `console.upbound.io`. By default, all
+connections are encrypted, but public. You also have the option to
+use private network connectivity through [AWS PrivateLink][aws-privatelink].
+
+
+A Managed Space is a deployment of the Upbound Spaces software inside an
+Upbound-controlled project in your GCP cloud environment. The Spaces software
+runs in this project, orchestrated by Kubernetes. Backups and billing data get
+stored inside bucket or blob storage in the same project. The control planes
+deployed and controlled by the Spaces software run on the Kubernetes cluster
+that gets deployed into the project.
+
+The diagram below illustrates the high-level architecture of Upbound Managed Spaces:
+
+![Upbound Managed Spaces arch](/img/managed-arch-gcp.png)
+
+The Spaces software gets deployed on a GKE cluster in the region of your choice.
+This GKE cluster is where your control planes are ultimately run. Upbound also
+deploys two cloud buckets: one for collecting billing data and one for
+control plane backups.
+
+Upbound doesn't have access to other projects or your organization-level
+settings in your cloud environment. Outside of your cloud organization, Upbound
+runs the Upbound Console, which includes the Upbound API and web application,
+including the dashboard you see at `console.upbound.io`. By default, all
+connections are encrypted, but public. You also have the option to
+use private network connectivity through [GCP Private Service
+Connect][gcp-private-service-connect].
+
+
+## Prerequisites
+
+- An organization created on Upbound
+
+
+- You should have a preexisting AWS organization to complete this guide.
+- You must create a new AWS sub-account. Read the [AWS documentation][aws-documentation] to learn how to create a new sub-account in an existing organization on AWS; a CLI sketch follows below.
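+
+As a rough sketch (the email address and account name below are illustrative
+placeholders you must replace with your own), creating a sub-account with the
+AWS CLI looks like this:
+
+```bash
+# Create a new member account in your AWS organization.
+# Both values are placeholders -- substitute your own.
+aws organizations create-account \
+  --email spaces-admin@example.com \
+  --account-name "upbound-managed-space"
+```
+
+The call is asynchronous; you can poll its progress with
+`aws organizations describe-create-account-status` and the request ID the
+first command returns.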
+ +After the sub-account information gets provided to Upbound, **don't change it +any further.** Any changes made to the sub-account or the resources created by +Upbound for the purposes of the Managed Space deployments voids the SLA you have +with Upbound. If you want to make configuration changes, contact your Upbound +Solutions Architect. + + + + + +- You should have a preexisting GCP organization with an active Cloud Billing account to complete this guide. +- You must create a new GCP project. Read the [GCP documentation][gcp-documentation] to learn how to create a new project in an existing organization on GCP. + +After the project information gets provided to Upbound, **don't change it any +further.** Any changes made to the project or the resources created by Upbound +for the purposes of the Managed Space deployments voids the SLA you have with +Upbound. If you want to make configuration changes, contact your Upbound +Solutions Architect. + + + + + +## Set up cross-account management + +Upbound supports using AWS Key Management Service with cross-account IAM +permissions. This enables the isolation of keys so the infrastructure operated +by Upbound has limited access to symmetric keys. + +In the KMS key's account, apply the baseline key policy: + +```json +{ + "Sid": "Allow Upbound to use this key", + "Effect": "Allow", + "Principal": { + "AWS": ["[Managed Space sub-account ID]"] + }, + "Action": ["kms:Encrypt", "kms:Decrypt", "kms:ReEncrypt*", "kms:GenerateDataKey*", "kms:DescribeKey"], + "Resource": "*" +} +``` + +You need another key policy to let the sub-account create persistent resources +with the KMS key: + +```json +{ + "Sid": "Allow attachment of persistent resources for an Upbound Managed Space", + "Effect": "Allow", + "Principal": { + "AWS": "[Managed Space sub-account ID]" + }, + "Action": ["kms:CreateGrant", "kms:ListGrants", "kms:RevokeGrant"], + "Resource": "*", + "Condition": { + "Bool": { + "kms:GrantIsForAWSResource": "true" + } + } +} +``` + +### Configure PrivateLink + +By default, all connections to the Upbound Console are encrypted, but public. +AWS PrivateLink is a feature that allows VPC peering whereby your traffic +doesn't traverse the public internet. To have this configured, contact your +Upbound Account Representative. + + + + + +## Enable APIs + +Enable the following APIs in the new project: + +- Kubernetes Engine API +- Cloud Resource Manager API +- Compute Engine API +- Cloud DNS API + +:::tip +Read how to enable APIs in a GCP project [here][here]. +::: + +## Create a service account + +Create a service account in the new project. Name the service account, +upbound-sa. Give the service account the following roles: + +- Compute Admin +- Project IAM Admin +- Service Account Admin +- DNS Administrator +- Editor + +Select the service account you just created. Select keys. Add a new key and +select JSON. The key gets downloaded to your machine. Save this for later. + +## Create a DNS Zone + +Create a DNS Zone, set the **Zone type** to `Public`. + +### Configure Private Service Connect + +By default, all connections to the Upbound Console are encrypted, but public. +GCP Private Service Connect is a feature that allows VPC peering whereby your +traffic doesn't traverse the public internet. To have this configured, contact +your Upbound Account Representative. + + + +## Provide information to Upbound + +Once these policies get attached to the key, tell your Upbound Account +Representative, providing them the following: + + + +- the full ARN of the KMS key. 
+- The name of the organization that you created in Upbound. Use the `up` CLI command, `up org list`, to see this information.
+- Confirmation of which region in AWS you want the deployment to target.
+
+
+- The service account JSON key
+- The NS records associated with the DNS name created in the last step.
+- The name of the organization that you created in Upbound. Use the `up` CLI command, `up org list`, to see this information.
+- Confirmation of which region in GCP you want the deployment to target.
+
+
+Once Upbound has this information, the request gets processed within one business day.
+
+## Use your Managed Space
+
+Once the Managed Space gets deployed, you can see it in the Space selector when browsing your environment on [`console.upbound.io`][console-upbound-io].
+
+
+[contact]: https://www.upbound.io/contact-us
+[aws-privatelink]: #configure-privatelink
+[aws-documentation]: https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_create.html#orgs_manage_accounts_create-new
+[gcp-private-service-connect]: #configure-private-service-connect
+[gcp-documentation]: https://cloud.google.com/resource-manager/docs/creating-managing-organization
+[here]: https://cloud.google.com/apis/docs/getting-started#enabling_apis
+[console-upbound-io]: https://console.upbound.io/
diff --git a/spaces_versioned_docs/version-v1.9/howtos/self-hosted/oidc-configuration.md b/spaces_versioned_docs/version-v1.9/howtos/self-hosted/oidc-configuration.md
new file mode 100644
index 000000000..cbef4dc42
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.9/howtos/self-hosted/oidc-configuration.md
@@ -0,0 +1,289 @@
+---
+title: Configure OIDC
+sidebar_position: 20
+description: Configure OIDC in your Space
+---
+:::important
+This guide is only applicable for administrators who've deployed self-hosted Spaces. For general RBAC in Upbound, read [Upbound RBAC][upbound-rbac].
+:::
+
+Upbound uses the Kubernetes [Structured Authentication Configuration][structured-auth-config] to validate OIDC tokens sent to the API. Upbound stores this
+configuration as a `ConfigMap` and passes it to the Upbound router
+component during installation with Helm.
+
+This guide walks you through how to create and apply an authentication
+configuration to validate Upbound with an external identity provider. Each
+section focuses on a specific part of the configuration file.
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments.
+
+For related platform authentication features, see the [Platform manual](../../../../platform/).
+:::
+
+## Creating the `AuthenticationConfiguration` file
+
+First, create a file called `config.yaml` with an `AuthenticationConfiguration`
+kind. The `AuthenticationConfiguration` is the initial authentication structure
+necessary for Upbound to communicate with your chosen identity provider.
+
+```yaml
+apiVersion: apiserver.config.k8s.io/v1beta1
+kind: AuthenticationConfiguration
+jwt:
+- issuer:
+    url: oidc-issuer-url
+    audiences:
+    - oidc-client-id
+  claimMappings: # optional
+    username:
+      claim: oidc-username-claim
+      prefix: oidc-username-prefix
+    groups:
+      claim: oidc-groups-claim
+      prefix: oidc-groups-prefix
+```
+
+
+For detailed configuration options, including the CEL-based token validation,
+review the feature [documentation][structured-auth-config].
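+
+For example, a claim validation rule (a minimal sketch; the expression and
+message here are illustrative placeholders) sits alongside `claimMappings` on
+an issuer entry:
+
+```yaml
+jwt:
+- issuer:
+    url: oidc-issuer-url
+    audiences:
+    - oidc-client-id
+  claimValidationRules:
+  # Illustrative CEL rule: reject tokens whose total lifetime exceeds 24 hours.
+  - expression: 'claims.exp - claims.nbf <= 86400'
+    message: 'total token lifetime must not exceed 24 hours'
+```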
+
+
+The `AuthenticationConfiguration` allows you to configure multiple JWT
+authenticators as separate issuers.
+
+### Configure an issuer
+
+The `jwt` array requires an `issuer` specification and typically contains:
+
+- A `username` claim mapping
+- A `groups` claim mapping
+
+Optionally, the configuration may also include:
+
+- A set of claim validation rules
+- A set of user validation rules
+
+The `issuer` URL must be unique across all configured authenticators.
+
+```yaml
+issuer:
+  url: https://example.com
+  discoveryUrl: https://discovery.example.com/.well-known/openid-configuration
+  certificateAuthority: |-
+    
+  audiences:
+  - client-id-a
+  - client-id-b
+  audienceMatchPolicy: MatchAny
+```
+
+By default, the authenticator assumes the OIDC Discovery URL is
+`{issuer.url}/.well-known/openid-configuration`. Most identity providers follow
+this structure, and you can omit the `discoveryUrl` field. To use a separate
+discovery service, specify the full path to the discovery endpoint in this
+field.
+
+If the CA for the Issuer isn't public, provide the PEM encoded CA for the Discovery URL.
+
+At least one of the `audiences` entries must match the `aud` claim in the JWT.
+For OIDC tokens, this is the Client ID of the application attempting to access
+the Upbound API. Having multiple values set allows the same configuration to
+apply to multiple client applications, for example the `kubectl` CLI and an
+Internal Developer Portal.
+
+If you specify multiple `audiences`, `audienceMatchPolicy` must equal `MatchAny`.
+
+### Configure `claimMappings`
+
+#### Username claim mapping
+
+By default, the authenticator uses the `sub` claim as the user name. To override this, either:
+
+- specify *both* `claim` and `prefix` (`prefix` may be explicitly set to the empty string), or
+- specify a CEL `expression` to calculate the user name.
+
+```yaml
+claimMappings:
+  username:
+    claim: "sub"
+    prefix: "keycloak"
+    # or, alternatively:
+    expression: 'claims.username + ":external-user"'
+```
+
+
+#### Groups claim mapping
+
+By default, this configuration doesn't map groups, unless you either:
+
+- specify both `claim` and `prefix` (`prefix` may be explicitly set to the empty string), or
+- specify a CEL `expression` that returns a string or list of strings.
+
+
+```yaml
+claimMappings:
+  groups:
+    claim: "groups"
+    prefix: ""
+    # or, alternatively:
+    expression: 'claims.roles.split(",")'
+```
+
+
+### Validation rules
+
+
+Validation rules are outside the scope of this document. Review the
+[documentation][structured-auth-config] for more information. Examples include
+using CEL expressions to validate authentication such as:
+
+
+- Validating that a token claim has a specific value
+- Validating that a token has a limited lifetime
+- Ensuring usernames and groups don't contain reserved prefixes
+
+## Required claims
+
+To interact with Space and ControlPlane APIs, users must have the `upbound.io/aud` claim set to one of the following:
+
+| Upbound.io Audience                                       | Notes                                                                 |
+| -------------------------------------------------------- | --------------------------------------------------------------------- |
+| `[]`                                                      | No Access to Space-level or ControlPlane APIs                          |
+| `['upbound:spaces:api']`                                  | This Identity is only for Space-level APIs                              |
+| `['upbound:spaces:controlplanes']`                        | This Identity is only for ControlPlane APIs                             |
+| `['upbound:spaces:api', 'upbound:spaces:controlplanes']`  | This Identity is for both Space-level and ControlPlane APIs             |
+
+
+You can set this claim in two ways:
+
+- In the identity provider mapped in the ID token.
+- Inject it in the authenticator with the `jwt.claimMappings.extra` array.
+
+For example:
+```yaml
+apiVersion: apiserver.config.k8s.io/v1beta1
+kind: AuthenticationConfiguration
+jwt:
+- issuer:
+    url: https://keycloak:8443/realms/master
+    certificateAuthority: |-
+      
+    audiences:
+    - master-realm
+    audienceMatchPolicy: MatchAny
+  claimMappings:
+    username:
+      claim: "preferred_username"
+      prefix: "keycloak:"
+    groups:
+      claim: "groups"
+      prefix: ""
+    extra:
+    - key: 'upbound.io/aud'
+      valueExpression: "['upbound:spaces:controlplanes', 'upbound:spaces:api']"
+```
+
+## Install the `AuthenticationConfiguration`
+
+Once you create an `AuthenticationConfiguration` file, store it as a
+`ConfigMap` in the host cluster for the Upbound Space. The ConfigMap name
+below is a placeholder; use a name of your choosing.
+
+```sh
+kubectl create configmap <configmap-name> -n upbound-system --from-file=config.yaml=./path/to/config.yaml
+```
+
+
+To enable OIDC authentication and disable Upbound IAM when installing the Space,
+reference the configuration and pass an empty value to the Upbound IAM issuer
+parameter:
+
+
+```sh
+up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
+  ...
+  --set "authentication.structuredConfig=" \
+  --set "router.controlPlane.extraArgs[0]=--upbound-iam-issuer-url="
+```
+
+## Configure RBAC
+
+
+In this scenario, the external identity provider handles authentication, but
+permissions for Spaces and ControlPlane APIs use standard RBAC objects.
+
+### Spaces APIs
+
+The Spaces APIs include:
+```yaml
+- apiGroups:
+  - spaces.upbound.io
+  resources:
+  - controlplanes
+  - sharedexternalsecrets
+  - sharedsecretstores
+  - backups
+  - backupschedules
+  - sharedbackups
+  - sharedbackupconfigs
+  - sharedbackupschedules
+- apiGroups:
+  - observability.spaces.upbound.io
+  resources:
+  - sharedtelemetryconfigs
+```
+
+### ControlPlane APIs
+
+
+Crossplane specifies three [roles][crossplane-managed-clusterroles] for a
+ControlPlane: admin, editor, and viewer. These map to the verbs `admin`, `edit`,
+and `view` on the `controlplanes/k8s` resource in the `spaces.upbound.io` API
+group.
+
+
+### Control access
+
+The `groups` claim in the `AuthenticationConfiguration` allows you to control
+resource access when you create a `ClusterRoleBinding`. A `ClusterRole` defines
+the role's permissions, and a `ClusterRoleBinding` grants that role to subjects
+such as groups.
+
+The example below grants `admin` permissions on all ControlPlanes to members of
+the `ctp-admins` group:
+
+```yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: allow-ctp-admin
+rules:
+- apiGroups:
+  - spaces.upbound.io
+  resources:
+  - controlplanes/k8s
+  verbs:
+  - admin
+```
+
+The corresponding `ClusterRoleBinding` grants the role to the `ctp-admins` group:
+```yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: allow-ctp-admin
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: allow-ctp-admin
+subjects:
+- apiGroup: rbac.authorization.k8s.io
+  kind: Group
+  name: ctp-admins
+```
+
+[structured-auth-config]: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#using-authentication-configuration
+[crossplane-managed-clusterroles]: https://github.com/crossplane/crossplane/blob/master/design/design-doc-rbac-manager.md#managed-rbac-clusterroles
+[upbound-rbac]: /manuals/platform/concepts/authorization/upbound-rbac
diff --git a/spaces_versioned_docs/version-v1.9/howtos/self-hosted/proxies-config.md b/spaces_versioned_docs/version-v1.9/howtos/self-hosted/proxies-config.md
new file mode 100644
index 000000000..3802e4cb0
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.9/howtos/self-hosted/proxies-config.md
@@ -0,0 +1,31 @@
+---
+title: Proxied configuration
+sidebar_position: 20
+description: Configure Upbound within a proxied environment
+---
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments in proxied environments. Proxy configuration procedures are consistent across versions.
+:::
+
+
+When you install Upbound with Helm in a proxied environment, update the
+registry values shown below to point to your internal registry.
+
+
+```bash
+helm -n upbound-system upgrade --install spaces \
+  oci://xpkg.upbound.io/spaces-artifacts/spaces \
+  --version "${SPACES_VERSION}" \
+  --set "ingress.host=${SPACES_ROUTER_HOST}" \
+  --set "account=${UPBOUND_ACCOUNT}" \
+  --set "authentication.hubIdentities=true" \
+  --set "authorization.hubRBAC=true" \
+  --set "registry=registry.company.corp/spaces" \
+  --set "controlPlanes.uxp.registryOverride=registry.company.corp/xpkg.upbound.io" \
+  --set "controlPlanes.uxp.repository=registry.company.corp/spaces" \
+  --wait
+```
diff --git a/spaces_versioned_docs/version-v1.9/howtos/self-hosted/query-api.md b/spaces_versioned_docs/version-v1.9/howtos/self-hosted/query-api.md
new file mode 100644
index 000000000..c112e9001
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.9/howtos/self-hosted/query-api.md
@@ -0,0 +1,396 @@
+---
+title: Deploy Query API infrastructure
+weight: 130
+description: Query API
+aliases:
+  - /all-spaces/self-hosted-spaces/query-api
+  - /self-hosted-spaces/query-api
+  - all-spaces/self-hosted-spaces/query-api
+---
+
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+). Query API has evolved across versions:
+
+- **Cloud Spaces**: Available since v1.6 (enabled by default)
+- **Self-Hosted**: Available since v1.8 (requires manual enablement)
+:::
+
+:::important
+
+This feature is in preview. The Query API is available in the Cloud Space offering in `v1.6` and enabled by default.
+
+Since `v1.8.0`, this feature is required to connect a Space. It's off by default; see below to enable it.
+
+:::
+
+Upbound's Query API allows users to inspect objects and resources within their control planes. The read-only `up alpha query` and `up alpha get` CLI commands allow you to gather information on your control planes in a fast and efficient package. These commands follow the [`kubectl` conventions][kubectl-conventions] for filtering, sorting, and retrieving information from your Space.
+
+The Query API requires a PostgreSQL database to store the data. You can use the default PostgreSQL instance provided by Upbound or bring your own PostgreSQL instance.
+
+## Managed setup
+
+:::tip
+If you don't have specific requirements for your setup, Upbound recommends following this approach.
+:::
+
+To enable this feature, set `features.alpha.apollo.enabled=true` and `apollo.apollo.storage.postgres.create=true` when installing Spaces.
+
+You also need to install CloudNativePG (`CNPG`) to provide the PostgreSQL instance. You can let the `up` CLI do this for you, or install it manually.
+
+For more customization, see the [Helm chart reference][helm-chart-reference]. You can modify the number
+of PostgreSQL instances, pooling instances, storage size, and more.
+
+If you have specific requirements not addressed in the Helm chart, see below for more information on how to bring your own [PostgreSQL setup][postgresql-setup].
+
+### Using the up CLI
+
+Before you begin, make sure you have the most recent version of the [`up` CLI installed][up-cli-installed].
+
+To enable this feature, set `features.alpha.apollo.enabled=true` and `apollo.apollo.storage.postgres.create=true` when installing Spaces:
+
+```bash
+up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
+  ...
+  --set "features.alpha.apollo.enabled=true" \
+  --set "apollo.apollo.storage.postgres.create=true"
+```
+
+`up space init` and `up space upgrade` install CloudNativePG automatically, if needed.
+
+### Helm chart
+
+If you are installing the Helm chart in some other way, you can manually install CloudNativePG in one of the [supported ways][supported-ways], for example:
+
+```shell
+kubectl apply --server-side -f \
+  https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.24.1/cnpg-1.24.1.yaml
+kubectl rollout status -n cnpg-system deployment cnpg-controller-manager -w --timeout 120s
+```
+
+Next, install the Spaces Helm chart with the necessary values, for example:
+
+```shell
+helm -n upbound-system upgrade --install spaces \
+  oci://xpkg.upbound.io/spaces-artifacts/spaces \
+  --version "${SPACES_VERSION}" \
+  ...
+  --set "features.alpha.apollo.enabled=true" \
+  --set "apollo.apollo.storage.postgres.create=true" \
+  --wait
+```
+
+## Self-hosted PostgreSQL configuration
+
+
+If your workflow requires more customization, you can provide your own
+PostgreSQL instance and configure credentials manually.
+
+Using your own PostgreSQL instance requires careful architecture consideration.
+Review the architecture and requirements guidelines.
+
+### Architecture
+
+Besides a PostgreSQL database, the Query API architecture consists of the following components:
+* **Apollo Syncers**: Watch `etcd` for changes and sync them to PostgreSQL. One or more per control plane.
+* **Apollo Server**: Serves the Query API from the data in PostgreSQL. One or more per Space.
+
+The default setup also uses the `PgBouncer` connection pooler to manage connections from the syncers.
+```mermaid
+graph LR
+    User[User]
+
+    subgraph Cluster["Cluster (Spaces)"]
+        direction TB
+        Apollo[apollo]
+
+        subgraph ControlPlanes["Control Planes"]
+            APIServer[API Server]
+            Syncer[apollo-syncer]
+        end
+    end
+
+    PostgreSQL[(PostgreSQL)]
+
+    User -->|requests| Apollo
+
+    Apollo -->|connects| PostgreSQL
+    Apollo -->|creates schemas & users| PostgreSQL
+
+    Syncer -->|watches| APIServer
+    Syncer -->|writes| PostgreSQL
+
+    PostgreSQL -->|data| Apollo
+
+    style PostgreSQL fill:#e1f5ff,stroke:#333,stroke-width:2px,color:#000
+    style Apollo fill:#ffe1e1,stroke:#333,stroke-width:2px,color:#000
+    style Cluster fill:#f0f0f0,stroke:#333,stroke-width:2px,color:#000
+    style ControlPlanes fill:#fff,stroke:#666,stroke-width:1px,stroke-dasharray: 5 5,color:#000
+```
+
+
+Each component needs to connect to the PostgreSQL database.
+
+In the event of database issues, you can provide a new database and the syncers
+automatically repopulate the data.
+
+### Requirements
+
+* A PostgreSQL 16 instance or cluster.
+* A database, for example named `upbound`.
+* **Optional**: A dedicated user for the Apollo Syncers, for example named `syncer`; otherwise, the Spaces Controller generates a dedicated set of credentials per syncer with the necessary permissions.
+* A dedicated **superuser or admin account** for the Apollo Server.
+* **Optional**: A connection pooler, like PgBouncer, to manage connections from the Apollo Syncers. If you didn't provide the optional users, you might have to configure the pooler to allow users to connect using the same credentials as PostgreSQL.
+* **Optional**: A read replica for the Apollo Syncers to connect to, to reduce load on the primary database; this might cause a slight delay in the data being available through the Query API.
+
+Below are example setups to get you started; you can mix and match them to suit your needs.
+
+### In-cluster setup
+
+:::tip
+
+If you don't have strong opinions on your setup, but still want full control
+over the resources created for unsupported customizations, Upbound recommends
+the in-cluster setup.
+
+:::
+
+For more customization than the managed setup, you can use CloudNativePG for
+PostgreSQL in the same cluster.
+
+For the in-cluster setup, manually deploy the operator in one of the [supported ways][supported-ways-1], for example:
+
+```shell
+kubectl apply --server-side -f \
+  https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.24.1/cnpg-1.24.1.yaml
+kubectl rollout status -n cnpg-system deployment cnpg-controller-manager -w --timeout 120s
+```
+
+Then create a `Cluster` and `Pooler` in the `upbound-system` namespace, for example:
+
+```shell
+kubectl create ns upbound-system
+
+kubectl apply -f - <
+
+### External setup
+
+
+:::tip
+
+If you want to run your PostgreSQL instance outside the cluster, but are fine with credentials being managed by the `apollo` user, this is the suggested way to proceed.
+
+:::
+
+When using this setup, you must manually create the required Secrets in the
+`upbound-system` namespace. The `apollo` user must have permissions to create
+schemas and users.
+
+```shell
+kubectl create ns upbound-system
+
+# A Secret containing the necessary credentials to connect to the PostgreSQL instance
+kubectl create secret generic spaces-apollo-pg-app -n upbound-system \
+  --from-literal=password=supersecret
+
+# A Secret containing the necessary CA certificate to verify the connection to the PostgreSQL instance
+kubectl create secret generic spaces-apollo-pg-ca -n upbound-system \
+  --from-file=ca.crt=/path/to/ca.crt
+```
+
+Next, install Spaces with the necessary settings:
+
+```shell
+export PG_URL=your-postgres-host:5432
+export PG_POOLED_URL=your-pgbouncer-host:5432 # this could be the same as above

+helm upgrade --install ... \
+  --set "features.alpha.apollo.enabled=true" \
+  --set "apollo.apollo.storage.postgres.create=false" \
+  --set "apollo.apollo.storage.postgres.connection.url=$PG_URL" \
+  --set "apollo.apollo.storage.postgres.connection.credentials.secret.name=spaces-apollo-pg-app" \
+  --set "apollo.apollo.storage.postgres.connection.credentials.format=basicauth" \
+  --set "apollo.apollo.storage.postgres.connection.ca.name=spaces-apollo-pg-ca" \
+  --set "apollo.apollo.storage.postgres.connection.syncer.url=$PG_POOLED_URL"
+```
+
+### External setup with all custom credentials
+
+To use custom credentials for the Apollo Syncers or the Apollo Server, create
+the following Secrets in the `upbound-system` namespace:
+
+```shell
+export APOLLO_SYNCER_USER=syncer
+export APOLLO_SERVER_USER=apollo
+
+kubectl create ns upbound-system
+
+# A Secret containing the necessary credentials to connect to the PostgreSQL instance
+kubectl create secret generic spaces-apollo-pg-app -n upbound-system \
+  --from-literal=password=supersecret
+
+# A Secret containing the necessary CA certificate to verify the connection to the PostgreSQL instance
+kubectl create secret generic spaces-apollo-pg-ca -n upbound-system \
+  --from-file=ca.crt=/path/to/ca.crt
+
+# A Secret containing the necessary credentials for the Apollo Syncers to connect to the PostgreSQL instance.
+# These will be used by all Syncers in the Space.
+kubectl create secret generic spaces-apollo-pg-syncer -n upbound-system \
+  --from-literal=username=$APOLLO_SYNCER_USER \
+  --from-literal=password=supersecret
+
+# A Secret containing the necessary credentials for the Apollo Server to connect to the PostgreSQL instance.
+kubectl create secret generic spaces-apollo-pg-apollo -n upbound-system \
+  --from-literal=username=$APOLLO_SERVER_USER \
+  --from-literal=password=supersecret
+```
+
+Next, install Spaces with the necessary settings:
+
+```shell
+export PG_URL=your-postgres-host:5432
+export PG_POOLED_URL=your-pgbouncer-host:5432 # this could be the same as above
+
+# In addition to the shared connection settings, the flags below configure
+# dedicated credentials for the syncers and for the server.
+helm ... \
+  --set "features.alpha.apollo.enabled=true" \
+  --set "apollo.apollo.storage.postgres.create=false" \
+  --set "apollo.apollo.storage.postgres.connection.url=$PG_URL" \
+  --set "apollo.apollo.storage.postgres.connection.credentials.secret.name=spaces-apollo-pg-app" \
+  --set "apollo.apollo.storage.postgres.connection.credentials.format=basicauth" \
+  --set "apollo.apollo.storage.postgres.connection.ca.name=spaces-apollo-pg-ca" \
+  --set "apollo.apollo.storage.postgres.connection.syncer.url=$PG_POOLED_URL" \
+  --set "apollo.apollo.storage.postgres.connection.syncer.credentials.format=basicauth" \
+  --set "apollo.apollo.storage.postgres.connection.syncer.credentials.user=$APOLLO_SYNCER_USER" \
+  --set "apollo.apollo.storage.postgres.connection.syncer.credentials.secret.name=spaces-apollo-pg-syncer" \
+  --set 
"apollo.apollo.storage.postgres.connection.apollo.credentials.format=basicauth" \
+  --set "apollo.apollo.storage.postgres.connection.apollo.credentials.user=$APOLLO_SERVER_USER" \
+  --set "apollo.apollo.storage.postgres.connection.apollo.credentials.secret.name=spaces-apollo-pg-apollo" \
+  --set "apollo.apollo.storage.postgres.connection.apollo.url=$PG_POOLED_URL"
+```
+
+
+## Using the Query API
+
+
+See the [Query API documentation][query-api-documentation] for more information on how to use the Query API.
+
+
+[postgresql-setup]: #self-hosted-postgresql-configuration
+[up-cli-installed]: /manuals/cli/overview
+[query-api-documentation]: /spaces/howtos/query-api
+
+[helm-chart-reference]: /reference/helm-reference
+[kubectl-conventions]: https://kubernetes.io/docs/reference/kubectl/generated/kubectl_get/
+[supported-ways]: https://cloudnative-pg.io/documentation/current/installation_upgrade/
+[supported-ways-1]: https://cloudnative-pg.io/documentation/current/installation_upgrade/
+[cloudnativepg-documentation]: https://cloudnative-pg.io/documentation/1.24/storage/#configuration-via-a-pvc-template
+[postgresql-cluster]: https://cloudnative-pg.io/documentation/1.24/resource_management/
+[pooler]: https://cloudnative-pg.io/documentation/1.24/connection_pooling/#pod-templates
+[postgresql-cluster-2]: https://cloudnative-pg.io/documentation/1.24/replication/
+[pooler-3]: https://cloudnative-pg.io/documentation/1.24/connection_pooling/#high-availability-ha
+[postgresql-cluster-4]: https://cloudnative-pg.io/documentation/1.24/operator_capability_levels/#override-of-operand-images-through-the-crd
+[pooler-5]: https://cloudnative-pg.io/documentation/1.24/connection_pooling/#pod-templates
+[cloudnativepg-documentation-6]: https://cloudnative-pg.io/documentation/1.24/postgresql_conf/
diff --git a/spaces_versioned_docs/version-v1.9/howtos/self-hosted/scaling-resources.md b/spaces_versioned_docs/version-v1.9/howtos/self-hosted/scaling-resources.md
new file mode 100644
index 000000000..7bb04d2c2
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.9/howtos/self-hosted/scaling-resources.md
@@ -0,0 +1,184 @@
+---
+title: Scaling vCluster and etcd Resources
+weight: 950
+description: A guide for scaling vCluster and etcd resources in self-hosted Spaces
+aliases:
+  - /all-spaces/self-hosted-spaces/scaling-resources
+  - /spaces/scaling-resources
+---
+
+With large workloads or during control plane migrations, you may encounter
+performance-impacting resource constraints. This guide explains how to scale
+vCluster and `etcd` resources for optimal performance in your self-hosted Space.
+
+:::info API Version Information
+This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Scaling procedures are consistent across versions.
+:::
+
+## Signs of resource constraints
+
+You may need to scale your vCluster or `etcd` resources if you observe:
+
+- API server timeout errors such as `http: Handler timeout`
+- Error messages about `too many requests` and requests to `try again later`
+- Operations like provider installation failing with errors like `cannot apply provider package secret`
+- vCluster pods experiencing continuous restarts
+- Degraded API performance with high resource volumes
+
+
+## Scaling vCluster resources
+
+
+The vCluster component handles Kubernetes API requests for your control planes.
+Deployments with multiple control planes or providers may exceed default resource allocations.
+
+```yaml
+# Default settings
+controlPlanes.vcluster.resources.limits.cpu: "3000m"
+controlPlanes.vcluster.resources.limits.memory: "3960Mi"
+controlPlanes.vcluster.resources.requests.cpu: "170m"
+controlPlanes.vcluster.resources.requests.memory: "1320Mi"
+```
+
+For larger workloads, like migrating from an existing control plane with several
+providers, increase these resource limits in your Spaces `values.yaml` file.
+
+```yaml
+controlPlanes:
+  vcluster:
+    resources:
+      limits:
+        cpu: "4000m"    # Increase to 4 cores
+        memory: "6Gi"   # Increase to 6GB memory
+      requests:
+        cpu: "500m"     # Increase baseline CPU request
+        memory: "2Gi"   # Increase baseline memory request
+```
+
+## Scaling `etcd` storage
+
+Kubernetes relies on `etcd` performance, which can lead to IOPS (input/output
+operations per second) bottlenecks. Upbound allocates `50Gi` volumes for `etcd`
+in cloud environments to ensure adequate IOPS performance.
+
+```yaml
+# Default setting
+controlPlanes.etcd.persistence.size: "5Gi"
+```
+
+For production environments or when migrating large control planes, increase
+`etcd` volume size and specify an appropriate storage class:
+
+```yaml
+controlPlanes:
+  etcd:
+    persistence:
+      size: "50Gi"                  # Recommended for production
+      storageClassName: "fast-ssd"  # Use a high-performance storage class
+```
+
+### Storage class considerations
+
+For AWS:
+- Use GP3 volumes with adequate IOPS
+- For GP3 volumes, IOPS scale with volume size (3000 IOPS baseline)
+- For optimal performance, provision at least 32Gi to support up to 16,000 IOPS
+
+For GCP and Azure:
+- Use SSD-based persistent disk types for optimal performance
+- Consider premium storage options for high-throughput workloads
+
+## Scaling Crossplane resources
+
+Crossplane manages provider resources in your control planes. You may need to increase provider resources for larger deployments:
+
+```yaml
+# Default settings
+controlPlanes.uxp.resourcesCrossplane.requests.cpu: "370m"
+controlPlanes.uxp.resourcesCrossplane.requests.memory: "400Mi"
+```
+
+
+For environments with many providers or managed resources:
+
+
+```yaml
+controlPlanes:
+  uxp:
+    resourcesCrossplane:
+      limits:
+        cpu: "1000m"     # Add CPU limit
+        memory: "1Gi"    # Add memory limit
+      requests:
+        cpu: "500m"      # Increase CPU request
+        memory: "512Mi"  # Increase memory request
+```
+
+## High availability configuration
+
+For production environments, enable High Availability mode to ensure resilience:
+
+```yaml
+controlPlanes:
+  ha:
+    enabled: true
+```
+
+## Best practices for migration scenarios
+
+When migrating from existing control planes into a self-hosted Space:
+
+1. **Pre-scale resources**: Scale up resources before performing the migration
+2. **Monitor resource usage**: Watch resource consumption during and after migration with `kubectl top pods`
+3. **Scale incrementally**: If issues persist, increase resources incrementally until performance stabilizes
+4. **Consider storage performance**: `etcd` is sensitive to storage I/O performance
+
+## Helm values configuration
+
+Apply these settings through your Spaces Helm values file:
+
+```yaml
+controlPlanes:
+  vcluster:
+    resources:
+      limits:
+        cpu: "4000m"
+        memory: "6Gi"
+      requests:
+        cpu: "500m"
+        memory: "2Gi"
+  etcd:
+    persistence:
+      size: "50Gi"
+      storageClassName: "gp3"  # Use your cloud provider's fast storage class
+  uxp:
+    resourcesCrossplane:
+      limits:
+        cpu: "1000m"
+        memory: "1Gi"
+      requests:
+        cpu: "500m"
+        memory: "512Mi"
+  ha:
+    enabled: true  # recommended for 
production environments +``` + +Apply the configuration using Helm: + +```bash +helm upgrade --install spaces oci://xpkg.upbound.io/spaces-artifacts/spaces \ + -f values.yaml \ + -n upbound-system +``` + +## Considerations + +- **Provider count**: Each provider adds resource overhead - consider using provider families to optimize resource usage +- **Managed resources**: The number of managed resources impacts CPU usage more than memory +- **Vertical pod autoscaling**: Consider using vertical pod autoscaling in Kubernetes to automatically adjust resources based on usage +- **Storage performance**: Storage performance is as important as capacity for etcd +- **Network latency**: Low-latency connections between components improve performance + + diff --git a/spaces_versioned_docs/version-v1.9/howtos/self-hosted/self-hosted-spaces-deployment.md b/spaces_versioned_docs/version-v1.9/howtos/self-hosted/self-hosted-spaces-deployment.md new file mode 100644 index 000000000..e549e3939 --- /dev/null +++ b/spaces_versioned_docs/version-v1.9/howtos/self-hosted/self-hosted-spaces-deployment.md @@ -0,0 +1,461 @@ +--- +title: Deployment Workflow +sidebar_position: 3 +description: A quickstart guide for Upbound Spaces +tier: "business" +--- +import GlobalLanguageSelector, { CodeBlock } from '@site/src/components/GlobalLanguageSelector'; + + + + + +This guide deploys a self-hosted Upbound cluster in AWS. + + + + + +This guide deploys a self-hosted Upbound cluster in Azure. + + + + + +This guide deploys a self-hosted Upbound cluster in GCP. + + + +Disconnected Spaces allows you to host control planes in your preferred environment. + +## Prerequisites + +To get started deploying your own Disconnected Space, you need: + +- An Upbound organization account string, provided by your Upbound account representative +- A `token.json` license, provided by your Upbound account representative + + + +- An AWS account and the AWS CLI + + + + + +- An Azure account and the Azure CLI + + + + + +- An GCP account and the GCP CLI + + + +:::important +Disconnected Spaces are a business critical feature of Upbound and requires a license token to successfully complete the installation. [Contact Upbound][contact-upbound] if you want to try out Upbound with Disconnected Spaces. +::: + +## Provision the hosting environment + +### Create a cluster + + + +Configure the name and target region you want the EKS cluster deployed to. + +```ini +export SPACES_CLUSTER_NAME=upbound-space-quickstart +export SPACES_REGION=us-east-1 +``` + +Provision a 3-node cluster using eksctl. + +```bash +cat < + + + +Configure the name and target region you want the AKS cluster deployed to. + +```ini +export SPACES_RESOURCE_GROUP_NAME=upbound-space-quickstart +export SPACES_CLUSTER_NAME=upbound-space-quickstart +export SPACES_LOCATION=westus +``` + +Provision a new Azure resource group. + +```bash +az group create --name ${SPACES_RESOURCE_GROUP_NAME} --location ${SPACES_LOCATION} +``` + +Provision a 3-node cluster. + +```bash +az aks create -g ${SPACES_RESOURCE_GROUP_NAME} -n ${SPACES_CLUSTER_NAME} \ + --enable-managed-identity \ + --node-count 3 \ + --node-vm-size Standard_D4s_v4 \ + --enable-addons monitoring \ + --enable-msi-auth-for-monitoring \ + --generate-ssh-keys \ + --network-plugin kubenet \ + --network-policy calico +``` + +Get the kubeconfig of your AKS cluster. 
+ +```bash +az aks get-credentials --resource-group ${SPACES_RESOURCE_GROUP_NAME} --name ${SPACES_CLUSTER_NAME} +``` + + + + + +Configure the name and target region you want the GKE cluster deployed to. + +```ini +export SPACES_PROJECT_NAME=upbound-spaces-project +export SPACES_CLUSTER_NAME=upbound-spaces-quickstart +export SPACES_LOCATION=us-west1-a +``` + +Create a new project and set it as the current project. + +```bash +gcloud projects create ${SPACES_PROJECT_NAME} +gcloud config set project ${SPACES_PROJECT_NAME} +``` + +Provision a 3-node cluster. + +```bash +gcloud container clusters create ${SPACES_CLUSTER_NAME} \ + --enable-network-policy \ + --num-nodes=3 \ + --zone=${SPACES_LOCATION} \ + --machine-type=e2-standard-4 +``` + +Get the kubeconfig of your GKE cluster. + +```bash +gcloud container clusters get-credentials ${SPACES_CLUSTER_NAME} --zone=${SPACES_LOCATION} +``` + + + +## Configure the pre-install + +### Set your Upbound organization account details + +Set your Upbound organization account string as an environment variable for use in future steps + +```ini +export UPBOUND_ACCOUNT= +``` + +### Set up pre-install configurations + +Export the path of the license token JSON file provided by your Upbound account representative. + +```ini {copy-lines="2"} +# Change the path to where you saved the token. +export SPACES_TOKEN_PATH="/path/to/token.json" +``` + +Set the version of Spaces software you want to install. + +```ini +export SPACES_VERSION= +``` + +Set the router host and cluster type. The `SPACES_ROUTER_HOST` is the domain name that's used to access the control plane instances. It's used by the ingress controller to route requests. + +```ini +export SPACES_ROUTER_HOST="proxy.upbound-127.0.0.1.nip.io" +``` + +:::important +Make sure to replace the placeholder text in `SPACES_ROUTER_HOST` and provide a real domain that you own. +::: + + +## Install the Spaces software + + +### Install cert-manager + +Install cert-manager. + +```bash +kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.11.0/cert-manager.yaml +kubectl wait deployment -n cert-manager cert-manager-webhook --for condition=Available=True --timeout=360s +``` + + + +### Install ALB Load Balancer + +```bash +helm install aws-load-balancer-controller aws-load-balancer-controller --namespace kube-system \ + --repo https://aws.github.io/eks-charts \ + --set clusterName=${SPACES_CLUSTER_NAME} \ + --set serviceAccount.create=false \ + --set serviceAccount.name=aws-load-balancer-controller \ + --wait +``` + + + +### Install ingress-nginx + +Starting with Spaces v1.10.0, you need to configure the ingress-nginx +controller to allow SSL-passthrough mode. You can do so by passing the +`--enable-ssl-passthrough=true` command-line option to the controller. 
+The following Helm install command enables this with the `controller.extraArgs` +parameter: + + + +```bash +helm upgrade --install ingress-nginx ingress-nginx \ + --create-namespace --namespace ingress-nginx \ + --repo https://kubernetes.github.io/ingress-nginx \ + --version 4.12.1 \ + --set 'controller.service.type=LoadBalancer' \ + --set 'controller.extraArgs.enable-ssl-passthrough=true' \ + --set 'controller.service.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-type=external' \ + --set 'controller.service.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-scheme=internet-facing' \ + --set 'controller.service.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-nlb-target-type=ip' \ + --set 'controller.service.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-healthcheck-protocol=http' \ + --set 'controller.service.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-healthcheck-path=/healthz' \ + --set 'controller.service.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-healthcheck-port=10254' \ + --wait +``` + + + + + +```bash +helm upgrade --install ingress-nginx ingress-nginx \ + --create-namespace --namespace ingress-nginx \ + --repo https://kubernetes.github.io/ingress-nginx \ + --version 4.12.1 \ + --set 'controller.service.type=LoadBalancer' \ + --set 'controller.extraArgs.enable-ssl-passthrough=true' \ + --set 'controller.service.annotations.service\.beta\.kubernetes\.io/azure-load-balancer-health-probe-request-path=/healthz' \ + --wait +``` + + + + + +```bash +helm upgrade --install ingress-nginx ingress-nginx \ + --create-namespace --namespace ingress-nginx \ + --repo https://kubernetes.github.io/ingress-nginx \ + --version 4.12.1 \ + --set 'controller.service.type=LoadBalancer' \ + --set 'controller.extraArgs.enable-ssl-passthrough=true' \ + --wait +``` + + + +### Install Upbound Spaces software + +Create an image pull secret so that the cluster can pull Upbound Spaces images. + +```bash +kubectl create ns upbound-system +kubectl -n upbound-system create secret docker-registry upbound-pull-secret \ + --docker-server=https://xpkg.upbound.io \ + --docker-username="$(jq -r .accessId $SPACES_TOKEN_PATH)" \ + --docker-password="$(jq -r .token $SPACES_TOKEN_PATH)" +``` + +Log in with Helm to be able to pull chart images for the installation commands. + +```bash +jq -r .token $SPACES_TOKEN_PATH | helm registry login xpkg.upbound.io -u $(jq -r .accessId $SPACES_TOKEN_PATH) --password-stdin +``` + +Install the Spaces software. + +```bash +helm -n upbound-system upgrade --install spaces \ + oci://xpkg.upbound.io/spaces-artifacts/spaces \ + --version "${SPACES_VERSION}" \ + --set "ingress.host=${SPACES_ROUTER_HOST}" \ + --set "account=${UPBOUND_ACCOUNT}" \ + --set "authentication.hubIdentities=true" \ + --set "authorization.hubRBAC=true" \ + --wait +``` + +### Create a DNS record + +:::important +If you chose to create a public ingress, you also need to create a DNS record for the load balancer of the public facing ingress. Do this before you create your first control plane. +::: + +Create a DNS record for the load balancer of the public facing ingress. 
To get the address for the Ingress, run the following: + + + +```bash +kubectl get ingress \ + -n upbound-system mxe-router-ingress \ + -o jsonpath='{.status.loadBalancer.ingress[0].hostname}' +``` + + + + + +```bash +kubectl get ingress \ + -n upbound-system mxe-router-ingress \ + -o jsonpath='{.status.loadBalancer.ingress[0].ip}' +``` + + + + + +```bash +kubectl get ingress \ + -n upbound-system mxe-router-ingress \ + -o jsonpath='{.status.loadBalancer.ingress[0].ip}' +``` + + + +If the preceding command doesn't return a load balancer address then your provider may not have allocated it yet. Once it's available, add a DNS record for the `ROUTER_HOST` to point to the given load balancer address. If it's an IPv4 address, add an A record. If it's a domain name, add a CNAME record. + +## Configure the up CLI + +With your kubeconfig pointed at the Kubernetes cluster where you installed +Upbound Spaces, create a new profile in the `up` CLI. This profile interacts +with your Space: + +```bash +up profile create --use ${SPACES_CLUSTER_NAME} --type=disconnected --organization ${UPBOUND_ACCOUNT} +``` + +Optionally, log in to your Upbound account using the new profile so you can use the Upbound Marketplace with this profile as well: + +```bash +up login +``` + + +## Connect to your Space + + +Use `up ctx` to create a kubeconfig context pointed at your new Space: + +```bash +up ctx disconnected/$(kubectl config current-context) +``` + +## Create your first control plane + +You can now create a control plane with the `up` CLI: + +```bash +up ctp create ctp1 +``` + +You can also create a control plane with kubectl: + +```yaml +cat < +```yaml +observability: + spacesCollector: + env: + - name: API_KEY + valueFrom: + secretKeyRef: + name: my-secret + key: api-key + config: + exporters: + otlphttp: + endpoint: "" + headers: + api-key: ${env:API_KEY} + exportPipeline: + logs: + - otlphttp + metrics: + - otlphttp + traces: + - otlphttp +``` + + +You can export metrics, logs, and traces from your Crossplane installation, Spaces +infrastructure (controller, API, router, etc.), provider-helm, and +provider-kubernetes. + +### Router metrics + +The Spaces router component uses Envoy as a reverse proxy and exposes detailed +metrics about request handling, circuit breakers, and connection pooling. +Upbound collects these metrics in your Space after you enable Space-level +observability. + +Envoy metrics in Upbound include: + +- **Upstream cluster metrics** - Request status codes, timeouts, retries, and latency for traffic to control planes and services +- **Circuit breaker metrics** - Connection and request circuit breaker state for both `DEFAULT` and `HIGH` priority levels +- **Downstream listener metrics** - Client connections and requests received +- **HTTP connection manager metrics** - End-to-end HTTP request processing and latency + +For a complete list of available router metrics and example PromQL queries, see the [Router metrics reference][router-ref]. + +### Router tracing + +The Spaces router generates distributed traces through OpenTelemetry integration, +providing end-to-end visibility into request flow across the system. Use these +traces to debug latency issues, understand request paths, and correlate errors +across services. 
+
+The router uses:
+
+- **Protocol**: OTLP (OpenTelemetry Protocol) over gRPC
+- **Service name**: `spaces-router`
+- **Transport**: TLS-encrypted connection to telemetry collector
+
+#### Trace configuration
+
+Enable tracing and configure the sampling rate with the following Helm values:
+
+```yaml
+observability:
+  enabled: true
+  tracing:
+    enabled: true
+    sampling:
+      rate: 0.1  # Sample 10% of new traces (0.0-1.0)
+```
+
+The sampling behavior depends on whether a parent trace context exists:
+
+- **With parent context**: If a `traceparent` header is present, the parent's
+  sampling decision is respected, enabling proper distributed tracing across services.
+- **Root spans**: For new traces without a parent, Envoy samples based on
+  `x-request-id` hashing. The default sampling rate is 10%.
+
+#### TLS configuration for external collectors
+
+To send traces to an external OTLP collector, configure the endpoint and TLS settings:
+
+```yaml
+observability:
+  enabled: true
+  tracing:
+    enabled: true
+    endpoint: "otlp-gateway.example.com"
+    port: 443
+    tls:
+      caBundleSecretRef: "custom-ca-secret"
+```
+
+If `caBundleSecretRef` is set, the router uses the CA bundle from the referenced
+Kubernetes secret. The secret must contain a key named `ca.crt` with the
+PEM-encoded CA bundle. If not set, the router uses the Spaces CA for the
+in-cluster collector.
+
+#### Custom trace tags
+
+The router adds custom tags to every span to enable filtering and grouping by
+control plane:
+
+| Tag | Source | Description |
+|-----|--------|-------------|
+| `controlplane.id` | `x-upbound-mxp-id` header | Control plane UUID |
+| `controlplane.name` | `x-upbound-mxp-host` header | Internal vcluster hostname |
+| `hostcluster.id` | `x-upbound-hostcluster-id` header | Host cluster identifier |
+
+These tags enable queries like "show all slow requests to control plane X" or
+"find errors for control planes in host cluster Y."
+
+#### Example trace
+
+The following example shows the attributes from a successful GET request:
+
+```text
+Span: ingress
+├─ Service: spaces-router
+├─ Duration: 8.025ms
+├─ Attributes:
+│  ├─ http.method: GET
+│  ├─ http.status_code: 200
+│  ├─ upstream_cluster: ctp-b2b37aaa-ee55-492c-ba0c-4d561a6325fa-api-cluster
+│  ├─ controlplane.id: b2b37aaa-ee55-492c-ba0c-4d561a6325fa
+│  ├─ controlplane.name: vcluster.mxp-b2b37aaa-ee55-492c-ba0c-4d561a6325fa-system
+│  └─ response_size: 1827
+```
+
+## Available metrics
+
+Space-level observability collects metrics from multiple infrastructure components:
+
+### Infrastructure component metrics
+
+- Crossplane controller metrics
+- Spaces controller, API, and router metrics
+- Provider metrics (provider-helm, provider-kubernetes)
+
+### Router metrics
+
+The router component exposes Envoy proxy metrics for monitoring traffic flow and
+service health.
Key metric categories include:
+
+- `envoy_cluster_upstream_rq_*` - Upstream request metrics (status codes, timeouts, retries, latency)
+- `envoy_cluster_circuit_breakers_*` - Circuit breaker state and capacity
+- `envoy_listener_downstream_*` - Client connection and request metrics
+- `envoy_http_downstream_*` - HTTP request processing metrics
+
+Example query to monitor total request rate:
+
+```promql
+sum(rate(envoy_cluster_upstream_rq_total{job="spaces-router-envoy"}[5m]))
+```
+
+Example query for P95 latency:
+
+```promql
+histogram_quantile(
+  0.95,
+  sum by (le) (
+    rate(envoy_cluster_upstream_rq_time_bucket{job="spaces-router-envoy"}[5m])
+  )
+)
+```
+
+For detailed router metrics documentation and more query examples, see the [Router metrics reference][router-ref].
+
+## OpenTelemetryCollector image
+
+Control plane (`SharedTelemetry`) and Space observability deploy the same custom
+OpenTelemetry Collector image. The OpenTelemetry Collector image supports
+`otlphttp`, `datadog`, and `debug` exporters.
+
+For more information on observability configuration, review the [Helm chart reference][helm-chart-reference].
+
+## Observability in control planes
+
+Read the [observability documentation][observability-documentation] to learn
+about the features Upbound offers for collecting telemetry from control planes.
+
+## Router metrics reference {#router-ref}
+
+To avoid overwhelming observability tools with hundreds of Envoy metrics, an
+allow-list filters metrics to only the following metric families.
+
+### Upstream cluster metrics
+
+Metrics tracking requests sent from Envoy to configured upstream clusters.
+Individual control planes, spaces-api, and other services are each considered
+an upstream cluster. Use these metrics to monitor service health, identify
+upstream errors, and measure backend latency.
+
+| Metric | Description |
+|--------|-------------|
+| `envoy_cluster_upstream_rq_xx_total` | HTTP status codes (2xx, 3xx, 4xx, 5xx) with label `envoy_response_code_class` |
+| `envoy_cluster_upstream_rq_timeout_total` | Requests that timed out waiting for upstream |
+| `envoy_cluster_upstream_rq_retry_limit_exceeded_total` | Requests that exhausted retry attempts |
+| `envoy_cluster_upstream_rq_total` | Total upstream requests |
+| `envoy_cluster_upstream_rq_time_bucket` | Latency histogram (for P50/P95/P99 calculations) |
+| `envoy_cluster_upstream_rq_time_sum` | Sum of request durations |
+| `envoy_cluster_upstream_rq_time_count` | Count of requests |
+
+### Circuit breaker metrics
+
+Metrics tracking circuit breaker state and remaining capacity. Circuit breakers
+prevent cascading failures by limiting connections and concurrent requests to
+unhealthy upstreams. Two priority levels exist: `DEFAULT` for watch requests and
+`HIGH` for API requests.
+ + +| Name | Description | +|--------|-------------| +| `envoy_cluster_circuit_breakers_default_cx_open` | `DEFAULT` priority connection circuit breaker open (gauge) | +| `envoy_cluster_circuit_breakers_default_rq_open` | `DEFAULT` priority request circuit breaker open (gauge) | +| `envoy_cluster_circuit_breakers_default_remaining_cx` | Available `DEFAULT` priority connections (gauge) | +| `envoy_cluster_circuit_breakers_default_remaining_rq` | Available `DEFAULT` priority request slots (gauge) | +| `envoy_cluster_circuit_breakers_high_cx_open` | `HIGH` priority connection circuit breaker open (gauge) | +| `envoy_cluster_circuit_breakers_high_rq_open` | `HIGH` priority request circuit breaker open (gauge) | +| `envoy_cluster_circuit_breakers_high_remaining_cx` | Available `HIGH` priority connections (gauge) | +| `envoy_cluster_circuit_breakers_high_remaining_rq` | Available `HIGH` priority request slots (gauge) | + +### Downstream listener metrics + +Metrics tracking requests received from clients such as kubectl and API consumers. +Use these metrics to monitor client connection patterns, overall request volume, +and responses sent to external users. + +| Name | Description | +|--------|-------------| +| `envoy_listener_downstream_rq_xx_total` | HTTP status codes for responses sent to clients | +| `envoy_listener_downstream_rq_total` | Total requests received from clients | +| `envoy_listener_downstream_cx_total` | Total connections from clients | +| `envoy_listener_downstream_cx_active` | Currently active client connections (gauge) | + + + +### HTTP connection manager metrics + + +Metrics from Envoy's HTTP connection manager tracking end-to-end request +processing. These metrics provide a comprehensive view of the HTTP request +lifecycle including status codes and client-perceived latency. + +| Name | Description | +|--------|-------------| +| `envoy_http_downstream_rq_xx` | HTTP status codes (note: no `_total` suffix for this metric family) | +| `envoy_http_downstream_rq_total` | Total HTTP requests received | +| `envoy_http_downstream_rq_time_bucket` | Downstream request latency histogram | +| `envoy_http_downstream_rq_time_sum` | Sum of downstream request durations | +| `envoy_http_downstream_rq_time_count` | Count of downstream requests | + +[router-ref]: #router-ref +[observability-documentation]: /spaces/howtos/observability +[opentelemetry-collector]: https://opentelemetry.io/docs/collector/ +[opentelemetry-operator]: https://opentelemetry.io/docs/kubernetes/operator/ +[helm-chart-reference]: /reference/helm-reference diff --git a/spaces_versioned_docs/version-v1.9/howtos/self-hosted/spaces-management.md b/spaces_versioned_docs/version-v1.9/howtos/self-hosted/spaces-management.md new file mode 100644 index 000000000..3df61c306 --- /dev/null +++ b/spaces_versioned_docs/version-v1.9/howtos/self-hosted/spaces-management.md @@ -0,0 +1,219 @@ +--- +title: Interacting with Disconnected Spaces +sidebar_position: 10 +description: Common operations in Spaces +--- + +:::info API Version Information +This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Spaces management procedures are consistent across versions. + +For version compatibility details, see the . +::: + +## Spaces management + +### Create a Space + +To install an Upbound Space into a cluster, it's recommended you dedicate an entire Kubernetes cluster for the Space. You can use [up space init][up-space-init] to install an Upbound Space. 
Below is an example:
+
+```bash
+up space init "v1.9.0"
+```
+:::tip
+For a full guide to get started with Spaces, read the [quickstart][quickstart] guide.
+:::
+
+You can also install the helm chart for Spaces directly. In order for a Spaces install to succeed, you must install some prerequisites first and configure them. This includes:
+
+- UXP
+- provider-helm and provider-kubernetes
+- cert-manager
+
+Furthermore, the Spaces chart requires a pull secret, which Upbound must provide to you.
+
+```bash
+helm -n upbound-system upgrade --install spaces \
+  oci://xpkg.upbound.io/spaces-artifacts/spaces \
+  --version "v1.9.0" \
+  --set "ingress.host=your-host.com" \
+  --set "clusterType=eks" \
+  --set "account=your-upbound-account" \
+  --wait
+```
+For a complete tutorial of the helm install, read one of the deployment guides for [AWS][aws], [Azure][azure], or [GCP][gcp], which cover the step-by-step process.
+
+### Upgrade a Space
+
+To upgrade a Space from one version to the next, use [up space upgrade][up-space-upgrade]. Spaces supports upgrading from version `x.N.*` to version `x.N+1.*`.
+
+```bash
+up space upgrade "v1.9.0"
+```
+
+You can also upgrade a Space by manually bumping the Helm chart version. Before
+upgrading, review the release notes for any breaking changes or
+special requirements:
+
+1. Review the release notes for the target version in the [Spaces Release Notes][spaces-release-notes]
+2. Upgrade the Space by updating the helm chart version:
+
+```bash
+helm -n upbound-system upgrade spaces \
+  oci://xpkg.upbound.io/spaces-artifacts/spaces \
+  --version "v1.9.0" \
+  --reuse-values \
+  --wait
+```
+
+For major version upgrades or configuration changes, extract your current values
+and adjust:
+
+```bash
+# Extract current values to a file
+helm -n upbound-system get values spaces > spaces-values.yaml
+
+# Upgrade with modified values
+helm -n upbound-system upgrade spaces \
+  oci://xpkg.upbound.io/spaces-artifacts/spaces \
+  --version "v1.9.0" \
+  -f spaces-values.yaml \
+  --wait
+```
+
+### Downgrade a Space
+
+To roll back a Space from one version to the previous, use [up space upgrade][up-space-upgrade-1]. Spaces supports downgrading from version `x.N.*` to version `x.N-1.*`.
+
+```bash
+up space upgrade --rollback
+```
+
+You can also downgrade a Space manually using Helm by specifying an earlier version:
+
+```bash
+helm -n upbound-system upgrade spaces \
+  oci://xpkg.upbound.io/spaces-artifacts/spaces \
+  --version "v1.8.0" \
+  --reuse-values \
+  --wait
+```
+
+When downgrading, make sure to:
+1. Check the [release notes][release-notes] for specific downgrade instructions
+2. Verify compatibility between the downgraded Space and any control planes
+3. Back up any critical data before proceeding
+
+### Uninstall a Space
+
+To uninstall a Space from a Kubernetes cluster, use [up space destroy][up-space-destroy]. A destroy operation uninstalls core components and orphans control planes and their associated resources.
+
+```bash
+up space destroy
+```
+
+## Control plane management
+
+You can manage control planes in a Space via the [up CLI][up-cli] or the Spaces-local Kubernetes API. When you install a Space, it defines a new API type, `kind: ControlPlane`, that you can use to create and manage control planes in the Space.
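+
+As a quick sanity check (a sketch, assuming your kubeconfig points at the
+Space cluster), you can confirm the API type is registered before creating
+control planes:
+
+```bash
+# List the API resources the Space exposes in the spaces.upbound.io group.
+# ControlPlane appears here once the Spaces install is healthy.
+kubectl api-resources --api-group=spaces.upbound.io
+```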
+ +### Create a control plane + +To create a control plane in a Space using `up`, run the following: + +```bash +up ctp create ctp1 +``` + +You can also declare a new control plane like the example below and apply it to your Spaces cluster: + +```yaml +apiVersion: spaces.upbound.io/v1beta1 +kind: ControlPlane +metadata: + name: ctp1 + namespace: default +spec: + writeConnectionSecretToRef: + name: kubeconfig-ctp1 + namespace: default +``` + +This manifest: + +- Creates a new control plane in the space called `ctp1`. +- Publishes the kubeconfig to connect to the control plane to a secret in the Spaces cluster, called `kubeconfig-ctp1` + +### Connect to a control plane + +To connect to a control plane in a Space using `up`, run the following: + +```bash +up ctp connect new-control-plane +``` + +The command changes your kubeconfig's current context to the control plane you specify. If you want to change your kubeconfig back to a previous context, run: + +```bash +up ctp disconnect +``` + +If you configured your control plane to publish connection details, you can also access it this way. Once the control plane is ready, use the secret (containing connection details) to connect to the API server of your control plane. + +```bash +kubectl get secret -n default -o jsonpath='{.data.kubeconfig}' | base64 -d > /tmp/.yaml +``` + +Reference the kubeconfig whenever you want to interact directly with the API server of the control plane (vs the Space's API server): + +```bash +kubectl get providers --kubeconfig=/tmp/.yaml +``` + +### Configure a control plane + +Spaces offers a built-in feature that allows you to connect a control plane to a Git source. This experience is like when a control plane runs in [Upbound's SaaS environment][upbound-s-saas-environment]. Upbound recommends using the built-in Git integration to drive configuration of your control planes in a Space. + +Learn more in the [Spaces Git integration][spaces-git-integration] documentation. 
+ +### List control planes + +To list all control planes in a Space using `up`, run the following: + +```bash +up ctp list +``` + +Or you can use Kubernetes-style semantics to list the control plane: + +```bash +kubectl get controlplanes +``` + + +### Delete a control plane + +To delete a control plane in a Space using `up`, run the following: + +```bash +up ctp delete ctp1 +``` + +Or you can use Kubernetes-style semantics to delete the control plane: + +```bash +kubectl delete controlplane ctp1 +``` + + +[up-space-init]: /reference/cli-reference +[quickstart]: / +[aws]: /spaces/howtos/self-hosted/self-hosted-spaces-deployment +[azure]:/spaces/howtos/self-hosted/self-hosted-spaces-deployment +[gcp]:/spaces/howtos/self-hosted/self-hosted-spaces-deployment +[up-space-upgrade]: /reference/cli-reference +[spaces-release-notes]: /reference/release-notes/spaces +[up-space-upgrade-1]: /reference/cli-reference +[release-notes]: /reference/release-notes/spaces +[up-space-destroy]: /reference/cli-reference +[up-cli]: /reference/cli-reference +[upbound-s-saas-environment]: /spaces/howtos/self-hosted/spaces-management +[spaces-git-integration]: /spaces/howtos/self-hosted/gitops diff --git a/spaces_versioned_docs/version-v1.9/howtos/self-hosted/troubleshooting.md b/spaces_versioned_docs/version-v1.9/howtos/self-hosted/troubleshooting.md new file mode 100644 index 000000000..8d1ca6517 --- /dev/null +++ b/spaces_versioned_docs/version-v1.9/howtos/self-hosted/troubleshooting.md @@ -0,0 +1,132 @@ +--- +title: Troubleshooting +sidebar_position: 100 +description: A guide for troubleshooting an issue that occurs in a Space +--- + +Find guidance below on how to find solutions for issues you encounter when deploying and using an Upbound Space. Use the tips below as a supplement to the observability metrics discussed in the [Observability][observability] page. + +## General tips + +Most issues fall into two general categories: + +1. issues with the Spaces management plane +2. issues on a control plane + +If your control plane doesn't reach a `Ready` state, it's indicative of the former. If your control plane is in a created and running state, but resources aren't reconciling, it's indicative of the latter. + +### Spaces component layout + +Run `kubectl get pods -A` against the cluster hosting a Space. You should see a variety of pods across several namespaces. 
It should look something like this: + +```bash +NAMESPACE NAME READY STATUS RESTARTS AGE +cert-manager cert-manager-6d6769565c-mc5df 1/1 Running 0 25m +cert-manager cert-manager-cainjector-744bb89575-nw4fg 1/1 Running 0 25m +cert-manager cert-manager-webhook-759d6dcbf7-ps4mq 1/1 Running 0 25m +ingress-nginx ingress-nginx-controller-7f8ccfccc6-6szlp 1/1 Running 0 25m +kube-system coredns-5d78c9869d-4p477 1/1 Running 0 26m +kube-system coredns-5d78c9869d-pdxt6 1/1 Running 0 26m +kube-system etcd-kind-control-plane 1/1 Running 0 26m +kube-system kindnet-8s7pq 1/1 Running 0 26m +kube-system kube-apiserver-kind-control-plane 1/1 Running 0 26m +kube-system kube-controller-manager-kind-control-plane 1/1 Running 0 26m +kube-system kube-proxy-l68r8 1/1 Running 0 26m +kube-system kube-scheduler-kind-control-plane 1/1 Running 0 26m +local-path-storage local-path-provisioner-6bc4bddd6b-qsdjt 1/1 Running 0 26m +mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system coredns-5dc69d6447-f56rh-x-kube-system-x-vcluster 1/1 Running 0 21m +mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system crossplane-6b6d67bc66-6b8nx-x-upbound-system-x-vcluster 1/1 Running 0 20m +mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system crossplane-rbac-manager-78f6fc7cb4-pjkhc-x-upbound-s-12253c3c4e 1/1 Running 0 20m +mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system kube-state-metrics-7f8f4dcc5b-8p8c4 1/1 Running 0 22m +mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system mxp-gateway-68f546b9c8-xnz5j-x-upbound-system-x-vcluster 1/1 Running 0 20m +mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system mxp-ksm-config-54655667bb-hv9br 1/1 Running 0 22m +mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system mxp-readyz-5f7f97d967-b98bw 1/1 Running 0 22m +mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system otlp-collector-56d7d46c8d-g5sh5-x-upbound-system-x-vcluster 1/1 Running 0 20m +mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system vcluster-67c9fb8959-ppb2m 1/1 Running 0 22m +mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system vcluster-api-6bfbccc49d-ffgpj 1/1 Running 0 22m +mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system vcluster-controller-7cc6855656-8c46b 1/1 Running 0 22m +mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system vcluster-etcd-0 1/1 Running 0 22m +mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system vector-754b494b84-wljw4 1/1 Running 0 22m +mxp-system mxp-charts-chartmuseum-7587f77558-8tltb 1/1 Running 0 23m +upbound-system crossplane-b4dc7b4c9-6hjh5 1/1 Running 0 25m +upbound-system crossplane-contrib-provider-helm-ce18dd03e6e4-7945d8985-4gcwr 1/1 Running 0 24m +upbound-system crossplane-contrib-provider-kubernetes-1f1e32c1957d-577756gs2x4 1/1 Running 0 24m +upbound-system crossplane-rbac-manager-d8cb49cbc-gbvvf 1/1 Running 0 25m +upbound-system spaces-controller-6647677cf9-5zl5q 1/1 Running 0 24m +upbound-system spaces-router-bc78c96d7-kzts2 2/2 Running 0 24m +``` + +What you are seeing is: + +- Pods in the `upbound-system` namespace are components required to run the management plane of the Space. This includes the `spaces-controller`, `spaces-router`, and install of UXP. +- Pods in the `mxp-{GUID}-system` namespace are components that collectively power a control plane. Notable call outs include pod names that look like `vcluster-api-{GUID}` and `vcluster-controller-{GUID}`, which are integral components of a control plane. +- Pods in other notable namespaces, including `cert-manager` and `ingress-nginx`, are prerequisite components that support a Space's successful operation. 
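+
+If a component looks unhealthy, its logs are often the fastest signal. A
+minimal sketch, assuming the default deployment names behind the pods in the
+listing above:
+
+```bash
+# Pull recent logs from the core Space management plane components.
+kubectl logs -n upbound-system deployment/spaces-controller --tail=100
+kubectl logs -n upbound-system deployment/spaces-router --tail=100
+```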
+
+
+### Troubleshooting tips for the Spaces management plane
+
+Start by getting the status of all the pods in a Space:
+
+1. Make sure the current context of your kubeconfig points at the Kubernetes cluster hosting your Space
+2. Get the status of all the pods in the Space:
+```bash
+kubectl get pods -A
+```
+3. Scan the `Status` column to see if any of the pods report a status besides `Running`.
+4. Scan the `Restarts` column to see if any of the pods have restarted.
+5. If you notice a status other than `Running` or see pods that restarted, investigate their events by running:
+```bash
+kubectl describe pod -n
+```
+
+Next, inspect the status of objects and releases:
+
+1. Make sure the current context of your kubeconfig points at the Kubernetes cluster hosting your Space
+2. Inspect the objects in your Space. If any are unhealthy, describe those objects to get the events:
+```bash
+kubectl get objects
+```
+3. Inspect the releases in your Space. If any are unhealthy, describe those releases to get the events:
+```bash
+kubectl get releases
+```
+
+### Troubleshooting tips for control planes in a Space
+
+General troubleshooting in a control plane starts by fetching the events of the control plane:
+
+1. Make sure the current context of your kubeconfig points at the Kubernetes cluster hosting your Space
+2. Run the following to fetch your control planes.
+```bash
+kubectl get ctp
+```
+3. Describe the control plane by providing its name, found in the preceding instruction.
+```bash
+kubectl describe controlplanes.spaces.upbound.io
+```
+
+## Issues
+
+
+### Your control plane is stuck in a 'creating' state
+
+#### Error: unknown field "ports" in io.k8s.api.networking.v1.NetworkPolicySpec
+
+The Spaces software emits this error when it attempts to install a Helm release named `control-plane-host-policies`. The full error is:
+
+_CannotCreateExternalResource failed to install release: unable to build kubernetes objects from release manifest: error validating "": error validating data: ValidationError(NetworkPolicy.spec): unknown field "ports" in io.k8s.api.networking.v1.NetworkPolicySpec_
+
+Running a Space on an earlier version of Kubernetes than is supported (`v1.26` or later) may cause this error. To resolve this issue, upgrade the host Kubernetes cluster to `v1.26` or later.
+
+### Your Spaces install fails
+
+#### Error: You tried to install a Space on a previous Crossplane installation
+
+If you try to install a Space on an existing cluster that previously had Crossplane or UXP on it, you may encounter errors. Due to how the Spaces installer tests for the presence of UXP, it may detect orphaned CRDs that weren't cleaned up by the previous uninstall of Crossplane. You may need to manually [remove old Crossplane CRDs][remove-old-crossplane-crds] for the installer to properly detect the UXP prerequisite.
+
+
+
+
+[observability]: /spaces/howtos/observability
+[remove-old-crossplane-crds]: https://docs.crossplane.io/latest/guides/uninstall-crossplane/
diff --git a/spaces_versioned_docs/version-v1.9/howtos/self-hosted/use-argo.md b/spaces_versioned_docs/version-v1.9/howtos/self-hosted/use-argo.md
new file mode 100644
index 000000000..d58f7db44
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.9/howtos/self-hosted/use-argo.md
@@ -0,0 +1,228 @@
+---
+title: Use ArgoCD Plugin
+sidebar_position: 15
+description: A guide for integrating Argo with control planes in a Space.
+aliases:
+  - /all-spaces/self-hosted-spaces/use-argo
+  - /deploy/disconnected-spaces/use-argo-flux
+  - /all-spaces/self-hosted-spaces/use-argo-flux
+  - /connect/use-argo
+---
+
+
+:::info API Version Information
+This guide covers the ArgoCD Plugin, a preview feature available in all supported versions (v1.9-v1.14+) for self-hosted deployments.
+
+For details on GitOps patterns and related features across versions, see [Automation and GitOps Overview](/spaces/howtos/automation-and-gitops/overview/).
+:::
+
+:::important
+This feature is in preview and is off by default. To enable it, set `features.alpha.argocdPlugin.enabled=true` when installing Spaces:
+
+```bash
+up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
+  ...
+  --set "features.alpha.argocdPlugin.enabled=true"
+```
+:::
+
+Spaces provides an optional plugin to assist with integrating a control plane in a Space with Argo CD. You must enable the plugin for the entire Space at Spaces install or upgrade time. The plugin's job is to propagate the connection details of each control plane in a Space to Argo CD. By default, Upbound stores these connection details in a Kubernetes secret named after the control plane. To run Argo CD across multiple namespaces, Upbound recommends enabling the `features.alpha.argocdPlugin.useUIDFormatForCTPSecrets` flag to use a UID-based format for secret names to avoid conflicts.
+
+:::tip
+For general guidance on integrating Upbound with GitOps flows, see [GitOps with Control Planes][gitops-with-control-planes].
+:::
+
+## On cluster Argo CD
+
+If you are running Argo CD on the same cluster as the Space, run the following to enable the plugin:
+
+
+
+
+
+```bash {hl_lines="3-4"}
+up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
+  --set "account=${UPBOUND_ACCOUNT}" \
+  --set "features.alpha.argocdPlugin.enabled=true" \
+  --set "features.alpha.argocdPlugin.useUIDFormatForCTPSecrets=true" \
+  --set "features.alpha.argocdPlugin.target.secretNamespace=argocd"
+```
+
+
+
+
+
+```bash {hl_lines="7-8"}
+helm -n upbound-system upgrade --install spaces \
+  oci://xpkg.upbound.io/spaces-artifacts/spaces \
+  --version "${SPACES_VERSION}" \
+  --set "ingress.host=${SPACES_ROUTER_HOST}" \
+  --set "account=${UPBOUND_ACCOUNT}" \
+  --set "features.alpha.argocdPlugin.enabled=true" \
+  --set "features.alpha.argocdPlugin.useUIDFormatForCTPSecrets=true" \
+  --set "features.alpha.argocdPlugin.target.secretNamespace=argocd" \
+  --wait
+```
+
+
+
+
+
+The important flags are:
+
+- `features.alpha.argocdPlugin.enabled=true`
+- `features.alpha.argocdPlugin.useUIDFormatForCTPSecrets=true`
+- `features.alpha.argocdPlugin.target.secretNamespace=argocd`
+
+The first flag enables the feature, the second names control plane connection secrets by UID to avoid conflicts, and the third indicates the namespace on the cluster where you installed Argo CD.
+
+Be sure to [configure Argo][configure-argo] after it's installed.
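+
+To confirm the plugin propagated a control plane's connection details, you can
+list cluster secrets in the Argo CD namespace. A minimal check, assuming the
+plugin registers control planes as standard Argo CD cluster secrets:
+
+```bash
+# Argo CD discovers clusters from secrets carrying this well-known label.
+kubectl get secrets -n argocd -l argocd.argoproj.io/secret-type=cluster
+```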
+
+## External cluster Argo CD
+
+If you are running Argo CD on an external cluster from where you installed your Space, you need to provide some extra flags:
+
+
+
+
+
+```bash {hl_lines="3-7"}
+up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
+  --set "account=${UPBOUND_ACCOUNT}" \
+  --set "features.alpha.argocdPlugin.enabled=true" \
+  --set "features.alpha.argocdPlugin.useUIDFormatForCTPSecrets=true" \
+  --set "features.alpha.argocdPlugin.target.secretNamespace=argocd" \
+  --set "features.alpha.argocdPlugin.target.externalCluster.enabled=true" \
+  --set "features.alpha.argocdPlugin.target.externalCluster.secret.name=my-argo-cluster" \
+  --set "features.alpha.argocdPlugin.target.externalCluster.secret.key=kubeconfig"
+```
+
+
+
+
+
+```bash {hl_lines="7-11"}
+helm -n upbound-system upgrade --install spaces \
+  oci://xpkg.upbound.io/spaces-artifacts/spaces \
+  --version "${SPACES_VERSION}" \
+  --set "ingress.host=${SPACES_ROUTER_HOST}" \
+  --set "account=${UPBOUND_ACCOUNT}" \
+  --set "features.alpha.argocdPlugin.enabled=true" \
+  --set "features.alpha.argocdPlugin.useUIDFormatForCTPSecrets=true" \
+  --set "features.alpha.argocdPlugin.target.secretNamespace=argocd" \
+  --set "features.alpha.argocdPlugin.target.externalCluster.enabled=true" \
+  --set "features.alpha.argocdPlugin.target.externalCluster.secret.name=my-argo-cluster" \
+  --set "features.alpha.argocdPlugin.target.externalCluster.secret.key=kubeconfig" \
+  --wait
+```
+
+
+
+
+
+The extra flags are:
+
+- `features.alpha.argocdPlugin.target.externalCluster.enabled=true`
+- `features.alpha.argocdPlugin.target.externalCluster.secret.name=my-argo-cluster`
+- `features.alpha.argocdPlugin.target.externalCluster.secret.key=kubeconfig`
+
+These flags tell the plugin (running in Spaces) where your Argo CD instance is. After you've done this at install time, you also need to create a `Secret` on the Spaces cluster. This secret must contain a kubeconfig pointing to your Argo CD instance. The secret needs to be in the same namespace as the `spaces-controller`, which is `upbound-system`.
+
+Once you enable the plugin and configure it, the plugin automatically propagates connection details for your control planes to your Argo CD instance. You can then target the control plane and use Argo to sync Crossplane-related objects to it.
+
+Be sure to [configure Argo][configure-argo-1] after it's installed.
+
+## Configure Argo
+
+Argo's default configuration causes it to try to query for resource kinds that don't exist in control planes. You should configure Argo's [general configmap][general-configmap] to include the resource group/kinds which make sense in the context of control planes. For example, the concept of `nodes` isn't exposed in control planes.
+ +To configure Argo CD, connect to the cluster where you've installed it and edit the configmap: + +```bash +kubectl edit configmap argocd-cm -n argocd +``` + +Adjust the resource inclusions and exclusions under the `data` field of the configmap: + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: argocd-cm + namespace: argocd +data: + resource.exclusions: | + - apiGroups: + - "*" + kinds: + - "*" + clusters: + - "*" + resource.inclusions: | + - apiGroups: + - "*" + kinds: + - Provider + - Configuration + clusters: + - "*" +``` + +The preceding configuration causes Argo to exclude syncing **all** resource group/kinds--except Crossplane `providers` and `configurations`--for **all** control planes. You're encouraged to adjust the `resource.inclusions` to include the types that make sense for your control plane, such as an `XRD` you've built with Crossplane. You're also encouraged to customize the `clusters` pattern to selectively apply these exclusions/inclusions to control planes (for example, `control-plane-prod-*`). + +## Control plane connection secrets + +To deploy control planes through Argo CD, you need to configure the `writeConnectionSecretToRef` field in your control plane spec. This field specifies where to store the control plane's `kubeconfig` and makes connection details available to Argo CD. + +### Basic Configuration + +In your control plane manifest, include the `writeConnectionSecretToRef` field: + +```yaml +apiVersion: spaces.upbound.io/v1beta1 +kind: ControlPlane +metadata: + name: my-control-plane + namespace: my-control-plane-group +spec: + writeConnectionSecretToRef: + name: kubeconfig-my-control-plane + namespace: my-control-plane-group + # ... other control plane configuration +``` + +### Parameters + +The `writeConnectionSecretToRef` field requires two parameters: + +- `name`: A unique name for the secret containing the kubeconfig (`kubeconfig-my-control-plane`) +- `namespace`: The Kubernetes namespace where you store the secret, which must match the metadata namespace. The system copies it into the `argocd` namespace when you set the `features.alpha.argocdPlugin.target.secretNamespace=argocd` configuration parameter. + +Control plane labels automatically propagate to the connection secret, which allows you to use label selectors in Argo CD for automated discovery and management. + +This configuration enables Argo CD to automatically discover and manage resources on your control planes. 
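+
+As an illustration, once a control plane's cluster secret exists, an Argo CD
+`Application` can target it by cluster name. The manifest below is a
+hypothetical sketch: the repository URL, path, and control plane name are
+placeholders, and it assumes the plugin registered the control plane as a
+named cluster:
+
+```yaml
+apiVersion: argoproj.io/v1alpha1
+kind: Application
+metadata:
+  name: ctp1-configuration
+  namespace: argocd
+spec:
+  project: default
+  source:
+    # Placeholder repository holding the control plane's Crossplane config.
+    repoURL: https://github.com/example-org/control-plane-config.git
+    targetRevision: main
+    path: ctp1
+  destination:
+    # Name of the control plane cluster registered by the plugin.
+    name: ctp1
+  syncPolicy:
+    automated:
+      prune: true
+      selfHeal: true
+```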
+
+
+[gitops-with-control-planes]: /spaces/howtos/cloud-spaces/gitops
+[configure-argo]: #configure-argo
+[configure-argo-1]: #configure-argo
+[general-configmap]: https://argo-cd.readthedocs.io/en/stable/operator-manual/argocd-cm-yaml/
diff --git a/spaces_versioned_docs/version-v1.9/howtos/self-hosted/workload-id/_category_.json b/spaces_versioned_docs/version-v1.9/howtos/self-hosted/workload-id/_category_.json
new file mode 100644
index 000000000..c5ecc93f6
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.9/howtos/self-hosted/workload-id/_category_.json
@@ -0,0 +1,11 @@
+{
+  "label": "Workload Identity Configuration",
+  "position": 2,
+  "collapsed": true,
+  "customProps": {
+    "plan": "business"
+  }
+
+}
+
+
diff --git a/spaces_versioned_docs/version-v1.9/howtos/self-hosted/workload-id/backup-restore-config.md b/spaces_versioned_docs/version-v1.9/howtos/self-hosted/workload-id/backup-restore-config.md
new file mode 100644
index 000000000..935ca69ec
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.9/howtos/self-hosted/workload-id/backup-restore-config.md
@@ -0,0 +1,384 @@
+---
+title: Backup and Restore Workload ID
+weight: 1
+description: Configure workload identity for Spaces Backup and Restore
+---
+import GlobalLanguageSelector, { CodeBlock } from '@site/src/components/GlobalLanguageSelector';
+
+
+
+
+
+
+Workload-identity authentication lets you use access policies to grant temporary
+AWS credentials to your Kubernetes pod through a service account. Assigning IAM roles and service accounts allows the pod to assume the IAM role dynamically and much more securely than static credentials.
+
+This guide walks you through creating an IAM role trust policy and applying it
+to your EKS cluster to handle backup and restore storage.
+
+
+
+
+
+Workload-identity authentication lets you use access policies to grant your
+self-hosted Space cluster access to your cloud providers. Workload identity
+authentication grants temporary Azure credentials to your Kubernetes pod based on
+a service account. Assigning managed identities and service accounts allows the pod to
+authenticate with Azure resources dynamically and much more securely than static credentials.
+
+This guide walks you through creating a managed identity and federated credential for your AKS
+cluster to handle backup and restore storage.
+
+
+
+
+
+Workload-identity authentication lets you use access policies to grant your
+self-hosted Space cluster access to your cloud providers. Workload identity
+authentication grants temporary GCP credentials to your Kubernetes pod based on
+a service account. Assigning IAM roles and service accounts allows the pod to
+access cloud resources dynamically and much more securely than static credentials.
+
+This guide walks you through configuring workload identity for your GKE
+cluster to handle backup and restore storage.
+
+
+
+## Prerequisites
+
+To set up workload identity, you'll need:
+
+- A self-hosted Space cluster
+- Administrator access in your cloud provider
+- Helm and `kubectl`
+
+## About the backup and restore component
+
+The `mxp-controller` component handles backup and restore workloads. It needs to
+access your cloud storage to store and retrieve backups. By default, this
+component runs in each control plane's host namespace.
+
+## Configuration
+
+
+
+Upbound supports workload-identity configurations in AWS with IAM Roles for
+Service Accounts and EKS pod identity association.
+
+#### IAM Roles for Service Accounts (IRSA)
+
+With IRSA, you can associate a Kubernetes service account in an EKS cluster with
+an AWS IAM role. Upbound authenticates workloads with that service account as
+the IAM role using temporary credentials instead of static role credentials.
+IRSA relies on the AWS STS `AssumeRoleWithWebIdentity` API to exchange OIDC ID tokens for
+the IAM role's temporary credentials. IRSA uses the `eks.amazonaws.com/role-arn`
+annotation to link the service account and the IAM role.
+
+First, create an IAM role with appropriate permissions to access your S3 bucket:
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Action": [
+        "s3:GetObject",
+        "s3:PutObject",
+        "s3:ListBucket",
+        "s3:DeleteObject"
+      ],
+      "Resource": [
+        "arn:aws:s3:::${YOUR_BACKUP_BUCKET}",
+        "arn:aws:s3:::${YOUR_BACKUP_BUCKET}/*"
+      ]
+    }
+  ]
+}
+```
+
+Next, ensure your EKS cluster has an OIDC identity provider:
+
+```shell
+eksctl utils associate-iam-oidc-provider --cluster ${YOUR_CLUSTER_NAME} --approve
+```
+
+Configure the IAM role trust policy to trust the `mxp-controller` service
+account in the namespace of each provisioned control plane.
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Principal": {
+        "Federated": "arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:oidc-provider/${YOUR_OIDC_PROVIDER}"
+      },
+      "Action": "sts:AssumeRoleWithWebIdentity",
+      "Condition": {
+        "StringEquals": {
+          "${YOUR_OIDC_PROVIDER}:aud": "sts.amazonaws.com",
+          "${YOUR_OIDC_PROVIDER}:sub": "system:serviceaccount:${YOUR_NAMESPACE}:mxp-controller"
+        }
+      }
+    }
+  ]
+}
+```
+
+In your control plane, pass the `--set` flag with the Spaces Helm chart
+parameters for the Backup and Restore component:
+
+```shell
+--set controlPlanes.mxpController.serviceAccount.annotations."eks\.amazonaws\.com/role-arn"="${SPACES_BR_IAM_ROLE_ARN}"
+```
+
+This command allows the backup and restore component to authenticate with your
+dedicated IAM role in your EKS cluster environment.
+
+#### EKS pod identities
+
+Upbound also supports EKS Pod Identity configuration. EKS Pod Identities allow
+you to create a pod identity association with your Kubernetes namespace, a
+service account, and an IAM role, which allows the EKS control plane to
+automatically handle the credential exchange.
+
+First, create an IAM role with appropriate permissions to access your S3 bucket:
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Action": [
+        "s3:GetObject",
+        "s3:PutObject",
+        "s3:ListBucket",
+        "s3:DeleteObject"
+      ],
+      "Resource": [
+        "arn:aws:s3:::${YOUR_BACKUP_BUCKET}",
+        "arn:aws:s3:::${YOUR_BACKUP_BUCKET}/*"
+      ]
+    }
+  ]
+}
+```
+
+When you install or upgrade your Space with Helm, add the backup and restore values:
+
+```shell
+helm upgrade spaces spaces-helm-chart \
+  --set "billing.enabled=true" \
+  --set "backup.enabled=true" \
+  --set "backup.storage.provider=aws" \
+  --set "backup.storage.aws.region=${YOUR_AWS_REGION}" \
+  --set "backup.storage.aws.bucket=${YOUR_BACKUP_BUCKET}"
+```
+
+After Upbound provisions your control plane, create a Pod Identity Association
+with the `aws` CLI:
+
+```shell
+aws eks create-pod-identity-association \
+  --cluster-name ${YOUR_CLUSTER_NAME} \
+  --namespace ${YOUR_CONTROL_PLANE_NAMESPACE} \
+  --service-account mxp-controller \
+  --role-arn arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:role/backup-restore-role
+```
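+
+To confirm the association exists (a sketch using the standard EKS CLI):
+
+```shell
+# List pod identity associations in the control plane's host namespace.
+aws eks list-pod-identity-associations \
+  --cluster-name ${YOUR_CLUSTER_NAME} \
+  --namespace ${YOUR_CONTROL_PLANE_NAMESPACE}
+```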
+
+Upbound supports workload-identity configurations in Azure with Azure's built-in
+workload identity feature.
+
+#### Prepare your cluster
+
+First, enable the OIDC issuer and workload identity in your AKS cluster:
+
+```shell
+az aks update --resource-group ${YOUR_RESOURCE_GROUP} --name ${YOUR_AKS_CLUSTER_NAME} --enable-oidc-issuer --enable-workload-identity
+```
+
+Next, find and store the OIDC issuer URL as an environment variable:
+
+```shell
+export AKS_OIDC_ISSUER="$(az aks show --name ${YOUR_AKS_CLUSTER_NAME} --resource-group ${YOUR_RESOURCE_GROUP} --query "oidcIssuerProfile.issuerUrl" --output tsv)"
+```
+
+#### Create a User-Assigned Managed Identity
+
+Create a new managed identity to associate with the backup and restore component:
+
+```shell
+az identity create --name backup-restore-identity --resource-group ${YOUR_RESOURCE_GROUP} --location ${YOUR_LOCATION}
+```
+
+Retrieve the client ID and store it as an environment variable:
+
+```shell
+export USER_ASSIGNED_CLIENT_ID="$(az identity show --name backup-restore-identity --resource-group ${YOUR_RESOURCE_GROUP} --query clientId -otsv)"
+```
+
+Grant the managed identity you created access to your Azure Storage account:
+
+```shell
+az role assignment create \
+  --role "Storage Blob Data Contributor" \
+  --assignee ${USER_ASSIGNED_CLIENT_ID} \
+  --scope /subscriptions/${YOUR_SUBSCRIPTION_ID}/resourceGroups/${YOUR_RESOURCE_GROUP}/providers/Microsoft.Storage/storageAccounts/${YOUR_STORAGE_ACCOUNT}
+```
+
+#### Apply the managed identity role
+
+In your control plane, pass the `--set` flag with the Spaces Helm chart
+parameters for the backup and restore component:
+
+```shell
+--set controlPlanes.mxpController.serviceAccount.annotations."azure\.workload\.identity/client-id"="${USER_ASSIGNED_CLIENT_ID}"
+--set controlPlanes.mxpController.pod.customLabels."azure\.workload\.identity/use"="true"
+```
+
+#### Create a Federated Identity credential
+
+```shell
+az identity federated-credential create \
+  --name backup-restore-federated-identity \
+  --identity-name backup-restore-identity \
+  --resource-group ${YOUR_RESOURCE_GROUP} \
+  --issuer ${AKS_OIDC_ISSUER} \
+  --subject system:serviceaccount:${YOUR_CONTROL_PLANE_NAMESPACE}:mxp-controller
+```
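+
+To verify the trust relationship (a sketch using the standard Azure CLI):
+
+```shell
+# List federated credentials attached to the backup and restore identity.
+az identity federated-credential list \
+  --identity-name backup-restore-identity \
+  --resource-group ${YOUR_RESOURCE_GROUP}
+```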
+
+Upbound supports workload-identity configurations in GCP with IAM principal
+identifiers and service account impersonation.
+
+#### Prepare your cluster
+
+First, enable Workload Identity Federation on your GKE cluster:
+
+```shell
+gcloud container clusters update ${YOUR_CLUSTER_NAME} \
+  --workload-pool=${YOUR_PROJECT_ID}.svc.id.goog \
+  --region=${YOUR_REGION}
+```
+
+#### Create a Google Service Account
+
+Create a service account for the backup and restore component:
+
+```shell
+gcloud iam service-accounts create backup-restore-sa \
+  --display-name "Backup Restore Service Account" \
+  --project ${YOUR_PROJECT_ID}
+```
+
+Grant the service account access to your Google Cloud Storage bucket:
+
+```shell
+gcloud projects add-iam-policy-binding ${YOUR_PROJECT_ID} \
+  --member "serviceAccount:backup-restore-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com" \
+  --role "roles/storage.objectAdmin"
+```
+
+#### Configure Workload Identity
+
+Create an IAM binding to grant the Kubernetes service account access to the Google service account:
+
+```shell
+gcloud iam service-accounts add-iam-policy-binding \
+  backup-restore-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com \
+  --role roles/iam.workloadIdentityUser \
+  --member "serviceAccount:${YOUR_PROJECT_ID}.svc.id.goog[${YOUR_CONTROL_PLANE_NAMESPACE}/mxp-controller]"
+```
+
+#### Apply the service account configuration
+
+In your control plane, pass the `--set` flag with the Spaces Helm chart
+parameters for the backup and restore component:
+
+```shell
+--set controlPlanes.mxpController.serviceAccount.annotations."iam\.gke\.io/gcp-service-account"="backup-restore-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com"
+```
+
+
+
+## Verify your configuration
+
+After you apply the configuration, use `kubectl` to verify the service account
+has the correct annotation:
+
+```shell
+kubectl get serviceaccount mxp-controller -n ${YOUR_CONTROL_PLANE_NAMESPACE} -o yaml
+```
+
+Verify the `mxp-controller` pod is running:
+
+```shell
+kubectl get pods -n ${YOUR_CONTROL_PLANE_NAMESPACE} | grep mxp-controller
+```
+
+## Restart workload
+
+You must manually restart a workload's pod when you add the workload identity annotations to the running pod's service account.
+
+
+
+This restart enables the EKS pod identity webhook to inject the necessary
+environment for using IRSA.
+
+
+
+
+
+This restart enables the workload identity webhook to inject the necessary
+environment for using Azure workload identity.
+
+
+
+
+
+This restart enables the workload identity webhook to inject the necessary
+environment for using GCP workload identity.
+
+
+
+```shell
+kubectl rollout restart deployment mxp-controller -n ${YOUR_CONTROL_PLANE_NAMESPACE}
+```
+
+## Use cases
+
+Configuring backup and restore with workload identity eliminates the need for
+static credentials in your cluster and the overhead of credential rotation.
+These benefits are helpful in:
+
+* Disaster recovery scenarios
+* Control plane migration
+* Compliance requirements
+* Rollbacks after unsuccessful upgrades
+
+## Next steps
+
+Now that you have a workload identity configured for the backup and restore
+component, visit the [Backup Configuration][backup-restore-guide] documentation.
+
+Other workload identity guides are:
+* [Billing][billing]
+* [Shared Secrets][secrets]
+
+[backup-restore-guide]: /spaces/howtos/backup-and-restore
+[billing]: /spaces/howtos/self-hosted/workload-id/billing-config
+[secrets]: /spaces/howtos/self-hosted/workload-id/eso-config
diff --git a/spaces_versioned_docs/version-v1.9/howtos/self-hosted/workload-id/billing-config.md b/spaces_versioned_docs/version-v1.9/howtos/self-hosted/workload-id/billing-config.md
new file mode 100644
index 000000000..323a6122f
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.9/howtos/self-hosted/workload-id/billing-config.md
@@ -0,0 +1,454 @@
+---
+title: Billing Workload ID
+weight: 1
+description: Configure workload identity for Spaces Billing
+---
+import GlobalLanguageSelector, { CodeBlock } from '@site/src/components/GlobalLanguageSelector';
+
+
+
+
+
+
+Workload-identity authentication lets you use access policies to grant your
+self-hosted Space cluster access to your cloud providers. Workload identity
+authentication grants temporary AWS credentials to your Kubernetes pod based on
+a service account. Assigning IAM roles and service accounts allows the pod to
+assume the IAM role dynamically and much more securely than static credentials.
+
+This guide walks you through creating an IAM role trust policy and applying it to your EKS
+cluster for billing in your Space cluster.
+
+
+
+
+
+Workload-identity authentication lets you use access policies to grant your
+self-hosted Space cluster access to your cloud providers. Workload identity
+authentication grants temporary Azure credentials to your Kubernetes pod based on
+a service account. Assigning managed identities and service accounts allows the pod to
+authenticate with Azure resources dynamically and much more securely than static credentials.
+
+This guide walks you through creating a managed identity and federated credential for your AKS
+cluster for billing in your Space cluster.
+
+
+
+
+
+Workload-identity authentication lets you use access policies to grant your
+self-hosted Space cluster access to your cloud providers. Workload identity
+authentication grants temporary GCP credentials to your Kubernetes pod based on
+a service account. Assigning IAM roles and service accounts allows the pod to
+access cloud resources dynamically and much more securely than static
+credentials.
+
+This guide walks you through configuring workload identity for your GKE
+cluster's billing component.
+
+
+
+## Prerequisites
+
+To set up workload identity, you'll need:
+
+- A self-hosted Space cluster
+- Administrator access in your cloud provider
+- Helm and `kubectl`
+
+## About the billing component
+
+The `vector.dev` component handles billing metrics collection in Spaces. It
+stores account data in your cloud storage. By default, this component runs in
+each control plane's host namespace.
+
+## Configuration
+
+
+
+Upbound supports workload-identity configurations in AWS with IAM Roles for
+Service Accounts and EKS pod identity association.
+
+#### IAM Roles for Service Accounts (IRSA)
+
+With IRSA, you can associate a Kubernetes service account in an EKS cluster with
+an AWS IAM role. Upbound authenticates workloads with that service account as
+the IAM role using temporary credentials instead of static role credentials.
+IRSA relies on the AWS STS `AssumeRoleWithWebIdentity` API to exchange OIDC ID tokens for
+the IAM role's temporary credentials. IRSA uses the `eks.amazonaws.com/role-arn`
+annotation to link the service account and the IAM role.
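+
+Conceptually, the resulting link looks like the following service account (a
+sketch with placeholder namespace, account ID, and role name; Spaces manages
+this service account for you):
+
+```yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: vector
+  # The control plane's host namespace (placeholder).
+  namespace: mxp-example-system
+  annotations:
+    # Ties the Kubernetes service account to the IAM role for IRSA.
+    eks.amazonaws.com/role-arn: arn:aws:iam::111122223333:role/billing-role
+```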
+
+**Create an IAM role and trust policy**
+
+First, create an IAM role with appropriate permissions to access your S3 bucket:
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Action": [
+        "s3:GetObject",
+        "s3:PutObject",
+        "s3:ListBucket",
+        "s3:DeleteObject"
+      ],
+      "Resource": [
+        "arn:aws:s3:::${YOUR_BILLING_BUCKET}",
+        "arn:aws:s3:::${YOUR_BILLING_BUCKET}/*"
+      ]
+    }
+  ]
+}
+```
+
+You must configure the IAM role trust policy with the exact match for each
+provisioned control plane. An example of a trust policy for a single control
+plane is below:
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Principal": {
+        "Federated": "arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:oidc-provider/${YOUR_OIDC_PROVIDER}"
+      },
+      "Action": "sts:AssumeRoleWithWebIdentity",
+      "Condition": {
+        "StringEquals": {
+          "${YOUR_OIDC_PROVIDER}:aud": "sts.amazonaws.com",
+          "${YOUR_OIDC_PROVIDER}:sub": "system:serviceaccount:${YOUR_NAMESPACE}:vector"
+        }
+      }
+    }
+  ]
+}
+```
+
+**Configure the EKS OIDC provider**
+
+Next, ensure your EKS cluster has an OIDC identity provider:
+
+```shell
+eksctl utils associate-iam-oidc-provider --cluster ${YOUR_CLUSTER_NAME} --approve
+```
+
+**Apply the IAM role**
+
+In your control plane, pass the `--set` flag with the Spaces Helm chart
+parameters for the Billing component:
+
+```shell
+--set "billing.enabled=true"
+--set "billing.storage.provider=aws"
+--set "billing.storage.aws.region=${YOUR_AWS_REGION}"
+--set "billing.storage.aws.bucket=${YOUR_BILLING_BUCKET}"
+--set "billing.storage.secretRef.name="
+--set controlPlanes.vector.serviceAccount.customAnnotations."eks\.amazonaws\.com/role-arn"="arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:role/${YOUR_BILLING_ROLE_NAME}"
+```
+
+:::important
+You **must** set the `billing.storage.secretRef.name` to an empty string to
+enable workload identity for the billing component.
+:::
+
+#### EKS pod identities
+
+Upbound also supports EKS Pod Identity configuration. EKS Pod Identities let
+you create a pod identity association between your Kubernetes namespace, a
+service account, and an IAM role, so the EKS control plane handles the
+credential exchange automatically.
+
+**Create an IAM role**
+
+First, create an IAM role with appropriate permissions to access your S3 bucket:
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Action": [
+        "s3:GetObject",
+        "s3:PutObject",
+        "s3:ListBucket"
+      ],
+      "Resource": [
+        "arn:aws:s3:::${YOUR_BILLING_BUCKET}",
+        "arn:aws:s3:::${YOUR_BILLING_BUCKET}/*"
+      ]
+    }
+  ]
+}
+```
+
+**Configure your Space with Helm**
+
+When you install or upgrade your Space with Helm, add the billing values:
+
+```shell
+helm upgrade spaces spaces-helm-chart \
+  --set "billing.enabled=true" \
+  --set "billing.storage.provider=aws" \
+  --set "billing.storage.aws.region=${YOUR_AWS_REGION}" \
+  --set "billing.storage.aws.bucket=${YOUR_BILLING_BUCKET}" \
+  --set "billing.storage.secretRef.name="
+```
+
+**Create a Pod Identity Association**
+
+After Upbound provisions your control plane, create a Pod Identity Association
+with the `aws` CLI:
+
+```shell
+aws eks create-pod-identity-association \
+  --cluster-name ${YOUR_CLUSTER_NAME} \
+  --namespace ${YOUR_CONTROL_PLANE_NAMESPACE} \
+  --service-account vector \
+  --role-arn arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:role/${YOUR_BILLING_ROLE_NAME}
+```
+
+Upbound supports workload-identity configurations in Azure with Azure's built-in
+workload identity feature.
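+In practice, Azure workload identity hinges on two pieces of metadata that the
+Spaces Helm chart applies for you through the values shown later in this
+section: a client-ID annotation on the `vector` service account, and a label on
+the pod that tells the webhook to inject federated credentials. As an
+illustrative sketch only (all values are placeholders):
+
+```yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: vector
+  namespace: ${YOUR_CONTROL_PLANE_NAMESPACE}
+  annotations:
+    # Placeholder: the client ID of the managed identity you create below
+    azure.workload.identity/client-id: ${USER_ASSIGNED_CLIENT_ID}
+```
+
+The pod additionally carries the label `azure.workload.identity/use: "true"`.
+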
+
+First, enable the OIDC issuer and workload identity in your AKS cluster:
+
+```shell
+az aks update --resource-group ${YOUR_RESOURCE_GROUP} --name ${YOUR_AKS_CLUSTER_NAME} --enable-oidc-issuer --enable-workload-identity
+```
+
+Next, find and store the OIDC issuer URL as an environment variable:
+
+```shell
+export AKS_OIDC_ISSUER="$(az aks show --name ${YOUR_AKS_CLUSTER_NAME} --resource-group ${YOUR_RESOURCE_GROUP} --query "oidcIssuerProfile.issuerUrl" --output tsv)"
+```
+
+Create a new managed identity to associate with the billing component:
+
+```shell
+az identity create --name billing-identity --resource-group ${YOUR_RESOURCE_GROUP} --location ${YOUR_LOCATION}
+```
+
+Retrieve the client ID and store it as an environment variable:
+
+```shell
+export USER_ASSIGNED_CLIENT_ID="$(az identity show --name billing-identity --resource-group ${YOUR_RESOURCE_GROUP} --query clientId -otsv)"
+```
+
+Grant the managed identity you created access to your Azure Storage account:
+
+```shell
+az role assignment create --role "Storage Blob Data Contributor" --assignee $USER_ASSIGNED_CLIENT_ID --scope /subscriptions/${YOUR_SUBSCRIPTION_ID}/resourceGroups/${YOUR_RESOURCE_GROUP}/providers/Microsoft.Storage/storageAccounts/${YOUR_STORAGE_ACCOUNT}
+```
+
+In your control plane, pass the `--set` flag with the Spaces Helm chart
+parameters for the billing component:
+
+```shell
+--set "billing.enabled=true"
+--set "billing.storage.provider=azure"
+--set "billing.storage.azure.storageAccount=${YOUR_STORAGE_ACCOUNT}"
+--set "billing.storage.azure.container=${SPACES_BILLING_STORAGE_CONTAINER}"
+--set "billing.storage.secretRef.name="
+--set controlPlanes.vector.serviceAccount.customAnnotations."azure\.workload\.identity/client-id"="${USER_ASSIGNED_CLIENT_ID}"
+--set controlPlanes.vector.pod.customLabels."azure\.workload\.identity/use"="true"
+```
+
+Create a federated credential to establish trust between the managed identity
+and your AKS OIDC provider:
+
+```shell
+az identity federated-credential create \
+  --name billing-federated-identity \
+  --identity-name billing-identity \
+  --resource-group ${YOUR_RESOURCE_GROUP} \
+  --issuer ${AKS_OIDC_ISSUER} \
+  --subject system:serviceaccount:${YOUR_CONTROL_PLANE_NAMESPACE}:vector
+```
+
+Upbound supports workload-identity configurations in GCP with IAM principal
+identifiers or service account impersonation.
+
+#### IAM principal identifiers
+
+IAM principal identifiers allow you to grant permissions directly to
+Kubernetes service accounts without additional annotation. Upbound recommends
+this approach for ease-of-use and flexibility.
+
+First, enable Workload Identity Federation on your GKE cluster:
+
+```shell
+gcloud container clusters update ${YOUR_CLUSTER_NAME} \
+  --workload-pool=${YOUR_PROJECT_ID}.svc.id.goog \
+  --region=${YOUR_REGION}
+```
+
+Next, configure your Spaces installation with the Spaces Helm chart parameters:
+
+```shell
+--set "billing.enabled=true"
+--set "billing.storage.provider=gcp"
+--set "billing.storage.gcp.bucket=${YOUR_BILLING_BUCKET}"
+--set "billing.storage.secretRef.name="
+```
+
+:::important
+You **must** set the `billing.storage.secretRef.name` to an empty string to
+enable workload identity for the billing component.
+:::
+
+Grant the necessary permissions to your Kubernetes service account:
+
+```shell
+gcloud projects add-iam-policy-binding ${YOUR_PROJECT_ID} \
+  --member="principalSet://iam.googleapis.com/projects/${YOUR_PROJECT_NUMBER}/locations/global/workloadIdentityPools/${YOUR_PROJECT_ID}.svc.id.goog/attribute.kubernetes_namespace/${YOUR_CONTROL_PLANE_NAMESPACE}/attribute.kubernetes_service_account/vector" \
+  --role="roles/storage.objectAdmin"
+```
+
+Enable uniform bucket-level access on your storage bucket:
+
+```shell
+gcloud storage buckets update gs://${YOUR_BILLING_BUCKET} --uniform-bucket-level-access
+```
+
+#### Service account impersonation
+
+Service account impersonation allows you to link a Kubernetes service account to
+a GCP service account. The Kubernetes service account assumes the permissions of
+the GCP service account you specify.
+
+Enable Workload Identity Federation on your GKE cluster:
+
+```shell
+gcloud container clusters update ${YOUR_CLUSTER_NAME} \
+  --workload-pool=${YOUR_PROJECT_ID}.svc.id.goog \
+  --region=${YOUR_REGION}
+```
+
+Next, create a dedicated service account for your billing operations:
+
+```shell
+gcloud iam service-accounts create billing-sa \
+  --project=${YOUR_PROJECT_ID}
+```
+
+Grant storage permissions to the service account you created:
+
+```shell
+gcloud projects add-iam-policy-binding ${YOUR_PROJECT_ID} \
+  --member="serviceAccount:billing-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com" \
+  --role="roles/storage.objectAdmin"
+```
+
+Link the Kubernetes service account to the GCP service account:
+
+```shell
+gcloud iam service-accounts add-iam-policy-binding \
+  billing-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com \
+  --role="roles/iam.workloadIdentityUser" \
+  --member="serviceAccount:${YOUR_PROJECT_ID}.svc.id.goog[${YOUR_CONTROL_PLANE_NAMESPACE}/vector]"
+```
+
+In your control plane, pass the `--set` flag with the Spaces Helm chart
+parameters for the billing component:
+
+```shell
+--set "billing.enabled=true"
+--set "billing.storage.provider=gcp"
+--set "billing.storage.gcp.bucket=${YOUR_BILLING_BUCKET}"
+--set "billing.storage.secretRef.name="
+--set controlPlanes.vector.serviceAccount.customAnnotations."iam\.gke\.io/gcp-service-account"="billing-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com"
+```
+
+## Verify your configuration
+
+After you apply the configuration, use `kubectl` to verify the service account
+has the correct annotation:
+
+```shell
+kubectl get serviceaccount vector -n ${YOUR_CONTROL_PLANE_NAMESPACE} -o yaml
+```
+
+Verify the `vector` pod is running:
+
+```shell
+kubectl get pods -n ${YOUR_CONTROL_PLANE_NAMESPACE} | grep vector
+```
+
+## Restart workload
+
+You must manually restart a workload's pod when you add the
+`eks.amazonaws.com/role-arn` annotation to the running pod's service
+account.
+
+This restart enables the EKS pod identity webhook to inject the necessary
+environment for using IRSA.
+
+You must manually restart a workload's pod when you add the workload identity annotations to the running pod's service account.
+
+This restart enables the workload identity webhook to inject the necessary
+environment for using Azure workload identity.
+
+GCP workload identity doesn't require pod restarts after configuration changes.
+If you do need to restart the workload, use the `kubectl` command to force the +component restart: + + + +```shell +kubectl rollout restart deployment vector +``` + + +## Use cases + + +Using workload identity authentication for billing eliminates the need for static +credentials in your cluster as well as the overhead of credential rotation. +These benefits are helpful in: + +* Resource usage tracking across teams/projects +* Cost allocation for multi-tenant environments +* Financial auditing requirements +* Capacity billing and resource optimization +* Automated billing workflows + +## Next steps + +Now that you have workload identity configured for the billing component, visit +the [Billing guide][billing-guide] for more information. + +Other workload identity guides are: +* [Backup and restore][backuprestore] +* [Shared Secrets][secrets] + +[billing-guide]: /spaces/howtos/self-hosted/billing +[backuprestore]: /spaces/howtos/self-hosted/workload-id/backup-restore-config +[secrets]: /spaces/howtos/self-hosted/workload-id/eso-config diff --git a/spaces_versioned_docs/version-v1.9/howtos/self-hosted/workload-id/eso-config.md b/spaces_versioned_docs/version-v1.9/howtos/self-hosted/workload-id/eso-config.md new file mode 100644 index 000000000..c1418c171 --- /dev/null +++ b/spaces_versioned_docs/version-v1.9/howtos/self-hosted/workload-id/eso-config.md @@ -0,0 +1,503 @@ +--- +title: Shared Secrets Workload ID +weight: 1 +description: Configure workload identity for Spaces Shared Secrets +--- +import GlobalLanguageSelector, { CodeBlock } from '@site/src/components/GlobalLanguageSelector'; + + + + + + + +Workload-identity authentication lets you use access policies to grant your +self-hosted Space cluster access to your cloud providers. Workload identity +authentication grants temporary AWS credentials to your Kubernetes pod based on +a service account. Assigning IAM roles and service accounts allows the pod to +assume the IAM role dynamically and much more securely than static credentials. + +This guide walks you through creating an IAM trust role policy and applying it to your EKS +cluster for secret sharing with Kubernetes. + + + + + +Workload-identity authentication lets you use access policies to grant your +self-hosted Space cluster access to your cloud providers. Workload identity +authentication grants temporary Azure credentials to your Kubernetes pod based on +a service account. Assigning managed identities and service accounts allows the pod to +authenticate with Azure resources dynamically and much more securely than static credentials. + +This guide walks you through creating a managed identity and federated credential for your AKS +cluster for shared secrets in your Space cluster. + + + + + +Workload-identity authentication lets you use access policies to grant your +self-hosted Space cluster access to your cloud providers. Workload identity +authentication grants temporary GCP credentials to your Kubernetes pod based on +a service account. Assigning IAM roles and service accounts allows the pod to +access cloud resources dynamically and much more securely than static +credentials. + +This guide walks you through configuring workload identity for your GKE +cluster's Shared Secrets component. 
+
+## Prerequisites
+
+To set up workload identity, you'll need:
+
+- A self-hosted Space cluster
+- Administrator access in your cloud provider
+- Helm and `kubectl`
+
+## About the Shared Secrets component
+
+The External Secrets Operator (ESO) runs in each control plane's host namespace
+as `external-secrets-controller`. It needs access to your external secrets
+management service, such as AWS Secrets Manager.
+
+To configure your shared secrets workflow controller, you must:
+
+* Annotate the Kubernetes service account to associate it with a cloud-side
+  principal (such as an IAM role, service account, or enterprise application). The workload must then
+  use this service account.
+* Label the workload (pod) to allow the injection of a temporary credential set,
+  enabling authentication.
+
+The External Secrets Operator (ESO) component runs in each control plane's host
+namespace as `external-secrets-controller`. It synchronizes secrets from
+external APIs into Kubernetes secrets. Shared secrets allow you to manage
+credentials outside your Kubernetes cluster while making them available to your
+application.
+
+The External Secrets Operator (ESO) component runs in each control plane's host
+namespace as `external-secrets-controller`. It synchronizes secrets from
+external APIs into Kubernetes secrets. Shared secrets allow you to manage
+credentials outside your Kubernetes cluster while making them available to your
+application.
+
+## Configuration
+
+Upbound supports workload-identity configurations in AWS with IAM Roles for
+Service Accounts or EKS pod identity association.
+
+#### IAM Roles for Service Accounts (IRSA)
+
+With IRSA, you can associate a Kubernetes service account in an EKS cluster with
+an AWS IAM role. Upbound authenticates workloads with that service account as
+the IAM role using temporary credentials instead of static role credentials.
+IRSA relies on the AWS STS `AssumeRoleWithWebIdentity` operation to exchange OIDC
+ID tokens for the IAM role's temporary credentials. IRSA uses the
+`eks.amazonaws.com/role-arn` annotation to link the service account and the IAM role.
+
+**Create an IAM role and trust policy**
+
+First, create an IAM role with appropriate permissions to access AWS Secrets Manager:
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Action": [
+        "secretsmanager:GetSecretValue",
+        "secretsmanager:DescribeSecret",
+        "ssm:GetParameter"
+      ],
+      "Resource": [
+        "arn:aws:secretsmanager:${YOUR_REGION}:${YOUR_AWS_ACCOUNT_ID}:secret:${YOUR_SECRET_PREFIX}*",
+        "arn:aws:ssm:${YOUR_REGION}:${YOUR_AWS_ACCOUNT_ID}:parameter/${YOUR_PARAMETER_PREFIX}*"
+      ]
+    }
+  ]
+}
+```
+
+You must configure the IAM role trust policy with the exact match for each
+provisioned control plane. An example of a trust policy for a single control
+plane is below:
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Principal": {
+        "Federated": "arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:oidc-provider/${YOUR_OIDC_PROVIDER}"
+      },
+      "Action": "sts:AssumeRoleWithWebIdentity",
+      "Condition": {
+        "StringEquals": {
+          "${YOUR_OIDC_PROVIDER}:aud": "sts.amazonaws.com"
+        },
+        "StringLike": {
+          "${YOUR_OIDC_PROVIDER}:sub": "system:serviceaccount:*:external-secrets-controller"
+        }
+      }
+    }
+  ]
+}
+```
+
+**Configure the EKS OIDC provider**
+
+Next, ensure your EKS cluster has an OIDC identity provider:
+
+```shell
+eksctl utils associate-iam-oidc-provider --cluster ${YOUR_CLUSTER_NAME} --approve
+```
+
+**Apply the IAM role**
+
+In your control plane, pass the `--set` flag with the Spaces Helm chart
+parameters for the shared secrets component:
+
+```shell
+--set controlPlanes.sharedSecrets.serviceAccount.customAnnotations."eks\.amazonaws\.com/role-arn"="arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:role/${YOUR_ESO_ROLE_NAME}"
+```
+
+This command allows the shared secrets component to authenticate with your
+dedicated IAM role in your EKS cluster environment.
+
+#### EKS pod identities
+
+Upbound also supports EKS Pod Identity configuration. EKS Pod Identities let
+you create a pod identity association between your Kubernetes namespace, a
+service account, and an IAM role, so the EKS control plane handles the
+credential exchange automatically.
+
+**Create an IAM role**
+
+First, create an IAM role with appropriate permissions to access AWS Secrets Manager:
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Action": [
+        "secretsmanager:GetSecretValue",
+        "secretsmanager:DescribeSecret",
+        "ssm:GetParameter"
+      ],
+      "Resource": [
+        "arn:aws:secretsmanager:${YOUR_AWS_REGION}:${YOUR_AWS_ACCOUNT_ID}:secret:${YOUR_SECRET_PREFIX}*",
+        "arn:aws:ssm:${YOUR_AWS_REGION}:${YOUR_AWS_ACCOUNT_ID}:parameter/${YOUR_PARAMETER_PREFIX}*"
+      ]
+    }
+  ]
+}
+```
+
+**Configure your Space with Helm**
+
+When you install or upgrade your Space with Helm, add the shared secrets value:
+
+```shell
+helm upgrade spaces spaces-helm-chart \
+  --set "sharedSecrets.enabled=true"
+```
+
+**Create a Pod Identity Association**
+
+After Upbound provisions your control plane, create a Pod Identity Association
+with the `aws` CLI:
+
+```shell
+aws eks create-pod-identity-association \
+  --cluster-name ${YOUR_CLUSTER_NAME} \
+  --namespace ${YOUR_CONTROL_PLANE_NAMESPACE} \
+  --service-account external-secrets-controller \
+  --role-arn arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:role/${YOUR_ROLE_NAME}
+```
+
+Upbound supports workload-identity configurations in Azure with Azure's built-in
+workload identity feature.
+
+First, enable the OIDC issuer and workload identity in your AKS cluster:
+
+```shell
+az aks update --resource-group ${YOUR_RESOURCE_GROUP} --name ${YOUR_AKS_CLUSTER_NAME} --enable-oidc-issuer --enable-workload-identity
+```
+
+Next, find and store the OIDC issuer URL as an environment variable:
+
+```shell
+export AKS_OIDC_ISSUER="$(az aks show --name ${YOUR_AKS_CLUSTER_NAME} --resource-group ${YOUR_RESOURCE_GROUP} --query "oidcIssuerProfile.issuerUrl" --output tsv)"
+```
+
+Create a new managed identity to associate with the shared secrets component:
+
+```shell
+az identity create --name secrets-identity --resource-group ${YOUR_RESOURCE_GROUP} --location ${YOUR_LOCATION}
+```
+
+Retrieve the client ID and store it as an environment variable:
+
+```shell
+export USER_ASSIGNED_CLIENT_ID="$(az identity show --name secrets-identity --resource-group ${YOUR_RESOURCE_GROUP} --query clientId -otsv)"
+```
+
+Grant the managed identity you created access to your Azure Key Vault:
+
+```shell
+az keyvault set-policy --name ${YOUR_KEY_VAULT_NAME} \
+  --resource-group ${YOUR_RESOURCE_GROUP} \
+  --object-id $(az identity show --name secrets-identity --resource-group ${YOUR_RESOURCE_GROUP} --query principalId -otsv) \
+  --secret-permissions get list
+```
+
+In your control plane, pass the `--set` flag with the Spaces Helm chart
+parameters for the shared secrets component:
+
+```shell
+--set controlPlanes.sharedSecrets.serviceAccount.customAnnotations."azure\.workload\.identity/client-id"="${USER_ASSIGNED_CLIENT_ID}"
+--set controlPlanes.sharedSecrets.pod.customLabels."azure\.workload\.identity/use"="true"
+```
+
+Next, create a federated credential to establish trust between the managed identity
+and your AKS OIDC provider:
+
+```shell
+az identity federated-credential create \
+  --name secrets-federated-identity \
+  --identity-name secrets-identity \
+  --resource-group ${YOUR_RESOURCE_GROUP} \
+  --issuer ${AKS_OIDC_ISSUER} \
+  --subject system:serviceaccount:${YOUR_CONTROL_PLANE_NAMESPACE}:external-secrets-controller
+```
+
+Upbound supports workload-identity configurations in GCP with IAM principal
+identifiers or service account impersonation.
+
+#### IAM principal identifiers
+
+IAM principal identifiers allow you to grant permissions directly to
+Kubernetes service accounts without additional annotation. Upbound recommends
+this approach for ease-of-use and flexibility.
+
+First, enable Workload Identity Federation on your GKE cluster:
+
+```shell
+gcloud container clusters update ${YOUR_CLUSTER_NAME} \
+  --workload-pool=${YOUR_PROJECT_ID}.svc.id.goog \
+  --region=${YOUR_REGION}
+```
+
+Next, grant the necessary permissions to your Kubernetes service account:
+
+```shell
+gcloud projects add-iam-policy-binding ${YOUR_PROJECT_ID} \
+  --member="principalSet://iam.googleapis.com/projects/${YOUR_PROJECT_NUMBER}/locations/global/workloadIdentityPools/${YOUR_PROJECT_ID}.svc.id.goog/attribute.kubernetes_namespace/${YOUR_CONTROL_PLANE_NAMESPACE}/attribute.kubernetes_service_account/external-secrets-controller" \
+  --role="roles/secretmanager.secretAccessor"
+```
+
+#### Service account impersonation
+
+Service account impersonation allows you to link a Kubernetes service account to
+a GCP service account. The Kubernetes service account assumes the permissions of
+the GCP service account you specify.
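+
+The end state is a Kubernetes service account annotated with the GCP service
+account it impersonates. As an illustrative sketch only (the Spaces Helm chart
+applies this annotation for you through the value shown at the end of this
+section, so you don't edit the service account by hand):
+
+```yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: external-secrets-controller
+  namespace: ${YOUR_CONTROL_PLANE_NAMESPACE}
+  annotations:
+    # Placeholder: the GCP service account created in the steps below
+    iam.gke.io/gcp-service-account: secrets-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com
+```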
+
+Enable Workload Identity Federation on your GKE cluster:
+
+```shell
+gcloud container clusters update ${YOUR_CLUSTER_NAME} \
+  --workload-pool=${YOUR_PROJECT_ID}.svc.id.goog \
+  --region=${YOUR_REGION}
+```
+
+Next, create a dedicated service account for your secrets operations:
+
+```shell
+gcloud iam service-accounts create secrets-sa \
+  --project=${YOUR_PROJECT_ID}
+```
+
+Grant secret access permissions to the service account you created:
+
+```shell
+gcloud projects add-iam-policy-binding ${YOUR_PROJECT_ID} \
+  --member="serviceAccount:secrets-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com" \
+  --role="roles/secretmanager.secretAccessor"
+```
+
+Link the Kubernetes service account to the GCP service account:
+
+```shell
+gcloud iam service-accounts add-iam-policy-binding \
+  secrets-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com \
+  --role="roles/iam.workloadIdentityUser" \
+  --member="serviceAccount:${YOUR_PROJECT_ID}.svc.id.goog[${YOUR_CONTROL_PLANE_NAMESPACE}/external-secrets-controller]"
+```
+
+In your control plane, pass the `--set` flag with the Spaces Helm chart
+parameters for the shared secrets component:
+
+```shell
+--set controlPlanes.sharedSecrets.serviceAccount.customAnnotations."iam\.gke\.io/gcp-service-account"="secrets-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com"
+```
+
+## Verify your configuration
+
+After you apply the configuration, use `kubectl` to verify the service account
+has the correct annotation:
+
+```shell
+kubectl get serviceaccount external-secrets-controller -n ${YOUR_CONTROL_PLANE_NAMESPACE} -o yaml
+```
+
+Verify the `external-secrets` pod is running correctly:
+
+```shell
+kubectl get pods -n ${YOUR_CONTROL_PLANE_NAMESPACE} | grep external-secrets
+```
+
+Verify the External Secrets Operator pod is running correctly:
+
+```shell
+kubectl get pods -n ${YOUR_CONTROL_PLANE_NAMESPACE} | grep external-secrets
+```
+
+Verify the `external-secrets` pod is running correctly:
+
+```shell
+kubectl get pods -n ${YOUR_CONTROL_PLANE_NAMESPACE} | grep external-secrets
+```
+
+## Restart workload
+
+You must manually restart a workload's pod when you add the
+`eks.amazonaws.com/role-arn` annotation to the running pod's service
+account.
+
+This restart enables the EKS pod identity webhook to inject the necessary
+environment for using IRSA.
+
+You must manually restart a workload's pod when you add the workload identity annotations to the running pod's service account.
+
+This restart enables the workload identity webhook to inject the necessary
+environment for using Azure workload identity.
+
+GCP workload identity doesn't require pod restarts after configuration changes.
+If you do need to restart the workload, use the `kubectl` command to force the
+component restart:
+
+```shell
+kubectl rollout restart deployment external-secrets -n ${YOUR_CONTROL_PLANE_NAMESPACE}
+```
+
+## Use cases
+
+Configuring shared secrets with workload identity eliminates the need for static
+credentials in your cluster. These benefits are particularly helpful in:
+
+* Secure application credentials management
+* Database connection string storage
+* API token management
+* Compliance with secret rotation security standards
+* Multi-environment configuration with centralized secret management
+
+Using workload identity authentication for shared secrets eliminates the need for static
+credentials in your cluster as well as the overhead of credential rotation.
+These benefits are particularly helpful in:
+
+* Secure application credentials management
+* Database connection string storage
+* API token management
+* Compliance with secret rotation security standards
+
+Configuring the external secrets operator with workload identity eliminates the need for
+static credentials in your cluster and the overhead of credential rotation.
+These benefits are particularly helpful in:
+
+* Secure application credentials management
+* Database connection string storage
+* API token management
+* Compliance with secret rotation security standards
+
+## Next steps
+
+Now that you have workload identity configured for the shared secrets component, visit
+the [Shared Secrets][eso-guide] guide for more information.
+
+Other workload identity guides are:
+* [Backup and restore][backuprestore]
+* [Billing][billing]
+
+[eso-guide]: /spaces/howtos/secrets-management
+[backuprestore]: /spaces/howtos/self-hosted/workload-id/backup-restore-config
+[billing]: /spaces/howtos/self-hosted/workload-id/billing-config
diff --git a/spaces_versioned_docs/version-v1.9/howtos/simulations.md b/spaces_versioned_docs/version-v1.9/howtos/simulations.md
new file mode 100644
index 000000000..26cb0e657
--- /dev/null
+++ b/spaces_versioned_docs/version-v1.9/howtos/simulations.md
@@ -0,0 +1,110 @@
+---
+title: Simulate changes to your Control Plane Projects
+sidebar_position: 100
+description: Use the Up CLI to mock operations before deploying to your environments.
+---
+
+:::info API Version Information
+This guide covers Simulations, available in v1.10+ (GA since v1.13). For
+version-specific availability and features, see the versioned Spaces documentation.
+
+For API specifications on the Simulations CRD, see the [Spaces API Reference](../../../reference/apis/spaces-api/).
+:::
+
+:::important
+The Simulations feature is in private preview. For more information, [reach out to Upbound][reach-out-to-upbound].
+:::
+
+Control plane simulations allow you to preview changes to your resources before
+applying them to your control planes. Like a plan or dry-run operation,
+simulations expose the impact of updates to compositions or claims without
+changing your actual resources.
+
+A control plane simulation creates a temporary copy of your control plane and
+returns a preview of the desired changes. The simulation change plan helps you
+reduce the risk of unexpected behavior based on your changes.
+
+## Simulation benefits
+
+Control planes are dynamic systems that automatically reconcile resources to
+match your desired state. Simulations provide visibility into this
+reconciliation process by showing:
+
+* New resources to create
+* Existing resources to change
+* Existing resources to delete
+* How configuration changes propagate through the system
+
+These insights are crucial when planning complex changes or upgrading Crossplane
+packages.
+
+## Requirements
+
+Simulations are available to select customers on Upbound Cloud with Team
+Tier or higher. For more information, [reach out to Upbound][reach-out-to-upbound-1].
+
+## How to simulate your control planes
+
+Before you start a simulation, build your project and use the `up
+project run` command to run your control plane.
+
+Use the `up project simulate` command with your control plane name to start the
+simulation:
+
+```shell {copy-lines="all"}
+up project simulate --complete-after=60s --terminate-on-finish
+```
+
+The `complete-after` flag determines how long to run the simulation before it completes and calculates the results.
Depending on the change, a simulation may not complete within your defined interval leaving unaffected resources as `unchanged`. + +The `terminate-on-finish` flag terminates the simulation after the time +you set - deleting the control plane that ran the simulation. + +At the end of your simulation, your CLI returns: +* A summary of the resources created, modified, or deleted +* Diffs for each resource affected + +## View your simulation in the Upbound Console +You can also view your simulation results in the Upbound Console: + +1. Navigate to your base control plane in the Upbound Console +2. Select the "Simulations" tab in the menu +3. Select a simulation object for a change list of all + resources affected. + +The Console provides visual indications of changes: + +- Created Resources: Marked with green +- Modified Resources: Marked with yellow +- Deleted Resources: Marked with red +- Unchanged Resources: Displayed in gray + +![Upbound Console Simulation](/img/simulations.png) + +## Considerations + +Simulations is a **private preview** feature. + +Be aware of the following limitations: + +- Simulations can't predict the exact behavior of external systems due to the + complexity and non-deterministic reconciliation pattern in Crossplane. + +- The only completion criteria for a simulation is time. Your simulation may not + receive a conclusive result within that interval. Upbound recommends the + default `60s` value. + +- Providers don't run in simulations. Simulations can't compose resources that + rely on the status of Managed Resources. + + +The Upbound team is working to improve these limitations. Your feedback is always appreciated. + +## Next steps + +For more information, follow the [tutorial][tutorial] on Simulations. + + +[tutorial]: /manuals/cli/howtos/simulations +[reach-out-to-upbound]: https://www.upbound.io/contact-us +[reach-out-to-upbound-1]: https://www.upbound.io/contact-us diff --git a/spaces_versioned_docs/version-v1.9/overview/_category_.json b/spaces_versioned_docs/version-v1.9/overview/_category_.json new file mode 100644 index 000000000..54bb16430 --- /dev/null +++ b/spaces_versioned_docs/version-v1.9/overview/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "Overview", + "position": 0 +} diff --git a/spaces_versioned_docs/version-v1.9/overview/index.md b/spaces_versioned_docs/version-v1.9/overview/index.md new file mode 100644 index 000000000..7b79f6e44 --- /dev/null +++ b/spaces_versioned_docs/version-v1.9/overview/index.md @@ -0,0 +1,14 @@ +--- +title: Spaces Overview +sidebar_position: 0 +--- + +# Upbound Spaces + +Welcome to the Upbound Spaces documentation. This section contains comprehensive documentation for Spaces API and Spaces operations across all supported versions (v1.9 through v1.15). 
+ +## Get Started + +- **[Concepts](/spaces/concepts/control-planes)** - Core concepts for Spaces +- **[How-To Guides](/spaces/howtos/auto-upgrade)** - Step-by-step guides for operating Spaces +- **[API Reference](/spaces/reference/)** - API specifications and resources diff --git a/spaces_versioned_docs/version-v1.9/reference/_category_.json b/spaces_versioned_docs/version-v1.9/reference/_category_.json new file mode 100644 index 000000000..4a6a139c4 --- /dev/null +++ b/spaces_versioned_docs/version-v1.9/reference/_category_.json @@ -0,0 +1,5 @@ +{ + "label": "Spaces API", + "position": 1, + "collapsed": true +} diff --git a/spaces_versioned_docs/version-v1.9/reference/index.md b/spaces_versioned_docs/version-v1.9/reference/index.md new file mode 100644 index 000000000..5e68b0768 --- /dev/null +++ b/spaces_versioned_docs/version-v1.9/reference/index.md @@ -0,0 +1,72 @@ +--- +title: Spaces API Reference +description: Documentation for the Spaces API resources (v1.15 - Latest) +sidebar_position: 1 +--- +import CrdDocViewer from '@site/src/components/CrdViewer'; + + +This page documents the Custom Resource Definitions (CRDs) for the Spaces API. + + +## Control Planes +### Control Planes + + +## Observability +### Shared Telemetry Configs + + +## `pkg` +### Controller Revisions + + +### Controller Runtime Configs + + +### Controllers + + +### Remote Configuration Revisions + + +### Remote Configurations + + +## Policy +### Shared Upbound Policies + + +## References +### Referenced Objects + + +## Scheduling +### Environments + + +## Secrets +### Shared External Secrets + + +### Shared Secret Stores + + +## Simulations + + +## Spaces Backups +### Backups + + +### Backup Schedules + + +### Shared Backup Configs + + +### Shared Backups + + +### Shared Backup Schedules + diff --git a/spaces_versioned_sidebars/version-v1.10-sidebars.json b/spaces_versioned_sidebars/version-v1.10-sidebars.json new file mode 100644 index 000000000..e74749c75 --- /dev/null +++ b/spaces_versioned_sidebars/version-v1.10-sidebars.json @@ -0,0 +1,91 @@ +{ + "sidebar": [ + { + "type": "doc", + "id": "overview/index", + "label": "Overview" + }, + { + "type": "category", + "label": "Concepts", + "items": [ + "concepts/control-planes", + "concepts/deployment-modes", + "concepts/groups" + ] + }, + { + "type": "category", + "label": "How-To Guides", + "items": [ + "howtos/auto-upgrade", + "howtos/backup-and-restore", + "howtos/ctp-connector", + "howtos/debugging-a-ctp", + "howtos/managed-service", + "howtos/mcp-connector-guide", + "howtos/migrating-to-mcps", + "howtos/observability", + "howtos/query-api", + "howtos/secrets-management", + { + "type": "category", + "label": "Automation and GitOps", + "items": [ + "howtos/automation-and-gitops/overview" + ] + }, + { + "type": "category", + "label": "Cloud Spaces", + "items": [ + "howtos/cloud-spaces/dedicated-spaces-deployment", + "howtos/cloud-spaces/gitops-on-upbound" + ] + }, + { + "type": "category", + "label": "Self-Hosted", + "items": [ + "howtos/self-hosted/administer-features", + "howtos/self-hosted/attach-detach", + "howtos/self-hosted/billing", + "howtos/self-hosted/capacity-licensing", + "howtos/self-hosted/certs", + "howtos/self-hosted/configure-ha", + "howtos/self-hosted/controllers", + "howtos/self-hosted/declarative-ctps", + "howtos/self-hosted/deployment-reqs", + "howtos/self-hosted/dr", + "howtos/self-hosted/gitops-with-argocd", + "howtos/self-hosted/managed-spaces-deployment", + "howtos/self-hosted/oidc-configuration", + "howtos/self-hosted/proxies-config", + 
"howtos/self-hosted/query-api", + "howtos/self-hosted/scaling-resources", + "howtos/self-hosted/self-hosted-spaces-deployment", + "howtos/self-hosted/space-observability", + "howtos/self-hosted/spaces-management", + "howtos/self-hosted/troubleshooting", + { + "type": "category", + "label": "Workload ID", + "items": [ + "howtos/self-hosted/workload-id/backup-restore-config", + "howtos/self-hosted/workload-id/billing-config", + "howtos/self-hosted/workload-id/eso-config" + ] + } + ] + } + ] + }, + { + "type": "category", + "label": "API Reference", + "items": [ + "reference/index" + ] + } + ] +} diff --git a/spaces_versioned_sidebars/version-v1.11-sidebars.json b/spaces_versioned_sidebars/version-v1.11-sidebars.json new file mode 100644 index 000000000..e74749c75 --- /dev/null +++ b/spaces_versioned_sidebars/version-v1.11-sidebars.json @@ -0,0 +1,91 @@ +{ + "sidebar": [ + { + "type": "doc", + "id": "overview/index", + "label": "Overview" + }, + { + "type": "category", + "label": "Concepts", + "items": [ + "concepts/control-planes", + "concepts/deployment-modes", + "concepts/groups" + ] + }, + { + "type": "category", + "label": "How-To Guides", + "items": [ + "howtos/auto-upgrade", + "howtos/backup-and-restore", + "howtos/ctp-connector", + "howtos/debugging-a-ctp", + "howtos/managed-service", + "howtos/mcp-connector-guide", + "howtos/migrating-to-mcps", + "howtos/observability", + "howtos/query-api", + "howtos/secrets-management", + { + "type": "category", + "label": "Automation and GitOps", + "items": [ + "howtos/automation-and-gitops/overview" + ] + }, + { + "type": "category", + "label": "Cloud Spaces", + "items": [ + "howtos/cloud-spaces/dedicated-spaces-deployment", + "howtos/cloud-spaces/gitops-on-upbound" + ] + }, + { + "type": "category", + "label": "Self-Hosted", + "items": [ + "howtos/self-hosted/administer-features", + "howtos/self-hosted/attach-detach", + "howtos/self-hosted/billing", + "howtos/self-hosted/capacity-licensing", + "howtos/self-hosted/certs", + "howtos/self-hosted/configure-ha", + "howtos/self-hosted/controllers", + "howtos/self-hosted/declarative-ctps", + "howtos/self-hosted/deployment-reqs", + "howtos/self-hosted/dr", + "howtos/self-hosted/gitops-with-argocd", + "howtos/self-hosted/managed-spaces-deployment", + "howtos/self-hosted/oidc-configuration", + "howtos/self-hosted/proxies-config", + "howtos/self-hosted/query-api", + "howtos/self-hosted/scaling-resources", + "howtos/self-hosted/self-hosted-spaces-deployment", + "howtos/self-hosted/space-observability", + "howtos/self-hosted/spaces-management", + "howtos/self-hosted/troubleshooting", + { + "type": "category", + "label": "Workload ID", + "items": [ + "howtos/self-hosted/workload-id/backup-restore-config", + "howtos/self-hosted/workload-id/billing-config", + "howtos/self-hosted/workload-id/eso-config" + ] + } + ] + } + ] + }, + { + "type": "category", + "label": "API Reference", + "items": [ + "reference/index" + ] + } + ] +} diff --git a/spaces_versioned_sidebars/version-v1.12-sidebars.json b/spaces_versioned_sidebars/version-v1.12-sidebars.json new file mode 100644 index 000000000..95bcd1142 --- /dev/null +++ b/spaces_versioned_sidebars/version-v1.12-sidebars.json @@ -0,0 +1,92 @@ +{ + "sidebar": [ + { + "type": "doc", + "id": "overview/index", + "label": "Overview" + }, + { + "type": "category", + "label": "Concepts", + "items": [ + "concepts/control-planes", + "concepts/deployment-modes", + "concepts/groups" + ] + }, + { + "type": "category", + "label": "How-To Guides", + "items": [ + 
"howtos/auto-upgrade", + "howtos/backup-and-restore", + "howtos/control-plane-topologies", + "howtos/ctp-connector", + "howtos/debugging-a-ctp", + "howtos/managed-service", + "howtos/mcp-connector-guide", + "howtos/migrating-to-mcps", + "howtos/observability", + "howtos/query-api", + "howtos/secrets-management", + { + "type": "category", + "label": "Automation and GitOps", + "items": [ + "howtos/automation-and-gitops/overview" + ] + }, + { + "type": "category", + "label": "Cloud Spaces", + "items": [ + "howtos/cloud-spaces/dedicated-spaces-deployment", + "howtos/cloud-spaces/gitops-on-upbound" + ] + }, + { + "type": "category", + "label": "Self-Hosted", + "items": [ + "howtos/self-hosted/administer-features", + "howtos/self-hosted/attach-detach", + "howtos/self-hosted/billing", + "howtos/self-hosted/capacity-licensing", + "howtos/self-hosted/certs", + "howtos/self-hosted/configure-ha", + "howtos/self-hosted/controllers", + "howtos/self-hosted/declarative-ctps", + "howtos/self-hosted/deployment-reqs", + "howtos/self-hosted/dr", + "howtos/self-hosted/gitops-with-argocd", + "howtos/self-hosted/managed-spaces-deployment", + "howtos/self-hosted/oidc-configuration", + "howtos/self-hosted/proxies-config", + "howtos/self-hosted/query-api", + "howtos/self-hosted/scaling-resources", + "howtos/self-hosted/self-hosted-spaces-deployment", + "howtos/self-hosted/space-observability", + "howtos/self-hosted/spaces-management", + "howtos/self-hosted/troubleshooting", + { + "type": "category", + "label": "Workload ID", + "items": [ + "howtos/self-hosted/workload-id/backup-restore-config", + "howtos/self-hosted/workload-id/billing-config", + "howtos/self-hosted/workload-id/eso-config" + ] + } + ] + } + ] + }, + { + "type": "category", + "label": "API Reference", + "items": [ + "reference/index" + ] + } + ] +} diff --git a/spaces_versioned_sidebars/version-v1.13-sidebars.json b/spaces_versioned_sidebars/version-v1.13-sidebars.json new file mode 100644 index 000000000..244102567 --- /dev/null +++ b/spaces_versioned_sidebars/version-v1.13-sidebars.json @@ -0,0 +1,93 @@ +{ + "sidebar": [ + { + "type": "doc", + "id": "overview/index", + "label": "Overview" + }, + { + "type": "category", + "label": "Concepts", + "items": [ + "concepts/control-planes", + "concepts/deployment-modes", + "concepts/groups" + ] + }, + { + "type": "category", + "label": "How-To Guides", + "items": [ + "howtos/auto-upgrade", + "howtos/backup-and-restore", + "howtos/control-plane-topologies", + "howtos/ctp-connector", + "howtos/debugging-a-ctp", + "howtos/managed-service", + "howtos/mcp-connector-guide", + "howtos/migrating-to-mcps", + "howtos/observability", + "howtos/query-api", + "howtos/secrets-management", + "howtos/simulations", + { + "type": "category", + "label": "Automation and GitOps", + "items": [ + "howtos/automation-and-gitops/overview" + ] + }, + { + "type": "category", + "label": "Cloud Spaces", + "items": [ + "howtos/cloud-spaces/dedicated-spaces-deployment", + "howtos/cloud-spaces/gitops-on-upbound" + ] + }, + { + "type": "category", + "label": "Self-Hosted", + "items": [ + "howtos/self-hosted/administer-features", + "howtos/self-hosted/attach-detach", + "howtos/self-hosted/billing", + "howtos/self-hosted/capacity-licensing", + "howtos/self-hosted/certs", + "howtos/self-hosted/configure-ha", + "howtos/self-hosted/controllers", + "howtos/self-hosted/declarative-ctps", + "howtos/self-hosted/deployment-reqs", + "howtos/self-hosted/dr", + "howtos/self-hosted/gitops-with-argocd", + "howtos/self-hosted/managed-spaces-deployment", 
+ "howtos/self-hosted/oidc-configuration", + "howtos/self-hosted/proxies-config", + "howtos/self-hosted/query-api", + "howtos/self-hosted/scaling-resources", + "howtos/self-hosted/self-hosted-spaces-deployment", + "howtos/self-hosted/space-observability", + "howtos/self-hosted/spaces-management", + "howtos/self-hosted/troubleshooting", + { + "type": "category", + "label": "Workload ID", + "items": [ + "howtos/self-hosted/workload-id/backup-restore-config", + "howtos/self-hosted/workload-id/billing-config", + "howtos/self-hosted/workload-id/eso-config" + ] + } + ] + } + ] + }, + { + "type": "category", + "label": "API Reference", + "items": [ + "reference/index" + ] + } + ] +} diff --git a/spaces_versioned_sidebars/version-v1.14-sidebars.json b/spaces_versioned_sidebars/version-v1.14-sidebars.json new file mode 100644 index 000000000..1dbc6cfdd --- /dev/null +++ b/spaces_versioned_sidebars/version-v1.14-sidebars.json @@ -0,0 +1,96 @@ +{ + "sidebar": [ + { + "type": "doc", + "id": "overview/index", + "label": "Overview" + }, + { + "type": "category", + "label": "Concepts", + "items": [ + "concepts/control-planes", + "concepts/deployment-modes", + "concepts/groups" + ] + }, + { + "type": "category", + "label": "How-To Guides", + "items": [ + "howtos/api-connector", + "howtos/auto-upgrade", + "howtos/backup-and-restore", + "howtos/control-plane-topologies", + "howtos/ctp-connector", + "howtos/debugging-a-ctp", + "howtos/managed-service", + "howtos/mcp-connector-guide", + "howtos/migrating-to-mcps", + "howtos/observability", + "howtos/query-api", + "howtos/secrets-management", + "howtos/simulations", + { + "type": "category", + "label": "Automation and GitOps", + "items": [ + "howtos/automation-and-gitops/overview" + ] + }, + { + "type": "category", + "label": "Cloud Spaces", + "items": [ + "howtos/cloud-spaces/dedicated-spaces-deployment", + "howtos/cloud-spaces/gitops-on-upbound" + ] + }, + { + "type": "category", + "label": "Self-Hosted", + "items": [ + "howtos/self-hosted/administer-features", + "howtos/self-hosted/attach-detach", + "howtos/self-hosted/billing", + "howtos/self-hosted/capacity-licensing", + "howtos/self-hosted/certs", + "howtos/self-hosted/configure-ha", + "howtos/self-hosted/controllers", + "howtos/self-hosted/ctp-audit-logs", + "howtos/self-hosted/declarative-ctps", + "howtos/self-hosted/deployment-reqs", + "howtos/self-hosted/dr", + "howtos/self-hosted/gitops-with-argocd", + "howtos/self-hosted/managed-spaces-deployment", + "howtos/self-hosted/oidc-configuration", + "howtos/self-hosted/proxies-config", + "howtos/self-hosted/query-api", + "howtos/self-hosted/scaling-resources", + "howtos/self-hosted/self-hosted-spaces-deployment", + "howtos/self-hosted/space-observability", + "howtos/self-hosted/spaces-management", + "howtos/self-hosted/troubleshooting", + "howtos/self-hosted/use-argo", + { + "type": "category", + "label": "Workload ID", + "items": [ + "howtos/self-hosted/workload-id/backup-restore-config", + "howtos/self-hosted/workload-id/billing-config", + "howtos/self-hosted/workload-id/eso-config" + ] + } + ] + } + ] + }, + { + "type": "category", + "label": "API Reference", + "items": [ + "reference/index" + ] + } + ] +} diff --git a/spaces_versioned_sidebars/version-v1.15-sidebars.json b/spaces_versioned_sidebars/version-v1.15-sidebars.json new file mode 100644 index 000000000..1dbc6cfdd --- /dev/null +++ b/spaces_versioned_sidebars/version-v1.15-sidebars.json @@ -0,0 +1,96 @@ +{ + "sidebar": [ + { + "type": "doc", + "id": "overview/index", + "label": "Overview" + 
}, + { + "type": "category", + "label": "Concepts", + "items": [ + "concepts/control-planes", + "concepts/deployment-modes", + "concepts/groups" + ] + }, + { + "type": "category", + "label": "How-To Guides", + "items": [ + "howtos/api-connector", + "howtos/auto-upgrade", + "howtos/backup-and-restore", + "howtos/control-plane-topologies", + "howtos/ctp-connector", + "howtos/debugging-a-ctp", + "howtos/managed-service", + "howtos/mcp-connector-guide", + "howtos/migrating-to-mcps", + "howtos/observability", + "howtos/query-api", + "howtos/secrets-management", + "howtos/simulations", + { + "type": "category", + "label": "Automation and GitOps", + "items": [ + "howtos/automation-and-gitops/overview" + ] + }, + { + "type": "category", + "label": "Cloud Spaces", + "items": [ + "howtos/cloud-spaces/dedicated-spaces-deployment", + "howtos/cloud-spaces/gitops-on-upbound" + ] + }, + { + "type": "category", + "label": "Self-Hosted", + "items": [ + "howtos/self-hosted/administer-features", + "howtos/self-hosted/attach-detach", + "howtos/self-hosted/billing", + "howtos/self-hosted/capacity-licensing", + "howtos/self-hosted/certs", + "howtos/self-hosted/configure-ha", + "howtos/self-hosted/controllers", + "howtos/self-hosted/ctp-audit-logs", + "howtos/self-hosted/declarative-ctps", + "howtos/self-hosted/deployment-reqs", + "howtos/self-hosted/dr", + "howtos/self-hosted/gitops-with-argocd", + "howtos/self-hosted/managed-spaces-deployment", + "howtos/self-hosted/oidc-configuration", + "howtos/self-hosted/proxies-config", + "howtos/self-hosted/query-api", + "howtos/self-hosted/scaling-resources", + "howtos/self-hosted/self-hosted-spaces-deployment", + "howtos/self-hosted/space-observability", + "howtos/self-hosted/spaces-management", + "howtos/self-hosted/troubleshooting", + "howtos/self-hosted/use-argo", + { + "type": "category", + "label": "Workload ID", + "items": [ + "howtos/self-hosted/workload-id/backup-restore-config", + "howtos/self-hosted/workload-id/billing-config", + "howtos/self-hosted/workload-id/eso-config" + ] + } + ] + } + ] + }, + { + "type": "category", + "label": "API Reference", + "items": [ + "reference/index" + ] + } + ] +} diff --git a/spaces_versioned_sidebars/version-v1.9-sidebars.json b/spaces_versioned_sidebars/version-v1.9-sidebars.json new file mode 100644 index 000000000..e74749c75 --- /dev/null +++ b/spaces_versioned_sidebars/version-v1.9-sidebars.json @@ -0,0 +1,91 @@ +{ + "sidebar": [ + { + "type": "doc", + "id": "overview/index", + "label": "Overview" + }, + { + "type": "category", + "label": "Concepts", + "items": [ + "concepts/control-planes", + "concepts/deployment-modes", + "concepts/groups" + ] + }, + { + "type": "category", + "label": "How-To Guides", + "items": [ + "howtos/auto-upgrade", + "howtos/backup-and-restore", + "howtos/ctp-connector", + "howtos/debugging-a-ctp", + "howtos/managed-service", + "howtos/mcp-connector-guide", + "howtos/migrating-to-mcps", + "howtos/observability", + "howtos/query-api", + "howtos/secrets-management", + { + "type": "category", + "label": "Automation and GitOps", + "items": [ + "howtos/automation-and-gitops/overview" + ] + }, + { + "type": "category", + "label": "Cloud Spaces", + "items": [ + "howtos/cloud-spaces/dedicated-spaces-deployment", + "howtos/cloud-spaces/gitops-on-upbound" + ] + }, + { + "type": "category", + "label": "Self-Hosted", + "items": [ + "howtos/self-hosted/administer-features", + "howtos/self-hosted/attach-detach", + "howtos/self-hosted/billing", + "howtos/self-hosted/capacity-licensing", + 
"howtos/self-hosted/certs", + "howtos/self-hosted/configure-ha", + "howtos/self-hosted/controllers", + "howtos/self-hosted/declarative-ctps", + "howtos/self-hosted/deployment-reqs", + "howtos/self-hosted/dr", + "howtos/self-hosted/gitops-with-argocd", + "howtos/self-hosted/managed-spaces-deployment", + "howtos/self-hosted/oidc-configuration", + "howtos/self-hosted/proxies-config", + "howtos/self-hosted/query-api", + "howtos/self-hosted/scaling-resources", + "howtos/self-hosted/self-hosted-spaces-deployment", + "howtos/self-hosted/space-observability", + "howtos/self-hosted/spaces-management", + "howtos/self-hosted/troubleshooting", + { + "type": "category", + "label": "Workload ID", + "items": [ + "howtos/self-hosted/workload-id/backup-restore-config", + "howtos/self-hosted/workload-id/billing-config", + "howtos/self-hosted/workload-id/eso-config" + ] + } + ] + } + ] + }, + { + "type": "category", + "label": "API Reference", + "items": [ + "reference/index" + ] + } + ] +} diff --git a/spaces_versions.json b/spaces_versions.json new file mode 100644 index 000000000..07dee0e04 --- /dev/null +++ b/spaces_versions.json @@ -0,0 +1 @@ +["v1.15","v1.14", "v1.13", "v1.12", "v1.11", "v1.10", "v1.9"] diff --git a/src/sidebars/spaces.js b/src/sidebars/spaces.js new file mode 100644 index 000000000..4b746709a --- /dev/null +++ b/src/sidebars/spaces.js @@ -0,0 +1,96 @@ +module.exports = { + sidebar: [ + { + type: 'doc', + id: 'overview/index', + label: 'Overview', + }, + { + type: 'category', + label: 'Concepts', + items: [ + 'concepts/control-planes', + 'concepts/deployment-modes', + 'concepts/groups', + ], + }, + { + type: 'category', + label: 'How-To Guides', + items: [ + 'howtos/api-connector', + 'howtos/auto-upgrade', + 'howtos/backup-and-restore', + 'howtos/control-plane-topologies', + 'howtos/ctp-connector', + 'howtos/debugging-a-ctp', + 'howtos/managed-service', + 'howtos/mcp-connector-guide', + 'howtos/migrating-to-mcps', + 'howtos/observability', + 'howtos/query-api', + 'howtos/secrets-management', + 'howtos/simulations', + { + type: 'category', + label: 'Automation and GitOps', + items: [ + 'howtos/automation-and-gitops/overview', + ], + }, + { + type: 'category', + label: 'Cloud Spaces', + items: [ + 'howtos/cloud-spaces/dedicated-spaces-deployment', + 'howtos/cloud-spaces/gitops-on-upbound', + ], + }, + { + type: 'category', + label: 'Self-Hosted', + items: [ + 'howtos/self-hosted/administer-features', + 'howtos/self-hosted/attach-detach', + 'howtos/self-hosted/billing', + 'howtos/self-hosted/capacity-licensing', + 'howtos/self-hosted/certs', + 'howtos/self-hosted/configure-ha', + 'howtos/self-hosted/controllers', + 'howtos/self-hosted/ctp-audit-logs', + 'howtos/self-hosted/declarative-ctps', + 'howtos/self-hosted/deployment-reqs', + 'howtos/self-hosted/dr', + 'howtos/self-hosted/gitops-with-argocd', + 'howtos/self-hosted/managed-spaces-deployment', + 'howtos/self-hosted/oidc-configuration', + 'howtos/self-hosted/proxies-config', + 'howtos/self-hosted/query-api', + 'howtos/self-hosted/scaling-resources', + 'howtos/self-hosted/self-hosted-spaces-deployment', + 'howtos/self-hosted/space-observability', + 'howtos/self-hosted/spaces-management', + 'howtos/self-hosted/troubleshooting', + 'howtos/self-hosted/use-argo', + { + type: 'category', + label: 'Workload ID', + items: [ + 'howtos/self-hosted/workload-id/backup-restore-config', + 'howtos/self-hosted/workload-id/billing-config', + 'howtos/self-hosted/workload-id/eso-config', + ], + }, + ], + }, + ], + }, + { + type: 'category', + label: 
'API Reference', + items: [ + 'reference/index', + ], + }, + ], +}; diff --git a/src/theme/DocItem/Layout/index.js b/src/theme/DocItem/Layout/index.js new file mode 100644 index 000000000..37307ff17 --- /dev/null +++ b/src/theme/DocItem/Layout/index.js @@ -0,0 +1,72 @@ +import React from 'react'; +import Layout from '@theme-original/DocItem/Layout'; +import { useLocation } from '@docusaurus/router'; +import styles from './layout.module.css'; + +export default function LayoutWrapper(props) { + const location = useLocation(); + const isSpacesPage = location.pathname.startsWith('/spaces'); + + const versions = [ + { label: '1.15 (Latest)', value: '' }, + { label: '1.14', value: 'v1.14' }, + { label: '1.13', value: 'v1.13' }, + { label: '1.12', value: 'v1.12' }, + { label: '1.11', value: 'v1.11' }, + { label: '1.10', value: 'v1.10' }, + { label: '1.9', value: 'v1.9' }, + ]; + + const getCurrentVersion = () => { + const pathSegments = location.pathname.split('/').filter(Boolean); + if (pathSegments[0] === 'spaces') { + if (/^v\d+\.\d+$/.test(pathSegments[1])) { + return pathSegments[1]; + } + } + return ''; + }; + + const handleVersionChange = (e) => { + const selectedVersion = e.target.value; + const pathSegments = location.pathname.split('/').filter(Boolean); + let newPath = location.pathname; + + if (pathSegments[0] === 'spaces') { + if (/^v\d+\.\d+$/.test(pathSegments[1])) { + const contentPath = '/' + pathSegments.slice(2).join('/'); + newPath = selectedVersion ? `/spaces/${selectedVersion}${contentPath}` : `/spaces${contentPath}`; + } else { + const contentPath = '/' + pathSegments.slice(1).join('/'); + newPath = selectedVersion ? `/spaces/${selectedVersion}${contentPath}` : `/spaces${contentPath}`; + } + } + + window.location.href = newPath; + }; + + return ( + <> + {isSpacesPage && ( +
+      <div className={styles.versionSelector}>
+        <label className={styles.label}>Version:</label>
+        <select
+          className={styles.select}
+          value={getCurrentVersion()}
+          onChange={handleVersionChange}
+        >
+          {versions.map((version) => (
+            <option key={version.value} value={version.value}>
+              {version.label}
+            </option>
+          ))}
+        </select>
+      </div>
+ )} + + + ); +} diff --git a/src/theme/DocItem/Layout/layout.module.css b/src/theme/DocItem/Layout/layout.module.css new file mode 100644 index 000000000..dae1030f0 --- /dev/null +++ b/src/theme/DocItem/Layout/layout.module.css @@ -0,0 +1,57 @@ +.versionSelector { + background-color: #f8f9fa; + border-bottom: 1px solid #dee2e6; + padding: 12px 20px; + display: flex; + align-items: center; + gap: 10px; + margin-bottom: 16px; +} + +html[data-theme='dark'] .versionSelector { + background-color: #2d2d2d; + border-bottom-color: #444; +} + +.label { + font-weight: 500; + font-size: 14px; + color: #333; + margin: 0; +} + +html[data-theme='dark'] .label { + color: #d0d0d0; +} + +.select { + padding: 6px 12px; + border: 1px solid #ced4da; + border-radius: 4px; + font-size: 14px; + cursor: pointer; + background-color: white; + color: #333; + font-weight: 500; + min-width: 120px; +} + +html[data-theme='dark'] .select { + background-color: #3d3d3d; + color: #e0e0e0; + border-color: #555; +} + +.select:hover { + border-color: #999; +} + +html[data-theme='dark'] .select:hover { + border-color: #888; +} + +.select:focus { + outline: none; + border-color: #80bdff; + box-shadow: 0 0 0 0.2rem rgba(0, 123, 255, 0.25); +} diff --git a/versions.json b/versions.json new file mode 100644 index 000000000..e9cead82b --- /dev/null +++ b/versions.json @@ -0,0 +1 @@ +["1.15","1.14", "1.13", "1.12", "1.11", "1.10", "1.9"] From e3975370a392866cbb89d121be91eb71b436cfb7 Mon Sep 17 00:00:00 2001 From: Rae Sharp Date: Wed, 28 Jan 2026 19:08:48 -0500 Subject: [PATCH 02/11] add links from sidebar and navbar --- docs/manuals/_category_.json | 4 + .../provider-terraform/_category_.json | 4 + .../uxp/{features => }/crossplane-web-ui.md | 0 docusaurus.config.js | 42 +++++---- src/sidebars/main.js | 86 ++++++++++++++++++- 5 files changed, 117 insertions(+), 19 deletions(-) create mode 100644 docs/manuals/_category_.json create mode 100644 docs/manuals/packages/providers/provider-terraform/_category_.json rename docs/manuals/uxp/{features => }/crossplane-web-ui.md (100%) diff --git a/docs/manuals/_category_.json b/docs/manuals/_category_.json new file mode 100644 index 000000000..5533079be --- /dev/null +++ b/docs/manuals/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "Manuals", + "position": 2 +} diff --git a/docs/manuals/packages/providers/provider-terraform/_category_.json b/docs/manuals/packages/providers/provider-terraform/_category_.json new file mode 100644 index 000000000..1dc2168df --- /dev/null +++ b/docs/manuals/packages/providers/provider-terraform/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "Provider Terraform", + "collapsed": true +} diff --git a/docs/manuals/uxp/features/crossplane-web-ui.md b/docs/manuals/uxp/crossplane-web-ui.md similarity index 100% rename from docs/manuals/uxp/features/crossplane-web-ui.md rename to docs/manuals/uxp/crossplane-web-ui.md diff --git a/docusaurus.config.js b/docusaurus.config.js index cb5810427..d89f1a86d 100644 --- a/docusaurus.config.js +++ b/docusaurus.config.js @@ -20,16 +20,16 @@ const config = { }, ], customFields: { - apiUrl: process.env.UPBOUND_API_URL || 'https://api.upbound.io', - baseDomain: process.env.UPBOUND_BASE_DOMAIN || 'upbound.io', - launchDarklyClientId: process.env.LAUNCHDARKLY_CLIENT_ID || '', + apiUrl: process.env.UPBOUND_API_URL || "https://api.upbound.io", + baseDomain: process.env.UPBOUND_BASE_DOMAIN || "upbound.io", + launchDarklyClientId: process.env.LAUNCHDARKLY_CLIENT_ID || "", }, webpack: { jsLoader: (isServer) => ({ - loader: 
require.resolve('esbuild-loader'), + loader: require.resolve("esbuild-loader"), options: { - loader: 'tsx', - target: isServer ? 'node12' : 'es2017', + loader: "tsx", + target: isServer ? "node12" : "es2017", }, }), }, @@ -101,29 +101,36 @@ const config = { "./scripts/plan-plugin.js", function (context, options) { return { - name: 'custom-webpack-config', + name: "custom-webpack-config", configureWebpack(config, isServer) { - const webpack = require('webpack'); - const path = require('path'); + const webpack = require("webpack"); + const path = require("path"); return { plugins: [ new webpack.ProvidePlugin({ - process: 'process/browser.js', - React: 'react', + process: "process/browser.js", + React: "react", }), ], resolve: { fallback: { - process: require.resolve('process/browser.js'), + process: require.resolve("process/browser.js"), }, fullySpecified: false, - extensions: ['.js', '.jsx', '.ts', '.tsx', '.json', '.css'], - mainFiles: ['index'], + extensions: [ + ".js", + ".jsx", + ".ts", + ".tsx", + ".json", + ".css", + ], + mainFiles: ["index"], }, resolveLoader: { modules: [ - path.resolve(__dirname, 'node_modules'), - 'node_modules', + path.resolve(__dirname, "node_modules"), + "node_modules", ], }, }; @@ -197,7 +204,8 @@ const config = { }, { label: "Spaces", - href: "/spaces/", + type: "link", + href: "/spaces/overview/", }, { label: "CLI", diff --git a/src/sidebars/main.js b/src/sidebars/main.js index 4cc3b5de5..25cedcab0 100644 --- a/src/sidebars/main.js +++ b/src/sidebars/main.js @@ -12,9 +12,91 @@ module.exports = { }, ], manualsSidebar: [ + "manuals/index", { - type: "autogenerated", - dirName: "manuals", + type: "category", + label: "Upbound Crossplane", + link: { + type: "doc", + id: "manuals/uxp/overview", + }, + items: [ + { + type: "autogenerated", + dirName: "manuals/uxp", + }, + ], + }, + { + type: "link", + label: "Spaces ↗", + href: "/spaces/latest/overview", + }, + { + type: "category", + label: "CLI", + link: { + type: "doc", + id: "manuals/cli/overview", + }, + items: [ + { + type: "autogenerated", + dirName: "manuals/cli", + }, + ], + }, + { + type: "category", + label: "Console", + items: [ + { + type: "autogenerated", + dirName: "manuals/console", + }, + ], + }, + { + type: "category", + label: "Official Packages", + link: { + type: "doc", + id: "manuals/packages/overview", + }, + items: [ + { + type: "autogenerated", + dirName: "manuals/packages", + }, + ], + }, + { + type: "category", + label: "Marketplace", + link: { + type: "doc", + id: "manuals/marketplace/overview", + }, + items: [ + { + type: "autogenerated", + dirName: "manuals/marketplace", + }, + ], + }, + { + type: "category", + label: "Platform", + link: { + type: "doc", + id: "manuals/platform/overview", + }, + items: [ + { + type: "autogenerated", + dirName: "manuals/platform", + }, + ], }, ], referenceSidebar: [ From 545696c8cc10c2ac9986ff9b69fed60d72cf57d8 Mon Sep 17 00:00:00 2001 From: Rae Sharp Date: Wed, 28 Jan 2026 19:10:27 -0500 Subject: [PATCH 03/11] remove dropdown type --- docusaurus.config.js | 1 - src/sidebars/main.js | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/docusaurus.config.js b/docusaurus.config.js index d89f1a86d..02e82bc6c 100644 --- a/docusaurus.config.js +++ b/docusaurus.config.js @@ -204,7 +204,6 @@ const config = { }, { label: "Spaces", - type: "link", href: "/spaces/overview/", }, { diff --git a/src/sidebars/main.js b/src/sidebars/main.js index 25cedcab0..244b2171d 100644 --- a/src/sidebars/main.js +++ b/src/sidebars/main.js @@ -29,7 +29,7 @@ 
module.exports = { }, { type: "link", - label: "Spaces ↗", + label: "Spaces", href: "/spaces/latest/overview", }, { From 02fcdd45fa833d12a1d205604a92bcd667429ec0 Mon Sep 17 00:00:00 2001 From: Rae Sharp Date: Wed, 28 Jan 2026 19:20:18 -0500 Subject: [PATCH 04/11] update paths --- docusaurus.config.js | 2 +- src/sidebars/main.js | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docusaurus.config.js b/docusaurus.config.js index 02e82bc6c..436a77425 100644 --- a/docusaurus.config.js +++ b/docusaurus.config.js @@ -204,7 +204,7 @@ const config = { }, { label: "Spaces", - href: "/spaces/overview/", + to: "/spaces/overview/", }, { label: "CLI", diff --git a/src/sidebars/main.js b/src/sidebars/main.js index 244b2171d..f7d125f79 100644 --- a/src/sidebars/main.js +++ b/src/sidebars/main.js @@ -30,7 +30,7 @@ module.exports = { { type: "link", label: "Spaces", - href: "/spaces/latest/overview", + href: "/spaces/overview", }, { From 8a7d1d2f6d0129cedbffebb36d6d8ae52da36618 Mon Sep 17 00:00:00 2001 From: Rae Sharp Date: Wed, 28 Jan 2026 21:20:17 -0500 Subject: [PATCH 05/11] update reference links --- docs/reference/apis/_category_.json | 2 +- docs/reference/apis/index.md | 2 +- .../reference/apis/spaces-api/_category_.json | 5 -- docs/reference/apis/spaces-api/index.md | 72 ------------------- .../version-v1.10-sidebars.json | 8 +-- .../version-v1.11-sidebars.json | 8 +-- .../version-v1.12-sidebars.json | 8 +-- .../version-v1.13-sidebars.json | 8 +-- .../version-v1.14-sidebars.json | 8 +-- .../version-v1.15-sidebars.json | 8 +-- .../version-v1.9-sidebars.json | 8 +-- src/sidebars/main.js | 40 ++++++++++- 12 files changed, 61 insertions(+), 116 deletions(-) delete mode 100644 docs/reference/apis/spaces-api/_category_.json delete mode 100644 docs/reference/apis/spaces-api/index.md diff --git a/docs/reference/apis/_category_.json b/docs/reference/apis/_category_.json index defb2571c..3044c56eb 100644 --- a/docs/reference/apis/_category_.json +++ b/docs/reference/apis/_category_.json @@ -1,5 +1,5 @@ { "label": "APIs", "position": 2, - "collapsed": true, + "collapsed": true } diff --git a/docs/reference/apis/index.md b/docs/reference/apis/index.md index 7ca35188a..fb1b9a5e2 100644 --- a/docs/reference/apis/index.md +++ b/docs/reference/apis/index.md @@ -10,7 +10,7 @@ Information on Upbound's APIs: * [Query API][query] * [UXP API][uxp] -[spaces]: /reference/apis/spaces-api/latest/ +[spaces]: /spaces/reference/ [crossplane]: /reference/apis/crossplane-api/ [testing]: /reference/apis/testing-api/ [query]: /reference/apis/query-api/ diff --git a/docs/reference/apis/spaces-api/_category_.json b/docs/reference/apis/spaces-api/_category_.json deleted file mode 100644 index 4a6a139c4..000000000 --- a/docs/reference/apis/spaces-api/_category_.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "label": "Spaces API", - "position": 1, - "collapsed": true -} diff --git a/docs/reference/apis/spaces-api/index.md b/docs/reference/apis/spaces-api/index.md deleted file mode 100644 index 5e68b0768..000000000 --- a/docs/reference/apis/spaces-api/index.md +++ /dev/null @@ -1,72 +0,0 @@ ---- -title: Spaces API Reference -description: Documentation for the Spaces API resources (v1.15 - Latest) -sidebar_position: 1 ---- -import CrdDocViewer from '@site/src/components/CrdViewer'; - - -This page documents the Custom Resource Definitions (CRDs) for the Spaces API.
- - -## Control Planes -### Control Planes - - -## Observability -### Shared Telemetry Configs - - -## `pkg` -### Controller Revisions - - -### Controller Runtime Configs - - -### Controllers - - -### Remote Configuration Revisions - - -### Remote Configurations - - -## Policy -### Shared Upbound Policies - - -## References -### Referenced Objects - - -## Scheduling -### Environments - - -## Secrets -### Shared External Secrets - - -### Shared Secret Stores - - -## Simulations - - -## Spaces Backups -### Backups - - -### Backup Schedules - - -### Shared Backup Configs - - -### Shared Backups - - -### Shared Backup Schedules - diff --git a/spaces_versioned_sidebars/version-v1.10-sidebars.json b/spaces_versioned_sidebars/version-v1.10-sidebars.json index e74749c75..760f672d4 100644 --- a/spaces_versioned_sidebars/version-v1.10-sidebars.json +++ b/spaces_versioned_sidebars/version-v1.10-sidebars.json @@ -81,11 +81,9 @@ ] }, { - "type": "category", - "label": "API Reference", - "items": [ - "reference/index" - ] + "type": "doc", + "id": "reference/index", + "label": "API Reference" } ] } diff --git a/spaces_versioned_sidebars/version-v1.11-sidebars.json b/spaces_versioned_sidebars/version-v1.11-sidebars.json index e74749c75..760f672d4 100644 --- a/spaces_versioned_sidebars/version-v1.11-sidebars.json +++ b/spaces_versioned_sidebars/version-v1.11-sidebars.json @@ -81,11 +81,9 @@ ] }, { - "type": "category", - "label": "API Reference", - "items": [ - "reference/index" - ] + "type": "doc", + "id": "reference/index", + "label": "API Reference" } ] } diff --git a/spaces_versioned_sidebars/version-v1.12-sidebars.json b/spaces_versioned_sidebars/version-v1.12-sidebars.json index 95bcd1142..a5278ed9e 100644 --- a/spaces_versioned_sidebars/version-v1.12-sidebars.json +++ b/spaces_versioned_sidebars/version-v1.12-sidebars.json @@ -82,11 +82,9 @@ ] }, { - "type": "category", - "label": "API Reference", - "items": [ - "reference/index" - ] + "type": "doc", + "id": "reference/index", + "label": "API Reference" } ] } diff --git a/spaces_versioned_sidebars/version-v1.13-sidebars.json b/spaces_versioned_sidebars/version-v1.13-sidebars.json index 244102567..a848d4720 100644 --- a/spaces_versioned_sidebars/version-v1.13-sidebars.json +++ b/spaces_versioned_sidebars/version-v1.13-sidebars.json @@ -83,11 +83,9 @@ ] }, { - "type": "category", - "label": "API Reference", - "items": [ - "reference/index" - ] + "type": "doc", + "id": "reference/index", + "label": "API Reference" } ] } diff --git a/spaces_versioned_sidebars/version-v1.14-sidebars.json b/spaces_versioned_sidebars/version-v1.14-sidebars.json index 1dbc6cfdd..306c1c27d 100644 --- a/spaces_versioned_sidebars/version-v1.14-sidebars.json +++ b/spaces_versioned_sidebars/version-v1.14-sidebars.json @@ -86,11 +86,9 @@ ] }, { - "type": "category", - "label": "API Reference", - "items": [ - "reference/index" - ] + "type": "doc", + "id": "reference/index", + "label": "API Reference" } ] } diff --git a/spaces_versioned_sidebars/version-v1.15-sidebars.json b/spaces_versioned_sidebars/version-v1.15-sidebars.json index 1dbc6cfdd..306c1c27d 100644 --- a/spaces_versioned_sidebars/version-v1.15-sidebars.json +++ b/spaces_versioned_sidebars/version-v1.15-sidebars.json @@ -86,11 +86,9 @@ ] }, { - "type": "category", - "label": "API Reference", - "items": [ - "reference/index" - ] + "type": "doc", + "id": "reference/index", + "label": "API Reference" } ] } diff --git a/spaces_versioned_sidebars/version-v1.9-sidebars.json 
b/spaces_versioned_sidebars/version-v1.9-sidebars.json index e74749c75..760f672d4 100644 --- a/spaces_versioned_sidebars/version-v1.9-sidebars.json +++ b/spaces_versioned_sidebars/version-v1.9-sidebars.json @@ -81,11 +81,9 @@ ] }, { - "type": "category", - "label": "API Reference", - "items": [ - "reference/index" - ] + "type": "doc", + "id": "reference/index", + "label": "API Reference" } ] } diff --git a/src/sidebars/main.js b/src/sidebars/main.js index f7d125f79..2654084a8 100644 --- a/src/sidebars/main.js +++ b/src/sidebars/main.js @@ -100,9 +100,45 @@ module.exports = { }, ], referenceSidebar: [ + "reference/index", { - type: "autogenerated", - dirName: "reference", + type: "category", + label: "APIs", + link: { + type: "doc", + id: "reference/apis/index", + }, + items: [ + { + type: "link", + label: "Spaces API", + href: "/spaces/reference/", + }, + { + type: "autogenerated", + dirName: "reference/apis", + }, + ], + }, + { + type: "category", + label: "Release Notes", + items: [ + { + type: "autogenerated", + dirName: "reference/release-notes", + }, + ], + }, + { + type: "category", + label: "Usage", + items: [ + { + type: "autogenerated", + dirName: "reference/usage", + }, + ], }, ], }; From aa652acb23be1473cf16f1396f7834178b07993d Mon Sep 17 00:00:00 2001 From: Rae Sharp Date: Wed, 28 Jan 2026 21:23:55 -0500 Subject: [PATCH 06/11] reference sidebars --- src/sidebars/main.js | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/sidebars/main.js b/src/sidebars/main.js index 2654084a8..c0320860c 100644 --- a/src/sidebars/main.js +++ b/src/sidebars/main.js @@ -120,6 +120,10 @@ module.exports = { }, ], }, + "reference/cli-reference", + "reference/pricing", + "reference/spaces-helm-reference", + "reference/uxp-helm-reference", { type: "category", label: "Release Notes", From 746a2c698c807ef74fb1081416a0e298ca49b4e5 Mon Sep 17 00:00:00 2001 From: Rae Sharp Date: Wed, 28 Jan 2026 21:27:18 -0500 Subject: [PATCH 07/11] update how-tos for consistency --- spaces_versioned_sidebars/version-v1.10-sidebars.json | 2 +- spaces_versioned_sidebars/version-v1.11-sidebars.json | 2 +- spaces_versioned_sidebars/version-v1.12-sidebars.json | 2 +- spaces_versioned_sidebars/version-v1.13-sidebars.json | 2 +- spaces_versioned_sidebars/version-v1.14-sidebars.json | 2 +- spaces_versioned_sidebars/version-v1.15-sidebars.json | 2 +- spaces_versioned_sidebars/version-v1.9-sidebars.json | 2 +- 7 files changed, 7 insertions(+), 7 deletions(-) diff --git a/spaces_versioned_sidebars/version-v1.10-sidebars.json b/spaces_versioned_sidebars/version-v1.10-sidebars.json index 760f672d4..063f5e7c8 100644 --- a/spaces_versioned_sidebars/version-v1.10-sidebars.json +++ b/spaces_versioned_sidebars/version-v1.10-sidebars.json @@ -16,7 +16,7 @@ }, { "type": "category", - "label": "How-To Guides", + "label": "How-tos", "items": [ "howtos/auto-upgrade", "howtos/backup-and-restore", diff --git a/spaces_versioned_sidebars/version-v1.11-sidebars.json b/spaces_versioned_sidebars/version-v1.11-sidebars.json index 760f672d4..063f5e7c8 100644 --- a/spaces_versioned_sidebars/version-v1.11-sidebars.json +++ b/spaces_versioned_sidebars/version-v1.11-sidebars.json @@ -16,7 +16,7 @@ }, { "type": "category", - "label": "How-To Guides", + "label": "How-tos", "items": [ "howtos/auto-upgrade", "howtos/backup-and-restore", diff --git a/spaces_versioned_sidebars/version-v1.12-sidebars.json b/spaces_versioned_sidebars/version-v1.12-sidebars.json index a5278ed9e..073b7b92f 100644 --- a/spaces_versioned_sidebars/version-v1.12-sidebars.json +++ 
b/spaces_versioned_sidebars/version-v1.12-sidebars.json @@ -16,7 +16,7 @@ }, { "type": "category", - "label": "How-To Guides", + "label": "How-tos", "items": [ "howtos/auto-upgrade", "howtos/backup-and-restore", diff --git a/spaces_versioned_sidebars/version-v1.13-sidebars.json b/spaces_versioned_sidebars/version-v1.13-sidebars.json index a848d4720..36dc8762d 100644 --- a/spaces_versioned_sidebars/version-v1.13-sidebars.json +++ b/spaces_versioned_sidebars/version-v1.13-sidebars.json @@ -16,7 +16,7 @@ }, { "type": "category", - "label": "How-To Guides", + "label": "How-tos", "items": [ "howtos/auto-upgrade", "howtos/backup-and-restore", diff --git a/spaces_versioned_sidebars/version-v1.14-sidebars.json b/spaces_versioned_sidebars/version-v1.14-sidebars.json index 306c1c27d..55f296ea5 100644 --- a/spaces_versioned_sidebars/version-v1.14-sidebars.json +++ b/spaces_versioned_sidebars/version-v1.14-sidebars.json @@ -16,7 +16,7 @@ }, { "type": "category", - "label": "How-To Guides", + "label": "How-tos", "items": [ "howtos/api-connector", "howtos/auto-upgrade", diff --git a/spaces_versioned_sidebars/version-v1.15-sidebars.json b/spaces_versioned_sidebars/version-v1.15-sidebars.json index 306c1c27d..55f296ea5 100644 --- a/spaces_versioned_sidebars/version-v1.15-sidebars.json +++ b/spaces_versioned_sidebars/version-v1.15-sidebars.json @@ -16,7 +16,7 @@ }, { "type": "category", - "label": "How-To Guides", + "label": "How-tos", "items": [ "howtos/api-connector", "howtos/auto-upgrade", diff --git a/spaces_versioned_sidebars/version-v1.9-sidebars.json b/spaces_versioned_sidebars/version-v1.9-sidebars.json index 760f672d4..063f5e7c8 100644 --- a/spaces_versioned_sidebars/version-v1.9-sidebars.json +++ b/spaces_versioned_sidebars/version-v1.9-sidebars.json @@ -16,7 +16,7 @@ }, { "type": "category", - "label": "How-To Guides", + "label": "How-tos", "items": [ "howtos/auto-upgrade", "howtos/backup-and-restore", From 94ff73f21f6a6a173cb5c30b1154b933c186f2ce Mon Sep 17 00:00:00 2001 From: Rae Sharp Date: Wed, 28 Jan 2026 21:29:20 -0500 Subject: [PATCH 08/11] card links --- src/components/ManualsCards.js | 2 +- src/components/ReferenceCards.js | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/components/ManualsCards.js b/src/components/ManualsCards.js index 8cfd3181f..2f5c9f0c7 100644 --- a/src/components/ManualsCards.js +++ b/src/components/ManualsCards.js @@ -10,7 +10,7 @@ const ManualsCards = () => { { title: 'Spaces', description: 'Managed Crossplane control plane environments with cloud and self-hosted deployment options.', - link: '/manuals/spaces/overview' + link: '/spaces/overview/' }, { title: 'CLI', diff --git a/src/components/ReferenceCards.js b/src/components/ReferenceCards.js index 92c2c6886..0056ed15a 100644 --- a/src/components/ReferenceCards.js +++ b/src/components/ReferenceCards.js @@ -15,7 +15,7 @@ const ReferenceCards = () => { { title: 'Spaces Helm Reference', description: 'Helm chart configuration and deployment reference documentation for Upbound Spaces.', - link: '/reference/spaces-helm-reference' + link: '/spaces/reference' }, { title: 'UXP Helm Reference', From 5d06649d14e731b11ca1658b898740a2fb7d5148 Mon Sep 17 00:00:00 2001 From: Rae Sharp Date: Wed, 28 Jan 2026 23:13:18 -0500 Subject: [PATCH 09/11] removing old vers and duplicate version files --- .../concepts/_category_.json | 0 .../concepts/control-planes.md | 0 .../concepts/deployment-modes.md | 0 .../concepts/groups.md | 0 .../howtos/_category_.json | 0 .../howtos/api-connector.md | 0 
.../howtos/auto-upgrade.md | 0 .../automation-and-gitops/_category_.json | 0 .../howtos/automation-and-gitops/overview.md | 0 .../howtos/backup-and-restore.md | 0 .../howtos/cloud-spaces/_category_.json | 0 .../dedicated-spaces-deployment.md | 0 .../howtos/cloud-spaces/gitops-on-upbound.md | 0 .../howtos/control-plane-topologies.md | 0 .../howtos/ctp-connector.md | 0 .../howtos/debugging-a-ctp.md | 0 .../howtos/managed-service.md | 0 .../howtos/mcp-connector-guide.md | 0 .../howtos/migrating-to-mcps.md | 0 .../howtos/observability.md | 0 .../howtos/query-api.md | 0 .../howtos/secrets-management.md | 0 .../howtos/self-hosted/_category_.json | 0 .../howtos/self-hosted/administer-features.md | 0 .../howtos/self-hosted/attach-detach.md | 0 .../howtos/self-hosted/billing.md | 0 .../howtos/self-hosted/capacity-licensing.md | 0 .../howtos/self-hosted/certs.md | 0 .../howtos/self-hosted/configure-ha.md | 0 .../howtos/self-hosted/controllers.md | 0 .../howtos/self-hosted/ctp-audit-logs.md | 0 .../howtos/self-hosted/declarative-ctps.md | 0 .../howtos/self-hosted/deployment-reqs.md | 0 .../howtos/self-hosted/dr.md | 0 .../howtos/self-hosted/gitops-with-argocd.md | 0 .../self-hosted/managed-spaces-deployment.md | 0 .../howtos/self-hosted/oidc-configuration.md | 0 .../howtos/self-hosted/proxies-config.md | 0 .../howtos/self-hosted/query-api.md | 0 .../howtos/self-hosted/scaling-resources.md | 0 .../self-hosted-spaces-deployment.md | 0 .../howtos/self-hosted/space-observability.md | 0 .../howtos/self-hosted/spaces-management.md | 0 .../howtos/self-hosted/troubleshooting.md | 0 .../howtos/self-hosted/use-argo.md | 0 .../self-hosted/workload-id/_category_.json | 0 .../workload-id/backup-restore-config.md | 0 .../self-hosted/workload-id/billing-config.md | 0 .../self-hosted/workload-id/eso-config.md | 0 .../howtos/simulations.md | 0 .../overview/_category_.json | 0 .../overview/index.md | 0 .../reference/_category_.json | 0 .../reference/index.md | 0 .../concepts/_category_.json | 0 .../concepts/control-planes.md | 0 .../concepts/deployment-modes.md | 0 .../concepts/groups.md | 0 .../howtos/_category_.json | 0 .../howtos/api-connector.md | 0 .../howtos/auto-upgrade.md | 0 .../automation-and-gitops/_category_.json | 0 .../howtos/automation-and-gitops/overview.md | 0 .../howtos/backup-and-restore.md | 0 .../howtos/cloud-spaces/_category_.json | 0 .../dedicated-spaces-deployment.md | 0 .../howtos/cloud-spaces/gitops-on-upbound.md | 0 .../howtos/control-plane-topologies.md | 0 .../howtos/ctp-connector.md | 0 .../howtos/debugging-a-ctp.md | 0 .../howtos/managed-service.md | 0 .../howtos/mcp-connector-guide.md | 0 .../howtos/migrating-to-mcps.md | 0 .../howtos/observability.md | 0 .../howtos/query-api.md | 0 .../howtos/secrets-management.md | 0 .../howtos/self-hosted/_category_.json | 0 .../howtos/self-hosted/administer-features.md | 0 .../howtos/self-hosted/attach-detach.md | 0 .../howtos/self-hosted/billing.md | 0 .../howtos/self-hosted/capacity-licensing.md | 0 .../howtos/self-hosted/certs.md | 0 .../howtos/self-hosted/configure-ha.md | 0 .../howtos/self-hosted/controllers.md | 0 .../howtos/self-hosted/ctp-audit-logs.md | 0 .../howtos/self-hosted/declarative-ctps.md | 0 .../howtos/self-hosted/deployment-reqs.md | 0 .../howtos/self-hosted/dr.md | 0 .../howtos/self-hosted/gitops-with-argocd.md | 0 .../self-hosted/managed-spaces-deployment.md | 0 .../howtos/self-hosted/oidc-configuration.md | 0 .../howtos/self-hosted/proxies-config.md | 0 .../howtos/self-hosted/query-api.md | 0 
.../howtos/self-hosted/scaling-resources.md | 0 .../self-hosted-spaces-deployment.md | 0 .../howtos/self-hosted/space-observability.md | 0 .../howtos/self-hosted/spaces-management.md | 0 .../howtos/self-hosted/troubleshooting.md | 0 .../howtos/self-hosted/use-argo.md | 0 .../self-hosted/workload-id/_category_.json | 0 .../workload-id/backup-restore-config.md | 0 .../self-hosted/workload-id/billing-config.md | 0 .../self-hosted/workload-id/eso-config.md | 0 .../howtos/simulations.md | 0 .../overview/_category_.json | 0 .../overview/index.md | 0 .../reference/_category_.json | 0 .../reference/index.md | 0 .../concepts/_category_.json | 0 .../concepts/control-planes.md | 0 .../concepts/deployment-modes.md | 0 .../concepts/groups.md | 0 .../howtos/_category_.json | 0 .../howtos/api-connector.md | 0 .../howtos/auto-upgrade.md | 0 .../automation-and-gitops/_category_.json | 0 .../howtos/automation-and-gitops/overview.md | 0 .../howtos/backup-and-restore.md | 0 .../howtos/cloud-spaces/_category_.json | 0 .../dedicated-spaces-deployment.md | 0 .../howtos/cloud-spaces/gitops-on-upbound.md | 0 .../howtos/control-plane-topologies.md | 0 .../howtos/ctp-connector.md | 0 .../howtos/debugging-a-ctp.md | 0 .../howtos/managed-service.md | 0 .../howtos/mcp-connector-guide.md | 0 .../howtos/migrating-to-mcps.md | 0 .../howtos/observability.md | 0 .../howtos/query-api.md | 0 .../howtos/secrets-management.md | 0 .../howtos/self-hosted/_category_.json | 0 .../howtos/self-hosted/administer-features.md | 0 .../howtos/self-hosted/attach-detach.md | 0 .../howtos/self-hosted/billing.md | 0 .../howtos/self-hosted/capacity-licensing.md | 0 .../howtos/self-hosted/certs.md | 0 .../howtos/self-hosted/configure-ha.md | 0 .../howtos/self-hosted/controllers.md | 0 .../howtos/self-hosted/ctp-audit-logs.md | 0 .../howtos/self-hosted/declarative-ctps.md | 0 .../howtos/self-hosted/deployment-reqs.md | 0 .../howtos/self-hosted/dr.md | 0 .../howtos/self-hosted/gitops-with-argocd.md | 0 .../self-hosted/managed-spaces-deployment.md | 0 .../howtos/self-hosted/oidc-configuration.md | 0 .../howtos/self-hosted/proxies-config.md | 0 .../howtos/self-hosted/query-api.md | 0 .../howtos/self-hosted/scaling-resources.md | 0 .../self-hosted-spaces-deployment.md | 0 .../howtos/self-hosted/space-observability.md | 0 .../howtos/self-hosted/spaces-management.md | 0 .../howtos/self-hosted/troubleshooting.md | 0 .../howtos/self-hosted/use-argo.md | 0 .../self-hosted/workload-id/_category_.json | 0 .../workload-id/backup-restore-config.md | 0 .../self-hosted/workload-id/billing-config.md | 0 .../self-hosted/workload-id/eso-config.md | 0 .../howtos/simulations.md | 0 .../overview/_category_.json | 0 .../overview/index.md | 0 .../reference/_category_.json | 0 .../reference/index.md | 0 .../howtos/self-hosted/use-argo.md | 228 - .../version-v1.12/overview/index.md | 14 - .../version-v1.13/concepts/_category_.json | 7 - .../version-v1.13/concepts/control-planes.md | 227 - .../concepts/deployment-modes.md | 53 - .../version-v1.13/concepts/groups.md | 115 - .../version-v1.13/howtos/_category_.json | 7 - .../version-v1.13/howtos/api-connector.md | 413 -- .../version-v1.13/howtos/auto-upgrade.md | 131 - .../automation-and-gitops/_category_.json | 8 - .../howtos/automation-and-gitops/overview.md | 138 - .../howtos/backup-and-restore.md | 530 -- .../howtos/cloud-spaces/_category_.json | 10 - .../dedicated-spaces-deployment.md | 33 - .../howtos/cloud-spaces/gitops-on-upbound.md | 318 -- .../howtos/control-plane-topologies.md | 566 --- 
.../version-v1.13/howtos/ctp-connector.md | 508 -- .../version-v1.13/howtos/debugging-a-ctp.md | 128 - .../version-v1.13/howtos/managed-service.md | 23 - .../howtos/mcp-connector-guide.md | 169 - .../version-v1.13/howtos/migrating-to-mcps.md | 439 -- .../version-v1.13/howtos/observability.md | 395 -- .../version-v1.13/howtos/query-api.md | 320 -- .../howtos/secrets-management.md | 719 --- .../howtos/self-hosted/_category_.json | 11 - .../howtos/self-hosted/administer-features.md | 121 - .../howtos/self-hosted/attach-detach.md | 198 - .../howtos/self-hosted/billing.md | 307 -- .../howtos/self-hosted/capacity-licensing.md | 591 --- .../version-v1.13/howtos/self-hosted/certs.md | 274 -- .../howtos/self-hosted/configure-ha.md | 450 -- .../howtos/self-hosted/controllers.md | 389 -- .../howtos/self-hosted/ctp-audit-logs.md | 549 --- .../howtos/self-hosted/declarative-ctps.md | 110 - .../howtos/self-hosted/deployment-reqs.md | 353 -- .../version-v1.13/howtos/self-hosted/dr.md | 412 -- .../howtos/self-hosted/gitops-with-argocd.md | 142 - .../self-hosted/managed-spaces-deployment.md | 266 - .../howtos/self-hosted/oidc-configuration.md | 289 -- .../howtos/self-hosted/proxies-config.md | 31 - .../howtos/self-hosted/query-api.md | 396 -- .../howtos/self-hosted/scaling-resources.md | 184 - .../self-hosted-spaces-deployment.md | 461 -- .../howtos/self-hosted/space-observability.md | 313 -- .../howtos/self-hosted/spaces-management.md | 219 - .../howtos/self-hosted/troubleshooting.md | 132 - .../howtos/self-hosted/use-argo.md | 228 - .../self-hosted/workload-id/_category_.json | 11 - .../workload-id/backup-restore-config.md | 384 -- .../self-hosted/workload-id/billing-config.md | 454 -- .../self-hosted/workload-id/eso-config.md | 503 -- .../version-v1.13/howtos/simulations.md | 110 - .../version-v1.13/overview/_category_.json | 4 - .../version-v1.13/overview/index.md | 14 - .../version-v1.13/reference/_category_.json | 5 - .../version-v1.13/reference/index.md | 72 - .../version-v1.14/concepts/_category_.json | 7 - .../version-v1.14/concepts/control-planes.md | 227 - .../concepts/deployment-modes.md | 53 - .../version-v1.14/concepts/groups.md | 115 - .../version-v1.14/howtos/_category_.json | 7 - .../version-v1.14/howtos/api-connector.md | 413 -- .../version-v1.14/howtos/auto-upgrade.md | 131 - .../automation-and-gitops/_category_.json | 8 - .../howtos/automation-and-gitops/overview.md | 138 - .../howtos/backup-and-restore.md | 530 -- .../howtos/cloud-spaces/_category_.json | 10 - .../dedicated-spaces-deployment.md | 33 - .../howtos/cloud-spaces/gitops-on-upbound.md | 318 -- .../howtos/control-plane-topologies.md | 566 --- .../version-v1.14/howtos/ctp-connector.md | 508 -- .../version-v1.14/howtos/debugging-a-ctp.md | 128 - .../version-v1.14/howtos/managed-service.md | 23 - .../howtos/mcp-connector-guide.md | 169 - .../version-v1.14/howtos/migrating-to-mcps.md | 439 -- .../version-v1.14/howtos/observability.md | 395 -- .../version-v1.14/howtos/query-api.md | 320 -- .../howtos/secrets-management.md | 719 --- .../howtos/self-hosted/_category_.json | 11 - .../howtos/self-hosted/administer-features.md | 121 - .../howtos/self-hosted/attach-detach.md | 198 - .../howtos/self-hosted/billing.md | 307 -- .../howtos/self-hosted/capacity-licensing.md | 591 --- .../version-v1.14/howtos/self-hosted/certs.md | 274 -- .../howtos/self-hosted/configure-ha.md | 450 -- .../howtos/self-hosted/controllers.md | 389 -- .../howtos/self-hosted/ctp-audit-logs.md | 549 --- .../howtos/self-hosted/declarative-ctps.md | 110 - 
.../howtos/self-hosted/deployment-reqs.md | 353 -- .../version-v1.14/howtos/self-hosted/dr.md | 412 -- .../howtos/self-hosted/gitops-with-argocd.md | 142 - .../self-hosted/managed-spaces-deployment.md | 266 - .../howtos/self-hosted/oidc-configuration.md | 289 -- .../howtos/self-hosted/proxies-config.md | 31 - .../howtos/self-hosted/query-api.md | 396 -- .../howtos/self-hosted/scaling-resources.md | 184 - .../self-hosted-spaces-deployment.md | 461 -- .../howtos/self-hosted/space-observability.md | 313 -- .../howtos/self-hosted/spaces-management.md | 219 - .../howtos/self-hosted/troubleshooting.md | 132 - .../howtos/self-hosted/use-argo.md | 228 - .../self-hosted/workload-id/_category_.json | 11 - .../workload-id/backup-restore-config.md | 384 -- .../self-hosted/workload-id/billing-config.md | 454 -- .../self-hosted/workload-id/eso-config.md | 503 -- .../version-v1.14/howtos/simulations.md | 110 - .../version-v1.14/overview/_category_.json | 4 - .../version-v1.14/overview/index.md | 14 - .../version-v1.14/reference/_category_.json | 5 - .../version-v1.14/reference/index.md | 72 - .../version-v1.15/concepts/_category_.json | 7 - .../version-v1.15/concepts/control-planes.md | 227 - .../concepts/deployment-modes.md | 53 - .../version-v1.15/concepts/groups.md | 115 - .../version-v1.15/howtos/_category_.json | 7 - .../version-v1.15/howtos/api-connector.md | 413 -- .../version-v1.15/howtos/auto-upgrade.md | 131 - .../automation-and-gitops/_category_.json | 8 - .../howtos/automation-and-gitops/overview.md | 138 - .../howtos/backup-and-restore.md | 530 -- .../howtos/cloud-spaces/_category_.json | 10 - .../dedicated-spaces-deployment.md | 33 - .../howtos/cloud-spaces/gitops-on-upbound.md | 318 -- .../howtos/control-plane-topologies.md | 566 --- .../version-v1.15/howtos/ctp-connector.md | 508 -- .../version-v1.15/howtos/debugging-a-ctp.md | 128 - .../version-v1.15/howtos/managed-service.md | 23 - .../howtos/mcp-connector-guide.md | 169 - .../version-v1.15/howtos/migrating-to-mcps.md | 439 -- .../version-v1.15/howtos/observability.md | 395 -- .../version-v1.15/howtos/query-api.md | 320 -- .../howtos/secrets-management.md | 719 --- .../howtos/self-hosted/_category_.json | 11 - .../howtos/self-hosted/administer-features.md | 121 - .../howtos/self-hosted/attach-detach.md | 198 - .../howtos/self-hosted/billing.md | 307 -- .../howtos/self-hosted/capacity-licensing.md | 591 --- .../version-v1.15/howtos/self-hosted/certs.md | 274 -- .../howtos/self-hosted/configure-ha.md | 450 -- .../howtos/self-hosted/controllers.md | 389 -- .../howtos/self-hosted/ctp-audit-logs.md | 549 --- .../howtos/self-hosted/declarative-ctps.md | 110 - .../howtos/self-hosted/deployment-reqs.md | 353 -- .../version-v1.15/howtos/self-hosted/dr.md | 412 -- .../howtos/self-hosted/gitops-with-argocd.md | 142 - .../self-hosted/managed-spaces-deployment.md | 266 - .../howtos/self-hosted/oidc-configuration.md | 289 -- .../howtos/self-hosted/proxies-config.md | 31 - .../howtos/self-hosted/query-api.md | 396 -- .../howtos/self-hosted/scaling-resources.md | 184 - .../self-hosted-spaces-deployment.md | 461 -- .../howtos/self-hosted/space-observability.md | 313 -- .../howtos/self-hosted/spaces-management.md | 219 - .../howtos/self-hosted/troubleshooting.md | 132 - .../self-hosted/workload-id/_category_.json | 11 - .../workload-id/backup-restore-config.md | 384 -- .../self-hosted/workload-id/billing-config.md | 454 -- .../self-hosted/workload-id/eso-config.md | 503 -- .../version-v1.15/howtos/simulations.md | 110 - 
.../version-v1.15/overview/_category_.json | 4 - .../version-v1.15/reference/_category_.json | 5 - .../version-v1.15/reference/index.md | 72 - .../version-v1.9/concepts/_category_.json | 7 - .../version-v1.9/concepts/control-planes.md | 227 - .../version-v1.9/concepts/deployment-modes.md | 53 - .../version-v1.9/concepts/groups.md | 115 - .../version-v1.9/howtos/_category_.json | 7 - .../version-v1.9/howtos/api-connector.md | 413 -- .../version-v1.9/howtos/auto-upgrade.md | 131 - .../automation-and-gitops/_category_.json | 8 - .../howtos/automation-and-gitops/overview.md | 138 - .../version-v1.9/howtos/backup-and-restore.md | 530 -- .../howtos/cloud-spaces/_category_.json | 10 - .../dedicated-spaces-deployment.md | 33 - .../howtos/cloud-spaces/gitops-on-upbound.md | 318 -- .../howtos/control-plane-topologies.md | 566 --- .../version-v1.9/howtos/ctp-connector.md | 508 -- .../version-v1.9/howtos/debugging-a-ctp.md | 128 - .../version-v1.9/howtos/managed-service.md | 23 - .../howtos/mcp-connector-guide.md | 169 - .../version-v1.9/howtos/migrating-to-mcps.md | 439 -- .../version-v1.9/howtos/observability.md | 395 -- .../version-v1.9/howtos/query-api.md | 320 -- .../version-v1.9/howtos/secrets-management.md | 719 --- .../howtos/self-hosted/_category_.json | 11 - .../howtos/self-hosted/administer-features.md | 121 - .../howtos/self-hosted/attach-detach.md | 198 - .../howtos/self-hosted/billing.md | 307 -- .../howtos/self-hosted/capacity-licensing.md | 591 --- .../version-v1.9/howtos/self-hosted/certs.md | 274 -- .../howtos/self-hosted/configure-ha.md | 450 -- .../howtos/self-hosted/controllers.md | 389 -- .../howtos/self-hosted/ctp-audit-logs.md | 549 --- .../howtos/self-hosted/declarative-ctps.md | 110 - .../howtos/self-hosted/deployment-reqs.md | 353 -- .../version-v1.9/howtos/self-hosted/dr.md | 412 -- .../howtos/self-hosted/gitops-with-argocd.md | 142 - .../self-hosted/managed-spaces-deployment.md | 266 - .../howtos/self-hosted/oidc-configuration.md | 289 -- .../howtos/self-hosted/proxies-config.md | 31 - .../howtos/self-hosted/query-api.md | 396 -- .../howtos/self-hosted/scaling-resources.md | 184 - .../self-hosted-spaces-deployment.md | 461 -- .../howtos/self-hosted/space-observability.md | 313 -- .../howtos/self-hosted/spaces-management.md | 219 - .../howtos/self-hosted/troubleshooting.md | 132 - .../howtos/self-hosted/use-argo.md | 228 - .../self-hosted/workload-id/_category_.json | 11 - .../workload-id/backup-restore-config.md | 384 -- .../self-hosted/workload-id/billing-config.md | 454 -- .../self-hosted/workload-id/eso-config.md | 503 -- .../version-v1.9/howtos/simulations.md | 110 - .../version-v1.9/overview/_category_.json | 4 - .../version-v1.9/overview/index.md | 14 - .../version-v1.9/reference/_category_.json | 5 - .../version-v1.9/reference/index.md | 72 - ...debars.json => version-1.13-sidebars.json} | 0 ...debars.json => version-1.14-sidebars.json} | 0 ...debars.json => version-1.15-sidebars.json} | 0 .../version-v1.10-sidebars.json | 89 - .../version-v1.11-sidebars.json | 89 - .../version-v1.12-sidebars.json | 90 - .../version-v1.9-sidebars.json | 89 - spaces_versions.json | 2 +- src/sidebars/spaces.js | 2 +- src/theme/DocItem/Layout/index.js | 21 +- src/theme/DocItem/Layout/layout.module.css | 51 +- ....spaces.upbound.io_spacebackupconfigs.yaml | 177 - .../admin.spaces.upbound.io_spacebackups.yaml | 787 --- ...paces.upbound.io_spacebackupschedules.yaml | 789 --- ....spaces.upbound.io_objectrolebindings.yaml | 153 - static/crds/space/v1.10/embed.go | 11 - 
...ces.upbound.io_sharedtelemetryconfigs.yaml | 360 -- ...aces.upbound.io_sharedupboundpolicies.yaml | 4303 ----------------- .../v1.10/spaces.upbound.io_backups.yaml | 200 - .../spaces.upbound.io_backupschedules.yaml | 213 - .../spaces.upbound.io_controlplanes.yaml | 279 -- ...es.upbound.io_incontrolplaneoverrides.yaml | 256 - ...spaces.upbound.io_sharedbackupconfigs.yaml | 143 - .../spaces.upbound.io_sharedbackups.yaml | 291 -- ...aces.upbound.io_sharedbackupschedules.yaml | 273 -- ...aces.upbound.io_sharedexternalsecrets.yaml | 745 --- .../spaces.upbound.io_sharedsecretstores.yaml | 2702 ----------- .../v1.10/spaces.upbound.io_simulations.yaml | 243 - ....spaces.upbound.io_spacebackupconfigs.yaml | 177 - .../admin.spaces.upbound.io_spacebackups.yaml | 787 --- ...paces.upbound.io_spacebackupschedules.yaml | 789 --- ....spaces.upbound.io_objectrolebindings.yaml | 153 - ...ces.upbound.io_sharedtelemetryconfigs.yaml | 401 -- ...aces.upbound.io_sharedupboundpolicies.yaml | 4303 ----------------- .../v1.11/spaces.upbound.io_backups.yaml | 200 - .../spaces.upbound.io_backupschedules.yaml | 213 - .../spaces.upbound.io_controlplanes.yaml | 279 -- ...es.upbound.io_incontrolplaneoverrides.yaml | 256 - ...spaces.upbound.io_sharedbackupconfigs.yaml | 143 - .../spaces.upbound.io_sharedbackups.yaml | 291 -- ...aces.upbound.io_sharedbackupschedules.yaml | 273 -- ...aces.upbound.io_sharedexternalsecrets.yaml | 745 --- .../spaces.upbound.io_sharedsecretstores.yaml | 2702 ----------- .../v1.11/spaces.upbound.io_simulations.yaml | 243 - static/crds/space/v1.12/index.md | 83 - ...ces.upbound.io_sharedtelemetryconfigs.yaml | 401 -- .../pkg.upbound.io_controllerrevisions.yaml | 331 -- ...g.upbound.io_controllerruntimeconfigs.yaml | 65 - .../v1.12/pkg.upbound.io_controllers.yaml | 224 - ...bound.io_remoteconfigurationrevisions.yaml | 281 -- .../pkg.upbound.io_remoteconfigurations.yaml | 205 - ...aces.upbound.io_sharedupboundpolicies.yaml | 4303 ----------------- ...ferences.upbound.io_referencedobjects.yaml | 303 -- .../scheduling.upbound.io_environments.yaml | 244 - .../v1.12/spaces.upbound.io_backups.yaml | 382 -- .../spaces.upbound.io_backupschedules.yaml | 408 -- .../spaces.upbound.io_controlplanes.yaml | 312 -- ...es.upbound.io_incontrolplaneoverrides.yaml | 256 - ...spaces.upbound.io_sharedbackupconfigs.yaml | 268 - .../spaces.upbound.io_sharedbackups.yaml | 564 --- ...aces.upbound.io_sharedbackupschedules.yaml | 528 -- ...aces.upbound.io_sharedexternalsecrets.yaml | 745 --- .../spaces.upbound.io_sharedsecretstores.yaml | 2702 ----------- .../v1.12/spaces.upbound.io_simulations.yaml | 243 - ....spaces.upbound.io_spacebackupconfigs.yaml | 177 - .../admin.spaces.upbound.io_spacebackups.yaml | 787 --- ...paces.upbound.io_spacebackupschedules.yaml | 789 --- ....spaces.upbound.io_objectrolebindings.yaml | 153 - static/crds/space/v1.9/embed.go | 11 - ...ces.upbound.io_sharedtelemetryconfigs.yaml | 329 -- ...aces.upbound.io_sharedupboundpolicies.yaml | 4303 ----------------- .../space/v1.9/spaces.upbound.io_backups.yaml | 200 - .../spaces.upbound.io_backupschedules.yaml | 213 - .../v1.9/spaces.upbound.io_controlplanes.yaml | 276 -- ...es.upbound.io_incontrolplaneoverrides.yaml | 256 - ...spaces.upbound.io_sharedbackupconfigs.yaml | 143 - .../v1.9/spaces.upbound.io_sharedbackups.yaml | 291 -- ...aces.upbound.io_sharedbackupschedules.yaml | 273 -- ...aces.upbound.io_sharedexternalsecrets.yaml | 745 --- .../spaces.upbound.io_sharedsecretstores.yaml | 2702 ----------- .../v1.9/spaces.upbound.io_simulations.yaml | 
243 - versions.json | 1 - 460 files changed, 25 insertions(+), 101988 deletions(-) rename spaces_versioned_docs/{version-v1.10 => version-1.13}/concepts/_category_.json (100%) rename spaces_versioned_docs/{version-v1.10 => version-1.13}/concepts/control-planes.md (100%) rename spaces_versioned_docs/{version-v1.10 => version-1.13}/concepts/deployment-modes.md (100%) rename spaces_versioned_docs/{version-v1.10 => version-1.13}/concepts/groups.md (100%) rename spaces_versioned_docs/{version-v1.10 => version-1.13}/howtos/_category_.json (100%) rename spaces_versioned_docs/{version-v1.10 => version-1.13}/howtos/api-connector.md (100%) rename spaces_versioned_docs/{version-v1.10 => version-1.13}/howtos/auto-upgrade.md (100%) rename spaces_versioned_docs/{version-v1.10 => version-1.13}/howtos/automation-and-gitops/_category_.json (100%) rename spaces_versioned_docs/{version-v1.10 => version-1.13}/howtos/automation-and-gitops/overview.md (100%) rename spaces_versioned_docs/{version-v1.10 => version-1.13}/howtos/backup-and-restore.md (100%) rename spaces_versioned_docs/{version-v1.10 => version-1.13}/howtos/cloud-spaces/_category_.json (100%) rename spaces_versioned_docs/{version-v1.10 => version-1.13}/howtos/cloud-spaces/dedicated-spaces-deployment.md (100%) rename spaces_versioned_docs/{version-v1.10 => version-1.13}/howtos/cloud-spaces/gitops-on-upbound.md (100%) rename spaces_versioned_docs/{version-v1.10 => version-1.13}/howtos/control-plane-topologies.md (100%) rename spaces_versioned_docs/{version-v1.10 => version-1.13}/howtos/ctp-connector.md (100%) rename spaces_versioned_docs/{version-v1.10 => version-1.13}/howtos/debugging-a-ctp.md (100%) rename spaces_versioned_docs/{version-v1.10 => version-1.13}/howtos/managed-service.md (100%) rename spaces_versioned_docs/{version-v1.10 => version-1.13}/howtos/mcp-connector-guide.md (100%) rename spaces_versioned_docs/{version-v1.10 => version-1.13}/howtos/migrating-to-mcps.md (100%) rename spaces_versioned_docs/{version-v1.10 => version-1.13}/howtos/observability.md (100%) rename spaces_versioned_docs/{version-v1.10 => version-1.13}/howtos/query-api.md (100%) rename spaces_versioned_docs/{version-v1.10 => version-1.13}/howtos/secrets-management.md (100%) rename spaces_versioned_docs/{version-v1.10 => version-1.13}/howtos/self-hosted/_category_.json (100%) rename spaces_versioned_docs/{version-v1.10 => version-1.13}/howtos/self-hosted/administer-features.md (100%) rename spaces_versioned_docs/{version-v1.10 => version-1.13}/howtos/self-hosted/attach-detach.md (100%) rename spaces_versioned_docs/{version-v1.10 => version-1.13}/howtos/self-hosted/billing.md (100%) rename spaces_versioned_docs/{version-v1.10 => version-1.13}/howtos/self-hosted/capacity-licensing.md (100%) rename spaces_versioned_docs/{version-v1.10 => version-1.13}/howtos/self-hosted/certs.md (100%) rename spaces_versioned_docs/{version-v1.10 => version-1.13}/howtos/self-hosted/configure-ha.md (100%) rename spaces_versioned_docs/{version-v1.10 => version-1.13}/howtos/self-hosted/controllers.md (100%) rename spaces_versioned_docs/{version-v1.10 => version-1.13}/howtos/self-hosted/ctp-audit-logs.md (100%) rename spaces_versioned_docs/{version-v1.10 => version-1.13}/howtos/self-hosted/declarative-ctps.md (100%) rename spaces_versioned_docs/{version-v1.10 => version-1.13}/howtos/self-hosted/deployment-reqs.md (100%) rename spaces_versioned_docs/{version-v1.10 => version-1.13}/howtos/self-hosted/dr.md (100%) rename spaces_versioned_docs/{version-v1.10 => 
version-1.13}/howtos/self-hosted/gitops-with-argocd.md (100%) rename spaces_versioned_docs/{version-v1.10 => version-1.13}/howtos/self-hosted/managed-spaces-deployment.md (100%) rename spaces_versioned_docs/{version-v1.10 => version-1.13}/howtos/self-hosted/oidc-configuration.md (100%) rename spaces_versioned_docs/{version-v1.10 => version-1.13}/howtos/self-hosted/proxies-config.md (100%) rename spaces_versioned_docs/{version-v1.10 => version-1.13}/howtos/self-hosted/query-api.md (100%) rename spaces_versioned_docs/{version-v1.10 => version-1.13}/howtos/self-hosted/scaling-resources.md (100%) rename spaces_versioned_docs/{version-v1.10 => version-1.13}/howtos/self-hosted/self-hosted-spaces-deployment.md (100%) rename spaces_versioned_docs/{version-v1.10 => version-1.13}/howtos/self-hosted/space-observability.md (100%) rename spaces_versioned_docs/{version-v1.10 => version-1.13}/howtos/self-hosted/spaces-management.md (100%) rename spaces_versioned_docs/{version-v1.10 => version-1.13}/howtos/self-hosted/troubleshooting.md (100%) rename spaces_versioned_docs/{version-v1.10 => version-1.13}/howtos/self-hosted/use-argo.md (100%) rename spaces_versioned_docs/{version-v1.10 => version-1.13}/howtos/self-hosted/workload-id/_category_.json (100%) rename spaces_versioned_docs/{version-v1.10 => version-1.13}/howtos/self-hosted/workload-id/backup-restore-config.md (100%) rename spaces_versioned_docs/{version-v1.10 => version-1.13}/howtos/self-hosted/workload-id/billing-config.md (100%) rename spaces_versioned_docs/{version-v1.10 => version-1.13}/howtos/self-hosted/workload-id/eso-config.md (100%) rename spaces_versioned_docs/{version-v1.10 => version-1.13}/howtos/simulations.md (100%) rename spaces_versioned_docs/{version-v1.10 => version-1.13}/overview/_category_.json (100%) rename spaces_versioned_docs/{version-v1.10 => version-1.13}/overview/index.md (100%) rename spaces_versioned_docs/{version-v1.10 => version-1.13}/reference/_category_.json (100%) rename spaces_versioned_docs/{version-v1.10 => version-1.13}/reference/index.md (100%) rename spaces_versioned_docs/{version-v1.11 => version-1.14}/concepts/_category_.json (100%) rename spaces_versioned_docs/{version-v1.11 => version-1.14}/concepts/control-planes.md (100%) rename spaces_versioned_docs/{version-v1.11 => version-1.14}/concepts/deployment-modes.md (100%) rename spaces_versioned_docs/{version-v1.11 => version-1.14}/concepts/groups.md (100%) rename spaces_versioned_docs/{version-v1.11 => version-1.14}/howtos/_category_.json (100%) rename spaces_versioned_docs/{version-v1.11 => version-1.14}/howtos/api-connector.md (100%) rename spaces_versioned_docs/{version-v1.11 => version-1.14}/howtos/auto-upgrade.md (100%) rename spaces_versioned_docs/{version-v1.11 => version-1.14}/howtos/automation-and-gitops/_category_.json (100%) rename spaces_versioned_docs/{version-v1.11 => version-1.14}/howtos/automation-and-gitops/overview.md (100%) rename spaces_versioned_docs/{version-v1.11 => version-1.14}/howtos/backup-and-restore.md (100%) rename spaces_versioned_docs/{version-v1.11 => version-1.14}/howtos/cloud-spaces/_category_.json (100%) rename spaces_versioned_docs/{version-v1.11 => version-1.14}/howtos/cloud-spaces/dedicated-spaces-deployment.md (100%) rename spaces_versioned_docs/{version-v1.11 => version-1.14}/howtos/cloud-spaces/gitops-on-upbound.md (100%) rename spaces_versioned_docs/{version-v1.11 => version-1.14}/howtos/control-plane-topologies.md (100%) rename spaces_versioned_docs/{version-v1.11 => version-1.14}/howtos/ctp-connector.md (100%) 
rename spaces_versioned_docs/{version-v1.11 => version-1.14}/howtos/debugging-a-ctp.md (100%) rename spaces_versioned_docs/{version-v1.11 => version-1.14}/howtos/managed-service.md (100%) rename spaces_versioned_docs/{version-v1.11 => version-1.14}/howtos/mcp-connector-guide.md (100%) rename spaces_versioned_docs/{version-v1.11 => version-1.14}/howtos/migrating-to-mcps.md (100%) rename spaces_versioned_docs/{version-v1.11 => version-1.14}/howtos/observability.md (100%) rename spaces_versioned_docs/{version-v1.11 => version-1.14}/howtos/query-api.md (100%) rename spaces_versioned_docs/{version-v1.11 => version-1.14}/howtos/secrets-management.md (100%) rename spaces_versioned_docs/{version-v1.11 => version-1.14}/howtos/self-hosted/_category_.json (100%) rename spaces_versioned_docs/{version-v1.11 => version-1.14}/howtos/self-hosted/administer-features.md (100%) rename spaces_versioned_docs/{version-v1.11 => version-1.14}/howtos/self-hosted/attach-detach.md (100%) rename spaces_versioned_docs/{version-v1.11 => version-1.14}/howtos/self-hosted/billing.md (100%) rename spaces_versioned_docs/{version-v1.11 => version-1.14}/howtos/self-hosted/capacity-licensing.md (100%) rename spaces_versioned_docs/{version-v1.11 => version-1.14}/howtos/self-hosted/certs.md (100%) rename spaces_versioned_docs/{version-v1.11 => version-1.14}/howtos/self-hosted/configure-ha.md (100%) rename spaces_versioned_docs/{version-v1.11 => version-1.14}/howtos/self-hosted/controllers.md (100%) rename spaces_versioned_docs/{version-v1.11 => version-1.14}/howtos/self-hosted/ctp-audit-logs.md (100%) rename spaces_versioned_docs/{version-v1.11 => version-1.14}/howtos/self-hosted/declarative-ctps.md (100%) rename spaces_versioned_docs/{version-v1.11 => version-1.14}/howtos/self-hosted/deployment-reqs.md (100%) rename spaces_versioned_docs/{version-v1.11 => version-1.14}/howtos/self-hosted/dr.md (100%) rename spaces_versioned_docs/{version-v1.11 => version-1.14}/howtos/self-hosted/gitops-with-argocd.md (100%) rename spaces_versioned_docs/{version-v1.11 => version-1.14}/howtos/self-hosted/managed-spaces-deployment.md (100%) rename spaces_versioned_docs/{version-v1.11 => version-1.14}/howtos/self-hosted/oidc-configuration.md (100%) rename spaces_versioned_docs/{version-v1.11 => version-1.14}/howtos/self-hosted/proxies-config.md (100%) rename spaces_versioned_docs/{version-v1.11 => version-1.14}/howtos/self-hosted/query-api.md (100%) rename spaces_versioned_docs/{version-v1.11 => version-1.14}/howtos/self-hosted/scaling-resources.md (100%) rename spaces_versioned_docs/{version-v1.11 => version-1.14}/howtos/self-hosted/self-hosted-spaces-deployment.md (100%) rename spaces_versioned_docs/{version-v1.11 => version-1.14}/howtos/self-hosted/space-observability.md (100%) rename spaces_versioned_docs/{version-v1.11 => version-1.14}/howtos/self-hosted/spaces-management.md (100%) rename spaces_versioned_docs/{version-v1.11 => version-1.14}/howtos/self-hosted/troubleshooting.md (100%) rename spaces_versioned_docs/{version-v1.11 => version-1.14}/howtos/self-hosted/use-argo.md (100%) rename spaces_versioned_docs/{version-v1.11 => version-1.14}/howtos/self-hosted/workload-id/_category_.json (100%) rename spaces_versioned_docs/{version-v1.11 => version-1.14}/howtos/self-hosted/workload-id/backup-restore-config.md (100%) rename spaces_versioned_docs/{version-v1.11 => version-1.14}/howtos/self-hosted/workload-id/billing-config.md (100%) rename spaces_versioned_docs/{version-v1.11 => version-1.14}/howtos/self-hosted/workload-id/eso-config.md (100%) 
rename spaces_versioned_docs/{version-v1.11 => version-1.14}/howtos/simulations.md (100%) rename spaces_versioned_docs/{version-v1.11 => version-1.14}/overview/_category_.json (100%) rename spaces_versioned_docs/{version-v1.11 => version-1.14}/overview/index.md (100%) rename spaces_versioned_docs/{version-v1.11 => version-1.14}/reference/_category_.json (100%) rename spaces_versioned_docs/{version-v1.11 => version-1.14}/reference/index.md (100%) rename spaces_versioned_docs/{version-v1.12 => version-1.15}/concepts/_category_.json (100%) rename spaces_versioned_docs/{version-v1.12 => version-1.15}/concepts/control-planes.md (100%) rename spaces_versioned_docs/{version-v1.12 => version-1.15}/concepts/deployment-modes.md (100%) rename spaces_versioned_docs/{version-v1.12 => version-1.15}/concepts/groups.md (100%) rename spaces_versioned_docs/{version-v1.12 => version-1.15}/howtos/_category_.json (100%) rename spaces_versioned_docs/{version-v1.12 => version-1.15}/howtos/api-connector.md (100%) rename spaces_versioned_docs/{version-v1.12 => version-1.15}/howtos/auto-upgrade.md (100%) rename spaces_versioned_docs/{version-v1.12 => version-1.15}/howtos/automation-and-gitops/_category_.json (100%) rename spaces_versioned_docs/{version-v1.12 => version-1.15}/howtos/automation-and-gitops/overview.md (100%) rename spaces_versioned_docs/{version-v1.12 => version-1.15}/howtos/backup-and-restore.md (100%) rename spaces_versioned_docs/{version-v1.12 => version-1.15}/howtos/cloud-spaces/_category_.json (100%) rename spaces_versioned_docs/{version-v1.12 => version-1.15}/howtos/cloud-spaces/dedicated-spaces-deployment.md (100%) rename spaces_versioned_docs/{version-v1.12 => version-1.15}/howtos/cloud-spaces/gitops-on-upbound.md (100%) rename spaces_versioned_docs/{version-v1.12 => version-1.15}/howtos/control-plane-topologies.md (100%) rename spaces_versioned_docs/{version-v1.12 => version-1.15}/howtos/ctp-connector.md (100%) rename spaces_versioned_docs/{version-v1.12 => version-1.15}/howtos/debugging-a-ctp.md (100%) rename spaces_versioned_docs/{version-v1.12 => version-1.15}/howtos/managed-service.md (100%) rename spaces_versioned_docs/{version-v1.12 => version-1.15}/howtos/mcp-connector-guide.md (100%) rename spaces_versioned_docs/{version-v1.12 => version-1.15}/howtos/migrating-to-mcps.md (100%) rename spaces_versioned_docs/{version-v1.12 => version-1.15}/howtos/observability.md (100%) rename spaces_versioned_docs/{version-v1.12 => version-1.15}/howtos/query-api.md (100%) rename spaces_versioned_docs/{version-v1.12 => version-1.15}/howtos/secrets-management.md (100%) rename spaces_versioned_docs/{version-v1.12 => version-1.15}/howtos/self-hosted/_category_.json (100%) rename spaces_versioned_docs/{version-v1.12 => version-1.15}/howtos/self-hosted/administer-features.md (100%) rename spaces_versioned_docs/{version-v1.12 => version-1.15}/howtos/self-hosted/attach-detach.md (100%) rename spaces_versioned_docs/{version-v1.12 => version-1.15}/howtos/self-hosted/billing.md (100%) rename spaces_versioned_docs/{version-v1.12 => version-1.15}/howtos/self-hosted/capacity-licensing.md (100%) rename spaces_versioned_docs/{version-v1.12 => version-1.15}/howtos/self-hosted/certs.md (100%) rename spaces_versioned_docs/{version-v1.12 => version-1.15}/howtos/self-hosted/configure-ha.md (100%) rename spaces_versioned_docs/{version-v1.12 => version-1.15}/howtos/self-hosted/controllers.md (100%) rename spaces_versioned_docs/{version-v1.12 => version-1.15}/howtos/self-hosted/ctp-audit-logs.md (100%) rename 
spaces_versioned_docs/{version-v1.12 => version-1.15}/howtos/self-hosted/declarative-ctps.md (100%)
 rename spaces_versioned_docs/{version-v1.12 => version-1.15}/howtos/self-hosted/deployment-reqs.md (100%)
 rename spaces_versioned_docs/{version-v1.12 => version-1.15}/howtos/self-hosted/dr.md (100%)
 rename spaces_versioned_docs/{version-v1.12 => version-1.15}/howtos/self-hosted/gitops-with-argocd.md (100%)
 rename spaces_versioned_docs/{version-v1.12 => version-1.15}/howtos/self-hosted/managed-spaces-deployment.md (100%)
 rename spaces_versioned_docs/{version-v1.12 => version-1.15}/howtos/self-hosted/oidc-configuration.md (100%)
 rename spaces_versioned_docs/{version-v1.12 => version-1.15}/howtos/self-hosted/proxies-config.md (100%)
 rename spaces_versioned_docs/{version-v1.12 => version-1.15}/howtos/self-hosted/query-api.md (100%)
 rename spaces_versioned_docs/{version-v1.12 => version-1.15}/howtos/self-hosted/scaling-resources.md (100%)
 rename spaces_versioned_docs/{version-v1.12 => version-1.15}/howtos/self-hosted/self-hosted-spaces-deployment.md (100%)
 rename spaces_versioned_docs/{version-v1.12 => version-1.15}/howtos/self-hosted/space-observability.md (100%)
 rename spaces_versioned_docs/{version-v1.12 => version-1.15}/howtos/self-hosted/spaces-management.md (100%)
 rename spaces_versioned_docs/{version-v1.12 => version-1.15}/howtos/self-hosted/troubleshooting.md (100%)
 rename spaces_versioned_docs/{version-v1.15 => version-1.15}/howtos/self-hosted/use-argo.md (100%)
 rename spaces_versioned_docs/{version-v1.12 => version-1.15}/howtos/self-hosted/workload-id/_category_.json (100%)
 rename spaces_versioned_docs/{version-v1.12 => version-1.15}/howtos/self-hosted/workload-id/backup-restore-config.md (100%)
 rename spaces_versioned_docs/{version-v1.12 => version-1.15}/howtos/self-hosted/workload-id/billing-config.md (100%)
 rename spaces_versioned_docs/{version-v1.12 => version-1.15}/howtos/self-hosted/workload-id/eso-config.md (100%)
 rename spaces_versioned_docs/{version-v1.12 => version-1.15}/howtos/simulations.md (100%)
 rename spaces_versioned_docs/{version-v1.12 => version-1.15}/overview/_category_.json (100%)
 rename spaces_versioned_docs/{version-v1.15 => version-1.15}/overview/index.md (100%)
 rename spaces_versioned_docs/{version-v1.12 => version-1.15}/reference/_category_.json (100%)
 rename spaces_versioned_docs/{version-v1.12 => version-1.15}/reference/index.md (100%)
 delete mode 100644 spaces_versioned_docs/version-v1.12/howtos/self-hosted/use-argo.md
 delete mode 100644 spaces_versioned_docs/version-v1.12/overview/index.md
 delete mode 100644 spaces_versioned_docs/version-v1.13/concepts/_category_.json
 delete mode 100644 spaces_versioned_docs/version-v1.13/concepts/control-planes.md
 delete mode 100644 spaces_versioned_docs/version-v1.13/concepts/deployment-modes.md
 delete mode 100644 spaces_versioned_docs/version-v1.13/concepts/groups.md
 delete mode 100644 spaces_versioned_docs/version-v1.13/howtos/_category_.json
 delete mode 100644 spaces_versioned_docs/version-v1.13/howtos/api-connector.md
 delete mode 100644 spaces_versioned_docs/version-v1.13/howtos/auto-upgrade.md
 delete mode 100644 spaces_versioned_docs/version-v1.13/howtos/automation-and-gitops/_category_.json
 delete mode 100644 spaces_versioned_docs/version-v1.13/howtos/automation-and-gitops/overview.md
 delete mode 100644 spaces_versioned_docs/version-v1.13/howtos/backup-and-restore.md
 delete mode 100644 spaces_versioned_docs/version-v1.13/howtos/cloud-spaces/_category_.json
 delete mode 100644 spaces_versioned_docs/version-v1.13/howtos/cloud-spaces/dedicated-spaces-deployment.md
 delete mode 100644 spaces_versioned_docs/version-v1.13/howtos/cloud-spaces/gitops-on-upbound.md
 delete mode 100644 spaces_versioned_docs/version-v1.13/howtos/control-plane-topologies.md
 delete mode 100644 spaces_versioned_docs/version-v1.13/howtos/ctp-connector.md
 delete mode 100644 spaces_versioned_docs/version-v1.13/howtos/debugging-a-ctp.md
 delete mode 100644 spaces_versioned_docs/version-v1.13/howtos/managed-service.md
 delete mode 100644 spaces_versioned_docs/version-v1.13/howtos/mcp-connector-guide.md
 delete mode 100644 spaces_versioned_docs/version-v1.13/howtos/migrating-to-mcps.md
 delete mode 100644 spaces_versioned_docs/version-v1.13/howtos/observability.md
 delete mode 100644 spaces_versioned_docs/version-v1.13/howtos/query-api.md
 delete mode 100644 spaces_versioned_docs/version-v1.13/howtos/secrets-management.md
 delete mode 100644 spaces_versioned_docs/version-v1.13/howtos/self-hosted/_category_.json
 delete mode 100644 spaces_versioned_docs/version-v1.13/howtos/self-hosted/administer-features.md
 delete mode 100644 spaces_versioned_docs/version-v1.13/howtos/self-hosted/attach-detach.md
 delete mode 100644 spaces_versioned_docs/version-v1.13/howtos/self-hosted/billing.md
 delete mode 100644 spaces_versioned_docs/version-v1.13/howtos/self-hosted/capacity-licensing.md
 delete mode 100644 spaces_versioned_docs/version-v1.13/howtos/self-hosted/certs.md
 delete mode 100644 spaces_versioned_docs/version-v1.13/howtos/self-hosted/configure-ha.md
 delete mode 100644 spaces_versioned_docs/version-v1.13/howtos/self-hosted/controllers.md
 delete mode 100644 spaces_versioned_docs/version-v1.13/howtos/self-hosted/ctp-audit-logs.md
 delete mode 100644 spaces_versioned_docs/version-v1.13/howtos/self-hosted/declarative-ctps.md
 delete mode 100644 spaces_versioned_docs/version-v1.13/howtos/self-hosted/deployment-reqs.md
 delete mode 100644 spaces_versioned_docs/version-v1.13/howtos/self-hosted/dr.md
 delete mode 100644 spaces_versioned_docs/version-v1.13/howtos/self-hosted/gitops-with-argocd.md
 delete mode 100644 spaces_versioned_docs/version-v1.13/howtos/self-hosted/managed-spaces-deployment.md
 delete mode 100644 spaces_versioned_docs/version-v1.13/howtos/self-hosted/oidc-configuration.md
 delete mode 100644 spaces_versioned_docs/version-v1.13/howtos/self-hosted/proxies-config.md
 delete mode 100644 spaces_versioned_docs/version-v1.13/howtos/self-hosted/query-api.md
 delete mode 100644 spaces_versioned_docs/version-v1.13/howtos/self-hosted/scaling-resources.md
 delete mode 100644 spaces_versioned_docs/version-v1.13/howtos/self-hosted/self-hosted-spaces-deployment.md
 delete mode 100644 spaces_versioned_docs/version-v1.13/howtos/self-hosted/space-observability.md
 delete mode 100644 spaces_versioned_docs/version-v1.13/howtos/self-hosted/spaces-management.md
 delete mode 100644 spaces_versioned_docs/version-v1.13/howtos/self-hosted/troubleshooting.md
 delete mode 100644 spaces_versioned_docs/version-v1.13/howtos/self-hosted/use-argo.md
 delete mode 100644 spaces_versioned_docs/version-v1.13/howtos/self-hosted/workload-id/_category_.json
 delete mode 100644 spaces_versioned_docs/version-v1.13/howtos/self-hosted/workload-id/backup-restore-config.md
 delete mode 100644 spaces_versioned_docs/version-v1.13/howtos/self-hosted/workload-id/billing-config.md
 delete mode 100644 spaces_versioned_docs/version-v1.13/howtos/self-hosted/workload-id/eso-config.md
 delete mode 100644 spaces_versioned_docs/version-v1.13/howtos/simulations.md
 delete mode 100644 spaces_versioned_docs/version-v1.13/overview/_category_.json
 delete mode 100644 spaces_versioned_docs/version-v1.13/overview/index.md
 delete mode 100644 spaces_versioned_docs/version-v1.13/reference/_category_.json
 delete mode 100644 spaces_versioned_docs/version-v1.13/reference/index.md
 delete mode 100644 spaces_versioned_docs/version-v1.14/concepts/_category_.json
 delete mode 100644 spaces_versioned_docs/version-v1.14/concepts/control-planes.md
 delete mode 100644 spaces_versioned_docs/version-v1.14/concepts/deployment-modes.md
 delete mode 100644 spaces_versioned_docs/version-v1.14/concepts/groups.md
 delete mode 100644 spaces_versioned_docs/version-v1.14/howtos/_category_.json
 delete mode 100644 spaces_versioned_docs/version-v1.14/howtos/api-connector.md
 delete mode 100644 spaces_versioned_docs/version-v1.14/howtos/auto-upgrade.md
 delete mode 100644 spaces_versioned_docs/version-v1.14/howtos/automation-and-gitops/_category_.json
 delete mode 100644 spaces_versioned_docs/version-v1.14/howtos/automation-and-gitops/overview.md
 delete mode 100644 spaces_versioned_docs/version-v1.14/howtos/backup-and-restore.md
 delete mode 100644 spaces_versioned_docs/version-v1.14/howtos/cloud-spaces/_category_.json
 delete mode 100644 spaces_versioned_docs/version-v1.14/howtos/cloud-spaces/dedicated-spaces-deployment.md
 delete mode 100644 spaces_versioned_docs/version-v1.14/howtos/cloud-spaces/gitops-on-upbound.md
 delete mode 100644 spaces_versioned_docs/version-v1.14/howtos/control-plane-topologies.md
 delete mode 100644 spaces_versioned_docs/version-v1.14/howtos/ctp-connector.md
 delete mode 100644 spaces_versioned_docs/version-v1.14/howtos/debugging-a-ctp.md
 delete mode 100644 spaces_versioned_docs/version-v1.14/howtos/managed-service.md
 delete mode 100644 spaces_versioned_docs/version-v1.14/howtos/mcp-connector-guide.md
 delete mode 100644 spaces_versioned_docs/version-v1.14/howtos/migrating-to-mcps.md
 delete mode 100644 spaces_versioned_docs/version-v1.14/howtos/observability.md
 delete mode 100644 spaces_versioned_docs/version-v1.14/howtos/query-api.md
 delete mode 100644 spaces_versioned_docs/version-v1.14/howtos/secrets-management.md
 delete mode 100644 spaces_versioned_docs/version-v1.14/howtos/self-hosted/_category_.json
 delete mode 100644 spaces_versioned_docs/version-v1.14/howtos/self-hosted/administer-features.md
 delete mode 100644 spaces_versioned_docs/version-v1.14/howtos/self-hosted/attach-detach.md
 delete mode 100644 spaces_versioned_docs/version-v1.14/howtos/self-hosted/billing.md
 delete mode 100644 spaces_versioned_docs/version-v1.14/howtos/self-hosted/capacity-licensing.md
 delete mode 100644 spaces_versioned_docs/version-v1.14/howtos/self-hosted/certs.md
 delete mode 100644 spaces_versioned_docs/version-v1.14/howtos/self-hosted/configure-ha.md
 delete mode 100644 spaces_versioned_docs/version-v1.14/howtos/self-hosted/controllers.md
 delete mode 100644 spaces_versioned_docs/version-v1.14/howtos/self-hosted/ctp-audit-logs.md
 delete mode 100644 spaces_versioned_docs/version-v1.14/howtos/self-hosted/declarative-ctps.md
 delete mode 100644 spaces_versioned_docs/version-v1.14/howtos/self-hosted/deployment-reqs.md
 delete mode 100644 spaces_versioned_docs/version-v1.14/howtos/self-hosted/dr.md
 delete mode 100644 spaces_versioned_docs/version-v1.14/howtos/self-hosted/gitops-with-argocd.md
 delete mode 100644 spaces_versioned_docs/version-v1.14/howtos/self-hosted/managed-spaces-deployment.md
 delete mode 100644 spaces_versioned_docs/version-v1.14/howtos/self-hosted/oidc-configuration.md
 delete mode 100644 spaces_versioned_docs/version-v1.14/howtos/self-hosted/proxies-config.md
 delete mode 100644 spaces_versioned_docs/version-v1.14/howtos/self-hosted/query-api.md
 delete mode 100644 spaces_versioned_docs/version-v1.14/howtos/self-hosted/scaling-resources.md
 delete mode 100644 spaces_versioned_docs/version-v1.14/howtos/self-hosted/self-hosted-spaces-deployment.md
 delete mode 100644 spaces_versioned_docs/version-v1.14/howtos/self-hosted/space-observability.md
 delete mode 100644 spaces_versioned_docs/version-v1.14/howtos/self-hosted/spaces-management.md
 delete mode 100644 spaces_versioned_docs/version-v1.14/howtos/self-hosted/troubleshooting.md
 delete mode 100644 spaces_versioned_docs/version-v1.14/howtos/self-hosted/use-argo.md
 delete mode 100644 spaces_versioned_docs/version-v1.14/howtos/self-hosted/workload-id/_category_.json
 delete mode 100644 spaces_versioned_docs/version-v1.14/howtos/self-hosted/workload-id/backup-restore-config.md
 delete mode 100644 spaces_versioned_docs/version-v1.14/howtos/self-hosted/workload-id/billing-config.md
 delete mode 100644 spaces_versioned_docs/version-v1.14/howtos/self-hosted/workload-id/eso-config.md
 delete mode 100644 spaces_versioned_docs/version-v1.14/howtos/simulations.md
 delete mode 100644 spaces_versioned_docs/version-v1.14/overview/_category_.json
 delete mode 100644 spaces_versioned_docs/version-v1.14/overview/index.md
 delete mode 100644 spaces_versioned_docs/version-v1.14/reference/_category_.json
 delete mode 100644 spaces_versioned_docs/version-v1.14/reference/index.md
 delete mode 100644 spaces_versioned_docs/version-v1.15/concepts/_category_.json
 delete mode 100644 spaces_versioned_docs/version-v1.15/concepts/control-planes.md
 delete mode 100644 spaces_versioned_docs/version-v1.15/concepts/deployment-modes.md
 delete mode 100644 spaces_versioned_docs/version-v1.15/concepts/groups.md
 delete mode 100644 spaces_versioned_docs/version-v1.15/howtos/_category_.json
 delete mode 100644 spaces_versioned_docs/version-v1.15/howtos/api-connector.md
 delete mode 100644 spaces_versioned_docs/version-v1.15/howtos/auto-upgrade.md
 delete mode 100644 spaces_versioned_docs/version-v1.15/howtos/automation-and-gitops/_category_.json
 delete mode 100644 spaces_versioned_docs/version-v1.15/howtos/automation-and-gitops/overview.md
 delete mode 100644 spaces_versioned_docs/version-v1.15/howtos/backup-and-restore.md
 delete mode 100644 spaces_versioned_docs/version-v1.15/howtos/cloud-spaces/_category_.json
 delete mode 100644 spaces_versioned_docs/version-v1.15/howtos/cloud-spaces/dedicated-spaces-deployment.md
 delete mode 100644 spaces_versioned_docs/version-v1.15/howtos/cloud-spaces/gitops-on-upbound.md
 delete mode 100644 spaces_versioned_docs/version-v1.15/howtos/control-plane-topologies.md
 delete mode 100644 spaces_versioned_docs/version-v1.15/howtos/ctp-connector.md
 delete mode 100644 spaces_versioned_docs/version-v1.15/howtos/debugging-a-ctp.md
 delete mode 100644 spaces_versioned_docs/version-v1.15/howtos/managed-service.md
 delete mode 100644 spaces_versioned_docs/version-v1.15/howtos/mcp-connector-guide.md
 delete mode 100644 spaces_versioned_docs/version-v1.15/howtos/migrating-to-mcps.md
 delete mode 100644 spaces_versioned_docs/version-v1.15/howtos/observability.md
 delete mode 100644 spaces_versioned_docs/version-v1.15/howtos/query-api.md
 delete mode 100644 spaces_versioned_docs/version-v1.15/howtos/secrets-management.md
 delete mode 100644 spaces_versioned_docs/version-v1.15/howtos/self-hosted/_category_.json
 delete mode 100644 spaces_versioned_docs/version-v1.15/howtos/self-hosted/administer-features.md
 delete mode 100644 spaces_versioned_docs/version-v1.15/howtos/self-hosted/attach-detach.md
 delete mode 100644 spaces_versioned_docs/version-v1.15/howtos/self-hosted/billing.md
 delete mode 100644 spaces_versioned_docs/version-v1.15/howtos/self-hosted/capacity-licensing.md
 delete mode 100644 spaces_versioned_docs/version-v1.15/howtos/self-hosted/certs.md
 delete mode 100644 spaces_versioned_docs/version-v1.15/howtos/self-hosted/configure-ha.md
 delete mode 100644 spaces_versioned_docs/version-v1.15/howtos/self-hosted/controllers.md
 delete mode 100644 spaces_versioned_docs/version-v1.15/howtos/self-hosted/ctp-audit-logs.md
 delete mode 100644 spaces_versioned_docs/version-v1.15/howtos/self-hosted/declarative-ctps.md
 delete mode 100644 spaces_versioned_docs/version-v1.15/howtos/self-hosted/deployment-reqs.md
 delete mode 100644 spaces_versioned_docs/version-v1.15/howtos/self-hosted/dr.md
 delete mode 100644 spaces_versioned_docs/version-v1.15/howtos/self-hosted/gitops-with-argocd.md
 delete mode 100644 spaces_versioned_docs/version-v1.15/howtos/self-hosted/managed-spaces-deployment.md
 delete mode 100644 spaces_versioned_docs/version-v1.15/howtos/self-hosted/oidc-configuration.md
 delete mode 100644 spaces_versioned_docs/version-v1.15/howtos/self-hosted/proxies-config.md
 delete mode 100644 spaces_versioned_docs/version-v1.15/howtos/self-hosted/query-api.md
 delete mode 100644 spaces_versioned_docs/version-v1.15/howtos/self-hosted/scaling-resources.md
 delete mode 100644 spaces_versioned_docs/version-v1.15/howtos/self-hosted/self-hosted-spaces-deployment.md
 delete mode 100644 spaces_versioned_docs/version-v1.15/howtos/self-hosted/space-observability.md
 delete mode 100644 spaces_versioned_docs/version-v1.15/howtos/self-hosted/spaces-management.md
 delete mode 100644 spaces_versioned_docs/version-v1.15/howtos/self-hosted/troubleshooting.md
 delete mode 100644 spaces_versioned_docs/version-v1.15/howtos/self-hosted/workload-id/_category_.json
 delete mode 100644 spaces_versioned_docs/version-v1.15/howtos/self-hosted/workload-id/backup-restore-config.md
 delete mode 100644 spaces_versioned_docs/version-v1.15/howtos/self-hosted/workload-id/billing-config.md
 delete mode 100644 spaces_versioned_docs/version-v1.15/howtos/self-hosted/workload-id/eso-config.md
 delete mode 100644 spaces_versioned_docs/version-v1.15/howtos/simulations.md
 delete mode 100644 spaces_versioned_docs/version-v1.15/overview/_category_.json
 delete mode 100644 spaces_versioned_docs/version-v1.15/reference/_category_.json
 delete mode 100644 spaces_versioned_docs/version-v1.15/reference/index.md
 delete mode 100644 spaces_versioned_docs/version-v1.9/concepts/_category_.json
 delete mode 100644 spaces_versioned_docs/version-v1.9/concepts/control-planes.md
 delete mode 100644 spaces_versioned_docs/version-v1.9/concepts/deployment-modes.md
 delete mode 100644 spaces_versioned_docs/version-v1.9/concepts/groups.md
 delete mode 100644 spaces_versioned_docs/version-v1.9/howtos/_category_.json
 delete mode 100644 spaces_versioned_docs/version-v1.9/howtos/api-connector.md
 delete mode 100644 spaces_versioned_docs/version-v1.9/howtos/auto-upgrade.md
 delete mode 100644 spaces_versioned_docs/version-v1.9/howtos/automation-and-gitops/_category_.json
 delete mode 100644 spaces_versioned_docs/version-v1.9/howtos/automation-and-gitops/overview.md
 delete mode 100644 spaces_versioned_docs/version-v1.9/howtos/backup-and-restore.md
 delete mode 100644 spaces_versioned_docs/version-v1.9/howtos/cloud-spaces/_category_.json
 delete mode 100644 spaces_versioned_docs/version-v1.9/howtos/cloud-spaces/dedicated-spaces-deployment.md
 delete mode 100644 spaces_versioned_docs/version-v1.9/howtos/cloud-spaces/gitops-on-upbound.md
 delete mode 100644 spaces_versioned_docs/version-v1.9/howtos/control-plane-topologies.md
 delete mode 100644 spaces_versioned_docs/version-v1.9/howtos/ctp-connector.md
 delete mode 100644 spaces_versioned_docs/version-v1.9/howtos/debugging-a-ctp.md
 delete mode 100644 spaces_versioned_docs/version-v1.9/howtos/managed-service.md
 delete mode 100644 spaces_versioned_docs/version-v1.9/howtos/mcp-connector-guide.md
 delete mode 100644 spaces_versioned_docs/version-v1.9/howtos/migrating-to-mcps.md
 delete mode 100644 spaces_versioned_docs/version-v1.9/howtos/observability.md
 delete mode 100644 spaces_versioned_docs/version-v1.9/howtos/query-api.md
 delete mode 100644 spaces_versioned_docs/version-v1.9/howtos/secrets-management.md
 delete mode 100644 spaces_versioned_docs/version-v1.9/howtos/self-hosted/_category_.json
 delete mode 100644 spaces_versioned_docs/version-v1.9/howtos/self-hosted/administer-features.md
 delete mode 100644 spaces_versioned_docs/version-v1.9/howtos/self-hosted/attach-detach.md
 delete mode 100644 spaces_versioned_docs/version-v1.9/howtos/self-hosted/billing.md
 delete mode 100644 spaces_versioned_docs/version-v1.9/howtos/self-hosted/capacity-licensing.md
 delete mode 100644 spaces_versioned_docs/version-v1.9/howtos/self-hosted/certs.md
 delete mode 100644 spaces_versioned_docs/version-v1.9/howtos/self-hosted/configure-ha.md
 delete mode 100644 spaces_versioned_docs/version-v1.9/howtos/self-hosted/controllers.md
 delete mode 100644 spaces_versioned_docs/version-v1.9/howtos/self-hosted/ctp-audit-logs.md
 delete mode 100644 spaces_versioned_docs/version-v1.9/howtos/self-hosted/declarative-ctps.md
 delete mode 100644 spaces_versioned_docs/version-v1.9/howtos/self-hosted/deployment-reqs.md
 delete mode 100644 spaces_versioned_docs/version-v1.9/howtos/self-hosted/dr.md
 delete mode 100644 spaces_versioned_docs/version-v1.9/howtos/self-hosted/gitops-with-argocd.md
 delete mode 100644 spaces_versioned_docs/version-v1.9/howtos/self-hosted/managed-spaces-deployment.md
 delete mode 100644 spaces_versioned_docs/version-v1.9/howtos/self-hosted/oidc-configuration.md
 delete mode 100644 spaces_versioned_docs/version-v1.9/howtos/self-hosted/proxies-config.md
 delete mode 100644 spaces_versioned_docs/version-v1.9/howtos/self-hosted/query-api.md
 delete mode 100644 spaces_versioned_docs/version-v1.9/howtos/self-hosted/scaling-resources.md
 delete mode 100644 spaces_versioned_docs/version-v1.9/howtos/self-hosted/self-hosted-spaces-deployment.md
 delete mode 100644 spaces_versioned_docs/version-v1.9/howtos/self-hosted/space-observability.md
 delete mode 100644 spaces_versioned_docs/version-v1.9/howtos/self-hosted/spaces-management.md
 delete mode 100644 spaces_versioned_docs/version-v1.9/howtos/self-hosted/troubleshooting.md
 delete mode 100644 spaces_versioned_docs/version-v1.9/howtos/self-hosted/use-argo.md
 delete mode 100644 spaces_versioned_docs/version-v1.9/howtos/self-hosted/workload-id/_category_.json
 delete mode 100644 spaces_versioned_docs/version-v1.9/howtos/self-hosted/workload-id/backup-restore-config.md
 delete mode 100644 spaces_versioned_docs/version-v1.9/howtos/self-hosted/workload-id/billing-config.md
 delete mode 100644 spaces_versioned_docs/version-v1.9/howtos/self-hosted/workload-id/eso-config.md
 delete mode 100644 spaces_versioned_docs/version-v1.9/howtos/simulations.md
 delete mode 100644 spaces_versioned_docs/version-v1.9/overview/_category_.json
 delete mode 100644 spaces_versioned_docs/version-v1.9/overview/index.md
 delete mode 100644 spaces_versioned_docs/version-v1.9/reference/_category_.json
 delete mode 100644 spaces_versioned_docs/version-v1.9/reference/index.md
 rename spaces_versioned_sidebars/{version-v1.13-sidebars.json => version-1.13-sidebars.json} (100%)
 rename spaces_versioned_sidebars/{version-v1.14-sidebars.json => version-1.14-sidebars.json} (100%)
 rename spaces_versioned_sidebars/{version-v1.15-sidebars.json => version-1.15-sidebars.json} (100%)
 delete mode 100644 spaces_versioned_sidebars/version-v1.10-sidebars.json
 delete mode 100644 spaces_versioned_sidebars/version-v1.11-sidebars.json
 delete mode 100644 spaces_versioned_sidebars/version-v1.12-sidebars.json
 delete mode 100644 spaces_versioned_sidebars/version-v1.9-sidebars.json
 delete mode 100644 static/crds/space/v1.10/admin.spaces.upbound.io_spacebackupconfigs.yaml
 delete mode 100644 static/crds/space/v1.10/admin.spaces.upbound.io_spacebackups.yaml
 delete mode 100644 static/crds/space/v1.10/admin.spaces.upbound.io_spacebackupschedules.yaml
 delete mode 100644 static/crds/space/v1.10/authorization.spaces.upbound.io_objectrolebindings.yaml
 delete mode 100644 static/crds/space/v1.10/embed.go
 delete mode 100644 static/crds/space/v1.10/observability.spaces.upbound.io_sharedtelemetryconfigs.yaml
 delete mode 100644 static/crds/space/v1.10/policy.spaces.upbound.io_sharedupboundpolicies.yaml
 delete mode 100644 static/crds/space/v1.10/spaces.upbound.io_backups.yaml
 delete mode 100644 static/crds/space/v1.10/spaces.upbound.io_backupschedules.yaml
 delete mode 100644 static/crds/space/v1.10/spaces.upbound.io_controlplanes.yaml
 delete mode 100644 static/crds/space/v1.10/spaces.upbound.io_incontrolplaneoverrides.yaml
 delete mode 100644 static/crds/space/v1.10/spaces.upbound.io_sharedbackupconfigs.yaml
 delete mode 100644 static/crds/space/v1.10/spaces.upbound.io_sharedbackups.yaml
 delete mode 100644 static/crds/space/v1.10/spaces.upbound.io_sharedbackupschedules.yaml
 delete mode 100644 static/crds/space/v1.10/spaces.upbound.io_sharedexternalsecrets.yaml
 delete mode 100644 static/crds/space/v1.10/spaces.upbound.io_sharedsecretstores.yaml
 delete mode 100644 static/crds/space/v1.10/spaces.upbound.io_simulations.yaml
 delete mode 100644 static/crds/space/v1.11/admin.spaces.upbound.io_spacebackupconfigs.yaml
 delete mode 100644 static/crds/space/v1.11/admin.spaces.upbound.io_spacebackups.yaml
 delete mode 100644 static/crds/space/v1.11/admin.spaces.upbound.io_spacebackupschedules.yaml
 delete mode 100644 static/crds/space/v1.11/authorization.spaces.upbound.io_objectrolebindings.yaml
 delete mode 100644 static/crds/space/v1.11/observability.spaces.upbound.io_sharedtelemetryconfigs.yaml
 delete mode 100644 static/crds/space/v1.11/policy.spaces.upbound.io_sharedupboundpolicies.yaml
 delete mode 100644 static/crds/space/v1.11/spaces.upbound.io_backups.yaml
 delete mode 100644 static/crds/space/v1.11/spaces.upbound.io_backupschedules.yaml
 delete mode 100644 static/crds/space/v1.11/spaces.upbound.io_controlplanes.yaml
 delete mode 100644 static/crds/space/v1.11/spaces.upbound.io_incontrolplaneoverrides.yaml
 delete mode 100644 static/crds/space/v1.11/spaces.upbound.io_sharedbackupconfigs.yaml
 delete mode 100644 static/crds/space/v1.11/spaces.upbound.io_sharedbackups.yaml
 delete mode 100644 static/crds/space/v1.11/spaces.upbound.io_sharedbackupschedules.yaml
 delete mode 100644 static/crds/space/v1.11/spaces.upbound.io_sharedexternalsecrets.yaml
 delete mode 100644 static/crds/space/v1.11/spaces.upbound.io_sharedsecretstores.yaml
 delete mode 100644 static/crds/space/v1.11/spaces.upbound.io_simulations.yaml
 delete mode 100644 static/crds/space/v1.12/index.md
 delete mode 100644 static/crds/space/v1.12/observability.spaces.upbound.io_sharedtelemetryconfigs.yaml
 delete mode 100644 static/crds/space/v1.12/pkg.upbound.io_controllerrevisions.yaml
 delete mode 100644 static/crds/space/v1.12/pkg.upbound.io_controllerruntimeconfigs.yaml
 delete mode 100644 static/crds/space/v1.12/pkg.upbound.io_controllers.yaml
 delete mode 100644 static/crds/space/v1.12/pkg.upbound.io_remoteconfigurationrevisions.yaml
 delete mode 100644 static/crds/space/v1.12/pkg.upbound.io_remoteconfigurations.yaml
 delete mode 100644 static/crds/space/v1.12/policy.spaces.upbound.io_sharedupboundpolicies.yaml
 delete mode 100644 static/crds/space/v1.12/references.upbound.io_referencedobjects.yaml
 delete mode 100644 static/crds/space/v1.12/scheduling.upbound.io_environments.yaml
 delete mode 100644 static/crds/space/v1.12/spaces.upbound.io_backups.yaml
 delete mode 100644 static/crds/space/v1.12/spaces.upbound.io_backupschedules.yaml
 delete mode 100644 static/crds/space/v1.12/spaces.upbound.io_controlplanes.yaml
 delete mode 100644 static/crds/space/v1.12/spaces.upbound.io_incontrolplaneoverrides.yaml
 delete mode 100644 static/crds/space/v1.12/spaces.upbound.io_sharedbackupconfigs.yaml
 delete mode 100644 static/crds/space/v1.12/spaces.upbound.io_sharedbackups.yaml
 delete mode 100644 static/crds/space/v1.12/spaces.upbound.io_sharedbackupschedules.yaml
 delete mode 100644 static/crds/space/v1.12/spaces.upbound.io_sharedexternalsecrets.yaml
 delete mode 100644 static/crds/space/v1.12/spaces.upbound.io_sharedsecretstores.yaml
 delete mode 100644 static/crds/space/v1.12/spaces.upbound.io_simulations.yaml
 delete mode 100644 static/crds/space/v1.9/admin.spaces.upbound.io_spacebackupconfigs.yaml
 delete mode 100644 static/crds/space/v1.9/admin.spaces.upbound.io_spacebackups.yaml
 delete mode 100644 static/crds/space/v1.9/admin.spaces.upbound.io_spacebackupschedules.yaml
 delete mode 100644 static/crds/space/v1.9/authorization.spaces.upbound.io_objectrolebindings.yaml
 delete mode 100644 static/crds/space/v1.9/embed.go
 delete mode 100644 static/crds/space/v1.9/observability.spaces.upbound.io_sharedtelemetryconfigs.yaml
 delete mode 100644 static/crds/space/v1.9/policy.spaces.upbound.io_sharedupboundpolicies.yaml
 delete mode 100644 static/crds/space/v1.9/spaces.upbound.io_backups.yaml
 delete mode 100644 static/crds/space/v1.9/spaces.upbound.io_backupschedules.yaml
 delete mode 100644 static/crds/space/v1.9/spaces.upbound.io_controlplanes.yaml
 delete mode 100644 static/crds/space/v1.9/spaces.upbound.io_incontrolplaneoverrides.yaml
 delete mode 100644 static/crds/space/v1.9/spaces.upbound.io_sharedbackupconfigs.yaml
 delete mode 100644 static/crds/space/v1.9/spaces.upbound.io_sharedbackups.yaml
 delete mode 100644 static/crds/space/v1.9/spaces.upbound.io_sharedbackupschedules.yaml
 delete mode 100644 static/crds/space/v1.9/spaces.upbound.io_sharedexternalsecrets.yaml
 delete mode 100644 static/crds/space/v1.9/spaces.upbound.io_sharedsecretstores.yaml
 delete mode 100644 static/crds/space/v1.9/spaces.upbound.io_simulations.yaml
 delete mode 100644 versions.json

diff --git a/spaces_versioned_docs/version-v1.10/concepts/_category_.json b/spaces_versioned_docs/version-1.13/concepts/_category_.json
similarity index 100%
rename from spaces_versioned_docs/version-v1.10/concepts/_category_.json
rename to spaces_versioned_docs/version-1.13/concepts/_category_.json
diff --git a/spaces_versioned_docs/version-v1.10/concepts/control-planes.md b/spaces_versioned_docs/version-1.13/concepts/control-planes.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.10/concepts/control-planes.md
rename to spaces_versioned_docs/version-1.13/concepts/control-planes.md
diff --git a/spaces_versioned_docs/version-v1.10/concepts/deployment-modes.md b/spaces_versioned_docs/version-1.13/concepts/deployment-modes.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.10/concepts/deployment-modes.md
rename to spaces_versioned_docs/version-1.13/concepts/deployment-modes.md
diff --git a/spaces_versioned_docs/version-v1.10/concepts/groups.md b/spaces_versioned_docs/version-1.13/concepts/groups.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.10/concepts/groups.md
rename to spaces_versioned_docs/version-1.13/concepts/groups.md
diff --git a/spaces_versioned_docs/version-v1.10/howtos/_category_.json b/spaces_versioned_docs/version-1.13/howtos/_category_.json
similarity index 100%
rename from spaces_versioned_docs/version-v1.10/howtos/_category_.json
rename to spaces_versioned_docs/version-1.13/howtos/_category_.json
diff --git a/spaces_versioned_docs/version-v1.10/howtos/api-connector.md b/spaces_versioned_docs/version-1.13/howtos/api-connector.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.10/howtos/api-connector.md
rename to spaces_versioned_docs/version-1.13/howtos/api-connector.md
diff --git a/spaces_versioned_docs/version-v1.10/howtos/auto-upgrade.md b/spaces_versioned_docs/version-1.13/howtos/auto-upgrade.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.10/howtos/auto-upgrade.md
rename to spaces_versioned_docs/version-1.13/howtos/auto-upgrade.md
diff --git a/spaces_versioned_docs/version-v1.10/howtos/automation-and-gitops/_category_.json b/spaces_versioned_docs/version-1.13/howtos/automation-and-gitops/_category_.json
similarity index 100%
rename from spaces_versioned_docs/version-v1.10/howtos/automation-and-gitops/_category_.json
rename to spaces_versioned_docs/version-1.13/howtos/automation-and-gitops/_category_.json
diff --git a/spaces_versioned_docs/version-v1.10/howtos/automation-and-gitops/overview.md b/spaces_versioned_docs/version-1.13/howtos/automation-and-gitops/overview.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.10/howtos/automation-and-gitops/overview.md
rename to spaces_versioned_docs/version-1.13/howtos/automation-and-gitops/overview.md
diff --git a/spaces_versioned_docs/version-v1.10/howtos/backup-and-restore.md b/spaces_versioned_docs/version-1.13/howtos/backup-and-restore.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.10/howtos/backup-and-restore.md
rename to spaces_versioned_docs/version-1.13/howtos/backup-and-restore.md
diff --git a/spaces_versioned_docs/version-v1.10/howtos/cloud-spaces/_category_.json b/spaces_versioned_docs/version-1.13/howtos/cloud-spaces/_category_.json
similarity index 100%
rename from spaces_versioned_docs/version-v1.10/howtos/cloud-spaces/_category_.json
rename to spaces_versioned_docs/version-1.13/howtos/cloud-spaces/_category_.json
diff --git a/spaces_versioned_docs/version-v1.10/howtos/cloud-spaces/dedicated-spaces-deployment.md b/spaces_versioned_docs/version-1.13/howtos/cloud-spaces/dedicated-spaces-deployment.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.10/howtos/cloud-spaces/dedicated-spaces-deployment.md
rename to spaces_versioned_docs/version-1.13/howtos/cloud-spaces/dedicated-spaces-deployment.md
diff --git a/spaces_versioned_docs/version-v1.10/howtos/cloud-spaces/gitops-on-upbound.md b/spaces_versioned_docs/version-1.13/howtos/cloud-spaces/gitops-on-upbound.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.10/howtos/cloud-spaces/gitops-on-upbound.md
rename to spaces_versioned_docs/version-1.13/howtos/cloud-spaces/gitops-on-upbound.md
diff --git a/spaces_versioned_docs/version-v1.10/howtos/control-plane-topologies.md b/spaces_versioned_docs/version-1.13/howtos/control-plane-topologies.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.10/howtos/control-plane-topologies.md
rename to spaces_versioned_docs/version-1.13/howtos/control-plane-topologies.md
diff --git a/spaces_versioned_docs/version-v1.10/howtos/ctp-connector.md b/spaces_versioned_docs/version-1.13/howtos/ctp-connector.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.10/howtos/ctp-connector.md
rename to spaces_versioned_docs/version-1.13/howtos/ctp-connector.md
diff --git a/spaces_versioned_docs/version-v1.10/howtos/debugging-a-ctp.md b/spaces_versioned_docs/version-1.13/howtos/debugging-a-ctp.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.10/howtos/debugging-a-ctp.md
rename to spaces_versioned_docs/version-1.13/howtos/debugging-a-ctp.md
diff --git a/spaces_versioned_docs/version-v1.10/howtos/managed-service.md b/spaces_versioned_docs/version-1.13/howtos/managed-service.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.10/howtos/managed-service.md
rename to spaces_versioned_docs/version-1.13/howtos/managed-service.md
diff --git a/spaces_versioned_docs/version-v1.10/howtos/mcp-connector-guide.md b/spaces_versioned_docs/version-1.13/howtos/mcp-connector-guide.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.10/howtos/mcp-connector-guide.md
rename to spaces_versioned_docs/version-1.13/howtos/mcp-connector-guide.md
diff --git a/spaces_versioned_docs/version-v1.10/howtos/migrating-to-mcps.md b/spaces_versioned_docs/version-1.13/howtos/migrating-to-mcps.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.10/howtos/migrating-to-mcps.md
rename to spaces_versioned_docs/version-1.13/howtos/migrating-to-mcps.md
diff --git a/spaces_versioned_docs/version-v1.10/howtos/observability.md b/spaces_versioned_docs/version-1.13/howtos/observability.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.10/howtos/observability.md
rename to spaces_versioned_docs/version-1.13/howtos/observability.md
diff --git a/spaces_versioned_docs/version-v1.10/howtos/query-api.md b/spaces_versioned_docs/version-1.13/howtos/query-api.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.10/howtos/query-api.md
rename to spaces_versioned_docs/version-1.13/howtos/query-api.md
diff --git a/spaces_versioned_docs/version-v1.10/howtos/secrets-management.md b/spaces_versioned_docs/version-1.13/howtos/secrets-management.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.10/howtos/secrets-management.md
rename to spaces_versioned_docs/version-1.13/howtos/secrets-management.md
diff --git a/spaces_versioned_docs/version-v1.10/howtos/self-hosted/_category_.json b/spaces_versioned_docs/version-1.13/howtos/self-hosted/_category_.json
similarity index 100%
rename from spaces_versioned_docs/version-v1.10/howtos/self-hosted/_category_.json
rename to spaces_versioned_docs/version-1.13/howtos/self-hosted/_category_.json
diff --git a/spaces_versioned_docs/version-v1.10/howtos/self-hosted/administer-features.md b/spaces_versioned_docs/version-1.13/howtos/self-hosted/administer-features.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.10/howtos/self-hosted/administer-features.md
rename to spaces_versioned_docs/version-1.13/howtos/self-hosted/administer-features.md
diff --git a/spaces_versioned_docs/version-v1.10/howtos/self-hosted/attach-detach.md b/spaces_versioned_docs/version-1.13/howtos/self-hosted/attach-detach.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.10/howtos/self-hosted/attach-detach.md
rename to spaces_versioned_docs/version-1.13/howtos/self-hosted/attach-detach.md
diff --git a/spaces_versioned_docs/version-v1.10/howtos/self-hosted/billing.md b/spaces_versioned_docs/version-1.13/howtos/self-hosted/billing.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.10/howtos/self-hosted/billing.md
rename to spaces_versioned_docs/version-1.13/howtos/self-hosted/billing.md
diff --git a/spaces_versioned_docs/version-v1.10/howtos/self-hosted/capacity-licensing.md b/spaces_versioned_docs/version-1.13/howtos/self-hosted/capacity-licensing.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.10/howtos/self-hosted/capacity-licensing.md
rename to spaces_versioned_docs/version-1.13/howtos/self-hosted/capacity-licensing.md
diff --git a/spaces_versioned_docs/version-v1.10/howtos/self-hosted/certs.md b/spaces_versioned_docs/version-1.13/howtos/self-hosted/certs.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.10/howtos/self-hosted/certs.md
rename to spaces_versioned_docs/version-1.13/howtos/self-hosted/certs.md
diff --git a/spaces_versioned_docs/version-v1.10/howtos/self-hosted/configure-ha.md b/spaces_versioned_docs/version-1.13/howtos/self-hosted/configure-ha.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.10/howtos/self-hosted/configure-ha.md
rename to spaces_versioned_docs/version-1.13/howtos/self-hosted/configure-ha.md
diff --git a/spaces_versioned_docs/version-v1.10/howtos/self-hosted/controllers.md b/spaces_versioned_docs/version-1.13/howtos/self-hosted/controllers.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.10/howtos/self-hosted/controllers.md
rename to spaces_versioned_docs/version-1.13/howtos/self-hosted/controllers.md
diff --git a/spaces_versioned_docs/version-v1.10/howtos/self-hosted/ctp-audit-logs.md b/spaces_versioned_docs/version-1.13/howtos/self-hosted/ctp-audit-logs.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.10/howtos/self-hosted/ctp-audit-logs.md
rename to spaces_versioned_docs/version-1.13/howtos/self-hosted/ctp-audit-logs.md
diff --git a/spaces_versioned_docs/version-v1.10/howtos/self-hosted/declarative-ctps.md b/spaces_versioned_docs/version-1.13/howtos/self-hosted/declarative-ctps.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.10/howtos/self-hosted/declarative-ctps.md
rename to spaces_versioned_docs/version-1.13/howtos/self-hosted/declarative-ctps.md
diff --git a/spaces_versioned_docs/version-v1.10/howtos/self-hosted/deployment-reqs.md b/spaces_versioned_docs/version-1.13/howtos/self-hosted/deployment-reqs.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.10/howtos/self-hosted/deployment-reqs.md
rename to spaces_versioned_docs/version-1.13/howtos/self-hosted/deployment-reqs.md
diff --git a/spaces_versioned_docs/version-v1.10/howtos/self-hosted/dr.md b/spaces_versioned_docs/version-1.13/howtos/self-hosted/dr.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.10/howtos/self-hosted/dr.md
rename to spaces_versioned_docs/version-1.13/howtos/self-hosted/dr.md
diff --git a/spaces_versioned_docs/version-v1.10/howtos/self-hosted/gitops-with-argocd.md b/spaces_versioned_docs/version-1.13/howtos/self-hosted/gitops-with-argocd.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.10/howtos/self-hosted/gitops-with-argocd.md
rename to spaces_versioned_docs/version-1.13/howtos/self-hosted/gitops-with-argocd.md
diff --git a/spaces_versioned_docs/version-v1.10/howtos/self-hosted/managed-spaces-deployment.md b/spaces_versioned_docs/version-1.13/howtos/self-hosted/managed-spaces-deployment.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.10/howtos/self-hosted/managed-spaces-deployment.md
rename to spaces_versioned_docs/version-1.13/howtos/self-hosted/managed-spaces-deployment.md
diff --git a/spaces_versioned_docs/version-v1.10/howtos/self-hosted/oidc-configuration.md b/spaces_versioned_docs/version-1.13/howtos/self-hosted/oidc-configuration.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.10/howtos/self-hosted/oidc-configuration.md
rename to spaces_versioned_docs/version-1.13/howtos/self-hosted/oidc-configuration.md
diff --git a/spaces_versioned_docs/version-v1.10/howtos/self-hosted/proxies-config.md b/spaces_versioned_docs/version-1.13/howtos/self-hosted/proxies-config.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.10/howtos/self-hosted/proxies-config.md
rename to spaces_versioned_docs/version-1.13/howtos/self-hosted/proxies-config.md
diff --git a/spaces_versioned_docs/version-v1.10/howtos/self-hosted/query-api.md b/spaces_versioned_docs/version-1.13/howtos/self-hosted/query-api.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.10/howtos/self-hosted/query-api.md
rename to spaces_versioned_docs/version-1.13/howtos/self-hosted/query-api.md
diff --git a/spaces_versioned_docs/version-v1.10/howtos/self-hosted/scaling-resources.md b/spaces_versioned_docs/version-1.13/howtos/self-hosted/scaling-resources.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.10/howtos/self-hosted/scaling-resources.md
rename to spaces_versioned_docs/version-1.13/howtos/self-hosted/scaling-resources.md
diff --git a/spaces_versioned_docs/version-v1.10/howtos/self-hosted/self-hosted-spaces-deployment.md b/spaces_versioned_docs/version-1.13/howtos/self-hosted/self-hosted-spaces-deployment.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.10/howtos/self-hosted/self-hosted-spaces-deployment.md
rename to spaces_versioned_docs/version-1.13/howtos/self-hosted/self-hosted-spaces-deployment.md
diff --git a/spaces_versioned_docs/version-v1.10/howtos/self-hosted/space-observability.md b/spaces_versioned_docs/version-1.13/howtos/self-hosted/space-observability.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.10/howtos/self-hosted/space-observability.md
rename to spaces_versioned_docs/version-1.13/howtos/self-hosted/space-observability.md
diff --git a/spaces_versioned_docs/version-v1.10/howtos/self-hosted/spaces-management.md b/spaces_versioned_docs/version-1.13/howtos/self-hosted/spaces-management.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.10/howtos/self-hosted/spaces-management.md
rename to spaces_versioned_docs/version-1.13/howtos/self-hosted/spaces-management.md
diff --git a/spaces_versioned_docs/version-v1.10/howtos/self-hosted/troubleshooting.md b/spaces_versioned_docs/version-1.13/howtos/self-hosted/troubleshooting.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.10/howtos/self-hosted/troubleshooting.md
rename to spaces_versioned_docs/version-1.13/howtos/self-hosted/troubleshooting.md
diff --git a/spaces_versioned_docs/version-v1.10/howtos/self-hosted/use-argo.md b/spaces_versioned_docs/version-1.13/howtos/self-hosted/use-argo.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.10/howtos/self-hosted/use-argo.md
rename to spaces_versioned_docs/version-1.13/howtos/self-hosted/use-argo.md
diff --git a/spaces_versioned_docs/version-v1.10/howtos/self-hosted/workload-id/_category_.json b/spaces_versioned_docs/version-1.13/howtos/self-hosted/workload-id/_category_.json
similarity index 100%
rename from spaces_versioned_docs/version-v1.10/howtos/self-hosted/workload-id/_category_.json
rename to spaces_versioned_docs/version-1.13/howtos/self-hosted/workload-id/_category_.json
diff --git a/spaces_versioned_docs/version-v1.10/howtos/self-hosted/workload-id/backup-restore-config.md b/spaces_versioned_docs/version-1.13/howtos/self-hosted/workload-id/backup-restore-config.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.10/howtos/self-hosted/workload-id/backup-restore-config.md
rename to spaces_versioned_docs/version-1.13/howtos/self-hosted/workload-id/backup-restore-config.md
diff --git a/spaces_versioned_docs/version-v1.10/howtos/self-hosted/workload-id/billing-config.md b/spaces_versioned_docs/version-1.13/howtos/self-hosted/workload-id/billing-config.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.10/howtos/self-hosted/workload-id/billing-config.md
rename to spaces_versioned_docs/version-1.13/howtos/self-hosted/workload-id/billing-config.md
diff --git a/spaces_versioned_docs/version-v1.10/howtos/self-hosted/workload-id/eso-config.md b/spaces_versioned_docs/version-1.13/howtos/self-hosted/workload-id/eso-config.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.10/howtos/self-hosted/workload-id/eso-config.md
rename to spaces_versioned_docs/version-1.13/howtos/self-hosted/workload-id/eso-config.md
diff --git a/spaces_versioned_docs/version-v1.10/howtos/simulations.md b/spaces_versioned_docs/version-1.13/howtos/simulations.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.10/howtos/simulations.md
rename to spaces_versioned_docs/version-1.13/howtos/simulations.md
diff --git a/spaces_versioned_docs/version-v1.10/overview/_category_.json b/spaces_versioned_docs/version-1.13/overview/_category_.json
similarity index 100%
rename from spaces_versioned_docs/version-v1.10/overview/_category_.json
rename to spaces_versioned_docs/version-1.13/overview/_category_.json
diff --git a/spaces_versioned_docs/version-v1.10/overview/index.md b/spaces_versioned_docs/version-1.13/overview/index.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.10/overview/index.md
rename to spaces_versioned_docs/version-1.13/overview/index.md
diff --git a/spaces_versioned_docs/version-v1.10/reference/_category_.json b/spaces_versioned_docs/version-1.13/reference/_category_.json
similarity index 100%
rename from spaces_versioned_docs/version-v1.10/reference/_category_.json
rename to spaces_versioned_docs/version-1.13/reference/_category_.json
diff --git a/spaces_versioned_docs/version-v1.10/reference/index.md b/spaces_versioned_docs/version-1.13/reference/index.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.10/reference/index.md
rename to spaces_versioned_docs/version-1.13/reference/index.md
diff --git a/spaces_versioned_docs/version-v1.11/concepts/_category_.json b/spaces_versioned_docs/version-1.14/concepts/_category_.json
similarity index 100%
rename from spaces_versioned_docs/version-v1.11/concepts/_category_.json
rename to spaces_versioned_docs/version-1.14/concepts/_category_.json
diff --git a/spaces_versioned_docs/version-v1.11/concepts/control-planes.md b/spaces_versioned_docs/version-1.14/concepts/control-planes.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.11/concepts/control-planes.md
rename to spaces_versioned_docs/version-1.14/concepts/control-planes.md
diff --git a/spaces_versioned_docs/version-v1.11/concepts/deployment-modes.md b/spaces_versioned_docs/version-1.14/concepts/deployment-modes.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.11/concepts/deployment-modes.md
rename to spaces_versioned_docs/version-1.14/concepts/deployment-modes.md
diff --git a/spaces_versioned_docs/version-v1.11/concepts/groups.md b/spaces_versioned_docs/version-1.14/concepts/groups.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.11/concepts/groups.md
rename to spaces_versioned_docs/version-1.14/concepts/groups.md
diff --git a/spaces_versioned_docs/version-v1.11/howtos/_category_.json b/spaces_versioned_docs/version-1.14/howtos/_category_.json
similarity index 100%
rename from spaces_versioned_docs/version-v1.11/howtos/_category_.json
rename to spaces_versioned_docs/version-1.14/howtos/_category_.json
diff --git a/spaces_versioned_docs/version-v1.11/howtos/api-connector.md b/spaces_versioned_docs/version-1.14/howtos/api-connector.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.11/howtos/api-connector.md
rename to spaces_versioned_docs/version-1.14/howtos/api-connector.md
diff --git a/spaces_versioned_docs/version-v1.11/howtos/auto-upgrade.md b/spaces_versioned_docs/version-1.14/howtos/auto-upgrade.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.11/howtos/auto-upgrade.md
rename to spaces_versioned_docs/version-1.14/howtos/auto-upgrade.md
diff --git a/spaces_versioned_docs/version-v1.11/howtos/automation-and-gitops/_category_.json b/spaces_versioned_docs/version-1.14/howtos/automation-and-gitops/_category_.json
similarity index 100%
rename from spaces_versioned_docs/version-v1.11/howtos/automation-and-gitops/_category_.json
rename to spaces_versioned_docs/version-1.14/howtos/automation-and-gitops/_category_.json
diff --git a/spaces_versioned_docs/version-v1.11/howtos/automation-and-gitops/overview.md b/spaces_versioned_docs/version-1.14/howtos/automation-and-gitops/overview.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.11/howtos/automation-and-gitops/overview.md
rename to spaces_versioned_docs/version-1.14/howtos/automation-and-gitops/overview.md
diff --git a/spaces_versioned_docs/version-v1.11/howtos/backup-and-restore.md b/spaces_versioned_docs/version-1.14/howtos/backup-and-restore.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.11/howtos/backup-and-restore.md
rename to spaces_versioned_docs/version-1.14/howtos/backup-and-restore.md
diff --git a/spaces_versioned_docs/version-v1.11/howtos/cloud-spaces/_category_.json b/spaces_versioned_docs/version-1.14/howtos/cloud-spaces/_category_.json
similarity index 100%
rename from spaces_versioned_docs/version-v1.11/howtos/cloud-spaces/_category_.json
rename to spaces_versioned_docs/version-1.14/howtos/cloud-spaces/_category_.json
diff --git a/spaces_versioned_docs/version-v1.11/howtos/cloud-spaces/dedicated-spaces-deployment.md b/spaces_versioned_docs/version-1.14/howtos/cloud-spaces/dedicated-spaces-deployment.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.11/howtos/cloud-spaces/dedicated-spaces-deployment.md
rename to spaces_versioned_docs/version-1.14/howtos/cloud-spaces/dedicated-spaces-deployment.md
diff --git a/spaces_versioned_docs/version-v1.11/howtos/cloud-spaces/gitops-on-upbound.md b/spaces_versioned_docs/version-1.14/howtos/cloud-spaces/gitops-on-upbound.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.11/howtos/cloud-spaces/gitops-on-upbound.md
rename to spaces_versioned_docs/version-1.14/howtos/cloud-spaces/gitops-on-upbound.md
diff --git a/spaces_versioned_docs/version-v1.11/howtos/control-plane-topologies.md b/spaces_versioned_docs/version-1.14/howtos/control-plane-topologies.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.11/howtos/control-plane-topologies.md
rename to spaces_versioned_docs/version-1.14/howtos/control-plane-topologies.md
diff --git a/spaces_versioned_docs/version-v1.11/howtos/ctp-connector.md b/spaces_versioned_docs/version-1.14/howtos/ctp-connector.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.11/howtos/ctp-connector.md
rename to spaces_versioned_docs/version-1.14/howtos/ctp-connector.md
diff --git a/spaces_versioned_docs/version-v1.11/howtos/debugging-a-ctp.md b/spaces_versioned_docs/version-1.14/howtos/debugging-a-ctp.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.11/howtos/debugging-a-ctp.md
rename to spaces_versioned_docs/version-1.14/howtos/debugging-a-ctp.md
diff --git a/spaces_versioned_docs/version-v1.11/howtos/managed-service.md b/spaces_versioned_docs/version-1.14/howtos/managed-service.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.11/howtos/managed-service.md
rename to spaces_versioned_docs/version-1.14/howtos/managed-service.md
diff --git a/spaces_versioned_docs/version-v1.11/howtos/mcp-connector-guide.md b/spaces_versioned_docs/version-1.14/howtos/mcp-connector-guide.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.11/howtos/mcp-connector-guide.md
rename to spaces_versioned_docs/version-1.14/howtos/mcp-connector-guide.md
diff --git a/spaces_versioned_docs/version-v1.11/howtos/migrating-to-mcps.md b/spaces_versioned_docs/version-1.14/howtos/migrating-to-mcps.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.11/howtos/migrating-to-mcps.md
rename to spaces_versioned_docs/version-1.14/howtos/migrating-to-mcps.md
diff --git a/spaces_versioned_docs/version-v1.11/howtos/observability.md b/spaces_versioned_docs/version-1.14/howtos/observability.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.11/howtos/observability.md
rename to spaces_versioned_docs/version-1.14/howtos/observability.md
diff --git a/spaces_versioned_docs/version-v1.11/howtos/query-api.md b/spaces_versioned_docs/version-1.14/howtos/query-api.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.11/howtos/query-api.md
rename to spaces_versioned_docs/version-1.14/howtos/query-api.md
diff --git a/spaces_versioned_docs/version-v1.11/howtos/secrets-management.md b/spaces_versioned_docs/version-1.14/howtos/secrets-management.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.11/howtos/secrets-management.md
rename to spaces_versioned_docs/version-1.14/howtos/secrets-management.md
diff --git a/spaces_versioned_docs/version-v1.11/howtos/self-hosted/_category_.json b/spaces_versioned_docs/version-1.14/howtos/self-hosted/_category_.json
similarity index 100%
rename from spaces_versioned_docs/version-v1.11/howtos/self-hosted/_category_.json
rename to spaces_versioned_docs/version-1.14/howtos/self-hosted/_category_.json
diff --git a/spaces_versioned_docs/version-v1.11/howtos/self-hosted/administer-features.md b/spaces_versioned_docs/version-1.14/howtos/self-hosted/administer-features.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.11/howtos/self-hosted/administer-features.md
rename to spaces_versioned_docs/version-1.14/howtos/self-hosted/administer-features.md
diff --git a/spaces_versioned_docs/version-v1.11/howtos/self-hosted/attach-detach.md b/spaces_versioned_docs/version-1.14/howtos/self-hosted/attach-detach.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.11/howtos/self-hosted/attach-detach.md
rename to spaces_versioned_docs/version-1.14/howtos/self-hosted/attach-detach.md
diff --git a/spaces_versioned_docs/version-v1.11/howtos/self-hosted/billing.md b/spaces_versioned_docs/version-1.14/howtos/self-hosted/billing.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.11/howtos/self-hosted/billing.md
rename to spaces_versioned_docs/version-1.14/howtos/self-hosted/billing.md
diff --git a/spaces_versioned_docs/version-v1.11/howtos/self-hosted/capacity-licensing.md b/spaces_versioned_docs/version-1.14/howtos/self-hosted/capacity-licensing.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.11/howtos/self-hosted/capacity-licensing.md
rename to spaces_versioned_docs/version-1.14/howtos/self-hosted/capacity-licensing.md
diff --git a/spaces_versioned_docs/version-v1.11/howtos/self-hosted/certs.md b/spaces_versioned_docs/version-1.14/howtos/self-hosted/certs.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.11/howtos/self-hosted/certs.md
rename to spaces_versioned_docs/version-1.14/howtos/self-hosted/certs.md
diff --git a/spaces_versioned_docs/version-v1.11/howtos/self-hosted/configure-ha.md b/spaces_versioned_docs/version-1.14/howtos/self-hosted/configure-ha.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.11/howtos/self-hosted/configure-ha.md
rename to spaces_versioned_docs/version-1.14/howtos/self-hosted/configure-ha.md
diff --git a/spaces_versioned_docs/version-v1.11/howtos/self-hosted/controllers.md b/spaces_versioned_docs/version-1.14/howtos/self-hosted/controllers.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.11/howtos/self-hosted/controllers.md
rename to spaces_versioned_docs/version-1.14/howtos/self-hosted/controllers.md
diff --git a/spaces_versioned_docs/version-v1.11/howtos/self-hosted/ctp-audit-logs.md b/spaces_versioned_docs/version-1.14/howtos/self-hosted/ctp-audit-logs.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.11/howtos/self-hosted/ctp-audit-logs.md
rename to spaces_versioned_docs/version-1.14/howtos/self-hosted/ctp-audit-logs.md
diff --git a/spaces_versioned_docs/version-v1.11/howtos/self-hosted/declarative-ctps.md b/spaces_versioned_docs/version-1.14/howtos/self-hosted/declarative-ctps.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.11/howtos/self-hosted/declarative-ctps.md
rename to spaces_versioned_docs/version-1.14/howtos/self-hosted/declarative-ctps.md
diff --git a/spaces_versioned_docs/version-v1.11/howtos/self-hosted/deployment-reqs.md b/spaces_versioned_docs/version-1.14/howtos/self-hosted/deployment-reqs.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.11/howtos/self-hosted/deployment-reqs.md
rename to spaces_versioned_docs/version-1.14/howtos/self-hosted/deployment-reqs.md
diff --git a/spaces_versioned_docs/version-v1.11/howtos/self-hosted/dr.md b/spaces_versioned_docs/version-1.14/howtos/self-hosted/dr.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.11/howtos/self-hosted/dr.md
rename to spaces_versioned_docs/version-1.14/howtos/self-hosted/dr.md
diff --git a/spaces_versioned_docs/version-v1.11/howtos/self-hosted/gitops-with-argocd.md b/spaces_versioned_docs/version-1.14/howtos/self-hosted/gitops-with-argocd.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.11/howtos/self-hosted/gitops-with-argocd.md
rename to spaces_versioned_docs/version-1.14/howtos/self-hosted/gitops-with-argocd.md
diff --git a/spaces_versioned_docs/version-v1.11/howtos/self-hosted/managed-spaces-deployment.md b/spaces_versioned_docs/version-1.14/howtos/self-hosted/managed-spaces-deployment.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.11/howtos/self-hosted/managed-spaces-deployment.md
rename to spaces_versioned_docs/version-1.14/howtos/self-hosted/managed-spaces-deployment.md
diff --git a/spaces_versioned_docs/version-v1.11/howtos/self-hosted/oidc-configuration.md b/spaces_versioned_docs/version-1.14/howtos/self-hosted/oidc-configuration.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.11/howtos/self-hosted/oidc-configuration.md
rename to spaces_versioned_docs/version-1.14/howtos/self-hosted/oidc-configuration.md
diff --git a/spaces_versioned_docs/version-v1.11/howtos/self-hosted/proxies-config.md b/spaces_versioned_docs/version-1.14/howtos/self-hosted/proxies-config.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.11/howtos/self-hosted/proxies-config.md
rename to spaces_versioned_docs/version-1.14/howtos/self-hosted/proxies-config.md
diff --git a/spaces_versioned_docs/version-v1.11/howtos/self-hosted/query-api.md b/spaces_versioned_docs/version-1.14/howtos/self-hosted/query-api.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.11/howtos/self-hosted/query-api.md
rename to spaces_versioned_docs/version-1.14/howtos/self-hosted/query-api.md
diff --git a/spaces_versioned_docs/version-v1.11/howtos/self-hosted/scaling-resources.md b/spaces_versioned_docs/version-1.14/howtos/self-hosted/scaling-resources.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.11/howtos/self-hosted/scaling-resources.md
rename to spaces_versioned_docs/version-1.14/howtos/self-hosted/scaling-resources.md
diff --git a/spaces_versioned_docs/version-v1.11/howtos/self-hosted/self-hosted-spaces-deployment.md b/spaces_versioned_docs/version-1.14/howtos/self-hosted/self-hosted-spaces-deployment.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.11/howtos/self-hosted/self-hosted-spaces-deployment.md
rename to spaces_versioned_docs/version-1.14/howtos/self-hosted/self-hosted-spaces-deployment.md
diff --git a/spaces_versioned_docs/version-v1.11/howtos/self-hosted/space-observability.md b/spaces_versioned_docs/version-1.14/howtos/self-hosted/space-observability.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.11/howtos/self-hosted/space-observability.md
rename to spaces_versioned_docs/version-1.14/howtos/self-hosted/space-observability.md
diff --git a/spaces_versioned_docs/version-v1.11/howtos/self-hosted/spaces-management.md b/spaces_versioned_docs/version-1.14/howtos/self-hosted/spaces-management.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.11/howtos/self-hosted/spaces-management.md
rename to spaces_versioned_docs/version-1.14/howtos/self-hosted/spaces-management.md
diff --git a/spaces_versioned_docs/version-v1.11/howtos/self-hosted/troubleshooting.md b/spaces_versioned_docs/version-1.14/howtos/self-hosted/troubleshooting.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.11/howtos/self-hosted/troubleshooting.md
rename to spaces_versioned_docs/version-1.14/howtos/self-hosted/troubleshooting.md
diff --git a/spaces_versioned_docs/version-v1.11/howtos/self-hosted/use-argo.md b/spaces_versioned_docs/version-1.14/howtos/self-hosted/use-argo.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.11/howtos/self-hosted/use-argo.md
rename to spaces_versioned_docs/version-1.14/howtos/self-hosted/use-argo.md
diff --git a/spaces_versioned_docs/version-v1.11/howtos/self-hosted/workload-id/_category_.json b/spaces_versioned_docs/version-1.14/howtos/self-hosted/workload-id/_category_.json
similarity index 100%
rename from spaces_versioned_docs/version-v1.11/howtos/self-hosted/workload-id/_category_.json
rename to spaces_versioned_docs/version-1.14/howtos/self-hosted/workload-id/_category_.json
diff --git a/spaces_versioned_docs/version-v1.11/howtos/self-hosted/workload-id/backup-restore-config.md b/spaces_versioned_docs/version-1.14/howtos/self-hosted/workload-id/backup-restore-config.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.11/howtos/self-hosted/workload-id/backup-restore-config.md
rename to spaces_versioned_docs/version-1.14/howtos/self-hosted/workload-id/backup-restore-config.md
diff --git a/spaces_versioned_docs/version-v1.11/howtos/self-hosted/workload-id/billing-config.md b/spaces_versioned_docs/version-1.14/howtos/self-hosted/workload-id/billing-config.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.11/howtos/self-hosted/workload-id/billing-config.md
rename to spaces_versioned_docs/version-1.14/howtos/self-hosted/workload-id/billing-config.md
diff --git a/spaces_versioned_docs/version-v1.11/howtos/self-hosted/workload-id/eso-config.md b/spaces_versioned_docs/version-1.14/howtos/self-hosted/workload-id/eso-config.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.11/howtos/self-hosted/workload-id/eso-config.md
rename to spaces_versioned_docs/version-1.14/howtos/self-hosted/workload-id/eso-config.md
diff --git a/spaces_versioned_docs/version-v1.11/howtos/simulations.md b/spaces_versioned_docs/version-1.14/howtos/simulations.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.11/howtos/simulations.md
rename to spaces_versioned_docs/version-1.14/howtos/simulations.md
diff --git a/spaces_versioned_docs/version-v1.11/overview/_category_.json b/spaces_versioned_docs/version-1.14/overview/_category_.json
similarity index 100%
rename from spaces_versioned_docs/version-v1.11/overview/_category_.json
rename to spaces_versioned_docs/version-1.14/overview/_category_.json
diff --git a/spaces_versioned_docs/version-v1.11/overview/index.md b/spaces_versioned_docs/version-1.14/overview/index.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.11/overview/index.md
rename to spaces_versioned_docs/version-1.14/overview/index.md
diff --git a/spaces_versioned_docs/version-v1.11/reference/_category_.json b/spaces_versioned_docs/version-1.14/reference/_category_.json
similarity index 100%
rename from spaces_versioned_docs/version-v1.11/reference/_category_.json
rename to spaces_versioned_docs/version-1.14/reference/_category_.json
diff --git a/spaces_versioned_docs/version-v1.11/reference/index.md b/spaces_versioned_docs/version-1.14/reference/index.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.11/reference/index.md
rename to spaces_versioned_docs/version-1.14/reference/index.md
diff --git a/spaces_versioned_docs/version-v1.12/concepts/_category_.json b/spaces_versioned_docs/version-1.15/concepts/_category_.json
similarity index 100%
rename from spaces_versioned_docs/version-v1.12/concepts/_category_.json
rename to spaces_versioned_docs/version-1.15/concepts/_category_.json
diff --git a/spaces_versioned_docs/version-v1.12/concepts/control-planes.md b/spaces_versioned_docs/version-1.15/concepts/control-planes.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.12/concepts/control-planes.md
rename to spaces_versioned_docs/version-1.15/concepts/control-planes.md
diff --git a/spaces_versioned_docs/version-v1.12/concepts/deployment-modes.md b/spaces_versioned_docs/version-1.15/concepts/deployment-modes.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.12/concepts/deployment-modes.md
rename to spaces_versioned_docs/version-1.15/concepts/deployment-modes.md
diff --git a/spaces_versioned_docs/version-v1.12/concepts/groups.md b/spaces_versioned_docs/version-1.15/concepts/groups.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.12/concepts/groups.md
rename to spaces_versioned_docs/version-1.15/concepts/groups.md
diff --git a/spaces_versioned_docs/version-v1.12/howtos/_category_.json b/spaces_versioned_docs/version-1.15/howtos/_category_.json
similarity index 100%
rename from spaces_versioned_docs/version-v1.12/howtos/_category_.json
rename to spaces_versioned_docs/version-1.15/howtos/_category_.json
diff --git a/spaces_versioned_docs/version-v1.12/howtos/api-connector.md b/spaces_versioned_docs/version-1.15/howtos/api-connector.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.12/howtos/api-connector.md
rename to spaces_versioned_docs/version-1.15/howtos/api-connector.md
diff --git a/spaces_versioned_docs/version-v1.12/howtos/auto-upgrade.md b/spaces_versioned_docs/version-1.15/howtos/auto-upgrade.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.12/howtos/auto-upgrade.md
rename to spaces_versioned_docs/version-1.15/howtos/auto-upgrade.md
diff --git a/spaces_versioned_docs/version-v1.12/howtos/automation-and-gitops/_category_.json b/spaces_versioned_docs/version-1.15/howtos/automation-and-gitops/_category_.json
similarity index 100%
rename from spaces_versioned_docs/version-v1.12/howtos/automation-and-gitops/_category_.json
rename to spaces_versioned_docs/version-1.15/howtos/automation-and-gitops/_category_.json
diff --git a/spaces_versioned_docs/version-v1.12/howtos/automation-and-gitops/overview.md b/spaces_versioned_docs/version-1.15/howtos/automation-and-gitops/overview.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.12/howtos/automation-and-gitops/overview.md
rename to spaces_versioned_docs/version-1.15/howtos/automation-and-gitops/overview.md
diff --git a/spaces_versioned_docs/version-v1.12/howtos/backup-and-restore.md b/spaces_versioned_docs/version-1.15/howtos/backup-and-restore.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.12/howtos/backup-and-restore.md
rename to spaces_versioned_docs/version-1.15/howtos/backup-and-restore.md
diff --git a/spaces_versioned_docs/version-v1.12/howtos/cloud-spaces/_category_.json b/spaces_versioned_docs/version-1.15/howtos/cloud-spaces/_category_.json
similarity index 100%
rename from spaces_versioned_docs/version-v1.12/howtos/cloud-spaces/_category_.json
rename to spaces_versioned_docs/version-1.15/howtos/cloud-spaces/_category_.json
diff --git a/spaces_versioned_docs/version-v1.12/howtos/cloud-spaces/dedicated-spaces-deployment.md b/spaces_versioned_docs/version-1.15/howtos/cloud-spaces/dedicated-spaces-deployment.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.12/howtos/cloud-spaces/dedicated-spaces-deployment.md
rename to spaces_versioned_docs/version-1.15/howtos/cloud-spaces/dedicated-spaces-deployment.md
diff --git a/spaces_versioned_docs/version-v1.12/howtos/cloud-spaces/gitops-on-upbound.md b/spaces_versioned_docs/version-1.15/howtos/cloud-spaces/gitops-on-upbound.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.12/howtos/cloud-spaces/gitops-on-upbound.md
rename to spaces_versioned_docs/version-1.15/howtos/cloud-spaces/gitops-on-upbound.md
diff --git a/spaces_versioned_docs/version-v1.12/howtos/control-plane-topologies.md b/spaces_versioned_docs/version-1.15/howtos/control-plane-topologies.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.12/howtos/control-plane-topologies.md
rename to spaces_versioned_docs/version-1.15/howtos/control-plane-topologies.md
diff --git a/spaces_versioned_docs/version-v1.12/howtos/ctp-connector.md b/spaces_versioned_docs/version-1.15/howtos/ctp-connector.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.12/howtos/ctp-connector.md
rename to spaces_versioned_docs/version-1.15/howtos/ctp-connector.md
diff --git a/spaces_versioned_docs/version-v1.12/howtos/debugging-a-ctp.md b/spaces_versioned_docs/version-1.15/howtos/debugging-a-ctp.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.12/howtos/debugging-a-ctp.md
rename to spaces_versioned_docs/version-1.15/howtos/debugging-a-ctp.md
diff --git a/spaces_versioned_docs/version-v1.12/howtos/managed-service.md b/spaces_versioned_docs/version-1.15/howtos/managed-service.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.12/howtos/managed-service.md
rename to spaces_versioned_docs/version-1.15/howtos/managed-service.md
diff --git a/spaces_versioned_docs/version-v1.12/howtos/mcp-connector-guide.md b/spaces_versioned_docs/version-1.15/howtos/mcp-connector-guide.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.12/howtos/mcp-connector-guide.md
rename to spaces_versioned_docs/version-1.15/howtos/mcp-connector-guide.md
diff --git a/spaces_versioned_docs/version-v1.12/howtos/migrating-to-mcps.md b/spaces_versioned_docs/version-1.15/howtos/migrating-to-mcps.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.12/howtos/migrating-to-mcps.md
rename to spaces_versioned_docs/version-1.15/howtos/migrating-to-mcps.md
diff --git a/spaces_versioned_docs/version-v1.12/howtos/observability.md b/spaces_versioned_docs/version-1.15/howtos/observability.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.12/howtos/observability.md
rename to spaces_versioned_docs/version-1.15/howtos/observability.md
diff --git a/spaces_versioned_docs/version-v1.12/howtos/query-api.md b/spaces_versioned_docs/version-1.15/howtos/query-api.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.12/howtos/query-api.md
rename to spaces_versioned_docs/version-1.15/howtos/query-api.md
diff --git a/spaces_versioned_docs/version-v1.12/howtos/secrets-management.md b/spaces_versioned_docs/version-1.15/howtos/secrets-management.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.12/howtos/secrets-management.md
rename to spaces_versioned_docs/version-1.15/howtos/secrets-management.md
diff --git a/spaces_versioned_docs/version-v1.12/howtos/self-hosted/_category_.json b/spaces_versioned_docs/version-1.15/howtos/self-hosted/_category_.json
similarity index 100%
rename from spaces_versioned_docs/version-v1.12/howtos/self-hosted/_category_.json
rename to spaces_versioned_docs/version-1.15/howtos/self-hosted/_category_.json
diff --git a/spaces_versioned_docs/version-v1.12/howtos/self-hosted/administer-features.md b/spaces_versioned_docs/version-1.15/howtos/self-hosted/administer-features.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.12/howtos/self-hosted/administer-features.md
rename to spaces_versioned_docs/version-1.15/howtos/self-hosted/administer-features.md
diff --git a/spaces_versioned_docs/version-v1.12/howtos/self-hosted/attach-detach.md b/spaces_versioned_docs/version-1.15/howtos/self-hosted/attach-detach.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.12/howtos/self-hosted/attach-detach.md
rename to spaces_versioned_docs/version-1.15/howtos/self-hosted/attach-detach.md
diff --git a/spaces_versioned_docs/version-v1.12/howtos/self-hosted/billing.md b/spaces_versioned_docs/version-1.15/howtos/self-hosted/billing.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.12/howtos/self-hosted/billing.md
rename to spaces_versioned_docs/version-1.15/howtos/self-hosted/billing.md
diff --git a/spaces_versioned_docs/version-v1.12/howtos/self-hosted/capacity-licensing.md b/spaces_versioned_docs/version-1.15/howtos/self-hosted/capacity-licensing.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.12/howtos/self-hosted/capacity-licensing.md
rename to spaces_versioned_docs/version-1.15/howtos/self-hosted/capacity-licensing.md
diff --git a/spaces_versioned_docs/version-v1.12/howtos/self-hosted/certs.md b/spaces_versioned_docs/version-1.15/howtos/self-hosted/certs.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.12/howtos/self-hosted/certs.md
rename to spaces_versioned_docs/version-1.15/howtos/self-hosted/certs.md
diff --git a/spaces_versioned_docs/version-v1.12/howtos/self-hosted/configure-ha.md b/spaces_versioned_docs/version-1.15/howtos/self-hosted/configure-ha.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.12/howtos/self-hosted/configure-ha.md
rename to spaces_versioned_docs/version-1.15/howtos/self-hosted/configure-ha.md
diff --git a/spaces_versioned_docs/version-v1.12/howtos/self-hosted/controllers.md b/spaces_versioned_docs/version-1.15/howtos/self-hosted/controllers.md
similarity index 100%
rename from spaces_versioned_docs/version-v1.12/howtos/self-hosted/controllers.md
rename to
spaces_versioned_docs/version-1.15/howtos/self-hosted/controllers.md diff --git a/spaces_versioned_docs/version-v1.12/howtos/self-hosted/ctp-audit-logs.md b/spaces_versioned_docs/version-1.15/howtos/self-hosted/ctp-audit-logs.md similarity index 100% rename from spaces_versioned_docs/version-v1.12/howtos/self-hosted/ctp-audit-logs.md rename to spaces_versioned_docs/version-1.15/howtos/self-hosted/ctp-audit-logs.md diff --git a/spaces_versioned_docs/version-v1.12/howtos/self-hosted/declarative-ctps.md b/spaces_versioned_docs/version-1.15/howtos/self-hosted/declarative-ctps.md similarity index 100% rename from spaces_versioned_docs/version-v1.12/howtos/self-hosted/declarative-ctps.md rename to spaces_versioned_docs/version-1.15/howtos/self-hosted/declarative-ctps.md diff --git a/spaces_versioned_docs/version-v1.12/howtos/self-hosted/deployment-reqs.md b/spaces_versioned_docs/version-1.15/howtos/self-hosted/deployment-reqs.md similarity index 100% rename from spaces_versioned_docs/version-v1.12/howtos/self-hosted/deployment-reqs.md rename to spaces_versioned_docs/version-1.15/howtos/self-hosted/deployment-reqs.md diff --git a/spaces_versioned_docs/version-v1.12/howtos/self-hosted/dr.md b/spaces_versioned_docs/version-1.15/howtos/self-hosted/dr.md similarity index 100% rename from spaces_versioned_docs/version-v1.12/howtos/self-hosted/dr.md rename to spaces_versioned_docs/version-1.15/howtos/self-hosted/dr.md diff --git a/spaces_versioned_docs/version-v1.12/howtos/self-hosted/gitops-with-argocd.md b/spaces_versioned_docs/version-1.15/howtos/self-hosted/gitops-with-argocd.md similarity index 100% rename from spaces_versioned_docs/version-v1.12/howtos/self-hosted/gitops-with-argocd.md rename to spaces_versioned_docs/version-1.15/howtos/self-hosted/gitops-with-argocd.md diff --git a/spaces_versioned_docs/version-v1.12/howtos/self-hosted/managed-spaces-deployment.md b/spaces_versioned_docs/version-1.15/howtos/self-hosted/managed-spaces-deployment.md similarity index 100% rename from spaces_versioned_docs/version-v1.12/howtos/self-hosted/managed-spaces-deployment.md rename to spaces_versioned_docs/version-1.15/howtos/self-hosted/managed-spaces-deployment.md diff --git a/spaces_versioned_docs/version-v1.12/howtos/self-hosted/oidc-configuration.md b/spaces_versioned_docs/version-1.15/howtos/self-hosted/oidc-configuration.md similarity index 100% rename from spaces_versioned_docs/version-v1.12/howtos/self-hosted/oidc-configuration.md rename to spaces_versioned_docs/version-1.15/howtos/self-hosted/oidc-configuration.md diff --git a/spaces_versioned_docs/version-v1.12/howtos/self-hosted/proxies-config.md b/spaces_versioned_docs/version-1.15/howtos/self-hosted/proxies-config.md similarity index 100% rename from spaces_versioned_docs/version-v1.12/howtos/self-hosted/proxies-config.md rename to spaces_versioned_docs/version-1.15/howtos/self-hosted/proxies-config.md diff --git a/spaces_versioned_docs/version-v1.12/howtos/self-hosted/query-api.md b/spaces_versioned_docs/version-1.15/howtos/self-hosted/query-api.md similarity index 100% rename from spaces_versioned_docs/version-v1.12/howtos/self-hosted/query-api.md rename to spaces_versioned_docs/version-1.15/howtos/self-hosted/query-api.md diff --git a/spaces_versioned_docs/version-v1.12/howtos/self-hosted/scaling-resources.md b/spaces_versioned_docs/version-1.15/howtos/self-hosted/scaling-resources.md similarity index 100% rename from spaces_versioned_docs/version-v1.12/howtos/self-hosted/scaling-resources.md rename to 
spaces_versioned_docs/version-1.15/howtos/self-hosted/scaling-resources.md diff --git a/spaces_versioned_docs/version-v1.12/howtos/self-hosted/self-hosted-spaces-deployment.md b/spaces_versioned_docs/version-1.15/howtos/self-hosted/self-hosted-spaces-deployment.md similarity index 100% rename from spaces_versioned_docs/version-v1.12/howtos/self-hosted/self-hosted-spaces-deployment.md rename to spaces_versioned_docs/version-1.15/howtos/self-hosted/self-hosted-spaces-deployment.md diff --git a/spaces_versioned_docs/version-v1.12/howtos/self-hosted/space-observability.md b/spaces_versioned_docs/version-1.15/howtos/self-hosted/space-observability.md similarity index 100% rename from spaces_versioned_docs/version-v1.12/howtos/self-hosted/space-observability.md rename to spaces_versioned_docs/version-1.15/howtos/self-hosted/space-observability.md diff --git a/spaces_versioned_docs/version-v1.12/howtos/self-hosted/spaces-management.md b/spaces_versioned_docs/version-1.15/howtos/self-hosted/spaces-management.md similarity index 100% rename from spaces_versioned_docs/version-v1.12/howtos/self-hosted/spaces-management.md rename to spaces_versioned_docs/version-1.15/howtos/self-hosted/spaces-management.md diff --git a/spaces_versioned_docs/version-v1.12/howtos/self-hosted/troubleshooting.md b/spaces_versioned_docs/version-1.15/howtos/self-hosted/troubleshooting.md similarity index 100% rename from spaces_versioned_docs/version-v1.12/howtos/self-hosted/troubleshooting.md rename to spaces_versioned_docs/version-1.15/howtos/self-hosted/troubleshooting.md diff --git a/spaces_versioned_docs/version-v1.15/howtos/self-hosted/use-argo.md b/spaces_versioned_docs/version-1.15/howtos/self-hosted/use-argo.md similarity index 100% rename from spaces_versioned_docs/version-v1.15/howtos/self-hosted/use-argo.md rename to spaces_versioned_docs/version-1.15/howtos/self-hosted/use-argo.md diff --git a/spaces_versioned_docs/version-v1.12/howtos/self-hosted/workload-id/_category_.json b/spaces_versioned_docs/version-1.15/howtos/self-hosted/workload-id/_category_.json similarity index 100% rename from spaces_versioned_docs/version-v1.12/howtos/self-hosted/workload-id/_category_.json rename to spaces_versioned_docs/version-1.15/howtos/self-hosted/workload-id/_category_.json diff --git a/spaces_versioned_docs/version-v1.12/howtos/self-hosted/workload-id/backup-restore-config.md b/spaces_versioned_docs/version-1.15/howtos/self-hosted/workload-id/backup-restore-config.md similarity index 100% rename from spaces_versioned_docs/version-v1.12/howtos/self-hosted/workload-id/backup-restore-config.md rename to spaces_versioned_docs/version-1.15/howtos/self-hosted/workload-id/backup-restore-config.md diff --git a/spaces_versioned_docs/version-v1.12/howtos/self-hosted/workload-id/billing-config.md b/spaces_versioned_docs/version-1.15/howtos/self-hosted/workload-id/billing-config.md similarity index 100% rename from spaces_versioned_docs/version-v1.12/howtos/self-hosted/workload-id/billing-config.md rename to spaces_versioned_docs/version-1.15/howtos/self-hosted/workload-id/billing-config.md diff --git a/spaces_versioned_docs/version-v1.12/howtos/self-hosted/workload-id/eso-config.md b/spaces_versioned_docs/version-1.15/howtos/self-hosted/workload-id/eso-config.md similarity index 100% rename from spaces_versioned_docs/version-v1.12/howtos/self-hosted/workload-id/eso-config.md rename to spaces_versioned_docs/version-1.15/howtos/self-hosted/workload-id/eso-config.md diff --git a/spaces_versioned_docs/version-v1.12/howtos/simulations.md 
b/spaces_versioned_docs/version-1.15/howtos/simulations.md similarity index 100% rename from spaces_versioned_docs/version-v1.12/howtos/simulations.md rename to spaces_versioned_docs/version-1.15/howtos/simulations.md diff --git a/spaces_versioned_docs/version-v1.12/overview/_category_.json b/spaces_versioned_docs/version-1.15/overview/_category_.json similarity index 100% rename from spaces_versioned_docs/version-v1.12/overview/_category_.json rename to spaces_versioned_docs/version-1.15/overview/_category_.json diff --git a/spaces_versioned_docs/version-v1.15/overview/index.md b/spaces_versioned_docs/version-1.15/overview/index.md similarity index 100% rename from spaces_versioned_docs/version-v1.15/overview/index.md rename to spaces_versioned_docs/version-1.15/overview/index.md diff --git a/spaces_versioned_docs/version-v1.12/reference/_category_.json b/spaces_versioned_docs/version-1.15/reference/_category_.json similarity index 100% rename from spaces_versioned_docs/version-v1.12/reference/_category_.json rename to spaces_versioned_docs/version-1.15/reference/_category_.json diff --git a/spaces_versioned_docs/version-v1.12/reference/index.md b/spaces_versioned_docs/version-1.15/reference/index.md similarity index 100% rename from spaces_versioned_docs/version-v1.12/reference/index.md rename to spaces_versioned_docs/version-1.15/reference/index.md diff --git a/spaces_versioned_docs/version-v1.12/howtos/self-hosted/use-argo.md b/spaces_versioned_docs/version-v1.12/howtos/self-hosted/use-argo.md deleted file mode 100644 index d58f7db44..000000000 --- a/spaces_versioned_docs/version-v1.12/howtos/self-hosted/use-argo.md +++ /dev/null @@ -1,228 +0,0 @@ ---- -title: Use ArgoCD Plugin -sidebar_position: 15 -description: A guide for integrating Argo with control planes in a Space. -aliases: - - /all-spaces/self-hosted-spaces/use-argo - - /deploy/disconnected-spaces/use-argo-flux - - /all-spaces/self-hosted-spaces/use-argo-flux - - /connect/use-argo ---- - - -:::info API Version Information -This guide covers the ArgoCD Plugin, a preview feature available in all supported versions (v1.9-v1.14+) for self-hosted deployments. - -For details on GitOps patterns and related features across versions, see [Automation and GitOps Overview](/spaces/howtos/automation-and-gitops/overview/). -::: - -:::important -This feature is in preview and is off by default. To enable, set `features.alpha.argocdPlugin.enabled=true` when installing Spaces: - -```bash -up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \ - ... - --set "features.alpha.argocdPlugin.enabled=true" -``` -::: - -Spaces provides an optional plugin to assist with integrating a control plane in a Space with Argo CD. You must enable the plugin for the entire Space at Spaces install or upgrade time. The plugin's job is to propagate the connection details of each control plane in a Space to Argo CD. By default, Upbound stores these connection details in a Kubernetes secret named after the control plane. To run Argo CD across multiple namespaces, Upbound recommends enabling the `features.alpha.argocdPlugin.useUIDFormatForCTPSecrets` flag to use a UID-based format for secret names to avoid conflicts. - -:::tip -For general guidance on integrating Upbound with GitOps flows, see [GitOps with Control Planes][gitops-with-control-planes]. 
-::: - -## On-cluster Argo CD - -If you are running Argo CD on the same cluster as the Space, run the following to enable the plugin: - - - - - - -```bash {hl_lines="3-4"} -up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \ - --set "account=${UPBOUND_ACCOUNT}" \ - --set "features.alpha.argocdPlugin.enabled=true" \ - --set "features.alpha.argocdPlugin.useUIDFormatForCTPSecrets=true" \ - --set "features.alpha.argocdPlugin.target.secretNamespace=argocd" -``` - - - - - -```bash {hl_lines="7-8"} -helm -n upbound-system upgrade --install spaces \ - oci://xpkg.upbound.io/spaces-artifacts/spaces \ - --version "${SPACES_VERSION}" \ - --set "ingress.host=${SPACES_ROUTER_HOST}" \ - --set "account=${UPBOUND_ACCOUNT}" \ - --set "features.alpha.argocdPlugin.enabled=true" \ - --set "features.alpha.argocdPlugin.useUIDFormatForCTPSecrets=true" \ - --set "features.alpha.argocdPlugin.target.secretNamespace=argocd" \ - --wait -``` - - - - - - -The important flags are: - -- `features.alpha.argocdPlugin.enabled=true` -- `features.alpha.argocdPlugin.useUIDFormatForCTPSecrets=true` -- `features.alpha.argocdPlugin.target.secretNamespace=argocd` - -The first flag enables the feature, the second opts into UID-based names for the propagated connection secrets, and the third indicates the namespace on the cluster where you installed Argo CD. - -Be sure to [configure Argo][configure-argo] after it's installed. - -## External cluster Argo CD - -If you are running Argo CD on a different cluster from the one where you installed your Space, you need to provide some extra flags: - - - - - - -```bash {hl_lines="3-7"} -up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \ - --set "account=${UPBOUND_ACCOUNT}" \ - --set "features.alpha.argocdPlugin.enabled=true" \ - --set "features.alpha.argocdPlugin.useUIDFormatForCTPSecrets=true" \ - --set "features.alpha.argocdPlugin.target.secretNamespace=argocd" \ - --set "features.alpha.argocdPlugin.target.externalCluster.enabled=true" \ - --set "features.alpha.argocdPlugin.target.externalCluster.secret.name=my-argo-cluster" \ - --set "features.alpha.argocdPlugin.target.externalCluster.secret.key=kubeconfig" -``` - - - - - -```bash {hl_lines="7-11"} -helm -n upbound-system upgrade --install spaces \ - oci://xpkg.upbound.io/spaces-artifacts/spaces \ - --version "${SPACES_VERSION}" \ - --set "ingress.host=${SPACES_ROUTER_HOST}" \ - --set "account=${UPBOUND_ACCOUNT}" \ - --set "features.alpha.argocdPlugin.enabled=true" \ - --set "features.alpha.argocdPlugin.useUIDFormatForCTPSecrets=true" \ - --set "features.alpha.argocdPlugin.target.secretNamespace=argocd" \ - --set "features.alpha.argocdPlugin.target.externalCluster.enabled=true" \ - --set "features.alpha.argocdPlugin.target.externalCluster.secret.name=my-argo-cluster" \ - --set "features.alpha.argocdPlugin.target.externalCluster.secret.key=kubeconfig" \ - --wait -``` - -The extra flags are: -
-- `features.alpha.argocdPlugin.target.externalCluster.enabled=true` -- `features.alpha.argocdPlugin.target.externalCluster.secret.name=my-argo-cluster` -- `features.alpha.argocdPlugin.target.externalCluster.secret.key=kubeconfig` - -These flags tell the plugin (running in Spaces) where your Argo CD instance is. After you've set these at install time, you also need to create a `Secret` on the Spaces cluster. This secret must contain a kubeconfig pointing to your Argo CD instance. The secret needs to be in the same namespace as the `spaces-controller`, which is `upbound-system`. - -Once you enable the plugin and configure it, the plugin automatically propagates connection details for your control planes to your Argo CD instance. You can then target the control plane and use Argo to sync Crossplane-related objects to it. - -Be sure to [configure Argo][configure-argo-1] after it's installed. - -## Configure Argo - -Argo's default configuration causes it to try to query for resource kinds that don't exist in control planes. You should configure Argo's [general configmap][general-configmap] to include the resource group/kinds which make sense in the context of control planes. For example, the concept of `nodes` isn't exposed in control planes. - -To configure Argo CD, connect to the cluster where you've installed it and edit the configmap: - -```bash -kubectl edit configmap argocd-cm -n argocd -``` - -Adjust the resource inclusions and exclusions under the `data` field of the configmap: - -```yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: argocd-cm - namespace: argocd -data: - resource.exclusions: | - - apiGroups: - - "*" - kinds: - - "*" - clusters: - - "*" - resource.inclusions: | - - apiGroups: - - "*" - kinds: - - Provider - - Configuration - clusters: - - "*" -``` - -The preceding configuration causes Argo to exclude syncing **all** resource group/kinds--except Crossplane `providers` and `configurations`--for **all** control planes. You're encouraged to adjust the `resource.inclusions` to include the types that make sense for your control plane, such as an `XRD` you've built with Crossplane. You're also encouraged to customize the `clusters` pattern to selectively apply these exclusions/inclusions to control planes (for example, `control-plane-prod-*`). - -## Control plane connection secrets - -To deploy control planes through Argo CD, you need to configure the `writeConnectionSecretToRef` field in your control plane spec. This field specifies where to store the control plane's `kubeconfig` and makes connection details available to Argo CD. - -### Basic Configuration - -In your control plane manifest, include the `writeConnectionSecretToRef` field: - -```yaml -apiVersion: spaces.upbound.io/v1beta1 -kind: ControlPlane -metadata: - name: my-control-plane - namespace: my-control-plane-group -spec: - writeConnectionSecretToRef: - name: kubeconfig-my-control-plane - namespace: my-control-plane-group - # ... other control plane configuration -``` - -### Parameters - -The `writeConnectionSecretToRef` field requires two parameters: - -- `name`: A unique name for the secret containing the kubeconfig (`kubeconfig-my-control-plane`) -- `namespace`: The Kubernetes namespace where you store the secret, which must match the metadata namespace. The system copies it into the `argocd` namespace when you set the `features.alpha.argocdPlugin.target.secretNamespace=argocd` configuration parameter.
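With the connection secret in place, you can point an Argo CD `Application` at the control plane. The following is a minimal sketch rather than a definitive configuration: the repository URL and path are placeholders, and the `destination.name` assumes the plugin registers the control plane under its connection secret name; verify the actual cluster name with `argocd cluster list`.

```yaml
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: my-control-plane-claims
  namespace: argocd
spec:
  project: default
  source:
    # Hypothetical Git repository holding Crossplane claims or XRs
    repoURL: https://github.com/example/platform-claims.git
    targetRevision: main
    path: claims
  destination:
    # Assumed cluster name; confirm with `argocd cluster list`
    name: kubeconfig-my-control-plane
  syncPolicy:
    automated:
      prune: true
      selfHeal: true
```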
- -Control plane labels automatically propagate to the connection secret, which allows you to use label selectors in Argo CD for automated discovery and management. - -This configuration enables Argo CD to automatically discover and manage resources on your control planes. - - -[gitops-with-control-planes]: /spaces/howtos/cloud-spaces/gitops -[configure-argo]: #configure-argo -[configure-argo-1]: #configure-argo -[general-configmap]: https://argo-cd.readthedocs.io/en/stable/operator-manual/argocd-cm-yaml/ diff --git a/spaces_versioned_docs/version-v1.12/overview/index.md b/spaces_versioned_docs/version-v1.12/overview/index.md deleted file mode 100644 index 7b79f6e44..000000000 --- a/spaces_versioned_docs/version-v1.12/overview/index.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Spaces Overview -sidebar_position: 0 ---- - -# Upbound Spaces - -Welcome to the Upbound Spaces documentation. This section contains comprehensive documentation for Spaces API and Spaces operations across all supported versions (v1.9 through v1.15). - -## Get Started - -- **[Concepts](/spaces/concepts/control-planes)** - Core concepts for Spaces -- **[How-To Guides](/spaces/howtos/auto-upgrade)** - Step-by-step guides for operating Spaces -- **[API Reference](/spaces/reference/)** - API specifications and resources diff --git a/spaces_versioned_docs/version-v1.13/concepts/_category_.json b/spaces_versioned_docs/version-v1.13/concepts/_category_.json deleted file mode 100644 index 4b8667e29..000000000 --- a/spaces_versioned_docs/version-v1.13/concepts/_category_.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "label": "Concepts", - "position": 2, - "collapsed": true -} - - diff --git a/spaces_versioned_docs/version-v1.13/concepts/control-planes.md b/spaces_versioned_docs/version-v1.13/concepts/control-planes.md deleted file mode 100644 index 7066343de..000000000 --- a/spaces_versioned_docs/version-v1.13/concepts/control-planes.md +++ /dev/null @@ -1,227 +0,0 @@ ---- -title: Control Planes -weight: 1 -description: An overview of control planes in Upbound --- - - -Control planes in Upbound are fully isolated Crossplane control plane instances that Upbound manages for you. This means Upbound manages: - -- the underlying lifecycle of the infrastructure (compute, memory, and storage) required to power your instance. -- the scaling of the infrastructure. -- the maintenance of the core Crossplane components that make up a control plane. - -This lets users focus on building their APIs and operating their control planes, while Upbound handles the rest. Each control plane has its own dedicated API server connecting users to their control plane. - -:::info API Version Information -This guide applies to all supported versions (v1.9-v1.14+). - -For detailed information about Control Plane API specifications, CRD fields, and version compatibility, see the [Spaces API Reference](../../../reference/apis/spaces-api/). -::: - -## Control plane architecture - -![Managed Control Plane Architecture](/img/mcp.png) - -Along with underlying infrastructure, Upbound manages the Crossplane system components. You don't need to manage the Crossplane API server or core resource controllers because Upbound manages your control plane lifecycle from creation to deletion. - -### Crossplane API - -Each control plane offers a unified endpoint. You interact with your control plane through Kubernetes and Crossplane API calls. Each control plane runs a Kubernetes API server to handle API requests.
You can make API calls in the following ways: - -- Direct calls: HTTP/gRPC -- Indirect calls: the up CLI, Kubernetes clients such as kubectl, or the Upbound Console. - -Like in Kubernetes, the API server is the hub for all communication for the control plane. All internal components such as system processes and provider controllers act as clients of the API server. - -Your API requests tell Crossplane your desired state for the resources your control plane manages. Crossplane attempts to constantly maintain that state. Crossplane lets you configure objects in the API either imperatively or declaratively. - -### Crossplane versions and features - -Upbound automatically upgrades Crossplane system components on control planes to new Crossplane versions for updated features and improvements in the open source project. With [automatic upgrades][automatic-upgrades], you choose the cadence at which Upbound automatically upgrades the system components in your control plane. You can also choose to manually upgrade your control plane to a different Crossplane version. - -For detailed information on versions and upgrades, refer to the [release notes][release-notes] and the automatic upgrade documentation. If you don't enroll a control plane in a release channel, Upbound doesn't apply automatic upgrades. - -Features considered "alpha" in Crossplane are by default not supported in a control plane unless otherwise specified. - -### Hosting environments - -Every control plane in Upbound belongs to a [control plane group][control-plane-group]. Control plane groups are a logical grouping of one or more control planes with shared objects (such as secrets or backup configuration). Every group resides in a [Space][space] in Upbound; Spaces are the hosting environments for control planes. - -Think of a Space as being conceptually the same as an AWS, Azure, or GCP region. Regardless of the Space type you run a control plane in, the core experience is identical. - -## Management - -### Create a control plane - -You can create a new control plane from the Upbound Console, [up CLI][up-cli], or with Kubernetes clients such as `kubectl`. - - - - - -To use the CLI, run the following: - -```shell -up ctp create <control-plane-name> -``` - -To learn more about control plane-related commands in `up`, go to the [CLI reference][cli-reference] documentation. - - - -You can create and manage control planes declaratively in Upbound. Before you -begin, ensure you're logged into Upbound and set the correct context: - -```bash -up login -# Example: acmeco/upbound-gcp-us-west-1/default -up ctx ${yourOrganization}/${yourSpace}/${yourGroup} -``` - -```yaml -#controlplane-a.yaml -apiVersion: spaces.upbound.io/v1beta1 -kind: ControlPlane -metadata: - name: controlplane-a -spec: - crossplane: - autoUpgrade: - channel: Rapid -``` - -```bash -kubectl apply -f controlplane-a.yaml -``` - - - - - -### Connect directly to your control plane - -Each control plane offers a unified endpoint. You interact with your control plane through Kubernetes and Crossplane API calls. Each control plane runs a Kubernetes API server to handle API requests. - -You can connect to a control plane's API server directly via the up CLI.
Use the [`up ctx`][up-ctx] command to set your kubeconfig's current context to a control plane: - -```shell -# Example: acmeco/upbound-gcp-us-west-1/default/ctp1 -up ctx ${yourOrganization}/${yourSpace}/${yourGroup}/${yourControlPlane} -``` - -To disconnect from your control plane and revert your kubeconfig's current context to the previous entry, run the following: - -```shell -up ctx .. -``` - -You can also generate a `kubeconfig` file for a control plane with [`up ctx -f`][up-ctx-f]. - -```shell -up ctx ${yourOrganization}/${yourSpace}/${yourGroup}/${yourControlPlane} -f - > ctp-kubeconfig.yaml -``` - -:::tip -To learn more about how to use `up ctx` to navigate different contexts in Upbound, read the [CLI documentation][cli-documentation]. -::: - -## Configuration - -When you create a new control plane, Upbound provides you with a fully isolated instance of Crossplane. Configure your control plane by installing packages that extend its capabilities, such as the ability to create and manage the lifecycle of new types of infrastructure resources. - -You're encouraged to install any Crossplane package type (Providers, Configurations, Functions) available in the [Upbound Marketplace][upbound-marketplace] on your control planes. - -### Install packages - -Below are a couple of ways to install Crossplane packages on your control plane. - - - - - - -Use the `up` CLI to install Crossplane packages from the [Upbound Marketplace][upbound-marketplace-1] on your control planes. Connect directly to your control plane via `up ctx`. Then, to install a provider: - -```shell -up ctp provider install xpkg.upbound.io/upbound/provider-family-aws -``` - -To install a Configuration: - -```shell -up ctp configuration install xpkg.upbound.io/upbound/platform-ref-aws -``` - -To install a Function: - -```shell -up ctp function install xpkg.upbound.io/crossplane-contrib/function-kcl -``` - - -You can use kubectl to directly apply any Crossplane manifest. Below is an example for installing a Crossplane provider: - -```yaml -cat <<EOF | kubectl apply -f - -apiVersion: pkg.crossplane.io/v1 -kind: Provider -metadata: - name: provider-family-aws -spec: - package: xpkg.upbound.io/upbound/provider-family-aws -EOF -``` - - - -For production-grade scenarios, it's recommended you configure your control plane declaratively via Git plus a Continuous Delivery (CD) Engine such as Argo. For guidance on this topic, read [GitOps with control planes][gitops-with-control-planes]. - - - - - - -### Configure Crossplane ProviderConfigs - -#### ProviderConfigs with OpenID Connect - -Use OpenID Connect (`OIDC`) to authenticate to Upbound control planes without credentials. OIDC lets your control plane exchange short-lived tokens directly with your cloud provider. Read how to [connect control planes to external services][connect-control-planes-to-external-services] to learn more. - -#### Generic ProviderConfigs - -The Upbound Console doesn't allow direct editing of ProviderConfigs that don't support `Upbound` authentication. To edit these ProviderConfigs on your control plane, connect to the control plane directly by following the instructions in the previous section and using `kubectl`. - -### Configure secrets - -Upbound gives users the ability to configure the synchronization of secrets from external stores into control planes. Configure this capability at the group level, explained in the [Spaces documentation][spaces-documentation]. - -### Configure backups - -Upbound gives users the ability to configure backup schedules, take impromptu backups, and conduct self-service restore operations. Configure this capability at the group level, explained in the [Spaces documentation][spaces-documentation-1].
- -### Configure telemetry - - -Upbound gives users the ability to configure the collection of telemetry (logs, metrics, and traces) in their control planes. Using Upbound's built-in [OTEL][otel] support, you can stream this data out to your preferred observability solution. Configure this capability at the group level, explained in the [Spaces documentation][spaces-documentation-2]. - - - -[automatic-upgrades]: /spaces/howtos/auto-upgrade -[release-notes]: https://github.com/upbound/universal-crossplane/releases -[control-plane-group]: /spaces/concepts/groups -[space]: /spaces/overview -[up-cli]: /reference/cli-reference -[cli-reference]: /reference/cli-reference -[up-ctx]: /reference/cli-reference -[up-ctx-f]: /reference/cli-reference -[cli-documentation]: /manuals/cli/concepts/contexts -[upbound-marketplace]: https://marketplace.upbound.io -[upbound-marketplace-1]: https://marketplace.upbound.io -[gitops-with-control-planes]: /spaces/howtos/cloud-spaces/gitops -[connect-control-planes-to-external-services]: /manuals/platform/howtos/oidc -[spaces-documentation]: /spaces/howtos/secrets-management -[spaces-documentation-1]: /spaces/howtos/backup-and-restore -[otel]: https://otel.com -[spaces-documentation-2]: /spaces/howtos/observability diff --git a/spaces_versioned_docs/version-v1.13/concepts/deployment-modes.md b/spaces_versioned_docs/version-v1.13/concepts/deployment-modes.md deleted file mode 100644 index f5e718f88..000000000 --- a/spaces_versioned_docs/version-v1.13/concepts/deployment-modes.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -title: Deployment Modes -sidebar_position: 10 -description: An overview of deployment modes for Spaces --- - -Upbound Spaces can be deployed and used in a variety of modes: - -- **Cloud Spaces:** Multi-tenant Upbound-hosted, Upbound-managed Space environment. Cloud Spaces provide a typical SaaS experience. -- **[Dedicated Spaces][dedicated-spaces]:** Single-tenant Upbound-hosted, Upbound-managed Space environment. Dedicated Spaces provide a SaaS experience, with additional isolation guarantees that your workloads run in a fully isolated context. -- **[Managed Spaces][managed-spaces]:** Single-tenant customer-hosted, Upbound-managed Space environment. Managed Spaces provide a SaaS-like experience, with additional guarantees of all hosting infrastructure being served from your own cloud account. -- **[Self-Hosted Spaces][self-hosted-spaces]:** Single-tenant customer-hosted, customer-managed Space environment. This is a fully self-hosted, self-managed software experience for using Spaces. Upbound delivers the Spaces software and you run it yourself. - -The Upbound platform uses a federated model to connect each Space back to a -central service called the [Upbound Console][console], which is deployed and -managed by Upbound. - -By default, customers have access to a set of Cloud Spaces. - -## Supported clouds - -You can host Upbound Spaces on Amazon Web Services (AWS), Microsoft Azure, -and Google Cloud Platform (GCP). Regardless of the hosting platform, you can use -Spaces to deploy control planes that manage the lifecycle of your resources. - -## Supported regions - -This table lists the cloud service provider regions supported by Upbound.
- -### GCP - -| Region | Location | -| --- | --- | -| `us-west-1` | Western US (Oregon) | -| `us-central-1` | Central US (Iowa) | -| `eu-west-3` | Western Europe (Frankfurt) | - -### AWS - -| Region | Location | -| --- | --- | -| `us-east-1` | Eastern US (Northern Virginia) | - -### Azure - -| Region | Location | -| --- | --- | -| `us-east-1` | Eastern US (Virginia) | - -[dedicated-spaces]: /spaces/howtos/cloud-spaces/dedicated-spaces-deployment -[managed-spaces]: /spaces/howtos/self-hosted/managed-spaces-deployment -[self-hosted-spaces]: /spaces/howtos/self-hosted/self-hosted-spaces-deployment -[console]: /manuals/console/upbound-console/ diff --git a/spaces_versioned_docs/version-v1.13/concepts/groups.md b/spaces_versioned_docs/version-v1.13/concepts/groups.md deleted file mode 100644 index d2ccacdb3..000000000 --- a/spaces_versioned_docs/version-v1.13/concepts/groups.md +++ /dev/null @@ -1,115 +0,0 @@ ---- -title: Control Plane Groups -sidebar_position: 2 -description: An introduction to Control Plane Groups in Upbound -plan: "enterprise" --- - - - -In Upbound, Control Plane Groups (or just 'groups') are a logical grouping of one or more control planes with shared resources like [secrets][secrets] or [backups][backups]. It's a mechanism for isolating these groups of resources within a single [Space][space]. All role-based access control in Upbound happens at the control plane group level. - -## When to use multiple groups - -You should use groups in environments where there's a need to have Crossplane manage infrastructure across multiple cloud accounts or projects. For users who only need to deploy and manage resources in a couple of cloud accounts, you shouldn't need to think about groups at all. - -Groups are a way to divide access in Upbound between multiple teams. Think of a group as being analogous to a Kubernetes _namespace_. - -## The 'default' group - -Every Cloud Space in Upbound has a group named _default_ available. - -## Working with groups - -### View groups - -You can list groups in a Space using: - -```shell -up group list -``` - -If you're operating in a single-tenant Space and have access to the underlying cluster, you can list namespaces that have the group label: - -```shell -kubectl get namespaces -l spaces.upbound.io/group=true -``` - -### Set the group for a request - -Several commands in _up_ have a group context. To set the group for a request, use the `--group` flag: - -```shell -up ctp list --group=team1 -``` -```shell -up ctp create new-ctp --group=team2 -``` - -### Set the group preference - -The _up_ CLI operates upon a single [Upbound context][upbound-context]. Whatever context gets set is then used as the preference for other commands. An Upbound context is capable of pointing at a variety of altitudes: - -1. A Space in Upbound -2. A group within a Space -3. A control plane within a group - -To set the group preference, use `up ctx` to choose a group as your preferred Upbound context.
For example: - -```shell -# This sets the context for the up CLI to the default group in an Upbound-managed Cloud Space (gcp-us-west-1) for an organization called 'acmeco' -up ctx acmeco/upbound-gcp-us-west-1/default/ -``` - -### Create a group - -To create a group, log in to Upbound and set your context to your desired Space: - -```shell -up login -up ctx '/' -# Example: up ctx acmeco/upbound-gcp-us-west-1 -``` - - -Create a group: - -```shell -up group create my-new-group -``` - -### Delete a group - -To delete a group, log in to Upbound and set your context to your desired Space: - -```shell -up login -up ctx '/' -# Example: up ctx acmeco/upbound-gcp-us-west-1 -``` - -Delete a group: - -```shell -up group delete my-new-group -``` - -### Protected groups - -Once a control plane gets created in a group, Upbound enforces a protection policy on the group that prevents its accidental deletion. To delete a group that has control planes in it, you should first delete all control planes in the group. - -## Groups in the context of single-tenant Spaces - -Upbound offers a variety of deployment models to use the product. If you deploy your own single-tenant Upbound Space (whether connected or disconnected), you're self-hosting Upbound software in a Kubernetes cluster. In these environments, a control plane group maps to a corresponding namespace in the cluster that hosts the Space. - -Most Kubernetes clusters come with some set of predefined namespaces. Because a group maps to a corresponding Kubernetes namespace, creating a group requires a matching namespace to exist or be created. When the Spaces software is newly installed, no groups exist. You _can_ elevate a Kubernetes namespace to become a group by doing the following: - -1. Creating a group with the same name as a preexisting Kubernetes namespace -2. Creating a control plane in a preexisting Kubernetes namespace -3. Labeling a Kubernetes namespace with the label `spaces.upbound.io/group=true` - - -[secrets]: /spaces/howtos/secrets-management -[backups]: /spaces/howtos/self-hosted/workload-id/backup-restore-config/ -[space]: /spaces/overview -[upbound-context]: /manuals/cli/concepts/contexts diff --git a/spaces_versioned_docs/version-v1.13/howtos/_category_.json b/spaces_versioned_docs/version-v1.13/howtos/_category_.json deleted file mode 100644 index d3a8547aa..000000000 --- a/spaces_versioned_docs/version-v1.13/howtos/_category_.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "label": "How-tos", - "position": 3, - "collapsed": true -} - - diff --git a/spaces_versioned_docs/version-v1.13/howtos/api-connector.md b/spaces_versioned_docs/version-v1.13/howtos/api-connector.md deleted file mode 100644 index a14468f52..000000000 --- a/spaces_versioned_docs/version-v1.13/howtos/api-connector.md +++ /dev/null @@ -1,413 +0,0 @@ ---- -title: API Connector -weight: 90 -description: Connect Kubernetes clusters to remote Crossplane control planes for resource synchronization -aliases: - - /api-connector - - /concepts/api-connector --- -:::info API Version Information -This guide covers API Connector, a preview feature available in all supported versions (v1.9-v1.14+). - -For related API specifications, available resources, and version compatibility details, see the [Spaces API Reference](../../../reference/apis/spaces-api/). -::: - -:::warning -API Connector is currently in **Preview**. The feature is under active -development and subject to breaking changes. Use for testing and evaluation -purposes only.
-::: - -API Connector enables seamless integration between Kubernetes application -clusters consuming APIs and remote Crossplane control planes providing and -reconciling APIs. - -You can use the API Connector to decouple where Crossplane is running (for -example in an Upbound control plane), and where APIs are consumed -(for example in an existing Kubernetes cluster). This gives you flexibility and -consistency in your control plane operations. - - - -Unlike the [Control Plane Connector](ctp-connector.md) which offers only -coarse-grained connectivity between app clusters and a control plane, API -connector offers fine-grained configuration of which APIs get offered along with -multi-cluster connectivity. - -## Architecture overview - -![API Connector Architecture](/img/api-connector.png) - -API Connector uses a **provider-consumer** model: - -- **Provider control plane**: The Upbound control plane that provides APIs and manages infrastructure. -- **Consumer cluster**: Any Kubernetes cluster whose users want to use APIs provided by the provider control plane, without having to run Crossplane. API connector gets installed in the consumer cluster, and bidirectionally syncs API objects to the provider. - -### Key components - -**Custom Resource Definitions (CRDs)**: - - -- `ClusterConnection`: Establishes a connection from the consumer to the provider cluster. Pulls bindable CRD APIs from the provider into the consumer cluster for use. - -- `ClusterAPIBinding`: Instructs API connector to sync all API objects cluster-wide with a given API group to a given provider cluster. -- `APIBinding`: Namespaced version of `ClusterAPIBinding`. Instructs API connector to sync API objects within a given namespace and with a given API group to a given provider cluster. - - -## Prerequisites - -Before using API Connector, ensure: - -1. **Consumer cluster** has network access to the provider control plane -1. You have a license to use API connector. If you are unsure, [contact Upbound][contact] or your sales representative. - -This guide walks through how to automate connecting your cluster to an Upbound -control plane. You can also manually configure the API Connector. - -## Publishing APIs in the provider cluster - - - - -First, log in to your provider control plane, and choose which CRD APIs you want -to make accessible to the consumer cluster. API connector only syncs -these "bindable" CRDs. - - - - - - -Use the `up` CLI to log in: - -```bash -up login -``` - -Connect to your control plane: - -```bash -up ctx -``` - -Check what CRDs are available: - -```bash -kubectl get crds -``` - - -Label all CRDs you want to publish with the bindable label: - - -```bash -kubectl label crd <crd-name> 'connect.upbound.io/bindable'='true' --overwrite -``` - - - - -Change context to the provider cluster: -```bash -kubectl config set-context -``` - -Check what CRDs are available: -```bash -kubectl get crds -``` - - -Label all CRDs you want to publish with the bindable label: - -```bash -kubectl label crd <crd-name> 'connect.upbound.io/bindable'='true' --overwrite -``` - - - -## Installation - - - - -The up CLI provides the simplest installation method with automatic -configuration: - -Make sure the current kubeconfig context is set to the **provider control plane**: -```bash -up ctx - -up controlplane api-connector install --consumer-kubeconfig [OPTIONS] -``` - -The command: -1. Creates a Robot account (named ``) in the Upbound Cloud organization ``. -1.
Gives the created robot account `admin` permissions to the provider control plane `` -1. Generates a JWT token for the robot account, and stores it in a Kubernetes Secret in the consumer cluster. -1. Installs the API connector Helm chart in the consumer cluster. -1. Creates a `ClusterConnection` object in the consumer cluster, referring to the newly generated Secret, so that API connector can authenticate successfully to the provider control plane. -1. API connector pulls all published CRDs from the previous step into the consumer cluster. - -**Example**: -```bash -up controlplane api-connector install \ - --consumer-kubeconfig ~/.kube/config \ - --consumer-context my-cluster \ - --upbound-token -``` - -This command uses the provided token to authenticate with the **Provider control plane** -and creates a `ClusterConnection` resource in the **Consumer cluster** that connects to the -**Provider control plane**. - -**Key Options**: -- `--consumer-kubeconfig`: Path to consumer cluster kubeconfig (required) -- `--consumer-context`: Context name for consumer cluster (required) -- `--name`: Custom name for connection resources (optional) -- `--upbound-token`: API token for authentication (optional) -- `--upgrade`: Upgrade existing installation (optional) -- `--version`: Specific version to install (optional) - - - - -For manual installation or custom configurations: - -```bash -helm upgrade --install api-connector oci://xpkg.upbound.io/spaces-artifacts/api-connector \ - --namespace upbound-system \ - --create-namespace \ - --version \ - --set consumerClusterDisplayName= -``` - -### Authentication methods - -API Connector supports two authentication methods: - - - - -For Upbound Spaces integration: - -```yaml -apiVersion: v1 -kind: Secret -metadata: - name: spaces-secret - namespace: upbound-system -type: Opaque -stringData: - token: - organization: - spacesBaseURL: - controlPlaneGroupName: - controlPlaneName: -``` - - - -For direct cluster access: - -```yaml -apiVersion: v1 -kind: Secret -metadata: - name: provider-kubeconfig - namespace: upbound-system -type: Opaque -data: - kubeconfig: -``` - - - - -### Connection setup - -Create a `ClusterConnection` to establish connectivity: - - - - -```yaml -apiVersion: connect.upbound.io/v1alpha1 -kind: ClusterConnection -metadata: - name: spaces-connection - namespace: upbound-system -spec: - secretRef: - kind: UpboundRobotToken - name: spaces-secret - namespace: upbound-system - crdManagement: - pullBehavior: Pull -``` - - - - -```yaml -apiVersion: connect.upbound.io/v1alpha1 -kind: ClusterConnection -metadata: - name: provider-connection - namespace: upbound-system -spec: - secretRef: - kind: KubeConfig - name: provider-kubeconfig - namespace: upbound-system - crdManagement: - pullBehavior: Pull -``` - - - - - - - -### Configuration - -Bind APIs to make them available in your consumer cluster: - -```yaml -apiVersion: connect.upbound.io/v1alpha1 -kind: ClusterAPIBinding -metadata: - name: -spec: - connectionRef: - kind: ClusterConnection - name: # Or --name value -```
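To sync objects from only a single namespace, use the namespaced `APIBinding` instead. Here's a minimal sketch, under the assumption that the namespaced `APIBinding` mirrors the `ClusterAPIBinding` spec shown above; the namespace and connection name are placeholders:

```yaml
apiVersion: connect.upbound.io/v1alpha1
kind: APIBinding
metadata:
  # Must match the Resource.Group (CRD name) of the API you bind
  name: nopresources.nop.example.org
  # Only objects in this namespace sync to the provider
  namespace: my-team
spec:
  connectionRef:
    kind: ClusterConnection
    name: spaces-connection
```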
- - - - -## Usage example - -After configuration, you can create API objects (in the consumer cluster) that -will be synchronized to the provider cluster: - -```yaml -apiVersion: nop.example.org/v1alpha1 -kind: NopResource -metadata: - name: my-resource - namespace: default -spec: - coolField: "Synchronized resource" - compositeDeletePolicy: Foreground -``` - -Verify the resource status: - -```bash -kubectl get nopresource my-resource -o yaml - -``` -When the `APIBound=True` condition is present, it means that the API object has -been synced to the provider cluster, and is being reconciled there. Whenever the -API object in the provider cluster gets status updates (for example -`Ready=True`), that status is synced back to the consumer cluster. - -Switch contexts to the provider cluster to see the API object being created: - -```bash -up ctx -# or kubectl config set-context -``` - -```bash -kubectl get nopresource my-resource -o yaml -``` - -Note that in the provider cluster, the API object is labeled with information on -where the API object originates from, and `connect.upbound.io/managed=true`. - -## Monitoring and troubleshooting - -### Check connection status - -```bash -kubectl get clusterconnection -``` - -Expected output: -``` -NAME STATUS MESSAGE -spaces-connection Ready Provider controlplane is available -``` - -### View available APIs - -```bash -kubectl get clusterconnection spaces-connection -o jsonpath='{.status.offeredAPIs[*].name}' -``` - -### Check API binding status - -```bash -kubectl get clusterapibinding -``` - -### Debug resource synchronization - -```bash -kubectl describe -``` - -## Removal - -### Using the up CLI - -```bash -up controlplane api-connector uninstall \ - --consumer-kubeconfig ~/.kube/config \ - --all -``` - -The `--all` flag removes all resources including connections and secrets. -Without the flag, only runtime related resources won't be removed. - -:::note -Uninstall doesn't remove any API objects in the provider control plane. If you -want to clean up all API objects there, delete all API objects from the consumer -cluster before API connector uninstallation, and wait for the objects to get -deleted. -::: - - -### Using Helm - -```bash -helm uninstall api-connector -n upbound-system -``` - -## Limitations - -- **Preview feature**: Subject to breaking changes. Not yet production grade. -- **CRD updates**: CRDs are pulled once but not automatically updated. If multiple Crossplane clusters offer the same CRD API, API changes must be synchronized out of band, for example using a [Crossplane Configuration](https://docs.crossplane.io/latest/packages/). -- **Network requirements**: Consumer cluster must have direct network access to provider cluster. -- **Wide permissions needed in consumer cluster**: Because the API connector doesn't know up front the names of the APIs it needs to reconcile, it currently runs with full "root" privileges in the consumer cluster. - -- **Connector polling**: API Connector checks for drift between the consumer and provider cluster - periodically through polling. The poll interval can be changed with the `pollInterval` Helm value. - - -## Advanced configuration - -### Multiple connections - -You can connect to multiple provider clusters simultaneously by creating multiple `ClusterConnection` resources with different names and configurations. 
- -[contact]: https://www.upbound.io/contact-us diff --git a/spaces_versioned_docs/version-v1.13/howtos/auto-upgrade.md b/spaces_versioned_docs/version-v1.13/howtos/auto-upgrade.md deleted file mode 100644 index 249056fb4..000000000 --- a/spaces_versioned_docs/version-v1.13/howtos/auto-upgrade.md +++ /dev/null @@ -1,131 +0,0 @@ ---- -title: Automatically upgrade control planes -sidebar_position: 50 -description: How to configure automatic upgrades of Crossplane in a control plane -plan: "standard" --- - - - -Upbound Spaces can automatically upgrade the version of Upbound Crossplane in managed control plane instances. You can edit the `spec.crossplane.autoUpgrade` field in your `ControlPlane` specification with the available release channels below. - -:::info API Version Information -This guide applies to all supported versions (v1.9-v1.14+). The `spec.crossplane.autoUpgrade` field has been available since v1.9. - -For ControlPlane API specifications and version compatibility details, see the [Spaces API Reference](../../../reference/apis/spaces-api/). -::: - -| Channel | Description | Example | -|------------|-----------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------| -| **None** | Disables auto upgrades. | _Uses version specified in `spec.crossplane.version`._ | -| **Patch** | Upgrades to the latest supported patch release. | _Control plane version 1.12.2-up.2 auto upgrades to 1.12.3-up.1 upon release._ | -| **Stable** | Default setting. Upgrades to the latest supported patch release on minor version _N-1_ where N is the latest supported minor version. | _If the latest supported minor version is 1.14, auto upgrades to the latest 1.13 patch, such as 1.13.2-up.3._ | -| **Rapid** | Upgrades to the latest supported patch release on the latest supported minor version. | _If the latest supported minor version is 1.14, auto upgrades to the latest patch of that minor version, such as 1.14.5-up.1._ | - - -:::warning - -The `Rapid` channel is only recommended for users willing to accept the risk of new features and potentially breaking changes. - -::: - -## Examples - -The specs below are examples of how to edit the `autoUpgrade` channel in your `ControlPlane` specification. - -To run a control plane with the `Rapid` auto upgrade channel, your spec should look like this: - -```yaml -apiVersion: spaces.upbound.io/v1beta1 -kind: ControlPlane -metadata: - name: example-ctp -spec: - crossplane: - autoUpgrade: - channel: Rapid - writeConnectionSecretToRef: - name: kubeconfig-example-ctp -``` - -To run a control plane with a pinned version of Crossplane, specify it in the `version` field: - -```yaml -apiVersion: spaces.upbound.io/v1beta1 -kind: ControlPlane -metadata: - name: example-ctp -spec: - crossplane: - version: 1.14.3-up.1 - autoUpgrade: - channel: None - writeConnectionSecretToRef: - name: kubeconfig-example-ctp -``` - -## Supported Crossplane versions - -Spaces supports the latest supported minor version and the two [preceding minor versions][preceding-minor-versions]. For example, if the latest supported minor version is `1.14`, minor versions `1.13` and `1.12` are also supported. Versions older than the three most recent minor versions aren't supported. Only supported Crossplane versions are valid specifications for new control planes.
-
-Current Crossplane version support by Spaces version:
-
-| Spaces Version | Crossplane Version Min | Crossplane Version Max |
-|:--------------:|:----------------------:|:----------------------:|
-| 1.2 | 1.13 | 1.15 |
-| 1.3 | 1.13 | 1.15 |
-| 1.4 | 1.14 | 1.16 |
-| 1.5 | 1.14 | 1.16 |
-| 1.6 | 1.14 | 1.16 |
-| 1.7 | 1.14 | 1.16 |
-| 1.8 | 1.15 | 1.17 |
-| 1.9 | 1.16 | 1.18 |
-| 1.10 | 1.16 | 1.18 |
-| 1.11 | 1.16 | 1.18 |
-| 1.12 | 1.17 | 1.19 |
-
-
-Upbound offers extended support for all installed Crossplane versions released within a 12 month window since the last Spaces release. Contact your Upbound sales representative for more information on version support.
-
-
-:::warning
-
-If the auto upgrade channel is `Stable` or `Rapid`, the Crossplane version always stays within the support window after auto upgrade. If set to `Patch` or `None`, the minor version may fall outside the support window. You are responsible for upgrading to a supported version.
-
-:::
-
-To view the support status of a control plane instance, use `kubectl get ctp`.
-
-```bash
-kubectl get ctp
-NAME          CROSSPLANE VERSION   SUPPORTED   READY   MESSAGE   AGE
-example-ctp   1.13.2-up.3          True        True              31m
-```
-
-Unsupported versions return `SUPPORTED: False`.
-
-```bash
-kubectl get ctp
-NAME          CROSSPLANE VERSION   SUPPORTED   READY   MESSAGE   AGE
-example-ctp   1.11.5-up.1          False       True              31m
-```
-
-For more detail, use the `-o yaml` flag:
-
-```bash
-kubectl get controlplanes.spaces.upbound.io example-ctp -o yaml
-```
-
-```yaml
-status:
-  conditions:
-  ...
-  - lastTransitionTime: "2024-01-23T06:36:10Z"
-    message: Crossplane version 1.11.5-up.1 is outside of the support window.
-      Oldest supported minor version is 1.12.
-    reason: UnsupportedCrossplaneVersion
-    status: "False"
-    type: Supported
-```
-
-
-[preceding-minor-versions]: /reference/usage/lifecycle/#maintenance-and-updates
diff --git a/spaces_versioned_docs/version-v1.13/howtos/automation-and-gitops/_category_.json b/spaces_versioned_docs/version-v1.13/howtos/automation-and-gitops/_category_.json
deleted file mode 100644
index b65481af6..000000000
--- a/spaces_versioned_docs/version-v1.13/howtos/automation-and-gitops/_category_.json
+++ /dev/null
@@ -1,8 +0,0 @@
-{
-  "label": "Automation & GitOps",
-  "position": 11,
-  "collapsed": true,
-  "customProps": {
-    "plan": "business"
-  }
-}
diff --git a/spaces_versioned_docs/version-v1.13/howtos/automation-and-gitops/overview.md b/spaces_versioned_docs/version-v1.13/howtos/automation-and-gitops/overview.md
deleted file mode 100644
index 57eeb15fc..000000000
--- a/spaces_versioned_docs/version-v1.13/howtos/automation-and-gitops/overview.md
+++ /dev/null
@@ -1,138 +0,0 @@
----
-title: Automation and GitOps Overview
-sidebar_label: Overview
-sidebar_position: 1
-description: Guide to automating control plane deployments with GitOps and Argo CD
-plan: "business"
----
-
-Automating control plane deployments with GitOps enables declarative, version-controlled infrastructure management. This section covers integrating GitOps workflows with Upbound control planes using Argo CD and related tools.
-
-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+). GitOps patterns are compatible across all versions of Upbound Spaces.
-
-For details on deployment models and their differences, see the [Deployment Modes](../../concepts/deployment-modes.md) concept guide.
-:::
-
-## What is GitOps?
-
-GitOps is an approach for managing infrastructure by:
-- **Declaratively describing** desired system state in Git
-- **Using controllers** to continuously reconcile actual state with desired state
-- **Treating Git as the source of truth** for all configuration and deployments
-
-Upbound control planes are fully compatible with GitOps patterns and we strongly recommend integrating GitOps in the platforms you build on Upbound.
-
-## Key Concepts
-
-### Argo CD
-[Argo CD](https://argo-cd.readthedocs.io/) is a popular Kubernetes-native GitOps controller. It continuously monitors Git repositories and automatically applies changes to your infrastructure when commits are detected.
-
-### Deployment Models
-
-The way you configure GitOps depends on your deployment model:
-
-| Aspect | Cloud Spaces | Self-Hosted Spaces |
-|--------|--------------|-------------------|
-| **Access Method** | Upbound API with tokens | Kubernetes native (secrets/kubeconfig) |
-| **Configuration** | Kubeconfig via `up` CLI | Control plane connection secrets |
-| **Setup Complexity** | More involved (API integration) | Simpler (native Kubernetes) |
-| **Typical Use Case** | Managing Upbound resources | Managing workloads on control planes |
-
-## Getting Started
-
-**Choose your path based on your deployment model:**
-
-### Cloud Spaces
-If you're using Upbound Cloud Spaces (Dedicated or Managed):
-1. Start with [GitOps with Upbound Control Planes](../cloud-spaces/gitops-on-upbound.md)
-2. Learn how to integrate Argo CD with Cloud Spaces
-3. Manage both control plane infrastructure and Upbound resources declaratively
-
-### Self-Hosted Spaces
-If you're running self-hosted Spaces:
-1. Start with [GitOps with ArgoCD in Self-Hosted Spaces](../self-hosted/gitops-with-argocd.md)
-2. Learn how to configure control plane connection secrets
-3. Manage workloads deployed to your control planes
-
-## Common Workflows
-
-### Workflow 1: Managing Control Planes with GitOps
-Create and manage control planes themselves declaratively using provider-kubernetes:
-
-```yaml
-apiVersion: kubernetes.crossplane.io/v1alpha2
-kind: Object
-metadata:
-  name: my-controlplane
-spec:
-  forProvider:
-    manifest:
-      apiVersion: spaces.upbound.io/v1beta1
-      kind: ControlPlane
-      # ... control plane configuration
-```
-
-### Workflow 2: Managing Workloads on Control Planes
-Deploy applications and resources to control planes using standard Kubernetes GitOps patterns:
-
-```yaml
-apiVersion: v1
-kind: Namespace
-metadata:
-  name: my-app
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: my-app
-  namespace: my-app
-# ... deployment configuration
-```
-
-### Workflow 3: Managing Upbound Resources
-Use provider-upbound to manage Upbound IAM and repository resources:
-
-- Teams
-- Robots and their team memberships
-- Repositories and permissions
-
-## Advanced Topics
-
-### Argo CD Plugin for Upbound
-Learn more in the [ArgoCD Plugin guide](../self-hosted/use-argo.md) for enhanced integration with self-hosted Spaces.
-
-### Declarative Control Plane Creation
-See [Declaratively create control planes](../self-hosted/declarative-ctps.md) for advanced automation patterns.
-
-### Consuming Control Plane APIs
-Understand how to [consume control plane APIs in your app cluster](../mcp-connector-guide.md) with Argo CD.
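-
-### Example: Syncing control plane manifests with Argo CD
-
-As a starting point, the sketch below shows a minimal Argo CD `Application`
-that syncs a Git directory of control plane manifests (such as the `Object`
-resources from Workflow 1). The repository URL and paths are hypothetical
-placeholders — substitute your own.
-
-```yaml
-apiVersion: argoproj.io/v1alpha1
-kind: Application
-metadata:
-  name: control-planes            # hypothetical Application name
-  namespace: argocd
-spec:
-  project: default
-  source:
-    repoURL: https://github.com/example-org/platform-config  # hypothetical repo
-    targetRevision: main
-    path: control-planes          # directory of ControlPlane/Object manifests
-  destination:
-    server: https://kubernetes.default.svc   # cluster running the GitOps controller
-    namespace: default
-  syncPolicy:
-    automated:
-      selfHeal: true
-```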
-
-## Prerequisites
-
-Before implementing GitOps with control planes, ensure you have:
-
-**For Cloud Spaces:**
-- Access to Upbound Cloud Spaces
-- `up` CLI installed and configured
-- API token with appropriate permissions
-- Argo CD or similar GitOps controller running
-- Familiarity with Kubernetes RBAC
-
-**For Self-Hosted Spaces:**
-- Self-hosted Spaces deployed and running
-- Argo CD deployed in your infrastructure
-- Kubectl access to the cluster hosting Spaces
-- Understanding of control plane architecture
-
-## Next Steps
-
-1. **Choose your deployment model** above
-2. **Review the relevant getting started guide**
-3. **Set up your GitOps controller** (Argo CD)
-4. **Deploy your first automated control plane**
-5. **Explore advanced topics** as needed
-
-:::tip
-Start with simple deployments to test your GitOps workflow before moving to production. Use [simulations](../simulations.md) to preview changes before applying them.
-:::
diff --git a/spaces_versioned_docs/version-v1.13/howtos/backup-and-restore.md b/spaces_versioned_docs/version-v1.13/howtos/backup-and-restore.md
deleted file mode 100644
index 3b8d026cb..000000000
--- a/spaces_versioned_docs/version-v1.13/howtos/backup-and-restore.md
+++ /dev/null
@@ -1,530 +0,0 @@
----
-title: Backup and restore
-sidebar_position: 13
-description: Configure and manage backups in your Upbound Space.
-plan: "enterprise"
----
-
-
-
-Upbound's _Shared Backups_ is a built-in backup and restore feature. Shared Backups lets you configure automatic schedules for taking snapshots of your control planes. You can restore data from these backups by making new control planes. This guide explains how to use Shared Backups for disaster recovery or upgrade scenarios.
-
-:::info API Version Information & Available Versions
-This guide applies to **all supported versions** (v1.9-v1.15+).
-
-**Select your API version**:
-- [Spaces API v1.15 (Latest)](../../../next/reference/apis/spaces-api/)
-- [Spaces API v1.14](../../../1.14/reference/apis/spaces-api/)
-- [Spaces API v1.13](../../../1.13/reference/apis/spaces-api/)
-- [Spaces API v1.12](../../../1.12/reference/apis/spaces-api/)
-- [Spaces API v1.11](../../../1.11/reference/apis/spaces-api/)
-- [Spaces API v1.10](../../../1.10/reference/apis/spaces-api/)
-- [Spaces API v1.9](../../../1.9/reference/apis/spaces-api/)
-:::
-
-## Benefits
-
-The Shared Backups feature provides the following benefits:
-
-* Automatic backups for control planes without any operational overhead
-* Backup schedules for multiple control planes in a group
-* Shared Backups are available across all hosting environments of Upbound (Disconnected, Connected, or Cloud Spaces)
-
-
-## Configure a Shared Backup Config
-
-
-[SharedBackupConfig][sharedbackupconfig] is a [group-scoped][group-scoped] resource. You should create them in a group containing one or more control planes. This resource configures the storage details and provider. Whenever a backup executes (either by schedule or manually initiated), it references a SharedBackupConfig to tell it where to store the snapshot.
-
-
-### Backup config provider
-
-
-The `spec.objectStorage.provider` and `spec.objectStorage.config` fields configure:
-
-* The object storage provider
-* The path to the provider
-* The credentials needed to communicate with the provider
-
-You can only set one provider. Upbound currently supports AWS, Azure, and GCP as providers.
-
-
-`spec.objectStorage.config` is a freeform map of configuration options for the object storage provider. See [Thanos object storage][thanos-object-storage] for more information on the formats for each supported cloud provider. `spec.bucket` and `spec.provider` override the required values in the config.
-
-
-
-#### AWS as a storage provider
-
-:::important
-For Cloud Spaces, static credentials are currently the only supported auth method.
-:::
-
-This example demonstrates how to use AWS as a storage provider for your backups:
-
-```yaml
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedBackupConfig
-metadata:
-  name: default
-  namespace: default
-spec:
-  objectStorage:
-    provider: AWS
-    bucket: spaces-backup-bucket
-    config:
-      endpoint: s3.eu-west-2.amazonaws.com
-      region: eu-west-2
-    credentials:
-      source: Secret
-      secretRef:
-        name: bucket-creds
-        key: creds
-```
-
-
-This example assumes you've already created an S3 bucket called "spaces-backup-bucket" in the AWS `eu-west-2` region. The account credentials to access the bucket should exist in a secret in the same namespace as the SharedBackupConfig.
-
-#### Azure as a storage provider
-
-:::important
-For Cloud Spaces, static credentials are currently the only supported auth method.
-:::
-
-This example demonstrates how to use Azure as a storage provider for your backups:
-
-```yaml
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedBackupConfig
-metadata:
-  name: default
-  namespace: default
-spec:
-  objectStorage:
-    provider: Azure
-    bucket: upbound-backups
-    config:
-      storage_account: upbackupstore
-      container: upbound-backups
-      endpoint: blob.core.windows.net
-    credentials:
-      source: Secret
-      secretRef:
-        name: bucket-creds
-        key: creds
-```
-
-
-This example assumes you've already created an Azure storage account called `upbackupstore` and blob `upbound-backups`. The storage account key to access the blob should exist in a secret in the same namespace as the SharedBackupConfig.
-
-
-#### GCP as a storage provider
-
-:::important
-For Cloud Spaces, static credentials are currently the only supported auth method.
-:::
-
-This example demonstrates how to use Google Cloud Storage as a storage provider for your backups:
-
-```yaml
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedBackupConfig
-metadata:
-  name: default
-  namespace: default
-spec:
-  objectStorage:
-    provider: GCP
-    bucket: spaces-backup-bucket
-    credentials:
-      source: Secret
-      secretRef:
-        name: bucket-creds
-        key: creds
-```
-
-
-This example assumes you've already created a Cloud Storage bucket called "spaces-backup-bucket" and a service account with access to this bucket. The key file should exist in a secret in the same namespace as the SharedBackupConfig.
-
-
-## Configure a Shared Backup Schedule
-
-
-[SharedBackupSchedule][sharedbackupschedule] is a [group-scoped][group-scoped-1] resource. You should create them in a group containing one or more control planes. This resource defines a backup schedule for control planes within its corresponding group.
-
-Below is an example of a Shared Backup Schedule that takes daily backups of all control planes that have the `environment: production` label:
-
-```yaml
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedBackupSchedule
-metadata:
-  name: daily-schedule
-  namespace: default
-spec:
-  schedule: "@daily"
-  configRef:
-    kind: SharedBackupConfig
-    name: default
-  controlPlaneSelector:
-    labelSelectors:
-      - matchLabels:
-          environment: production
-```
-
-### Define a schedule
-
-The `spec.schedule` field is a [Cron-formatted][cron-formatted] string. Some common examples are below:
-
-
-| Entry | Description |
-| ----------------- | ------------------------------------------------------------------------------------------------- |
-| `@hourly` | Run once an hour. |
-| `@daily` | Run once a day. |
-| `@weekly` | Run once a week. |
-| `0 0/4 * * *` | Run every 4 hours. |
-| `0/15 * * * 1-5` | Run every 15 minutes, Monday through Friday. |
-| `@every 1h30m10s` | Run every 1 hour, 30 minutes, and 10 seconds. The hour is the largest unit of time for `@every`. |
-
-
-### Exclude resources from the backup
-
-The `spec.excludedResources` field is an array of resource names to exclude from each backup.
-
-```yaml
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedBackupSchedule
-metadata:
-  name: daily-schedule
-spec:
-  excludedResources:
-    - "xclusters.aws.platformref.upbound.io"
-    - "xdatabase.aws.platformref.upbound.io"
-    - "xrolepolicyattachment.iam.aws.crossplane.io"
-```
-
-:::warning
-You must specify resource names in lowercase "resource.group" format (for example, `xclusters.aws.platformref.upbound.io`). Using only the resource kind (for example, `XCluster`) isn't supported.
-:::
-
-### Suspend a schedule
-
-Use the `spec.suspend` field to suspend the schedule. It creates no new backups, but allows running backups to complete.
-
-```yaml
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedBackupSchedule
-metadata:
-  name: daily-schedule
-spec:
-  suspend: true
-```
-
-### Set the time to live
-
-Set the `spec.ttl` field to define the time to live for the backup. After this time, the backup is eligible for garbage collection. If this field isn't set, the backup isn't garbage collected. The time to live is a duration, for example, `168h` for 7 days.
-
-```yaml
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedBackupSchedule
-metadata:
-  name: daily-schedule
-spec:
-  ttl: 168h # Backup is garbage collected after 7 days
-```
-:::tip
-By default, this setting doesn't delete uploaded files. Review the next section to define
-the deletion policy.
-:::
-
-### Define the deletion policy
-
-Set the `spec.deletionPolicy` to define backup deletion actions, including the
-deletion of the backup file from the bucket. The deletion policy value defaults
-to `Orphan`. Set it to `Delete` to remove uploaded files in the bucket. For more
-information on the backup and restore process, review the [Spaces API
-documentation][spaces-api-documentation].
-
-```yaml
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedBackupSchedule
-metadata:
-  name: daily-schedule
-spec:
-  ttl: 168h # Backup is garbage collected after 7 days
-  deletionPolicy: Delete # Defaults to Orphan
-```
-
-### Garbage collect backups when the schedule gets deleted
-
-Set the `spec.useOwnerReferencesInBackup` field to garbage collect associated backups when a shared schedule gets deleted. If set to true, backups are garbage collected when the schedule gets deleted.
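-
-A minimal sketch of this setting, added to the schedule from earlier in this section:
-
-```yaml
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedBackupSchedule
-metadata:
-  name: daily-schedule
-spec:
-  useOwnerReferencesInBackup: true # backups are garbage collected with the schedule
-```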
-
-### Control plane selection
-
-To configure which control planes in a group you want to create a backup schedule for, use the `spec.controlPlaneSelector` field. You can either use `labelSelectors` or the `names` of a control plane directly. A control plane matches if any of the label selectors match.
-
-This example matches all control planes in the group that have `environment: production` as a label:
-
-```yaml
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedBackupSchedule
-metadata:
-  name: my-backup-schedule
-spec:
-  controlPlaneSelector:
-    labelSelectors:
-      - matchLabels:
-          environment: production
-```
-
-You can use the more complex `matchExpressions` to match labels based on an expression. This example matches control planes that have label `environment: production` or `environment: staging`:
-
-```yaml
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedBackupSchedule
-metadata:
-  name: my-backup-schedule
-spec:
-  controlPlaneSelector:
-    labelSelectors:
-      - matchExpressions:
-          - { key: environment, operator: In, values: [production,staging] }
-```
-
-You can also specify the names of control planes directly:
-
-```yaml
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedBackupSchedule
-metadata:
-  name: my-backup-schedule
-spec:
-  controlPlaneSelector:
-    names:
-      - controlplane-dev
-      - controlplane-staging
-      - controlplane-prod
-```
-
-
-## Configure a Shared Backup
-
-
-
-[SharedBackup][sharedbackup] is a [group-scoped][group-scoped-2] resource. You should create them in a group containing one or more control planes. This resource causes backups to occur for control planes within its corresponding group.
-
-Below is an example of a Shared Backup that takes a backup of all control planes that have the `environment: production` label:
-
-```yaml
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedBackup
-metadata:
-  name: my-backup
-  namespace: default
-spec:
-  configRef:
-    kind: SharedBackupConfig
-    name: default
-  controlPlaneSelector:
-    labelSelectors:
-      - matchLabels:
-          environment: production
-```
-
-### Exclude resources from the backup
-
-The `spec.excludedResources` field is an array of resource names to exclude from each backup.
-
-```yaml
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedBackup
-metadata:
-  name: my-backup
-spec:
-  excludedResources:
-    - "xclusters.aws.platformref.upbound.io"
-    - "xdatabase.aws.platformref.upbound.io"
-    - "xrolepolicyattachment.iam.aws.crossplane.io"
-```
-
-:::warning
-You must specify resource names in lowercase "resource.group" format (for example, `xclusters.aws.platformref.upbound.io`). Using only the resource kind (for example, `XCluster`) isn't supported.
-:::
-
-### Set the time to live
-
-Set the `spec.ttl` field to define the time to live for the backup. After this time, the backup is eligible for garbage collection. If this field isn't set, the backup isn't garbage collected. The time to live is a duration, for example, `168h` for 7 days.
-
-```yaml
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedBackup
-metadata:
-  name: my-backup
-spec:
-  ttl: 168h # Backup is garbage collected after 7 days
-```
-
-
-### Garbage collect backups on Shared Backup deletion
-
-
-
-Set the `spec.useOwnerReferencesInBackup` field to define whether to garbage collect associated backups when a shared backup gets deleted. If set to true, backups are garbage collected when the shared backup gets deleted.
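-
-For example:
-
-```yaml
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedBackup
-metadata:
-  name: my-backup
-spec:
-  useOwnerReferencesInBackup: true # backups are garbage collected with this SharedBackup
-```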
-
-### Control plane selection
-
-To configure which control planes in a group you want to create a backup for, use the `spec.controlPlaneSelector` field. You can either use `labelSelectors` or the `names` of a control plane directly. A control plane matches if any of the label selectors match.
-
-This example matches all control planes in the group that have `environment: production` as a label:
-
-```yaml
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedBackup
-metadata:
-  name: my-backup
-spec:
-  controlPlaneSelector:
-    labelSelectors:
-      - matchLabels:
-          environment: production
-```
-
-You can use the more complex `matchExpressions` to match labels based on an expression. This example matches control planes that have label `environment: production` or `environment: staging`:
-
-```yaml
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedBackup
-metadata:
-  name: my-backup
-spec:
-  controlPlaneSelector:
-    labelSelectors:
-      - matchExpressions:
-          - { key: environment, operator: In, values: [production,staging] }
-```
-
-You can also specify the names of control planes directly:
-
-```yaml
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedBackup
-metadata:
-  name: my-backup
-spec:
-  controlPlaneSelector:
-    names:
-      - controlplane-dev
-      - controlplane-staging
-      - controlplane-prod
-```
-
-## Create a manual backup
-
-[Backup][backup] is a [group-scoped][group-scoped-3] resource that causes a single backup to occur for a control plane in its corresponding group.
-
-Below is an example of a manual Backup of a control plane:
-
-```yaml
-apiVersion: spaces.upbound.io/v1alpha1
-kind: Backup
-metadata:
-  name: my-backup
-  namespace: default
-spec:
-  configRef:
-    kind: SharedBackupConfig
-    name: default
-  controlPlane: my-awesome-ctp
-  deletionPolicy: Delete
-```
-
-The backup specification's `deletionPolicy` defines backup deletion actions,
-including the deletion of the backup file from the bucket. The value defaults
-to `Orphan`. Set it to `Delete` to remove uploaded files in the bucket.
-For more information on the backup and restore process, review the [Spaces API documentation][spaces-api-documentation-1].
-
-
-### Choose a control plane to back up
-
-The `spec.controlPlane` field defines which control plane to execute a backup against.
-
-```yaml
-apiVersion: spaces.upbound.io/v1alpha1
-kind: Backup
-metadata:
-  name: my-backup
-  namespace: default
-spec:
-  controlPlane: my-awesome-ctp
-```
-
-If the control plane doesn't exist, the backup fails after multiple failed retry attempts.
-
-### Exclude resources from the backup
-
-The `spec.excludedResources` field is an array of resource names to exclude from the manual backup.
-
-```yaml
-apiVersion: spaces.upbound.io/v1alpha1
-kind: Backup
-metadata:
-  name: my-backup
-spec:
-  excludedResources:
-    - "xclusters.aws.platformref.upbound.io"
-    - "xdatabase.aws.platformref.upbound.io"
-    - "xrolepolicyattachment.iam.aws.crossplane.io"
-```
-
-:::warning
-You must specify resource names in lowercase "resource.group" format (for example, `xclusters.aws.platformref.upbound.io`). Using only the resource kind (for example, `XCluster`) isn't supported.
-:::
-
-### Set the time to live
-
-Set the `spec.ttl` field to define the time to live for the backup. After this time, the backup is eligible for garbage collection. If this field isn't set, the backup isn't garbage collected. The time to live is a duration, for example, `168h` for 7 days.
-
-```yaml
-apiVersion: spaces.upbound.io/v1alpha1
-kind: Backup
-metadata:
-  name: my-backup
-spec:
-  ttl: 168h # Backup is garbage collected after 7 days
-```
-
-## Restore a control plane from a backup
-
-You can restore a control plane's state from a backup. Below is an example of creating a new control plane from a previous backup called `restore-me`:
-
-
-```yaml
-apiVersion: spaces.upbound.io/v1beta1
-kind: ControlPlane
-metadata:
-  name: my-awesome-restored-ctp
-  namespace: default
-spec:
-  restore:
-    source:
-      kind: Backup
-      name: restore-me
-```
-
-
-[group-scoped]: /spaces/concepts/groups
-[group-scoped-1]: /spaces/concepts/groups
-[group-scoped-2]: /spaces/concepts/groups
-[group-scoped-3]: /spaces/concepts/groups
-[sharedbackupconfig]: /reference/apis/spaces-api/latest
-[thanos-object-storage]: https://thanos.io/tip/thanos/storage.md/
-[sharedbackupschedule]: /reference/apis/spaces-api/latest
-[cron-formatted]: https://en.wikipedia.org/wiki/Cron
-[spaces-api-documentation]: /reference/apis/spaces-api/v1_9
-[sharedbackup]: /reference/apis/spaces-api/latest
-[backup]: /reference/apis/spaces-api/latest
-[spaces-api-documentation-1]: /reference/apis/spaces-api/v1_9
-
-
-
diff --git a/spaces_versioned_docs/version-v1.13/howtos/cloud-spaces/_category_.json b/spaces_versioned_docs/version-v1.13/howtos/cloud-spaces/_category_.json
deleted file mode 100644
index 1e1869a38..000000000
--- a/spaces_versioned_docs/version-v1.13/howtos/cloud-spaces/_category_.json
+++ /dev/null
@@ -1,10 +0,0 @@
-{
-  "label": "Cloud Spaces",
-  "position": 1,
-  "collapsed": true,
-  "customProps": {
-    "plan": "standard"
-  }
-}
-
-
diff --git a/spaces_versioned_docs/version-v1.13/howtos/cloud-spaces/dedicated-spaces-deployment.md b/spaces_versioned_docs/version-v1.13/howtos/cloud-spaces/dedicated-spaces-deployment.md
deleted file mode 100644
index ebad9493e..000000000
--- a/spaces_versioned_docs/version-v1.13/howtos/cloud-spaces/dedicated-spaces-deployment.md
+++ /dev/null
@@ -1,33 +0,0 @@
----
-title: Dedicated Spaces
-sidebar_position: 4
-description: A guide to Upbound Dedicated Spaces
-plan: business
----
-
-
-## Benefits
-
-Dedicated Spaces offer the following benefits:
-
-- **Single-tenancy.** A control plane space where Upbound guarantees you're the only tenant operating in the environment.
-- **Connectivity to your private network.** Establish secure network connections between your Dedicated Cloud Space running in Upbound and your own resources behind your private network.
-- **Reduced overhead.** Offload day-to-day operational burdens to Upbound while focusing on your job of building your platform.
-
-## Architecture
-
-A Dedicated Space is a deployment of the Upbound Spaces software inside an
-Upbound-controlled cloud account and network. The control planes you run there
-are dedicated solely to your organization.
-
-The diagram below illustrates the high-level architecture of Upbound Dedicated Spaces:
-
-![Upbound Managed Spaces arch](/img/managed-arch-gcp.png)
-
-## How to get access to Dedicated Spaces
-
-If you have an interest in Upbound Dedicated Spaces, contact
-[Upbound][contact-us]. We can chat more about your
-requirements and see if Dedicated Spaces are a good fit for you.
-
-[contact-us]: https://www.upbound.io/contact-us
-[managed-space]: /spaces/howtos/self-hosted/managed-spaces-deployment
diff --git a/spaces_versioned_docs/version-v1.13/howtos/cloud-spaces/gitops-on-upbound.md b/spaces_versioned_docs/version-v1.13/howtos/cloud-spaces/gitops-on-upbound.md
deleted file mode 100644
index fa59a8dce..000000000
--- a/spaces_versioned_docs/version-v1.13/howtos/cloud-spaces/gitops-on-upbound.md
+++ /dev/null
@@ -1,318 +0,0 @@
----
-title: GitOps with Upbound Control Planes
-sidebar_position: 80
-description: An introduction to doing GitOps with control planes on Upbound Cloud Spaces
-tier: "business"
----
-
-:::info Deployment Model
-This guide applies to **Upbound Cloud Spaces** (Dedicated and Managed Spaces). For self-hosted Spaces deployments, see [GitOps with ArgoCD in Self-Hosted Spaces](/spaces/howtos/self-hosted/gitops-with-argocd/).
-:::
-
-GitOps is an approach for managing a system by declaratively describing desired resources' configurations in Git and using controllers to realize the desired state. Upbound's control planes are compatible with this pattern and it's strongly recommended you integrate GitOps in the platforms you build on Upbound.
-
-
-## Integrate with Argo CD
-
-
-[Argo CD][argo-cd] is a project in the Kubernetes ecosystem commonly used for GitOps. You can use it in tandem with Upbound control planes to achieve GitOps flows. The sections below explain how to integrate these tools with Upbound.
-
-### Generate a kubeconfig for your control plane
-
-Use the up CLI to [generate a kubeconfig][generate-a-kubeconfig] for your control plane:
-
-```bash
-up ctx <org>/<space>/<group>/<control-plane> -f - > context.yaml
-```
-
-### Create an API token
-
-
-You need a personal access token (PAT). You create PATs on a per-user basis in the Upbound Console. Go to [My Account - API tokens][my-account-api-tokens] and select Create New Token. Give the token a name and save the secret value somewhere safe.
-
-
-### Add the up CLI init container to Argo
-
-Create a new file called `up-plugin-values.yaml` and paste the following YAML:
-
-```yaml
-controller:
-  volumes:
-    - name: up-plugin
-      emptyDir: {}
-    - name: up-home
-      emptyDir: {}
-
-  volumeMounts:
-    - name: up-plugin
-      mountPath: /usr/local/bin/up
-      subPath: up
-    - name: up-home
-      mountPath: /home/argocd/.up
-
-  initContainers:
-    - name: up-plugin
-      image: xpkg.upbound.io/upbound/up-cli:v0.39.0
-      command: ["cp"]
-      args:
-        - /usr/local/bin/up
-        - /plugin/up
-      volumeMounts:
-        - name: up-plugin
-          mountPath: /plugin
-
-server:
-  volumes:
-    - name: up-plugin
-      emptyDir: {}
-    - name: up-home
-      emptyDir: {}
-
-  volumeMounts:
-    - name: up-plugin
-      mountPath: /usr/local/bin/up
-      subPath: up
-    - name: up-home
-      mountPath: /home/argocd/.up
-
-  initContainers:
-    - name: up-plugin
-      image: xpkg.upbound.io/upbound/up-cli:v0.39.0
-      command: ["cp"]
-      args:
-        - /usr/local/bin/up
-        - /plugin/up
-      volumeMounts:
-        - name: up-plugin
-          mountPath: /plugin
-```
-
-### Install or upgrade Argo using the values file
-
-Install or upgrade Argo via Helm, including the values from the `up-plugin-values.yaml` file:
-
-```bash
-helm upgrade --install -n argocd -f up-plugin-values.yaml --reuse-values argocd argo/argo-cd
-```
-
-
-### Configure Argo CD
-
-
-To configure Argo CD for annotation resource tracking, edit the Argo CD ConfigMap in the Argo CD namespace.
-Add `application.resourceTrackingMethod: annotation` to the data section as shown below.
-This configuration turns off Argo CD auto pruning, preventing the deletion of Crossplane resources.
-
-Next, configure the [auto respect RBAC for the Argo CD controller][auto-respect-rbac-for-the-argo-cd-controller].
-By default, Argo CD attempts to discover some Kubernetes resource types that don't exist in a control plane.
-You must configure Argo CD to respect the cluster's RBAC rules so that Argo CD can sync.
-Add `resource.respectRBAC: normal` to the data section as shown below.
-
-```yaml
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: argocd-cm
-data:
-  ...
-  application.resourceTrackingMethod: annotation
-  resource.respectRBAC: normal
-```
-
-:::tip
-The `resource.respectRBAC` configuration above tells Argo to respect RBAC for _all_ cluster contexts. If you're using an Argo CD instance to manage more than only control planes, you should consider changing the `clusters` string match for the configuration to apply only to control planes. For example, if every control plane context name followed the convention of being named with the prefix `controlplane-`, you could set the string match to be `controlplane-*`.
-:::
-
-
-### Create a cluster context definition
-
-
-Replace the variables in the following manifest to configure a new Argo cluster context definition.
-
-To configure Argo for a control plane in a Connected Space, replace `stringData.server` with the ingress URL of the control plane. This URL is what's output when using `up ctx`.
-
-```yaml
-apiVersion: v1
-kind: Secret
-metadata:
-  name: my-control-plane
-  namespace: argocd
-  labels:
-    argocd.argoproj.io/secret-type: cluster
-type: Opaque
-stringData:
-  name: my-control-plane-context
-  server: https://<space-host>.spaces.upbound.io/apis/spaces.upbound.io/v1beta1/namespaces/<group>/controlplanes/<control-plane>/k8s
-  config: |
-    {
-      "execProviderConfig": {
-        "apiVersion": "client.authentication.k8s.io/v1",
-        "command": "up",
-        "args": [ "org", "token" ],
-        "env": {
-          "ORGANIZATION": "<org-name>",
-          "UP_TOKEN": "<api-token>"
-        }
-      },
-      "tlsClientConfig": {
-        "insecure": false,
-        "caData": "<base64-encoded-ca>"
-      }
-    }
-```
-
-
-## GitOps for Upbound resources
-
-
-Like any other cloud service, you can drive the lifecycle of Upbound Cloud resources with Crossplane. This lets you establish GitOps flows to declaratively create and manage:
-
-- [control plane groups][control-plane-groups]
-- [control planes][control-planes]
-- [Upbound IAM resources][upbound-iam-resources]
-
-Use a control plane installed with [provider-upbound][provider-upbound] and [provider-kubernetes][provider-kubernetes] to achieve this.
-
-### Provider-upbound
-
-[Provider-upbound][provider-upbound-2] is a Crossplane provider built by Upbound to interact with Upbound resources. Use _provider-upbound_ to declaratively create and manage the lifecycle of IAM resources and repositories:
-
-- [Robots][robots] and their membership to teams
-- [Teams][teams]
-- [Repositories][repositories] and [permissions][permissions] on those repositories.
-
-:::tip
-This provider defines managed resources for control planes, their auth, and permissions. These resources are only applicable to customers who run in Upbound's **Legacy Spaces** control plane hosting environments. Other customers should use provider-kubernetes, explained below, to manage the lifecycle of control planes with Crossplane.
-:::
-
-### Provider-kubernetes
-
-[Provider-kubernetes][provider-kubernetes-3] is a Crossplane provider that defines an [Object][object] resource. Use _Objects_ as general-purpose resources to wrap _any_ Kubernetes resource for Crossplane to manage.
-
-Upbound [Space APIs][space-apis] are Kube-like APIs and implement support for most Kubernetes-style API concepts.
You can use kubectl or any other Kubernetes-compatible tooling to interact with the API. This means you can use _provider-kubernetes_ to drive interactions with Space APIs. - -:::warning -When interacting with a Cloud Space's API, the Kubernetes [watch][watch] feature **isn't implemented.** Argo CD requires _watch_ support to function as expected, meaning you can't point Argo directly at a Cloud Space until it's implemented. -::: - -Use _provider-kubernetes_ to declaratively drive interactions with all [Space APIs][space-apis-1]. Wrap the desired API resource in an _Object_. See the example below for a control plane: - -```yaml -apiVersion: kubernetes.crossplane.io/v1alpha2 -kind: Object -metadata: - name: my-controlplane -spec: - forProvider: - manifest: - apiVersion: spaces.upbound.io/v1beta1 - kind: ControlPlane - metadata: - name: my-controlplane - namespace: default - spec: - crossplane: - autoUpgrade: - channel: Rapid -``` - -[Control plane groups][control-plane-groups-2] are a special case because they technically map to an underlying Kubernetes namespace. You should create a `kind: namespace` with the `spaces.upbound.io/group` label to create a control plane group in a Space. See the example below: - -```yaml -apiVersion: kubernetes.crossplane.io/v1alpha2 -kind: Object -metadata: - name: group1 -spec: - forProvider: - manifest: - apiVersion: v1 - kind: Namespace - metadata: - name: group1 - labels: - spaces.upbound.io/group: "true" - spec: {} -``` - -### Configure auth for provider-kubernetes - -Like any other Crossplane provider, _provider-kubernetes_ requires a valid [ProviderConfig][providerconfig] to authenticate with Upbound before interacting with its APIs. Follow the steps below to configure auth for a ProviderConfig on a control plane that you want to use to interact with Upbound resources. - -1. Define an environment variable for the name of your Upbound org account. Use `up org list` to retrieve this value. -```ini -export UPBOUND_ACCOUNT="" -``` - -2. Create a [personal access token][personal-access-token] and store it as an environment variable. -```shell -export UPBOUND_TOKEN="" -``` - -3. Log on to Upbound. -```shell -up login -``` - -4. Create a kubeconfig for the desired Cloud Space instance you want to interact with. -```shell -export CONTROLPLANE_CONFIG=/tmp/controlplane-kubeconfig -KUBECONFIG=$CONTROLPLANE_CONFIG up ctx $UPBOUND_ACCOUNT/upbound-gcp-us-west-1 # Replace this path with whichever Cloud Space you want to communicate with. -``` - -5. On the control plane you want to use to interact with Upbound resources, create a secret containing the credentials: -```shell -kubectl -n crossplane-system create secret generic cluster-config --from-file=kubeconfig=$CONTROLPLANE_CONFIG -kubectl -n crossplane-system create secret generic upbound-credentials --from-literal=token=$UPBOUND_TOKEN -``` - -6. Create a ProviderConfig that references the credentials created in the prior step. Create this resource in your control plane: -```yaml -apiVersion: kubernetes.crossplane.io/v1alpha1 -kind: ProviderConfig -metadata: - name: default -spec: - credentials: - source: Secret - secretRef: - namespace: crossplane-system - name: cluster-config - key: kubeconfig - identity: - type: UpboundTokens - source: Secret - secretRef: - name: upbound-credentials - namespace: crossplane-system - key: token -``` - -You can now create _Objects_ in the control plane which wrap Space APIs. 
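-
-As a usage example, the sketch below shows an _Object_ that explicitly references
-the `default` ProviderConfig created in the previous step. The control plane name
-is illustrative; the earlier _Object_ examples omit `providerConfigRef`, in which
-case provider-kubernetes falls back to the ProviderConfig named `default`.
-
-```yaml
-apiVersion: kubernetes.crossplane.io/v1alpha2
-kind: Object
-metadata:
-  name: dev-controlplane            # illustrative name
-spec:
-  providerConfigRef:
-    name: default                   # the ProviderConfig created above
-  forProvider:
-    manifest:
-      apiVersion: spaces.upbound.io/v1beta1
-      kind: ControlPlane
-      metadata:
-        name: dev-controlplane
-        namespace: default
-      spec:
-        crossplane:
-          autoUpgrade:
-            channel: Stable
-```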
-
-[generate-a-kubeconfig]: /manuals/cli/concepts/contexts
-[control-plane-groups]: /spaces/concepts/groups
-[control-planes]: /spaces/concepts/control-planes
-[upbound-iam-resources]: /manuals/platform/concepts/identity-management
-[space-apis]: /reference/apis/spaces-api/v1_9
-[space-apis-1]: /reference/apis/spaces-api/v1_9
-[control-plane-groups-2]: /spaces/concepts/groups
-
-
-[argo-cd]: https://argo-cd.readthedocs.io/en/stable/
-[my-account-api-tokens]: https://accounts.upbound.io/settings/tokens
-[auto-respect-rbac-for-the-argo-cd-controller]: https://argo-cd.readthedocs.io/en/stable/operator-manual/declarative-setup/#auto-respect-rbac-for-controller
-[spec-writeconnectionsecrettoref]: /reference/apis/spaces-api/latest
-[auto-respect-rbac-for-the-argo-cd-controller-1]: https://argo-cd.readthedocs.io/en/stable/operator-manual/declarative-setup/#auto-respect-rbac-for-controller
-[provider-upbound]: https://marketplace.upbound.io/providers/upbound/provider-upbound
-[provider-kubernetes]: https://marketplace.upbound.io/providers/upbound/provider-kubernetes
-[provider-upbound-2]: https://marketplace.upbound.io/providers/upbound/provider-upbound
-[robots]: https://marketplace.upbound.io/providers/upbound/provider-upbound/v0.8.0/resources/iam.upbound.io/Robot/v1alpha1
-[teams]: https://marketplace.upbound.io/providers/upbound/provider-upbound/v0.8.0/resources/iam.upbound.io/Team/v1alpha1
-[repositories]: https://marketplace.upbound.io/providers/upbound/provider-upbound/v0.8.0/resources/repository.upbound.io/Repository/v1alpha1
-[permissions]: https://marketplace.upbound.io/providers/upbound/provider-upbound/v0.8.0/resources/repository.upbound.io/Permission/v1alpha1
-[provider-kubernetes-3]: https://marketplace.upbound.io/providers/upbound/provider-kubernetes
-[object]: https://marketplace.upbound.io/providers/upbound/provider-kubernetes/v0.17.0/resources/kubernetes.crossplane.io/Object/v1alpha2
-[watch]: https://kubernetes.io/docs/reference/using-api/api-concepts/#watch-bookmarks
-[providerconfig]: https://marketplace.upbound.io/providers/upbound/provider-kubernetes/v0.17.0/resources/kubernetes.crossplane.io/ProviderConfig/v1alpha1
-[personal-access-token]: https://accounts.upbound.io/settings/tokens
diff --git a/spaces_versioned_docs/version-v1.13/howtos/control-plane-topologies.md b/spaces_versioned_docs/version-v1.13/howtos/control-plane-topologies.md
deleted file mode 100644
index 9020e5a41..000000000
--- a/spaces_versioned_docs/version-v1.13/howtos/control-plane-topologies.md
+++ /dev/null
@@ -1,566 +0,0 @@
----
-title: Control Plane Topologies
-sidebar_position: 15
-description: Configure scheduling of composites to remote control planes
----
-
-:::info API Version Information
-This guide is for the Control Plane Topology feature, which is in **private preview**. For interested customers with access to this feature, it applies to v1.12+.
-
-For related API specifications, see the [Spaces API Reference](../../../reference/apis/spaces-api/).
-:::
-
-:::important
-This feature is in private preview for select customers in Upbound Spaces. If you're interested in this deployment mode, please [contact us](https://www.upbound.io/support/contact).
-:::
-
-Upbound's _Control Plane Topology_ feature lets you build and deploy a platform
-of multiple control planes. These control planes work together for a unified platform
-experience.
-
-
-With the _Topology_ feature, you can install resource APIs that are
-reconciled by other control planes and configure the routing that occurs between
-control planes. You can also build compositions that reference other resources
-running on your control plane or elsewhere in Upbound.
-
-This guide explains how to use Control Plane Topology APIs to install and configure
-remote APIs, and to build powerful compositions that reference other resources.
-
-## Benefits
-
-The Control Plane Topology feature provides the following benefits:
-
-* Decouple your platform architecture into independent offerings to improve your platform's software development lifecycle.
-* Install composite APIs from Configurations as CRDs which are fulfilled and reconciled by other control planes.
-* Route APIs to other control planes by configuring an _Environment_ resource, which defines a set of routable dimensions.
-
-## How it works
-
-
-Imagine the scenario where you want to let a user reference a subnet when creating a database instance. To your control plane, the `kind: database` and `kind: subnet` are independent resources. To you as the composition author, these resources have an important relationship. It may be that:
-
-- you don't want your user to ever be able to create a database without specifying a subnet.
-- you want to let them create a subnet when they create the database, if it doesn't exist.
-- you want to allow them to reuse a subnet that got created elsewhere or gets shared by another user.
-
-In each of these scenarios, you must resort to writing complex composition logic
-to handle each case. The problem is compounded when the resource exists in a
-context separate from the current control plane's context. Imagine a scenario
-where one control plane manages Database resources and a second control plane
-manages networking resources. With the _Topology_ feature, you can offload these
-concerns to Upbound machinery.
-
-
-![Control Plane Topology feature arch](/img/topology-arch.png)
-
-## Prerequisites
-
-Enable the Control Plane Topology feature in the Space you plan to run your control plane in:
-
-- Cloud Spaces: Not available yet
-- Connected Spaces: Space administrator must enable this feature
-- Disconnected Spaces: Space administrator must enable this feature
-
-
-
-## Compose resources with _ReferencedObjects_
-
-
-
-_ReferencedObject_ is a resource type available in an Upbound control plane that lets you reference other Kubernetes resources in Upbound.
-
-:::tip
-This feature is useful for composing resources that exist in a
-remote context, like another control plane. You can also use
-_ReferencedObjects_ to resolve references to any other Kubernetes object
-in the current control plane context. This could be a secret, another Crossplane
-resource, or more.
-:::
-
-### Declare the resource reference in your XRD
-
-To compose a _ReferencedObject_, you should start by adding a resource reference
-in your Composite Resource Definition (XRD). The convention for the resource
-reference follows the shape shown below:
-
-```yaml
-<kind>Ref:
-  type: object
-  properties:
-    apiVersion:
-      type: string
-      default: "<apiVersion>"
-      enum: [ "<apiVersion>" ]
-    kind:
-      type: string
-      default: "<kind>"
-      enum: [ "<kind>" ]
-    grants:
-      type: array
-      default: [ "Observe" ]
-      items:
-        type: string
-        enum: [ "Observe", "Create", "Update", "Delete", "*" ]
-    name:
-      type: string
-    namespace:
-      type: string
-  required:
-    - name
-```
-
-The `<kind>Ref` key should be named for the kind of resource you want to reference.
The `apiVersion` and `kind` should be the associated API version and kind of the resource you want to reference. - -The `name` and `namespace` strings are inputs that let your users specify the resource instance. - -#### Grants - -The `grants` field is a special array that lets you give users the power to influence the behavior of the referenced resource. You can configure which of the available grants you let your user select and which it defaults to. Similar in behavior as [Crossplane management policies][crossplane-management-policies], each grant value does the following: - -- **Observe:** The composite may observe the state of the referenced resource. -- **Create:** The composite may create the referenced resource if it doesn't exist. -- **Update:** The composite may update the referenced resource. -- **Delete:** The composite may delete the referenced resource. -- **\*:** The composite has full control over the referenced resource. - -Here are some examples that show how it looks in practice: - -
- -Show example for defining the reference to another composite resource - -```yaml -apiVersion: apiextensions.crossplane.io/v1 -kind: CompositeResourceDefinition -metadata: - name: xsqlinstances.database.platform.upbound.io -spec: - type: object - properties: - parameters: - type: object - properties: - networkRef: - type: object - properties: - apiVersion: - type: string - default: "networking.platform.upbound.io" - enum: [ "networking.platform.upbound.io" ] - grants: - type: array - default: [ "Observe" ] - items: - type: string - enum: [ "Observe" ] - kind: - type: string - default: "Network" - enum: [ "Network" ] - name: - type: string - namespace: - type: string - required: - - name -``` - -
- - -
-Show example for defining the reference to a secret -```yaml -apiVersion: apiextensions.crossplane.io/v1 -kind: CompositeResourceDefinition -metadata: - name: xsqlinstances.database.platform.upbound.io -spec: - type: object - properties: - parameters: - type: object - properties: - secretRef: - type: object - properties: - apiVersion: - type: string - default: "v1" - enum: [ "v1" ] - grants: - type: array - default: [ "Observe" ] - items: - type: string - enum: [ "Observe", "Create", "Update", "Delete", "*" ] - kind: - type: string - default: "Secret" - enum: [ "Secret" ] - name: - type: string - namespace: - type: string - required: - - name -``` -
- -### Manually add the jsonPath - -:::important -This step is a known limitation of the preview. We're working on tooling that -removes the need for authors to do this step. -::: - -During the preview timeframe of this feature, you must add an annotation by hand -to the XRD. In your XRD's `metadata.annotations`, set the -`references.upbound.io/schema` annotation. It should be a JSON string in the -following format: - -```json -{ - "apiVersion": "references.upbound.io/v1alpha1", - "kind": "ReferenceSchema", - "references": [ - { - "jsonPath": ".spec.parameters.secretRef", - "kinds": [ - { - "apiVersion": "v1", - "kind": "Secret" - } - ] - } - ] -} -``` - -Flatten this JSON into a string and set the annotation on your XRD. View the -example below for an illustration: - -
-Show example setting the references.upbound.io/schema annotation -```yaml -apiVersion: apiextensions.crossplane.io/v1 -kind: CompositeResourceDefinition -metadata: - name: xthings.networking.acme.com - annotations: - references.upbound.io/schema: '{"apiVersion":"references.upbound.io/v1alpha1","kind":"ReferenceSchema","references":[{"jsonPath":".spec.secretRef","kinds":[{"apiVersion":"v1","kind":"Secret"}]},{"jsonPath":".spec.configMapRef","kinds":[{"apiVersion":"v1","kind":"ConfigMap"}]}]}' -``` -
- -
-Show example for setting multiples references in the references.upbound.io/schema annotation -```yaml -apiVersion: apiextensions.crossplane.io/v1 -kind: CompositeResourceDefinition -metadata: - name: xthings.networking.acme.com - annotations: - references.upbound.io/schema: '{"apiVersion":"references.upbound.io/v1alpha1","kind":"ReferenceSchema","references":[{"jsonPath":".spec.parameters.secretRef","kinds":[{"apiVersion":"v1","kind":"Secret"}]},{"jsonPath":".spec.parameters.configMapRef","kinds":[{"apiVersion":"v1","kind":"ConfigMap"}]}]}' -``` -
- - -You can use a VSCode extension like [vscode-pretty-json][vscode-pretty-json] to make this task easier. - - -### Compose a _ReferencedObject_ - -To pair with the resource reference declared in your XRD, you must compose the referenced resource. Use the _ReferencedObject_ resource type to bring the resource into your composition. _ReferencedObject_ has the following schema: - -```yaml -apiVersion: references.upbound.io/v1alpha1 -kind: ReferencedObject -spec: - managementPolicies: - - Observe - deletionPolicy: Orphan - composite: - apiVersion: - kind: - name: - jsonPath: .spec.parameters.secretRef -``` - -The `spec.composite.apiVersion` and `spec.composite.kind` should match the API version and kind of the `compositeTypeRef` declared in your composition. The `spec.composite.name` should be the name of the composite resource instance. - -The `spec.composite.jsonPath` should be the path to the root of the resource ref you declared in your XRD. - -
-Show example for composing a resource reference to a secret - -```yaml -apiVersion: apiextensions.crossplane.io/v1 -kind: Composition -metadata: - name: demo-composition -spec: - compositeTypeRef: - apiVersion: networking.acme.com/v1alpha1 - kind: XThing - mode: Pipeline - pipeline: - - step: patch-and-transform - functionRef: - name: crossplane-contrib-function-patch-and-transform - input: - apiVersion: pt.fn.crossplane.io/v1beta1 - kind: Resources - resources: - - name: secret-ref-object - base: - apiVersion: references.upbound.io/v1alpha1 - kind: ReferencedObject - spec: - managementPolicies: - - Observe - deletionPolicy: Orphan - composite: - apiVersion: networking.acme.com/v1alpha1 - kind: XThing - name: TO_BE_PATCHED - jsonPath: .spec.parameters.secretRef - patches: - - type: FromCompositeFieldPath - fromFieldPath: metadata.name - toFieldPath: spec.composite.name -``` -
-
-By declaring a resource reference in your XRD, Upbound handles resolution of the desired resource.
-
-## Deploy APIs
-
-To configure routing resource requests between control planes, you need to deploy APIs in at least two control planes.
-
-### Deploy into a service-level control plane
-
-Package the APIs you build into a Configuration package and deploy it on a
-control plane in an Upbound Space. In Upbound, it's common to refer to the
-control plane where the Configuration package is deployed as a **service-level
-control plane**. This control plane runs the controllers that process the API
-requests and provision underlying resources. In a later section, you learn how
-you can use _Topology_ features to [configure routing][configure-routing].
-
-### Deploy as Remote APIs on a platform control plane
-
-You should use the same package source as deployed in the **service-level
-control planes**, but this time deploy the Configuration in a separate control
-plane as a _RemoteConfiguration_. The _RemoteConfiguration_ installs Kubernetes
-CustomResourceDefinitions for the APIs defined in the Configuration package, but
-no controllers get deployed.
-
-### Install a _RemoteConfiguration_
-
-_RemoteConfiguration_ is a resource type available in Upbound managed control
-planes that acts like a Crossplane [Configuration][configuration]
-package. Unlike standard Crossplane Configurations, which install XRDs,
-compositions, and functions into a desired control plane, _RemoteConfigurations_
-install only the CRDs for claimable composite resource types.
-
-#### Install directly
-
-Install a _RemoteConfiguration_ by defining the following and applying it to
-your control plane:
-
-```yaml
-apiVersion: pkg.upbound.io/v1alpha1
-kind: RemoteConfiguration
-metadata:
-  name: <name>
-spec:
-  package: <package-reference>
-```
-
-#### Declare as a project dependency
-
-You can declare _RemoteConfigurations_ as dependencies in your control plane's
-[project file][project-file]. Use the up CLI to add the dependency, providing
-the `--remote` flag:
-
-```bash
-up dep add <package-reference> --remote
-```
-
-This command adds a declaration in the `spec.apiDependencies` stanza of your
-project's `upbound.yaml` as demonstrated below:
-
-```yaml
-apiVersion: meta.dev.upbound.io/v1alpha1
-kind: Project
-metadata:
-  name: service-controlplane
-spec:
-  apiDependencies:
-  - configuration: xpkg.upbound.io/upbound/remote-configuration
-    version: '>=v0.0.0'
-  dependsOn:
-  - provider: xpkg.upbound.io/upbound/provider-kubernetes
-    version: '>=v0.0.0'
-```
-
-Like a Configuration, a _RemoteConfigurationRevision_ gets created when the
-package gets installed on a control plane. Unlike Configurations, XRDs and
-compositions **don't** get installed by a _RemoteConfiguration_. Only the CRDs
-for claimable composite types get installed and Crossplane thereafter manages
-their lifecycle. You can tell when a CRD got installed by a
-_RemoteConfiguration_ because it has the `internal.scheduling.upbound.io/remote:
-true` label:
-
-```yaml
-apiVersion: apiextensions.k8s.io/v1
-kind: CustomResourceDefinition
-metadata:
-  name: things.networking.acme.com
-  labels:
-    internal.scheduling.upbound.io/remote: "true"
-```
-
-## Use an _Environment_ to route resources
-
-_Environment_ is a resource type available in Upbound control planes that works
-in tandem with resources installed by _RemoteConfigurations_. _Environment_ is a
-namespace-scoped resource that lets you configure how to route remote resources
-to other control planes by a set of user-defined dimensions.
-
-### Define a routing dimension
-
-To establish a routing dimension between two control planes, you must do two
-things:
-
-1. Label the service control plane with the name and value of a dimension.
-2. Configure an environment on another control plane with a dimension matching the field and value of the service control plane.
-
-The example below demonstrates the creation of a service control plane with a
-`region` dimension:
-
-```yaml
-apiVersion: spaces.upbound.io/v1beta1
-kind: ControlPlane
-metadata:
-  labels:
-    dimension.scheduling.upbound.io/region: "us-east-1"
-  name: prod-1
-  namespace: default
-spec:
-```
-
-Upbound's Spaces controller keeps an inventory of all declared dimensions and
-listens for control planes to route to them.
-
-### Create an _Environment_
-
-Next, create an _Environment_ on a separate control plane, referencing the
-dimension from before. The example below demonstrates routing all remote
-resource requests in the `default` namespace of the control plane based on a
-single `region` dimension:
-
-```yaml
-apiVersion: scheduling.upbound.io/v1alpha1
-kind: Environment
-metadata:
-  name: default
-  namespace: default
-spec:
-  dimensions:
-    region: us-east-1
-```
-
-You can specify as many dimensions as you want. The example below demonstrates
-multiple dimensions:
-
-```yaml
-apiVersion: scheduling.upbound.io/v1alpha1
-kind: Environment
-metadata:
-  name: default
-  namespace: default
-spec:
-  dimensions:
-    region: us-east-1
-    env: prod
-    offering: databases
-```
-
-In order for the routing controller to match, _all_ dimensions must match for a
-given service control plane.
-
-You can specify dimension overrides on a per-resource group basis. This lets you
-configure default routing rules for a given _Environment_ and override routing
-on a per-offering basis.
-
-```yaml
-apiVersion: scheduling.upbound.io/v1alpha1
-kind: Environment
-metadata:
-  name: default
-  namespace: default
-spec:
-  dimensions:
-    region: us-east-1
-  resourceGroups:
-    - name: database.platform.upbound.io # database
-      dimensions:
-        region: "us-east-1"
-        env: "prod"
-        offering: "databases"
-    - name: networking.platform.upbound.io # networks
-      dimensions:
-        region: "us-east-1"
-        env: "prod"
-        offering: "networks"
-```
-
-### Confirm the configured route
-
-After you create an _Environment_ on a control plane, the selected routes get
-reported in the _Environment's_ `.status.resourceGroups`. This is illustrated
-below:
-
-```yaml
-apiVersion: scheduling.upbound.io/v1alpha1
-kind: Environment
-metadata:
-  name: default
-...
-status:
-  resourceGroups:
-    - name: database.platform.upbound.io # database
-      proposed:
-        controlPlane: ctp-1
-        group: default
-        space: upbound-gcp-us-central1
-      dimensions:
-        region: "us-east-1"
-        env: "prod"
-        offering: "databases"
-```
-
-If you don't see a response in `.status.resourceGroups`, it indicates that no
-match was found or that an error occurred establishing the route.
-
-:::tip
-There's no limit to the number of control planes you can route to. You can also
-stack routing and form your own topology of control planes, with multiple layers
-of routing.
-:::
-
-### Limitations
-
-
-Routing from one control plane to another is currently scoped to control planes
-that exist in a single Space. You can't route resource requests to control
-planes that exist on a cross-Space boundary.
-
-
-[project-file]: /manuals/cli/howtos/project
-[contact-us]: https://www.upbound.io/usage/support/contact
-[crossplane-management-policies]: https://docs.crossplane.io/latest/managed-resources/managed-resources/#managementpolicies
-[vscode-pretty-json]: https://marketplace.visualstudio.com/items?itemName=chrismeyers.vscode-pretty-json
-[configure-routing]: #use-an-environment-to-route-resources
-[configuration]: https://docs.crossplane.io/latest/packages/providers
diff --git a/spaces_versioned_docs/version-v1.13/howtos/ctp-connector.md b/spaces_versioned_docs/version-v1.13/howtos/ctp-connector.md
deleted file mode 100644
index b2cc48c49..000000000
--- a/spaces_versioned_docs/version-v1.13/howtos/ctp-connector.md
+++ /dev/null
@@ -1,508 +0,0 @@
----
-title: Control Plane Connector
-weight: 80
-description: A guide for how to connect a Kubernetes app cluster to a control plane in Upbound using the Control Plane connector feature
-plan: "standard"
----
-
-
-
-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+). Control Plane Connector functionality is consistent across versions.
-
-For related API specifications and resources, see the [Spaces API Reference](../../../reference/apis/spaces-api/).
-:::
-
-Control Plane Connector connects arbitrary Kubernetes application clusters outside
-Upbound Spaces to your control planes running in Upbound Spaces.
-This lets you interact with your control plane's API from the app cluster. The claim APIs and the namespaced XR APIs
-you define via CompositeResourceDefinitions (XRDs) in the control plane are available in
-your app cluster alongside Kubernetes workload APIs like Pod. Control Plane Connector
-enables the same experience as a locally installed Crossplane.
-
-![control plane connector operations flow](/img/ConnectorFlow.png)
-
-### Control Plane Connector operations
-
-Control Plane Connector leverages the [Kubernetes API AggregationLayer][kubernetes-api-aggregationlayer]
-to create an extension API server and serve the claim APIs and the namespaced XR APIs in the control plane. It
-discovers the claim APIs and the namespaced XR APIs available in the control plane and registers corresponding
-APIService resources on the app cluster. Those APIService resources refer to the
-extension API server of Control Plane Connector.
-
-The claim APIs and the namespaced XR APIs are available in your Kubernetes cluster, just like all native
-Kubernetes APIs.
-
-Control Plane Connector processes every request targeting the claim APIs and the namespaced XR APIs and makes the
-relevant requests to the connected control plane.
-
-Only the connected control plane stores and processes all claims and namespaced XRs created in the app
-cluster, eliminating any storage use at the application cluster. The control plane
-connector provisions a target namespace at the control plane for the app cluster and stores
-all claims and namespaced XRs in this target namespace.
-
-For managing the claims and namespaced XRs, the Control Plane Connector creates a unique identifier for a
-resource by combining input parameters from claims, including:
-- `metadata.name`
-- `metadata.namespace`
-- your cluster name
-
-
-It employs SHA-256 hashing to generate a hash value and then extracts the first
-16 characters of that hash. This ensures the resulting identifier remains within
-the 64-character limit in Kubernetes.
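-
-The shell sketch below mirrors this derivation for the inputs used in the example that follows. It's illustrative only, not the connector's exact implementation:
-
-```bash
-# Claim my-bucket in namespace test, cluster ID 00000000-0000-0000-0000-000000000000.
-printf 'my-bucket-x-test-x-00000000-0000-0000-0000-000000000000' \
-  | sha256sum | cut -c1-16
-```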
-
-
-
-For instance, if a claim named `my-bucket` exists in the test namespace in
-`cluster-dev`, the system calculates the SHA-256 hash from
-`my-bucket-x-test-x-00000000-0000-0000-0000-000000000000` and takes the first 16
-characters. The control plane side then names the claim `claim-c603e518969b413e`.
-
-For namespaced XRs, the process is similar; only the prefix is different.
-The name becomes `nxr-c603e518969b413e`.
-
-
-### Installation
-
-
-
-
-Log in with the up CLI:
-
-```bash
-up login
-```
-
-Connect your app cluster to a namespace in an Upbound control plane with `up controlplane connector install `. This command creates a user token and installs the Control Plane Connector in your cluster. It's recommended you create a values file called `connector-values.yaml` with the contents below. Select the tab for the environment your control plane runs in.
-
-
-
-
-
-
-```yaml
-upbound:
-  # This is your org account in Upbound e.g. the name displayed after executing `up org list`
-  account: 
-  # This is a personal access token generated in the Upbound Console
-  token: 
-
-spaces:
-  # Use this host if your control plane runs in Upbound's GCP Cloud Space; otherwise use upbound-aws-us-east-1.spaces.upbound.io
-  host: "upbound-gcp-us-west-1.spaces.upbound.io"
-  insecureSkipTLSVerify: true
-  controlPlane:
-    # The name of the control plane you want the Connector to attach to
-    name: 
-    # The control plane group the control plane resides in
-    group: 
-    # The namespace within the control plane to sync claims from the app cluster to. NOTE: This must be created before you install the connector.
-    claimNamespace: 
-```
-
-
-
-
-
-1. Create a [kubeconfig][kubeconfig] for the control plane. Update your Upbound context to the path for your desired control plane.
-```bash
-up login
-up ctx /upbound-gcp-us-central-1/default/your-control-plane
-up ctx . -f - > context.yaml
-```
-
-2. Write it to a secret in the cluster where you plan to
-install the Control Plane Connector.
-```bash
-kubectl create secret generic my-controlplane-kubeconfig --from-file=context.yaml
-```
-
-3. Reference this secret in the
-`spaces.controlPlane.kubeconfigSecret` field below.
-
-```yaml
-spaces:
-  controlPlane:
-    # The namespace within the control plane to sync claims from the app cluster to. NOTE: This must be created before you install the connector.
-    claimNamespace: 
-    kubeconfigSecret:
-      name: my-controlplane-kubeconfig
-      key: kubeconfig
-```
-
-
-
-
-
-
-Provide the values file above when you run the CLI command:
-
-
-```bash {copy-lines="3"}
-up controlplane connector install my-control-plane my-app-ns-1 --file=connector-values.yaml
-```
-
-The claim APIs and the namespaced XR APIs from your control plane are now visible in the cluster.
-You can verify this with `kubectl api-resources`.
-
-```bash
-kubectl api-resources
-```
-
-### Uninstall
-
-Disconnect an app cluster on which you previously installed the Control Plane Connector by
-running the following:
-
-```bash
-up ctp connector uninstall 
-```
-
-This command uninstalls the Helm chart for the Control Plane Connector from an app
-cluster. It moves any claims in the app cluster into the control plane
-at the specified namespace.
-
-:::tip
-Make sure your kubeconfig's current context points at the app cluster
-you want to uninstall Control Plane Connector from.
-:::
-
-
-
-
-It's recommended you create a values file called `connector-values.yaml` and
-provide the following below.
-Select the tab for the environment your control plane runs in.
-
-
-
-
-
-
-```yaml
-upbound:
-  # This is your org account in Upbound e.g. the name displayed after executing `up org list`
-  account: 
-  # This is a personal access token generated in the Upbound Console
-  token: 
-
-spaces:
-  # Upbound GCP US-West-1     upbound-gcp-us-west-1.spaces.upbound.io
-  # Upbound AWS US-East-1     upbound-aws-us-east-1.spaces.upbound.io
-  # Upbound GCP US-Central-1  upbound-gcp-us-central-1.spaces.upbound.io
-  host: ""
-  insecureSkipTLSVerify: true
-  controlPlane:
-    # The name of the control plane you want the Connector to attach to
-    name: 
-    # The control plane group the control plane resides in
-    group: 
-    # The namespace within the control plane to sync claims from the app cluster to.
-    # NOTE: This must be created before you install the connector.
-    claimNamespace: 
-```
-
-
-
-
-Create a [kubeconfig][kubeconfig-1] for the
-control plane. Write it to a secret in the cluster where you plan to
-install the Control Plane Connector. Reference this secret in the
-`spaces.controlPlane.kubeconfigSecret` field below.
-
-```yaml
-spaces:
-  controlPlane:
-    # The namespace within the control plane to sync claims from the app cluster to. NOTE: This must be created before you install the connector.
-    claimNamespace: 
-    kubeconfigSecret:
-      name: my-controlplane-kubeconfig
-      key: kubeconfig
-```
-
-
-
-
-
-
-Provide the values file above when you `helm install` the Control Plane Connector:
-
-
-```bash
-helm install --wait mcp-connector oci://xpkg.upbound.io/spaces-artifacts/mcp-connector -n kube-system -f connector-values.yaml
-```
-:::tip
-Create an API token from the Upbound user account settings page in the console by following [these instructions][these-instructions].
-:::
-
-### Uninstall
-
-You can uninstall Control Plane Connector with Helm by running the following:
-
-```bash
-helm uninstall mcp-connector
-```
-
-
-
-
-
-### Example usage
-
-This example creates a control plane using [Configuration
-EKS][configuration-eks]. `KubernetesCluster` is
-available as a claim API in your control plane. The following is [an
-example][an-example]
-object you can create in your control plane.
-
-```yaml
-apiVersion: k8s.starter.org/v1alpha1
-kind: KubernetesCluster
-metadata:
-  name: my-cluster
-  namespace: default
-spec:
-  id: my-cluster
-  parameters:
-    nodes:
-      count: 3
-      size: small
-    services:
-      operators:
-        prometheus:
-          version: "34.5.1"
-  writeConnectionSecretToRef:
-    name: my-cluster-kubeconfig
-```
-
-After connecting your Kubernetes app cluster to the control plane, you
-can create the `KubernetesCluster` object in your app cluster. Although your
-local cluster has an object, the actual resource is in your managed control
-plane inside Upbound.
-
-```bash {copy-lines="3"}
-# Applying the claim YAML above.
-# kubectl is set up to talk with your Kubernetes cluster.
-kubectl apply -f claim.yaml
-
-
-kubectl get claim -A
-NAME         SYNCED   READY   CONNECTION-SECRET       AGE
-my-cluster   True     True    my-cluster-kubeconfig   2m
-```
-
-Once Kubernetes creates the object, open the Upbound Console to see it.
-
-![Claim by connector in console](/img/ClaimInConsole.png)
-
-You can interact with the object through your app cluster just as if it
-lived locally.
-
-### Migration to control planes
-
-This guide details the migration of a Crossplane installation to Upbound-managed
-control planes using the Control Plane Connector to manage claims on an application
-cluster.
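-
-At a glance, the migration uses the `up` migration commands detailed in the sections that follow (names like `my-export.tar.gz`, `ctp-a`, and `my-appcluster` are placeholders):
-
-```bash
-# 1. Export state from the existing Crossplane cluster (app cluster kubecontext).
-up controlplane migration export --pause-before-export --output=my-export.tar.gz --yes
-
-# 2. Create a destination control plane and import the archive.
-up ctp create ctp-a
-up controlplane migration import -i my-export.tar.gz \
-  --unpause-after-import \
-  --mcp-connector-cluster-id=my-appcluster \
-  --mcp-connector-claim-namespace=my-appcluster
-```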
-
-![migration flow application cluster to control plane](/img/ConnectorMigration.png)
-
-#### Export all resources
-
-Before proceeding, ensure that you have set the correct kubecontext for your application
-cluster.
-
-```bash
-up controlplane migration export --pause-before-export --output=my-export.tar.gz --yes
-```
-
-This command performs the following:
-- Pauses all claim, composite, and managed resources before export.
-- Scans the control plane for resource types.
-- Exports Crossplane and native resources.
-- Archives the exported state into `my-export.tar.gz`.
-
-Example output:
-```bash
-Exporting control plane state...
-  ✓ Pausing all claim resources before export... 1 resources paused! ⏸️
-  ✓ Pausing all composite resources before export... 7 resources paused! ⏸️
-  ✓ Pausing all managed resources before export... 34 resources paused! ⏸️
-  ✓ Scanning control plane for types to export... 231 types found! 👀
-  ✓ Exporting 231 Crossplane resources...125 resources exported! 📤
-  ✓ Exporting 3 native resources...19 resources exported! 📤
-  ✓ Archiving exported state... archived to "my-export.tar.gz"! 📦
-
-Successfully exported control plane state!
-```
-
-#### Import all resources
-
-The import restores the exported resources into the target control plane, which
-serves as the destination for the Control Plane Connector.
-
-
-Log into Upbound and select the correct context:
-
-```bash
-up login
-up ctx 
-up ctp create ctp-a
-```
-
-Output:
-```bash
-ctp-a created
-```
-
-Verify that the Crossplane version on both the application cluster and the new managed
-control plane matches the core Crossplane version.
-
-Use the following command to import the resources:
-```bash
-up controlplane migration import -i my-export.tar.gz \
-  --unpause-after-import \
-  --mcp-connector-cluster-id=my-appcluster \
-  --mcp-connector-claim-namespace=my-appcluster
-```
-
-This command:
-- Restores base resources.
-- Waits for XRDs and packages to establish.
-- Imports claims and XR resources.
-- Finalizes the import and resumes managed resources.
-
-:::note
-`--mcp-connector-cluster-id` needs to be unique per application cluster.
-`--mcp-connector-claim-namespace` is the namespace the system creates during the import.
-:::
-
-Example output:
-```bash
-Importing control plane state...
-  ✓ Reading state from the archive... Done! 👀
-  ✓ Importing base resources... 56 resources imported!📥
-  ✓ Waiting for XRDs... Established! ⏳
-  ✓ Waiting for Packages... Installed and Healthy! ⏳
-  ✓ Importing remaining resources... 88 resources imported! 📥
-  ✓ Finalizing import... Done! 🎉
-  ✓ Unpausing managed resources ... Done! ▶️
-
-Successfully imported control plane state!
-```
-
-#### Verify imported claims
-
-
-The Control Plane Connector renames all claims and adds additional labels to them.
-
-```bash
-kubectl get claim -A
-```
-
-Example output:
-```bash
-NAMESPACE       NAME                                                         SYNCED   READY   CONNECTION-SECRET             AGE
-my-appcluster   cluster.aws.platformref.upbound.io/claim-e708ff592b974f51   True     True    platform-ref-aws-kubeconfig   3m17s
-```
-
-Inspect the labels:
-```bash
-kubectl get -n my-appcluster cluster.aws.platformref.upbound.io/claim-e708ff592b974f51 -o yaml | yq .metadata.labels
-```
-
-Example output:
-```yaml
-mcp-connector.upbound.io/app-cluster: my-appcluster
-mcp-connector.upbound.io/app-namespace: default
-mcp-connector.upbound.io/app-resource-name: example
-```
-
-#### Cleanup the app cluster
-
-Remove all Crossplane-related resources from the application cluster, including:
-
-- Managed Resources
-- Claims
-- Compositions
-- XRDs
-- Packages (Functions, Configurations, Providers)
-- Crossplane and all associated CRDs
-
-
-#### Install Control Plane Connector
-
-
-Follow the preceding installation guide and configure the `connector-values.yaml`:
-
-```yaml
-# NOTE: clusterID needs to match the --mcp-connector-cluster-id used in the import on the managed control plane
-clusterID: my-appcluster
-upbound:
-  account: 
-  token: 
-
-spaces:
-  host: ""
-  insecureSkipTLSVerify: true
-  controlPlane:
-    name: 
-    group: 
-    # NOTE: This is the --mcp-connector-claim-namespace used during the import to the control plane
-    claimNamespace: 
-```
-Once the Control Plane Connector installs, verify that the control plane's resources exist in the application
-cluster:
-
-```bash
-kubectl api-resources | grep platform
-```
-
-Example output:
-```bash
-awslbcontrollers   aws.platform.upbound.io/v1alpha1       true   AWSLBController
-podidentities      aws.platform.upbound.io/v1alpha1       true   PodIdentity
-sqlinstances       aws.platform.upbound.io/v1alpha1       true   SQLInstance
-clusters           aws.platformref.upbound.io/v1alpha1    true   Cluster
-osss               observe.platform.upbound.io/v1alpha1   true   Oss
-apps               platform.upbound.io/v1alpha1           true   App
-```
-
-Confirm that the claims from the control plane appear in the application cluster:
-
-```bash
-kubectl get claim -A
-```
-
-Example output:
-```bash
-NAMESPACE   NAME                                         SYNCED   READY   CONNECTION-SECRET             AGE
-default     cluster.aws.platformref.upbound.io/example   True     True    platform-ref-aws-kubeconfig   127m
-```
-
-With this guide, you migrated your Crossplane installation to
-Upbound control planes, with seamless integration into your
-application cluster through the Control Plane Connector.
-
-### Connect multiple app clusters to a control plane
-
-Claims are stored in a unique namespace in the Upbound control plane.
-Every cluster gets its own control plane namespace.
-
-![Multi-cluster architecture with control plane connector](/img/ConnectorMulticlusterArch.png)
-
-There's no limit on the number of clusters connected to a single control plane.
-Control plane operators can see all their infrastructure in a central control
-plane.
-
-Without control planes and Control Plane Connector, users have to install
-Crossplane and providers on each cluster, and each cluster requires provider
-configuration with the necessary credentials. With a single control plane and
-multiple clusters connected through Upbound tokens, you don't need to hand out
-any cloud credentials to the clusters.
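-
-As a sketch, connecting a second cluster reuses the same install flow with a distinct values file and target namespace per cluster (the names below are hypothetical):
-
-```bash
-# Run once per app cluster, with a unique clusterID/claimNamespace in each values file.
-up controlplane connector install my-control-plane app-cluster-1 --file=values-cluster1.yaml
-up controlplane connector install my-control-plane app-cluster-2 --file=values-cluster2.yaml
-```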
-
-
-[kubeconfig]: /manuals/cli/howtos/context-config/#generate-a-kubeconfig-for-a-control-plane-in-a-group
-[kubeconfig-1]: /spaces/concepts/control-planes/#connect-directly-to-your-control-plane
-[these-instructions]: /manuals/console/#create-a-personal-access-token
-[kubernetes-api-aggregationlayer]: https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation/
-[configuration-eks]: https://github.com/upbound/configuration-eks
-[an-example]: https://github.com/upbound/configuration-eks/blob/9f86b6d/.up/examples/cluster.yaml
diff --git a/spaces_versioned_docs/version-v1.13/howtos/debugging-a-ctp.md b/spaces_versioned_docs/version-v1.13/howtos/debugging-a-ctp.md
deleted file mode 100644
index 521271e40..000000000
--- a/spaces_versioned_docs/version-v1.13/howtos/debugging-a-ctp.md
+++ /dev/null
@@ -1,128 +0,0 @@
----
-title: Debugging issues on a control plane
-sidebar_position: 70
-description: A guide for how to debug resources on a control plane running in Upbound.
----
-
-This guide provides troubleshooting guidance for how to identify and fix issues on a control plane.
-
-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+). Debugging methods are consistent across versions.
-
-For related control plane specifications, see the [Spaces API Reference](../../../reference/apis/spaces-api/).
-:::
-
-## Start from the Upbound Console
-
-
-The Upbound [Console][console] has a built-in control plane explorer that
-surfaces status and events for the resources on your control plane. The
-explorer is claim-based: resources appear in this view only if they exist in the
-reference chain originating from a claim. This view is a helpful starting point
-if you're attempting to debug an issue originating from a claim.
-
-:::tip
-If you directly create Crossplane Managed Resources (`MR`s) or Composite
-Resources (`XR`s), they won't render in the explorer.
-:::
-
-### Example
-
-The example below uses the control plane explorer view to inspect why a claim for an EKS cluster isn't healthy.
-
-#### Check the health status of claims
-
-Two claims branch from the API type card: one shows a healthy green icon, while the other shows an unhealthy red icon.
-
-![Use control plane explorer view to see status of claims](/img/debug-overview.png)
-
-Select `More details` on the unhealthy claim card and Upbound shows details for the claim.
-
-![Use control plane explorer view to see details of claims](/img/debug-claim-more-details.png)
-
-Looking at the three events for this claim:
-
-- **ConfigureCompositeResource**: this event indicates Upbound created the claimed Composite Resource (`XR`).
-
-- **BindCompositeResource**: this indicates the Composite Resource (`XR`) that's being "claimed" isn't ready yet. A claim doesn't show `HEALTHY` until the XR it references is ready.
-
-- **ConfigureCompositeResource**: the error saying `cannot apply composite resource...the object has been modified; please apply your changes to the latest version and try again` is a generic event from Crossplane resources. It's safe to ignore this error.
-
-Next, look at the `status` field of the rendered YAML for the resource.
-
-![Use control plane explorer view to see status details of claims](/img/debug-claim-status.png)
-
-The status reports a similar message as the event stream: this claim is waiting for a Composite Resource to be ready.
-Based on this, investigate the Composite Resource referenced by this claim next.
-
-#### Check the health status of the Composite Resource
-
-
-The control plane explorer only shows the claim cards by default. Selecting the claim card renders the rest of the Crossplane resource tree associated with the selected claim.
-
-
-The previous claim expands into this screenshot:
-
-![Use control plane explorer view to expand tree of claim](/img/debug-claim-expansion.png)
-
-This renders the XR referenced by the claim (along with all its references). You can see the XR is showing the same unhealthy status icon in its card. Notice the XR itself has two nested XRs. One of the nested XRs shows a healthy green icon on its card, while the other shows an unhealthy red icon. Like the claim, a Composite Resource doesn't show healthy until all referenced resources also show healthy.
-
-#### Inspecting Managed Resources
-
-Selecting `more details` to inspect one of the unhealthy Managed Resources shows the following:
-
-![Use control plane explorer view to view events for an MR](/img/debug-mr-event.png)
-
-This event reveals it's unhealthy because it's waiting on a reference to another Managed Resource. Searching the rendered YAML of the MR for this resource shows the following:
-
-![Use control plane explorer view to view status for an MR](/img/debug-mr-status.png)
-
-The rendered YAML shows this MR is referencing a sibling MR that shares the same controller. The same parent XR created both of these managed resources. Inspect the sibling MR to see what its status is.
-
-![Use control plane explorer view to view status for a sibling MR](/img/debug-mr-dependency-status.png)
-
-The sibling MR event stream shows the Provider processed the resource create request. Ignore the `CannotInitializeManagedResource` event. EKS clusters can take 15 minutes or more to provision in AWS. The root cause is that everything is fine -- all the resources are still provisioning. Waiting longer and then looking at the control plane explorer again shows all resources are healthy. For reference, below is an example status field for a resource that's healthy and provisioned.
-
-```yaml
-...
-status:
-  atProvider:
-    id: team-b-app-cluster-bhwfb-hwtgs-20230403135452772300000008
-  conditions:
-    - lastTransitionTime: '2023-04-03T13:56:35Z'
-      reason: Available
-      status: 'True'
-      type: Ready
-    - lastTransitionTime: '2023-04-03T13:54:02Z'
-      reason: ReconcileSuccess
-      status: 'True'
-      type: Synced
-    - lastTransitionTime: '2023-04-03T13:54:53Z'
-      reason: Success
-      status: 'True'
-      type: LastAsyncOperation
-    - lastTransitionTime: '2023-04-03T13:54:53Z'
-      reason: Finished
-      status: 'True'
-      type: AsyncOperation
-```
-
-### Control plane explorer limitations
-
-The control plane explorer view is currently designed around claims. It doesn't inspect other Crossplane resources. To inspect other Crossplane resources, use the `up` CLI.
-
-Some examples of Crossplane resources that require the `up` CLI:
-
-- Managed Resources that aren't associated with a claim
-- Composite Resources that aren't associated with a claim
-- The status of _deleting_ resources
-- ProviderConfigs
-- Provider events
-
-## Use direct CLI access
-
-If you prefer a terminal over a GUI, Upbound supports direct access to the API server of the control plane. Use [`up ctx`][up-ctx] to connect directly to your control plane.
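-
-A minimal sketch of that flow, covering the cases the explorer skips (the context path is hypothetical, and `managed` relies on Crossplane's built-in resource category):
-
-```bash
-# Point kubectl at the control plane, then inspect resources directly.
-up ctx acme/upbound-gcp-us-west-1/default/my-control-plane
-kubectl get managed                              # includes MRs not tied to a claim
-kubectl get providerconfigs                      # provider-specific group, e.g. aws.upbound.io
-kubectl get events --field-selector type=Warning # surface provider events
-```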
-
-
-[console]: /manuals/console/upbound-console
-[up-ctx]: /reference/cli-reference
diff --git a/spaces_versioned_docs/version-v1.13/howtos/managed-service.md b/spaces_versioned_docs/version-v1.13/howtos/managed-service.md
deleted file mode 100644
index 40b983a76..000000000
--- a/spaces_versioned_docs/version-v1.13/howtos/managed-service.md
+++ /dev/null
@@ -1,23 +0,0 @@
----
-title: Managed Upbound control planes
-description: "Learn about the managed service capabilities of a Space"
-sidebar_position: 10
----
-
-Control planes in Upbound are fully isolated [Upbound Crossplane][uxp] instances
-that Upbound manages for you. This means Upbound handles:
-
-- the lifecycle of the underlying infrastructure (compute, memory, and storage) required to power your instance.
-- scaling of that infrastructure.
-- the maintenance of the core Upbound Crossplane components that make up a control plane.
-
-This lets users focus on building their APIs and operating their control planes,
-while Upbound handles the rest. Each control plane has its own dedicated API
-server connecting users to their control plane.
-
-## Learn about Upbound control planes
-
-Read the [concept][ctp-concept] documentation to learn about Upbound control planes.
-
-[uxp]: /manuals/uxp/overview
-[ctp-concept]: /spaces/concepts/control-planes
\ No newline at end of file
diff --git a/spaces_versioned_docs/version-v1.13/howtos/mcp-connector-guide.md b/spaces_versioned_docs/version-v1.13/howtos/mcp-connector-guide.md
deleted file mode 100644
index 8a3866d07..000000000
--- a/spaces_versioned_docs/version-v1.13/howtos/mcp-connector-guide.md
+++ /dev/null
@@ -1,169 +0,0 @@
----
-title: Consume control plane APIs in an app cluster with control plane connector
-sidebar_position: 99
-description: A tutorial to configure a Kubernetes app cluster to consume control
-  plane APIs with the control plane connector
----
-
-In this tutorial, you learn how to configure a Kubernetes app cluster to communicate with a control plane in an Upbound self-hosted Space.
-
-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Control plane connector functionality is consistent across versions.
-
-For related API specifications and resources, see the [Spaces API Reference](../../../../../reference/apis/spaces-api/).
-:::
-
-The [control plane connector][control-plane-connector] bridges your Kubernetes application clusters---running outside of Upbound---to your control planes running in Upbound. This allows you to interact with your control plane's API right from the app cluster. The claim APIs you define via `CompositeResourceDefinitions` are available alongside Kubernetes workload APIs like `Pod`. In effect, control plane connector provides the same experience as a locally installed Crossplane.
-
-## Prerequisites
-
-To complete this tutorial, you need the following:
-
-- Have already deployed an Upbound Space.
-- Have already deployed a Kubernetes cluster (referred to as the `app cluster`).
-
-## Create a control plane
-
-Create a new control plane in your self-hosted Space. Run the following command in a terminal:
-
-```bash
-up ctp create my-control-plane
-```
-
-Once the control plane is ready, connect to it.
-
-```bash
-up ctp connect my-control-plane
-```
-
-For convenience, install an Upbound [platform reference Configuration][platform-reference-configuration] from the Marketplace. For production scenarios, replace this with your own Crossplane Configurations or compositions.
- -```bash -up ctp configuration install xpkg.upbound.io/upbound/platform-ref-aws:v1.4.0 -``` - -## Fetch the control plane's connection details - -Run the following command in a terminal: - -```shell -kubectl get secret kubeconfig-my-control-plane -n default -o jsonpath='{.data.kubeconfig}' | base64 -d > kubeconfig-my-control-plane.yaml -``` - -This command saves the kubeconfig for the control plane to a file in your working directory. - -## Install control plane connector in your app cluster - -Switch contexts to your Kubernetes app cluster. To install the control plane connector in your app cluster, you must first provide a secret containing your control plane's kubeconfig at install-time. Run the following command in a terminal: - -:::important -Make sure the following commands are executed against your **app cluster**, not your control plane. -::: - -```bash -kubectl create secret generic kubeconfig-my-control-plane -n kube-system --from-file=kubeconfig=./kubeconfig-my-control-plane.yaml -``` - -Set the environment variable below to configure which namespace _in your control plane_ you wish to sync the app cluster's claims to. - -```shell -export CONNECTOR_CTP_NAMESPACE=app-cluster-1 -``` - -Install the Control Plane Connector in the app cluster and point it to your control plane. - -```bash -up ctp connector install my-control-plane $CONNECTOR_CTP_NAMESPACE --control-plane-secret=kubeconfig-my-control-plane -``` - -## Inspect your app cluster - -After you install Control Plane Connector in the app cluster, you can now see APIs which live on the control plane. You can confirm this is the case by running the following command on your app cluster: - -```bash {copy-lines="1"} -kubectl api-resources | grep upbound - -# The output should look like this: -sqlinstances aws.platform.upbound.io/v1alpha1 true SQLInstance -clusters aws.platformref.upbound.io/v1alpha1 true Cluster -osss observe.platform.upbound.io/v1alpha1 true Oss -apps platform.upbound.io/v1alpha1 true App -``` - -## Claim a database instance on your app cluster - -Create a database claim against the `SQLInstance` API and observe resources get created by your control plane. Apply the following resources to your app cluster: - -```yaml -cat < --output - ``` - - The command exports your existing Crossplane control plane configuration/state into an archive file. - -::: note -By default, the export command doesn't make any changes to your existing Crossplane control plane state, leaving it intact. Use the `--pause-before-export` flag to pause the reconciliation on managed resources before exporting the archive file. - -This safety mechanism ensures the control plane you migrate state to doesn't assume ownership of resources before you're ready. -::: - -2. Use the control plane [create command][create-command] to create a managed -control plane in Upbound: - - ```bash - up controlplane create my-controlplane - ``` - -3. Use [`up ctx`][up-ctx] to connect to the control plane created in the previous step: - - ```bash - up ctx "///my-controlplane" - ``` - - The command configures your local `kubeconfig` to connect to the control plane. - -4. Run the following command to import the archive file into the control plane: - - ```bash - up controlplane migration import --input - ``` - -:::note -By default, the import command leaves the control plane in an inactive state by pausing the reconciliation on managed -resources. This pause gives you an opportunity to review the imported configuration/state before activating the control plane. 
-Use the `--unpause-after-import` flag to change the default behavior and activate the control plane immediately after
-importing the archive file.
-:::
-
-
-
-5. Review and validate the imported configuration/state. When you're ready, activate your managed
-   control plane by running the following command:
-
-   ```bash
-   kubectl annotate managed --all crossplane.io/paused-
-   ```
-
-   At this point, you can delete the source Crossplane control plane.
-
-## CLI options
-
-### Filtering
-
-The migration tool captures the state of a control plane. The only filtering
-supported is by Kubernetes namespace and Kubernetes resource type.
-
-You can exclude namespaces using the `--exclude-namespaces` CLI option. This can prevent the CLI from including unwanted resources in the export.
-
-```bash
---exclude-namespaces=kube-system,kube-public,kube-node-lease,local-path-storage,...
-
-# A list of specific namespaces to exclude from the export. Defaults to 'kube-system', 'kube-public',
-# 'kube-node-lease', and 'local-path-storage'.
-```
-
-You can exclude Kubernetes resource types by using the `--exclude-resources` CLI option:
-
-```bash
---exclude-resources=EXCLUDE-RESOURCES,...
-
-# A list of resource types to exclude from the export in "resource.group" format. No resources are excluded by default.
-```
-
-For example, to exclude the CRDs installed by Crossplane functions (since they're not needed):
-
-```bash
-up controlplane migration export \
-  --exclude-resources=gotemplates.gotemplating.fn.crossplane.io,kclinputs.template.fn.crossplane.io
-```
-
-:::warning
-You must specify resource names in lowercase "resource.group" format (for example, `gotemplates.gotemplating.fn.crossplane.io`). Using only the resource kind (for example, `GoTemplate`) isn't supported.
-:::
-
-
-:::tip Function Input CRDs
-
-Exclude function input CRDs (`inputs.template.fn.crossplane.io`, `resources.pt.fn.crossplane.io`, `gotemplates.gotemplating.fn.crossplane.io`, `kclinputs.template.fn.crossplane.io`) from migration exports. Upbound automatically recreates these resources during import. Function input CRDs typically have owner references to function packages and may have restricted RBAC access. Upbound installs these CRDs during the import when function packages are restored.
-
-:::
-
-
-After export, users can also edit the archive file to only include necessary resources.
-
-### Export non-Crossplane resources
-
-Use the `--include-extra-resources=` CLI option to select other CRD types to include in the export.
-
-### Set the kubecontext
-
-Currently, `--context` isn't supported in the migration CLI. Use the `--kubeconfig` CLI option
-to point at a kubeconfig file that's set to the correct context. For example:
-
-```bash
-up controlplane migration export --kubeconfig 
-```
-
-Use this in tandem with `up ctx` to switch the control plane context stored in a kubeconfig file:
-
-```bash
-up ctx --kubeconfig ~/.kube/config
-
-# To list the current context
-up ctx . --kubeconfig ~/.kube/config
-```
-
-## Export archive
-
-The migration CLI exports an archive upon successful completion. Below is an example export of a control plane that excludes several CRD types and skips the confirmation prompt. A file gets written to the working directory, unless you select another output file:
-
-<details>
-
-<summary>View the example export</summary>
-
-```bash
-$ up controlplane migration export --exclude-resources=gotemplates.gotemplating.fn.crossplane.io,kclinputs.template.fn.crossplane.io --yes
-Exporting control plane state...
-✓ Scanning control plane for types to export... 121 types found! 👀
-✓ Exporting 121 Crossplane resources...60 resources exported! 📤
-✓ Exporting 3 native resources...8 resources exported! 📤
-✓ Archiving exported state... archived to "xp-state.tar.gz"! 📦
-```
-
-</details>
-
-
-When an export occurs, a file named `xp-state.tar.gz` by default gets created in the working directory. You can unzip the file; the contents of the export are all text YAML files.
-
-- Each CRD (for example `vpcs.ec2.aws.upbound.io`) gets its own directory,
-  which contains:
-  - A `metadata.yaml` file that contains Kubernetes object metadata
-  - A list of Kubernetes categories the resource belongs to
-- A `cluster` directory that contains YAML manifests for all resources provisioned
-  using the CRD.
-
-Sample contents for a cluster with a single `XNetwork` Composite from
-[configuration-aws-network][configuration-aws-network] are shown below:
-
-
-<details>
-
-<summary>View the example cluster content</summary>
-
-```bash
-├── compositionrevisions.apiextensions.crossplane.io
-│   ├── cluster
-│   │   ├── kcl.xnetworks.aws.platform.upbound.io-4ca6a8a.yaml
-│   │   └── xnetworks.aws.platform.upbound.io-9859a34.yaml
-│   └── metadata.yaml
-├── configurations.pkg.crossplane.io
-│   ├── cluster
-│   │   └── configuration-aws-network.yaml
-│   └── metadata.yaml
-├── deploymentruntimeconfigs.pkg.crossplane.io
-│   ├── cluster
-│   │   └── default.yaml
-│   └── metadata.yaml
-├── export.yaml
-├── functions.pkg.crossplane.io
-│   ├── cluster
-│   │   ├── crossplane-contrib-function-auto-ready.yaml
-│   │   ├── crossplane-contrib-function-go-templating.yaml
-│   │   └── crossplane-contrib-function-kcl.yaml
-│   └── metadata.yaml
-├── internetgateways.ec2.aws.upbound.io
-│   ├── cluster
-│   │   └── borrelli-backup-test-xgl4q.yaml
-│   └── metadata.yaml
-├── mainroutetableassociations.ec2.aws.upbound.io
-│   ├── cluster
-│   │   └── borrelli-backup-test-t2qh7.yaml
-│   └── metadata.yaml
-├── namespaces
-│   └── cluster
-│       ├── crossplane-system.yaml
-│       ├── default.yaml
-│       └── upbound-system.yaml
-├── providerconfigs.aws.upbound.io
-│   ├── cluster
-│   │   └── default.yaml
-│   └── metadata.yaml
-├── providerconfigusages.aws.upbound.io
-│   ├── cluster
-│   │   ├── 0a2a3ec6-ef13-45f9-9cf0-63af7f4a6b6b.yaml
-...redacted
-│   │   └── f7092b0f-3a78-4bfe-82c8-57e5085a9b11.yaml
-│   └── metadata.yaml
-├── providers.pkg.crossplane.io
-│   ├── cluster
-│   │   ├── upbound-provider-aws-ec2.yaml
-│   │   └── upbound-provider-family-aws.yaml
-│   └── metadata.yaml
-├── routes.ec2.aws.upbound.io
-│   ├── cluster
-│   │   └── borrelli-backup-test-dt9cj.yaml
-│   └── metadata.yaml
-├── routetableassociations.ec2.aws.upbound.io
-│   ├── cluster
-│   │   ├── borrelli-backup-test-mr2sd.yaml
-│   │   ├── borrelli-backup-test-ngq5h.yaml
-│   │   ├── borrelli-backup-test-nrkgg.yaml
-│   │   └── borrelli-backup-test-wq752.yaml
-│   └── metadata.yaml
-├── routetables.ec2.aws.upbound.io
-│   ├── cluster
-│   │   └── borrelli-backup-test-dv4mb.yaml
-│   └── metadata.yaml
-├── secrets
-│   └── namespaces
-│       ├── crossplane-system
-│       │   ├── cert-token-signing-gateway-pub.yaml
-│       │   ├── mxp-hostcluster-certs.yaml
-│       │   ├── package-pull-secret.yaml
-│       │   └── xgql-tls.yaml
-│       └── upbound-system
-│           └── aws-creds.yaml
-├── securitygrouprules.ec2.aws.upbound.io
-│   ├── cluster
-│   │   ├── borrelli-backup-test-472f4.yaml
-│   │   └── borrelli-backup-test-qftmw.yaml
-│   └── metadata.yaml
-├── securitygroups.ec2.aws.upbound.io
-│   ├── cluster
-│   │   └── borrelli-backup-test-w5jch.yaml
-│   └── metadata.yaml
-├── storeconfigs.secrets.crossplane.io
-│   ├── cluster
-│   │   └── default.yaml
-│   └── metadata.yaml
-├── subnets.ec2.aws.upbound.io
-│   ├── cluster
-│   │   ├── borrelli-backup-test-8btj6.yaml
-│   │   ├── borrelli-backup-test-gbmrm.yaml
-│   │   ├── borrelli-backup-test-m7kh7.yaml
-│   │   └── borrelli-backup-test-nttt5.yaml
-│   └── metadata.yaml
-├── vpcs.ec2.aws.upbound.io
-│   ├── cluster
-│   │   └── borrelli-backup-test-7hwgh.yaml
-│   └── metadata.yaml
-└── xnetworks.aws.platform.upbound.io
-    ├── cluster
-    │   └── borrelli-backup-test.yaml
-    └── metadata.yaml
-43 directories, 87 files
-```
-
-</details>
-
-
-The `export.yaml` file contains metadata about the export, including the export configuration, Crossplane information, and what's included in the export bundle.
-
-<details>
-
-<summary>View the export</summary>
-
-```yaml
-version: v1alpha1
-exportedAt: 2025-01-06T17:39:53.173222Z
-options:
-  excludedNamespaces:
-    - kube-system
-    - kube-public
-    - kube-node-lease
-    - local-path-storage
-  includedResources:
-    - namespaces
-    - configmaps
-    - secrets
-  excludedResources:
-    - gotemplates.gotemplating.fn.crossplane.io
-    - kclinputs.template.fn.crossplane.io
-crossplane:
-  distribution: universal-crossplane
-  namespace: crossplane-system
-  version: 1.17.3-up.1
-  featureFlags:
-    - --enable-provider-identity
-    - --enable-environment-configs
-    - --enable-composition-functions
-    - --enable-usages
-stats:
-  total: 68
-  nativeResources:
-    configmaps: 0
-    namespaces: 3
-    secrets: 5
-  customResources:
-    amicopies.ec2.aws.upbound.io: 0
-    amilaunchpermissions.ec2.aws.upbound.io: 0
-    amis.ec2.aws.upbound.io: 0
-    availabilityzonegroups.ec2.aws.upbound.io: 0
-    capacityreservations.ec2.aws.upbound.io: 0
-    carriergateways.ec2.aws.upbound.io: 0
-    compositeresourcedefinitions.apiextensions.crossplane.io: 0
-    compositionrevisions.apiextensions.crossplane.io: 2
-    compositions.apiextensions.crossplane.io: 0
-    configurationrevisions.pkg.crossplane.io: 0
-    configurations.pkg.crossplane.io: 1
-...redacted
-```
-
-</details>
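-
-To inspect an export locally, you can list or unpack the archive; a quick sketch, assuming the default `xp-state.tar.gz` output name:
-
-```bash
-tar -tzf xp-state.tar.gz | head                    # list archived paths
-mkdir xp-state && tar -xzf xp-state.tar.gz -C xp-state
-cat xp-state/export.yaml                           # review the export metadata
-```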
-
-### Skipped resources
-
-In addition to the resources excluded via CLI options, the following resources aren't
-included in the backup:
-
-- The `kube-root-ca.crt` ConfigMap, since this is cluster-specific
-- Resources directly managed via Helm. (Resources from ArgoCD's Helm implementation, which templates
-  Helm resources and then applies them, do get included in the backup.) The migration creates the exclusion list by looking for:
-  - Any resource with the label `"app.kubernetes.io/managed-by" == "Helm"`
-  - Kubernetes Secrets with the label prefix `helm.sh/release`. For example, `helm.sh/release.v1`
-- Resources installed via a Crossplane package. These have an `ownerReference` with
-  a prefix `pkg.crossplane.io`. The expectation is that during import, the Crossplane Package Manager bears responsibility for installing the resources.
-- Crossplane Locks: Any `Lock.pkg.crossplane.io` resource isn't included in the
-  export.
-
-## Restore
-
-The following is an example of a successful import run. At the end of the import, all Managed Resources are in a paused state.
-
-<details>
-
-<summary>View the migration import</summary>
-
-```bash
-$ up controlplane migration import
-Importing control plane state...
-✓ Reading state from the archive... Done! 👀
-✓ Importing base resources... 18 resources imported! 📥
-✓ Waiting for XRDs... Established! ⏳
-✓ Waiting for Packages... Installed and Healthy! ⏳
-✓ Importing remaining resources... 50 resources imported! 📥
-✓ Finalizing import... Done! 🎉
-```
-
-</details>
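-
-Before resuming reconciliation, it can help to confirm what was restored and that resources really are paused; a minimal sketch against the target control plane:
-
-```bash
-kubectl get managed   # imported MRs are listed, all paused
-kubectl get managed -o jsonpath='{.items[*].metadata.annotations.crossplane\.io/paused}'
-# expect a row of "true" values before unpausing
-```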
-
-Your scenario may involve migrating resources which already exist through other automation on the platform. When executing an import in these circumstances, the importer applies the new manifests to the cluster. If the resource already exists, the restore sets fields to what's in the backup.
-
-The importer restores all resources in the export archive. Managed Resources get imported with the `crossplane.io/paused: "true"` annotation set. Use the `--unpause-after-import` CLI argument to automatically un-pause resources that got
-paused during backup, or remove the annotation manually.
-
-### Restore order
-
-The importer restores based on Kubernetes types. The restore order doesn't include parent/child relationships.
-
-Because Crossplane Composites create new Managed Resources if not present on the cluster, all
-Claims, Composites, and Managed Resources get imported in a paused state. You can un-pause them after the restore completes.
-
-The first step of import is installing Base Resources into the cluster. These resources (such as
-packages and XRDs) must be ready before proceeding with the import.
-Base Resources are:
-
-- Kubernetes Resources
-  - ConfigMaps
-  - Namespaces
-  - Secrets
-- Crossplane Resources
-  - ControllerConfigs: `controllerconfigs.pkg.crossplane.io`
-  - DeploymentRuntimeConfigs: `deploymentruntimeconfigs.pkg.crossplane.io`
-  - StoreConfigs: `storeconfigs.secrets.crossplane.io`
-- Crossplane Packages
-  - Providers: `providers.pkg.crossplane.io`
-  - Functions: `functions.pkg.crossplane.io`
-  - Configurations: `configurations.pkg.crossplane.io`
-
-Restore waits for the base resources to be `Ready` before moving on to the next step. Next, restore walks through the archive and restores all the manifests present.
-
-During import, the `crossplane.io/paused` annotation gets added to Managed Resources, Claims,
-and Composites.
-
-To manually un-pause managed resources after an import, remove the annotation by running:
-
-```bash
-kubectl annotate managed --all crossplane.io/paused-
-```
-
-You can also run import again with the `--unpause-after-import` flag to remove the annotations.
-
-```bash
-up controlplane migration import --unpause-after-import
-```
-
-### Restoring resource status
-
-The importer applies the status of all resources during import. The importer determines whether the CRD version has a status field defined based on the stored CRD version.
-
-
-[cli-command]: /reference/cli-reference
-[up-cli]: /reference/cli-reference
-[up-cli-1]: /manuals/cli/overview
-[create-command]: /reference/cli-reference
-[up-ctx]: /reference/cli-reference
-[configuration-aws-network]: https://marketplace.upbound.io/configurations/upbound/configuration-aws-network
diff --git a/spaces_versioned_docs/version-v1.13/howtos/observability.md b/spaces_versioned_docs/version-v1.13/howtos/observability.md
deleted file mode 100644
index 8fc5c3278..000000000
--- a/spaces_versioned_docs/version-v1.13/howtos/observability.md
+++ /dev/null
@@ -1,395 +0,0 @@
----
-title: Observability
-sidebar_position: 50
-description: A guide for how to use the integrated observability pipeline feature
-  in a Space.
-plan: "enterprise"
----
-
-
-
-This guide explains how to configure observability in Upbound Spaces. Upbound
-provides integrated observability features built on
-[OpenTelemetry][opentelemetry] to collect, process, and export logs, metrics,
-and traces.
-
-Upbound Spaces offers two levels of observability:
-
-1. **Space-level observability** - Observes the cluster infrastructure where Spaces software is installed (self-hosted only)
-2. **Control plane observability** - Observes workloads running within individual control planes
-
-
-
-
-
-:::info API Version Information & Version Selector
-This guide applies to all supported versions (v1.9-v1.15+). Observability features evolved:
-
-- **v1.9-v1.12**: SharedTelemetryConfig available (alpha until v1.11)
-- **v1.11+**: Observability promoted to stable with logs export support
-- **v1.14+**: Both space-level and control-plane observability GA
-
-**View API Reference for Your Version**:
-| Version | Status | Link |
-|---------|--------|------|
-| v1.15 | Latest | [Spaces API v1.15](../../../next/reference/apis/spaces-api/) |
-| v1.14 | Supported | [Spaces API v1.14](../../../1.14/reference/apis/spaces-api/) |
-| v1.13 | Supported | [Spaces API v1.13](../../../1.13/reference/apis/spaces-api/) |
-| v1.12 | Supported | [Spaces API v1.12](../../../1.12/reference/apis/spaces-api/) |
-| v1.11 | Supported | [Spaces API v1.11](../../../1.11/reference/apis/spaces-api/) |
-| v1.10 | Limited | [Spaces API v1.10](../../../1.10/reference/apis/spaces-api/) |
-| v1.9 | EOL | [Spaces API v1.9](../../../1.9/reference/apis/spaces-api/) |
-:::
-
-:::important
-**Space-level observability** (available since v1.6.0, GA in v1.14.0):
-- Disabled by default
-- Requires manual enablement and configuration
-- Self-hosted Spaces only
-
-**Control plane observability** (available since v1.13.0, GA in v1.14.0):
-- Enabled by default
-- No additional configuration required
-:::
-
-
-
-
-## Prerequisites
-
-
-**Control plane observability** is enabled by default. No additional setup is
-required.
-
-
-
-### Self-hosted Spaces
-
-1. **Enable the observability feature** when installing Spaces:
-   ```bash
-   up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
-     ...
-     --set "observability.enabled=true"
-   ```
-
-   Set `features.alpha.observability.enabled=true` instead if using a Spaces version
-   before `v1.14.0`.
-
-2. **Install OpenTelemetry Operator** (required for Space-level observability):
-   ```bash
-   kubectl apply -f https://github.com/open-telemetry/opentelemetry-operator/releases/download/v0.116.0/opentelemetry-operator.yaml
-   ```
-
-   :::important
-   If running Spaces `v1.11` or later, use OpenTelemetry Operator `v0.110.0` or later due to breaking changes.
-   :::
-
-
-## Space-level observability
-
-Space-level observability is only available for self-hosted Spaces and allows
-administrators to observe the cluster infrastructure.
-
-### Configuration
-
-Configure Space-level observability using the `spacesCollector` value in your
-Spaces Helm chart:
-
-```yaml
-observability:
-  spacesCollector:
-    config:
-      exporters:
-        otlphttp:
-          endpoint: ""
-          headers:
-            api-key: YOUR_API_KEY
-      exportPipeline:
-        logs:
-          - otlphttp
-        metrics:
-          - otlphttp
-```
-
-This configuration exports metrics and logs from:
-
-- the Crossplane installation
-- the Spaces infrastructure (controller, API, router, etc.)
-
-### Router metrics
-
-The Spaces router uses Envoy as a reverse proxy and automatically exposes
-metrics when you enable Space-level observability.
-These metrics provide visibility into:
-
-- Traffic routing to control planes and services
-- Request status codes, timeouts, and retries
-- Circuit breaker state preventing cascading failures
-- Client connection patterns and request volume
-- Request latency (P50, P95, P99)
-
-For more information about available metrics, example queries, and how to enable
-this feature, see the [Space-level observability guide][space-level-o11y].
-
-## Control plane observability
-
-Control plane observability collects telemetry data from workloads running
-within individual control planes using `SharedTelemetryConfig` resources.
-
-The pipeline deploys [OpenTelemetry Collectors][opentelemetry-collectors] per
-control plane, defined by a `SharedTelemetryConfig` at the group level.
-Collectors pass data to external observability backends.
-
-:::important
-From Spaces `v1.13` and beyond, telemetry only includes user-facing control
-plane workloads (Crossplane, providers, functions).
-
-Self-hosted users can include system workloads (`api-server`, `etcd`) by setting
-`observability.collectors.includeSystemTelemetry=true` in Helm.
-:::
-
-:::important
-Spaces validates `SharedTelemetryConfig` resources before applying them by
-sending telemetry to configured exporters. For self-hosted Spaces, ensure that
-`spaces-controller` can reach the exporter endpoints.
-:::
-
-### `SharedTelemetryConfig`
-
-`SharedTelemetryConfig` is a group-scoped custom resource that defines telemetry
-configuration for control planes.
-
-#### New Relic example
-
-```yaml
-apiVersion: observability.spaces.upbound.io/v1alpha1
-kind: SharedTelemetryConfig
-metadata:
-  name: newrelic
-  namespace: default
-spec:
-  controlPlaneSelector:
-    labelSelectors:
-      - matchLabels:
-          org: foo
-  exporters:
-    otlphttp:
-      endpoint: https://otlp.nr-data.net
-      headers:
-        api-key: YOUR_API_KEY
-  exportPipeline:
-    metrics: [otlphttp]
-    traces: [otlphttp]
-    logs: [otlphttp]
-```
-
-#### Datadog example
-
-```yaml
-apiVersion: observability.spaces.upbound.io/v1alpha1
-kind: SharedTelemetryConfig
-metadata:
-  name: datadog
-  namespace: default
-spec:
-  controlPlaneSelector:
-    labelSelectors:
-      - matchLabels:
-          org: foo
-  exporters:
-    datadog:
-      api:
-        site: ${DATADOG_SITE}
-        key: ${DATADOG_API_KEY}
-  exportPipeline:
-    metrics: [datadog]
-    traces: [datadog]
-    logs: [datadog]
-```
-
-### Control plane selection
-
-Use `spec.controlPlaneSelector` to specify which control planes should use the
-telemetry configuration.
-
-#### Label-based selection
-
-```yaml
-spec:
-  controlPlaneSelector:
-    labelSelectors:
-      - matchLabels:
-          environment: production
-```
-
-#### Expression-based selection
-
-```yaml
-spec:
-  controlPlaneSelector:
-    labelSelectors:
-      - matchExpressions:
-          - { key: environment, operator: In, values: [production, staging] }
-```
-
-#### Name-based selection
-
-```yaml
-spec:
-  controlPlaneSelector:
-    names:
-      - controlplane-dev
-      - controlplane-staging
-      - controlplane-prod
-```
-
-### Manage sensitive data
-
-:::important
-Available from Spaces `v1.10`
-:::
-
-Store sensitive data in Kubernetes secrets and reference them in your
-`SharedTelemetryConfig`:
-
-1. **Create the secret:**
-   ```bash
-   kubectl create secret generic sensitive -n  \
-     --from-literal=apiKey='YOUR_API_KEY'
-   ```
-
-2. **Reference it in the SharedTelemetryConfig:**
-   ```yaml
-   apiVersion: observability.spaces.upbound.io/v1alpha1
-   kind: SharedTelemetryConfig
-   metadata:
-     name: newrelic
-   spec:
-     configPatchSecretRefs:
-       - name: sensitive
-         key: apiKey
-         path: exporters.otlphttp.headers.api-key
-     controlPlaneSelector:
-       labelSelectors:
-         - matchLabels:
-             org: foo
-     exporters:
-       otlphttp:
-         endpoint: https://otlp.nr-data.net
-         headers:
-           api-key: dummy # Replaced by secret value
-     exportPipeline:
-       metrics: [otlphttp]
-       traces: [otlphttp]
-       logs: [otlphttp]
-   ```
-
-### Telemetry processing
-
-:::important
-Available from Spaces `v1.11`
-:::
-
-Configure processing pipelines to transform telemetry data using the [transform
-processor][transform-processor].
-
-#### Add labels to metrics
-
-```yaml
-spec:
-  processors:
-    transform:
-      error_mode: ignore
-      metric_statements:
-        - context: datapoint
-          statements:
-            - set(attributes["newLabel"], "someLabel")
-  processorPipeline:
-    metrics: [transform]
-```
-
-#### Remove labels
-
-From metrics:
-```yaml
-processors:
-  transform:
-    metric_statements:
-      - context: datapoint
-        statements:
-          - delete_key(attributes, "kubernetes_namespace")
-```
-
-From logs:
-```yaml
-processors:
-  transform:
-    log_statements:
-      - context: log
-        statements:
-          - delete_key(attributes, "log.file.name")
-```
-
-#### Modify log messages
-
-```yaml
-processors:
-  transform:
-    log_statements:
-      - context: log
-        statements:
-          - set(attributes["original"], body)
-          - set(body, Concat(["log message:", body], " "))
-```
-
-### Monitor status
-
-Check the status of your `SharedTelemetryConfig`:
-
-```bash
-kubectl get stc
-NAME      SELECTED   FAILED   PROVISIONED   AGE
-datadog   1          0        1             63s
-```
-
-- `SELECTED`: Number of control planes selected
-- `FAILED`: Number of control planes that failed provisioning
-- `PROVISIONED`: Number of successfully running collectors
-
-For detailed status information:
-
-```bash
-kubectl describe stc 
-```
-
-## Supported exporters
-
-Both Space-level and control plane observability support:
-
-- `datadog` - Datadog integration
-- `otlphttp` - General-purpose OTLP/HTTP exporter (used by New Relic, among others)
-- `debug` - Debug output for troubleshooting
-
-## Considerations
-
-- **Control plane conflicts**: Each control plane can only use one `SharedTelemetryConfig`. Multiple configs selecting the same control plane conflict.
-- **Custom collector image**: Both Space-level and control plane observability use the same custom OpenTelemetry Collector image with supported exporters.
-- **Resource scope**: `SharedTelemetryConfig` resources are group-scoped, allowing different telemetry configurations per group.
-
-For more advanced configuration options, review the [Helm chart
-reference][helm-chart-reference] and the [OpenTelemetry Transformation Language
-documentation][opentelemetry-transformation-language].
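-
-When a pipeline doesn't behave as expected, a quick way to isolate it is a config that routes through the `debug` exporter mentioned above; a minimal sketch, where the name and selector are hypothetical:
-
-```yaml
-apiVersion: observability.spaces.upbound.io/v1alpha1
-kind: SharedTelemetryConfig
-metadata:
-  name: debug-pipeline
-  namespace: default
-spec:
-  controlPlaneSelector:
-    names:
-      - controlplane-dev
-  exporters:
-    debug: {}          # print telemetry instead of shipping it to a backend
-  exportPipeline:
-    metrics: [debug]
-    logs: [debug]
-```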
-
-
-[opentelemetry]: https://opentelemetry.io/
-[opentelemetry-collectors]: https://opentelemetry.io/docs/collector/
-[opentelemetry-collector-configuration]: https://opentelemetry.io/docs/collector/configuration/#exporters
-[opentelemetry-operator]: https://opentelemetry.io/docs/kubernetes/operator/
-[transform-processor]: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/transformprocessor/README.md
-[opentelemetry-transformation-language]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl
-[space-level-o11y]: /spaces/howtos/self-hosted/space-observability
-[helm-chart-reference]: /reference/helm-reference
-[opentelemetry-transformation-language-functions]: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/ottl/ottlfuncs/README.md
-[opentelemetry-transformation-language-contexts]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl/contexts
-[guide-on-ottl]: https://betterstack.com/community/guides/observability/ottl/#a-brief-overview-of-the-ottl-grammar
diff --git a/spaces_versioned_docs/version-v1.13/howtos/query-api.md b/spaces_versioned_docs/version-v1.13/howtos/query-api.md
deleted file mode 100644
index 78163de2f..000000000
--- a/spaces_versioned_docs/version-v1.13/howtos/query-api.md
+++ /dev/null
@@ -1,320 +0,0 @@
----
-title: Query API
-sidebar_position: 40
-description: Use the `up` CLI to query objects and resources
----
-
-
-
-
-Upbound's Query API allows users to inspect objects and resources within their control planes. The read-only `up alpha query` and `up alpha get` CLI commands let you gather information about your control planes quickly and efficiently. These commands follow the [`kubectl` conventions][kubectl-conventions] for filtering, sorting, and retrieving information from your Space.
-
-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+). Query API is available in Cloud Spaces since v1.6 and in self-hosted Spaces since v1.8.
-
-For detailed deployment procedures, see [Deploy Query API infrastructure - Self-Hosted](./self-hosted/query-api.md).
-:::
-
-
-
-## Using the Query API
-
-
-The Query API allows you to retrieve control plane information faster than traditional `kubectl` commands. This feature lets you debug your Crossplane resources with the CLI or within the Upbound Console's enhanced management views.
-
-### Query within a single control plane
-
-Use the `up alpha get` command to retrieve information about objects within the current control plane context. This command uses the **Query** endpoint and targets the current control plane.
-
-To switch between control plane groups, use the [`up ctx`][up-ctx] command and change to your desired context with the interactive prompt, or specify your control plane path directly:
-
-```shell
-up ctx ///
-```
-
-You can query within a single control plane with the [`up alpha get` command][up-alpha-get-command] to return more information about a given object within the current kubeconfig context.
-
-The `up alpha get` command can query resource types and aliases to return objects in your control plane.
-
-```shell
-up alpha get managed
-NAME                             READY   SYNCED   AGE
-custom-account1-5bv5j-sa         True    True     15m
-custom-cluster1-bq6dk-net        True    True     15m
-custom-account1-5bv5j-subnet     True    True     15m
-custom-cluster1-bq6dk-nodepool   True    True     15m
-custom-cluster1-bq6dk-cluster    True    True     15m
-custom-account1-5bv5j-net        True    True     15m
-custom-cluster1-bq6dk-subnet     True    True     15m
-custom-cluster1-bq6dk-sa         True    True     15m
-```
-
-The [`-A` flag][a-flag] queries for objects across all namespaces.
-
-```shell
-up alpha get configmaps -A
-NAMESPACE           NAME                                                   AGE
-crossplane-system   uxp-versions-config                                    18m
-crossplane-system   universal-crossplane-config                            18m
-crossplane-system   kube-root-ca.crt                                       18m
-upbound-system      kube-root-ca.crt                                       18m
-kube-system         kube-root-ca.crt                                       18m
-kube-system         coredns                                                18m
-default             kube-root-ca.crt                                       18m
-kube-node-lease     kube-root-ca.crt                                       18m
-kube-public         kube-root-ca.crt                                       18m
-kube-system         kube-apiserver-legacy-service-account-token-tracking   18m
-kube-system         extension-apiserver-authentication                     18m
-```
-
-To query for [multiple resource types][multiple-resource-types], you can add the name or alias for the resource as a comma separated string.
-
-```shell
-up alpha get providers,providerrevisions
-
-NAME                                                                              HEALTHY   REVISION   IMAGE                                                     STATE    DEP-FOUND   DEP-INSTALLED   AGE
-providerrevision.pkg.crossplane.io/crossplane-contrib-provider-nop-ecc25c121431   True      1          xpkg.upbound.io/crossplane-contrib/provider-nop:v0.2.1   Active                               18m
-NAME                                                          INSTALLED   HEALTHY   PACKAGE                                                   AGE
-provider.pkg.crossplane.io/crossplane-contrib-provider-nop   True        True      xpkg.upbound.io/crossplane-contrib/provider-nop:v0.2.1   18m
-```
-
-### Query multiple control planes
-
-The [`up alpha query` command][up-alpha-query-command] returns a list of objects of any kind within all the control planes in your Space. This command uses either the **SpaceQuery** or **GroupQuery** endpoints depending on your query scope. The `-A` flag switches the query context from the group level to the entire Space.
-
-The `up alpha query` command accepts resources and aliases to return objects across your group or Space.
-
-```shell
-up alpha query crossplane
-
-NAME                                                                                          ESTABLISHED   OFFERED   AGE
-compositeresourcedefinition.apiextensions.crossplane.io/xnetworks.platform.acme.co           True          True      20m
-compositeresourcedefinition.apiextensions.crossplane.io/xaccountscaffolds.platform.acme.co   True          True      20m
-
-
-NAME                                                                          XR-KIND            XR-APIVERSION               AGE
-composition.apiextensions.crossplane.io/xaccountscaffolds.platform.acme.co   XAccountScaffold   platform.acme.co/v1alpha1   20m
-composition.apiextensions.crossplane.io/xnetworks.platform.acme.co           XNetwork           platform.acme.co/v1alpha1   20m
-
-
-NAME                                                                                          REVISION   XR-KIND            XR-APIVERSION               AGE
-compositionrevision.apiextensions.crossplane.io/xaccountscaffolds.platform.acme.co-5ae9da5   1          XAccountScaffold   platform.acme.co/v1alpha1   20m
-compositionrevision.apiextensions.crossplane.io/xnetworks.platform.acme.co-414ce80           1          XNetwork           platform.acme.co/v1alpha1   20m
-
-NAME                                                           READY   SYNCED   AGE
-nopresource.nop.crossplane.io/custom-cluster1-bq6dk-subnet     True    True     19m
-nopresource.nop.crossplane.io/custom-account1-5bv5j-net        True    True     19m
-
-## Output truncated...
-
-```
-
-
-The [`--sort-by` flag][sort-by-flag] lets you control the order of the returned information. You can express
-the sort order as a JSONPath expression that resolves to a string or integer.
- - -```shell -up alpha query crossplane -A --sort-by="{.metadata.name}" - -CONTROLPLANE NAME AGE -default/test deploymentruntimeconfig.pkg.crossplane.io/default 10m - -CONTROLPLANE NAME AGE TYPE DEFAULT-SCOPE -default/test storeconfig.secrets.crossplane.io/default 10m Kubernetes crossplane-system -``` - -To query for multiple resource types, you can add the name or alias for the resource as a comma separated string. - -```shell -up alpha query namespaces,configmaps -A - -CONTROLPLANE NAME AGE -default/test namespace/upbound-system 15m -default/test namespace/crossplane-system 15m -default/test namespace/kube-system 16m -default/test namespace/default 16m - -CONTROLPLANE NAMESPACE NAME AGE -default/test crossplane-system configmap/uxp-versions-config 15m -default/test crossplane-system configmap/universal-crossplane-config 15m -default/test crossplane-system configmap/kube-root-ca.crt 15m -default/test upbound-system configmap/kube-root-ca.crt 15m -default/test kube-system configmap/coredns 16m -default/test default configmap/kube-root-ca.crt 16m - -## Output truncated... - -``` - -The Query API also allows you to return resource types with specific [label columns][label-columns]. - -```shell -up alpha query composite -A --label-columns=crossplane.io/claim-namespace - -CONTROLPLANE NAME SYNCED READY COMPOSITION AGE CLAIM-NAMESPACE -query-api-test/test xeks.argo.discover.upbound.io/test-k7xbk False xeks.argo.discover.upbound.io 51d default - -CONTROLPLANE NAME EXTERNALDNS SYNCED READY COMPOSITION AGE CLAIM-NAMESPACE -spaces-clusters/controlplane-query-api-test-spaces-playground xexternaldns.externaldns.platform.upbound.io/spaces-cluster-0-xd8v2-lhnl7 6.34.2 True True xexternaldns.externaldns.platform.upbound.io 19d default -default/query-api-test xexternaldns.externaldns.platform.upbound.io/space-awg-kine-f7dxq-nkk2q 6.34.2 True True xexternaldns.externaldns.platform.upbound.io 55d default - -## Output truncated... - -``` - -### Query API request format - -The CLI can also return a version of your query request with the [`--debug` flag][debug-flag]. This flag returns the API spec request for your query. - -```shell -up alpha query composite -A -d - -apiVersion: query.spaces.upbound.io/v1alpha1 -kind: SpaceQuery -metadata: - creationTimestamp: null -spec: - cursor: true - filter: - categories: - - composite - controlPlane: {} - limit: 500 - objects: - controlPlane: true - table: {} - page: {} -``` - -For more complex queries, you can interact with the Query API like a Kubernetes-style API by creating a query and applying it with `kubectl`. - -The example below is a query for `claim` resources in every control plane from oldest to newest and returns specific information about those claims. - - -```yaml -apiVersion: query.spaces.upbound.io/v1alpha1 -kind: SpaceQuery -spec: - filter: - categories: - - claim - order: - - creationTimestamp: Asc - cursor: true - count: true - objects: - id: true - controlPlane: true - object: - kind: true - apiVersion: true - metadata: - name: true - uid: true - spec: - containers: - image: true -``` - - -The Query API is served by the Spaces API endpoint. You can use `up ctx` to -switch the kubectl context to the Spaces API ingress. After that, you can use -`kubectl create` and receive the `response` for your query parameters. 
-
-
-```shell
-kubectl create -f spaces-query.yaml -o yaml
-```
-
-Your `response` should look similar to this example:
-
-```yaml {copy-lines="none"}
-apiVersion: query.spaces.upbound.io/v1alpha1
-kind: SpaceQuery
-metadata:
-  creationTimestamp: "2024-08-08T14:41:46Z"
-  name: default
-response:
-  count: 3
-  cursor:
-    next: ""
-    page: 0
-    pageSize: 100
-    position: 0
-  objects:
-  - controlPlane:
-      name: query-api-test
-      namespace: default
-    id: default/query-api-test/823b2781-7e70-4d91-a6f0-ee8f455d67dc
-    object:
-      apiVersion: spaces.platform.upbound.io/v1alpha1
-      kind: Space
-      metadata:
-        name: space-awg-kine
-        resourceVersion: "803868"
-        uid: 823b2781-7e70-4d91-a6f0-ee8f455d67dc
-      spec: {}
-  - controlPlane:
-      name: test-1
-      namespace: test
-    id: test/test-1/08a573dd-851a-42cc-a600-b6f6ed37ee8d
-    object:
-      apiVersion: argo.discover.upbound.io/v1alpha1
-      kind: EKS
-      metadata:
-        name: test-1
-        resourceVersion: "4270320"
-        uid: 08a573dd-851a-42cc-a600-b6f6ed37ee8d
-      spec: {}
-  - controlPlane:
-      name: controlplane-query-api-test-spaces-playground
-      namespace: spaces-clusters
-    id: spaces-clusters/controlplane-query-api-test-spaces-playground/b5a6770f-1f85-4d09-8990-997c84bd4159
-    object:
-      apiVersion: spaces.platform.upbound.io/v1alpha1
-      kind: Space
-      metadata:
-        name: spaces-cluster-0
-        resourceVersion: "1408337"
-        uid: b5a6770f-1f85-4d09-8990-997c84bd4159
-      spec: {}
-```
-
-
-## Query API Explorer
-
-
-
-import CrdDocViewer from '@site/src/components/CrdViewer';
-
-### Query
-
-The Query resource allows you to query objects in a single control plane.
-
-
-
-### GroupQuery
-
-The GroupQuery resource allows you to query objects across a group of control planes.
-
-
-
-### SpaceQuery
-
-The SpaceQuery resource allows you to query objects across all control planes in a Space.
-
-
-
-
-
-
-[documentation]: /spaces/howtos/self-hosted/query-api
-[up-ctx]: /reference/cli-reference
-[up-alpha-get-command]: /reference/cli-reference
-[a-flag]: /reference/cli-reference
-[multiple-resource-types]: /reference/cli-reference
-[up-alpha-query-command]: /reference/cli-reference
-[sort-by-flag]: /reference/cli-reference
-[label-columns]: /reference/cli-reference
-[debug-flag]: /reference/cli-reference
-[kubectl-conventions]: https://kubernetes.io/docs/reference/kubectl/generated/kubectl_get/
diff --git a/spaces_versioned_docs/version-v1.13/howtos/secrets-management.md b/spaces_versioned_docs/version-v1.13/howtos/secrets-management.md
deleted file mode 100644
index 88e730ae5..000000000
--- a/spaces_versioned_docs/version-v1.13/howtos/secrets-management.md
+++ /dev/null
@@ -1,719 +0,0 @@
----
-title: Secrets Management
-sidebar_position: 20
-description: A guide for how to configure synchronizing external secrets into control
-  planes in a Space.
----
-
-Upbound's _Shared Secrets_ is a built-in secrets management feature that
-provides an integrated way to manage secrets across your platform. It allows you
-to store sensitive data like passwords and certificates for your managed control
-planes as secrets in an external secret store.
-
-This feature is a wrapper around the [External Secrets Operator (ESO)][external-secrets-operator-eso] that pulls secrets from external vaults and distributes them across your platform.
-
-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+). The SharedSecretStore and SharedExternalSecret CRDs have been stable since v1.9.
-
-For API specifications and configuration details, see the [Spaces API Reference](../../../reference/apis/spaces-api/).
-:::
-
-## Benefits
-
-The Shared Secrets feature allows you to:
-
-* Access secrets from a variety of external secret stores without operational overhead
-* Configure synchronization for multiple control planes in a group
-* Store and manage all your secrets centrally
-* Use Shared Secrets across all Upbound environments (Cloud and Disconnected Spaces)
-* Synchronize secrets across groups of control planes while maintaining clear security boundaries
-* Manage secrets at scale programmatically while ensuring proper isolation and access control
-
-## Understanding the Architecture
-
-The Shared Secrets feature uses a hierarchical approach to centrally manage
-secrets and effectively control their distribution.
-
-![Shared Secrets workflow diagram](/img/shared-secrets-workflow.png)
-
-1. The flow begins at the group level, where you define your secret sources and distribution rules
-2. These rules automatically create corresponding resources in your control planes
-3. In each control plane, specific namespaces receive the secrets
-4. Changes at the group level automatically propagate through this chain
-
-## Component configuration
-
-Upbound Shared Secrets consists of two components:
-
-1. **SharedSecretStore**: Defines connections to external secret providers
-2. **SharedExternalSecret**: Specifies which secrets to synchronize and where
-
-
-### Connect to an External Vault
-
-
-The `SharedSecretStore` component is the connection point to your external
-secret vaults. It provisions ClusterSecretStore resources into control planes
-within the group.
-
-
-#### AWS Secrets Manager
-
-
-
-In this example, you'll create a `SharedSecretStore` to connect to AWS
-Secrets Manager in `us-west-2`, grant access to all control planes labeled with
-`environment: production`, and make these secrets available in the `default` and
-`crossplane-system` namespaces.
-
-
-You can configure access to AWS Secrets Manager using static credentials or
-workload identity.
-
-:::important
-While the underlying ESO API supports more auth methods, static credentials are currently the only supported auth method in Cloud Spaces.
-:::
-
-##### Static credentials
-
-1. Use the AWS CLI to create access credentials.
-
-
-2. Create your access credentials file:
-```ini
-# Create a text file with AWS credentials
-cat > aws-credentials.txt << EOF
-[default]
-aws_access_key_id = <your-access-key-id>
-aws_secret_access_key = <your-secret-access-key>
-EOF
-```
-
-3. Next, store the access credentials in a secret in the namespace you want to have access to the `SharedSecretStore`.
-```shell
-kubectl create secret \
-  generic aws-credentials \
-  -n default \
-  --from-file=creds=./aws-credentials.txt
-```
-
-4. Create a `SharedSecretStore` custom resource file called `secretstore.yaml`.
-   Paste the following configuration:
-```yaml
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedSecretStore
-metadata:
-  name: aws-secrets
-spec:
-  # Define which control planes should receive this configuration
-  controlPlaneSelector:
-    labelSelectors:
-      - matchLabels:
-          environment: production
-
-  # Define which namespaces within those control planes can access secrets
-  namespaceSelector:
-    names:
-      - default
-      - crossplane-system
-
-  # Configure the connection to AWS Secrets Manager
-  provider:
-    aws:
-      service: SecretsManager
-      region: us-west-2
-      auth:
-        secretRef:
-          accessKeyIDSecretRef:
-            name: aws-credentials
-            key: access-key-id
-          secretAccessKeySecretRef:
-            name: aws-credentials
-            key: secret-access-key
-```
-
-
-
-##### Workload Identity with IRSA
-
-
-
-You can also use AWS IAM Roles for Service Accounts (IRSA) depending on your
-organization's needs:
-
-1. Ensure you have deployed the Spaces software into an IRSA-enabled EKS cluster.
-2. Follow the AWS instructions to create an IAM OIDC provider with your EKS OIDC
-   provider URL.
-3. Determine the Spaces-generated `controlPlaneID` of your control plane:
-```shell
-kubectl get controlplane <controlplane-name> -o jsonpath='{.status.controlPlaneID}'
-```
-
-4. Create an IAM trust policy in your AWS account to match the control plane.
-```json
-{
-  "Version": "2012-10-17",
-  "Statement": [
-    {
-      "Effect": "Allow",
-      "Principal": {
-        "Federated": "arn:aws:iam::<account-id>:oidc-provider/<oidc-provider>"
-      },
-      "Action": "sts:AssumeRoleWithWebIdentity",
-      "Condition": {
-        "StringEquals": {
-          "<oidc-provider>:aud": "sts.amazonaws.com",
-          "<oidc-provider>:sub": [
-            "system:serviceaccount:mxp-<controlplane-id>-system:external-secrets-controller"]
-        }
-      }
-    }
-  ]
-}
-```
-
-5. Update your Spaces deployment to annotate the SharedSecrets service account
-   with the role ARN.
-```shell
-up space upgrade ... \
-  --set controlPlanes.sharedSecrets.serviceAccount.customAnnotations."eks\.amazonaws\.com/role-arn"="<role-arn>"
-```
-
-6. Create a SharedSecretStore and reference the SharedSecrets service account:
-```yaml {copy-lines="all"}
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedSecretStore
-metadata:
-  name: aws-sm
-  namespace: default
-spec:
-  provider:
-    aws:
-      service: SecretsManager
-      region: <region>
-      auth:
-        jwt:
-          serviceAccountRef:
-            name: external-secrets-controller
-  controlPlaneSelector:
-    names:
-      - <controlplane-name>
-  namespaceSelector:
-    names:
-      - default
-```
-
-When you create a `SharedSecretStore`, the underlying mechanism:
-
-1. Applies at the group level
-2. Determines which control planes should receive this configuration by the `controlPlaneSelector`
-3. Automatically creates a ClusterSecretStore inside each identified control plane
-4. Maintains a connection in each control plane with the ClusterSecretStore
-   credentials and configuration from the parent SharedSecretStore
-
-Upbound automatically generates a ClusterSecretStore in each matching control
-plane when you create a SharedSecretStore.
-
-```yaml {copy-lines="none"}
-# Automatically created in each matching control plane
-apiVersion: external-secrets.io/v1beta1
-kind: ClusterSecretStore
-metadata:
-  name: aws-secrets # Name matches the parent SharedSecretStore
-spec:
-  provider:
-    upboundspaces:
-      storeRef:
-        name: aws-secrets
-```
-
-When you create a SharedSecretStore, the controller replaces the provider in
-the generated ClusterSecretStore with a special provider called `upboundspaces`.
-This provider references the SharedSecretStore object in the Spaces API. This
-avoids copying the actual cloud credentials from Spaces to each control plane.
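-
-As an end-to-end check (a sketch, not part of the original steps: the resource names come from the AWS example above, and the exact `kubectl` output columns may vary by Spaces version), you can apply the store from the group context and confirm it was created:
-
-```shell
-# Apply the SharedSecretStore while your kubeconfig points at the group
-kubectl apply -f secretstore.yaml
-
-# Confirm the store exists and inspect its status and selected control planes
-kubectl get sharedsecretstores
-kubectl describe sharedsecretstore aws-secrets
-```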
-
-This workflow allows you to configure the store connection only once at the
-group level and automatically propagate it to each control plane. Individual control
-planes can use the store without exposure to the group-level configuration, and
-updates to the parent propagate to all child ClusterSecretStores.
-
-
-#### Azure Key Vault
-
-
-:::important
-While the underlying ESO API supports more auth methods, static credentials are currently the only supported auth method in Cloud Spaces.
-:::
-
-##### Static credentials
-
-1. Use the Azure CLI to create a service principal and authentication file.
-2. Create a service principal and save credentials in a file:
-```json
-{
-  "appId": "myAppId",
-  "displayName": "myServicePrincipalName",
-  "password": "myServicePrincipalPassword",
-  "tenant": "myTenantId"
-}
-```
-
-3. Store the credentials as a Kubernetes secret:
-```shell
-kubectl create secret \
-  generic azure-secret-sp \
-  -n default \
-  --from-file=creds=./azure-credentials.json
-```
-
-4. Create a SharedSecretStore referencing these credentials:
-```yaml
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedSecretStore
-metadata:
-  name: azure-kv
-spec:
-  provider:
-    azurekv:
-      tenantId: "<tenant-id>"
-      vaultUrl: "<vault-url>"
-      authSecretRef:
-        clientId:
-          name: azure-secret-sp
-          key: ClientID
-        clientSecret:
-          name: azure-secret-sp
-          key: ClientSecret
-  controlPlaneSelector:
-    names:
-      - <controlplane-name>
-  namespaceSelector:
-    names:
-      - default
-```
-
-##### Workload Identity
-
-
-You can also use Entra Workload Identity Federation to access Azure Key Vault
-without needing to manage secrets.
-
-To use Entra Workload ID with AKS:
-
-
-1. Deploy the Spaces software into a [workload identity-enabled AKS cluster][workload-identity-enabled-aks-cluster].
-2. Retrieve the OIDC issuer URL of the AKS cluster:
-```shell
-az aks show --name "<aks-cluster-name>" \
-  --resource-group "<resource-group>" \
-  --query "oidcIssuerProfile.issuerUrl" \
-  --output tsv
-```
-
-3. Use the Azure CLI to make a managed identity:
-```shell
-az identity create \
-  --name "<identity-name>" \
-  --resource-group "<resource-group>" \
-  --location "<location>" \
-  --subscription "<subscription-id>"
-```
-
-4. Look up the managed identity's client ID:
-```shell
-az identity show \
-  --resource-group "<resource-group>" \
-  --name "<identity-name>" \
-  --query 'clientId' \
-  --output tsv
-```
-
-5. Update your Spaces deployment to annotate the SharedSecrets service account with the associated Entra application client ID from the previous step:
-```shell
-up space upgrade ... \
-  --set controlPlanes.sharedSecrets.serviceAccount.customAnnotations."azure\.workload\.identity/client-id"="<identity-client-id>" \
-  --set-string controlPlanes.sharedSecrets.pod.customLabels."azure\.workload\.identity/use"="true"
-```
-
-6. Determine the Spaces-generated `controlPlaneID` of your control plane. When you deploy a `kind: controlplane` in a Space, the Spaces software deploys a set of pods in a new namespace following the format `mxp-<controlplane-id>-system`.
-```shell
-kubectl get controlplane <controlplane-name> -o jsonpath='{.status.controlPlaneID}'
-```
-
-7. Create a federated identity credential.
-```shell
-FEDERATED_IDENTITY_CREDENTIAL_NAME=<credential-name>
-USER_ASSIGNED_IDENTITY_NAME=<identity-name>
-RESOURCE_GROUP=<resource-group>
-AKS_OIDC_ISSUER=<oidc-issuer-url>
-CONTROLPLANE_ID=<controlplane-id>
-az identity federated-credential create --name ${FEDERATED_IDENTITY_CREDENTIAL_NAME} --identity-name "${USER_ASSIGNED_IDENTITY_NAME}" --resource-group "${RESOURCE_GROUP}" --issuer "${AKS_OIDC_ISSUER}" --subject system:serviceaccount:"mxp-${CONTROLPLANE_ID}-system:external-secrets-controller" --audience api://AzureADTokenExchange
-```
-
-8. Assign the `Key Vault Secrets User` role to the user-assigned managed identity that you created earlier.
-   This step gives the managed identity permission to read secrets from the key vault:
-```shell
-az role assignment create \
-  --assignee-object-id "${IDENTITY_PRINCIPAL_ID}" \
-  --role "Key Vault Secrets User" \
-  --scope "${KEYVAULT_RESOURCE_ID}" \
-  --assignee-principal-type ServicePrincipal
-```
-
-:::important
-You must manually restart a workload's pod when you add the annotation to the running pod's service account. The Entra workload identity mutating admission webhook requires a restart to inject the necessary environment.
-:::
-
-9. Create a `SharedSecretStore`. Replace `vaultUrl` with the URL of your Azure Key Vault instance. Replace `identityId` with the client ID of the managed identity created earlier:
-```yaml {copy-lines="all"}
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedSecretStore
-metadata:
-  name: azure-kv
-spec:
-  provider:
-    azurekv:
-      authType: WorkloadIdentity
-      vaultUrl: "<vault-url>"
-  controlPlaneSelector:
-    names:
-      - <controlplane-name>
-  namespaceSelector:
-    names:
-      - default
-```
-
-
-
-
-#### Google Cloud Secret Manager
-
-
-
-You can configure access to Google Cloud Secret Manager using static credentials or workload identity. Below are instructions for configuring either. See the [ESO provider API][eso-provider-api] for more information.
-
-:::important
-While the underlying ESO API supports more auth methods, static credentials are currently the only supported auth method in Cloud Spaces.
-:::
-
-##### Static credentials
-
-1. Use the [GCP CLI][gcp-cli] to create access credentials.
-2. Save the output in a file called `gcp-credentials.json`.
-3. Store the access credentials in a secret in the same namespace as the `SharedSecretStore`.
-   ```shell {label="kube-create-secret",copy-lines="all"}
-   kubectl create secret \
-     generic gcpsm-secret \
-     -n default \
-     --from-file=creds=./gcp-credentials.json
-   ```
-
-4. Create a `SharedSecretStore`, referencing the secret created earlier. Replace `projectID` with your GCP Project ID:
-```yaml
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedSecretStore
-metadata:
-  name: gcp-sm
-spec:
-  provider:
-    gcpsm:
-      auth:
-        secretRef:
-          secretAccessKeySecretRef:
-            name: gcpsm-secret
-            key: creds
-      projectID: <project-id>
-  controlPlaneSelector:
-    names:
-      - <controlplane-name>
-  namespaceSelector:
-    names:
-      - default
-```
-
-:::tip
-The example above maps a Shared Secret Store into a single namespace of a single control plane. Read [control plane selection][control-plane-selection] and [namespace selection][namespace-selection] to learn how to map into one or more namespaces of one or more control planes.
-:::
-
-
-##### Workload identity with Service Accounts to IAM Roles
-
-
-To configure, grant the `roles/iam.workloadIdentityUser` role to the Kubernetes
-service account in the control plane namespace to impersonate the IAM service
-account.
-
-1. Ensure you've deployed Spaces on a [Workload Identity Federation-enabled][workload-identity-federation-enabled] GKE cluster.
-2. Determine the Spaces-generated `controlPlaneID` of your control plane. When you deploy a `kind: controlplane` in a Space, the Spaces software deploys a set of pods in a new namespace following the format `mxp-<controlplane-id>-system`.
-```shell
-kubectl get controlplane <controlplane-name> -o jsonpath='{.status.controlPlaneID}'
-```
-
-3. Create a GCP IAM service account with the [GCP CLI][gcp-cli-1]:
-```shell
-gcloud iam service-accounts create <service-account-name> \
-  --project=<project-id>
-```
-
-4. Grant the IAM service account the role to access GCP Secret Manager:
-```shell
-SA_NAME=<service-account-name>
-IAM_SA_PROJECT_ID=<project-id>
-gcloud projects add-iam-policy-binding "${IAM_SA_PROJECT_ID}" \
-  --member "serviceAccount:${SA_NAME}@${IAM_SA_PROJECT_ID}.iam.gserviceaccount.com" \
-  --role roles/secretmanager.secretAccessor
-```
-
-5. When you enable the Shared Secrets feature, a service account gets created in each control plane for the External Secrets Operator. Apply a [GCP IAM policy binding][gcp-iam-policy-binding] to associate this service account with the desired GCP IAM role.
-```shell
-PROJECT_ID=<project-id>
-PROJECT_NUMBER=<project-number>
-CONTROLPLANE_ID=<controlplane-id>
-gcloud projects add-iam-policy-binding projects/${PROJECT_ID} \
-  --role "roles/iam.workloadIdentityUser" \
-  --member=principal://iam.googleapis.com/projects/${PROJECT_NUMBER}/locations/global/workloadIdentityPools/${PROJECT_ID}.svc.id.goog/subject/ns/mxp-${CONTROLPLANE_ID}-system/sa/external-secrets-controller
-```
-
-6. Update your Spaces deployment to annotate the SharedSecrets service account with the GCP IAM service account's identifier:
-```shell
-up space upgrade ... \
-  --set controlPlanes.sharedSecrets.serviceAccount.customAnnotations."iam\.gke\.io/gcp-service-account"="<service-account-email>"
-```
-
-7. Create a `SharedSecretStore`. Replace `projectID` with your GCP Project ID:
-```yaml
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedSecretStore
-metadata:
-  name: gcp-sm
-spec:
-  provider:
-    gcpsm:
-      projectID: <project-id>
-  controlPlaneSelector:
-    names:
-      - <controlplane-name>
-  namespaceSelector:
-    names:
-      - default
-```
-
-:::tip
-The example above maps a Shared Secret Store into a single namespace of a single control plane. Read [control plane selection][control-plane-selection-1] and [namespace selection][namespace-selection-2] to learn how to map into one or more namespaces of one or more control planes.
-:::
-
-### Manage your secret distribution
-
-After you create your SharedSecretStore, you can define which secrets to
-distribute using SharedExternalSecret:
-
-```yaml
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedExternalSecret
-metadata:
-  name: database-credentials
-  namespace: default
-spec:
-  # Select the same control planes as your SharedSecretStore
-  controlPlaneSelector:
-    labelSelectors:
-      - matchLabels:
-          environment: production
-
-  externalSecretSpec:
-    refreshInterval: 1h
-    secretStoreRef:
-      name: aws-secrets # References the SharedSecretStore name
-      kind: ClusterSecretStore
-    target:
-      name: db-credentials
-    data:
-      - secretKey: username
-        remoteRef:
-          key: prod/database/credentials
-          property: username
-      - secretKey: password
-        remoteRef:
-          key: prod/database/credentials
-          property: password
-```
-
-This configuration:
-
-* Pulls database credentials from your external secret provider
-* Creates secrets in all production control planes
-* Refreshes the secrets every hour
-* Creates a secret called `db-credentials` in each control plane
-
-When you create a SharedExternalSecret at the group level, Upbound's system
-creates a template for the corresponding ClusterExternalSecrets in each selected
-control plane.
-
-The example below simulates the ClusterExternalSecret that Upbound creates:
-
-```yaml
-# Inside each matching control plane:
-apiVersion: external-secrets.io/v1beta1
-kind: ClusterExternalSecret
-metadata:
-  name: database-credentials
-spec:
-  refreshInterval: 1h
-  secretStoreRef:
-    name: aws-secrets
-    kind: ClusterSecretStore
-  data:
-    - secretKey: username
-      remoteRef:
-        key: prod/database/credentials
-        property: username
-```
-
-The hierarchy in this configuration is:
-
-1. SharedExternalSecret (group level) defines what secrets to distribute
-2. ClusterExternalSecret (control plane level) manages the distribution within
-   each control plane
-3. Kubernetes Secrets (namespace level) are created in specified namespaces
-
-
-#### Control plane selection
-
-To configure which control planes in a group you want to project a SecretStore into, use the `spec.controlPlaneSelector` field. You can either use `labelSelectors` or the `names` of a control plane directly. A control plane matches if any of the label selectors match.
-
-This example matches all control planes in the group that have `environment: production` as a label:
-
-```yaml
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedSecretStore
-metadata:
-  name: my-secret-store
-spec:
-  controlPlaneSelector:
-    labelSelectors:
-      - matchLabels:
-          environment: production
-```
-
-You can use the more complex `matchExpressions` to match labels based on an expression. This example matches control planes that have label `environment: production` or `environment: staging`:
-
-```yaml
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedSecretStore
-metadata:
-  name: my-secret-store
-spec:
-  controlPlaneSelector:
-    labelSelectors:
-      - matchExpressions:
-        - { key: environment, operator: In, values: [production,staging] }
-```
-
-You can also specify the names of control planes directly:
-
-```yaml
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedSecretStore
-metadata:
-  name: my-secret-store
-spec:
-  controlPlaneSelector:
-    names:
-      - controlplane-dev
-      - controlplane-staging
-      - controlplane-prod
-```
-
-
-#### Namespace selection
-
-To configure which namespaces **within each matched control plane** to project the secret store into, use the `spec.namespaceSelector` field. The projected secret store only appears in the namespaces matching the provided selector. You can either use `labelSelectors` or the `names` of namespaces directly. A namespace matches if any of the label selectors match.
-
-**For all control planes matched by** `spec.controlPlaneSelector`, this example matches all namespaces in each selected control plane that have `team: team1` as a label:
-
-```yaml
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedSecretStore
-metadata:
-  name: my-secret-store
-spec:
-  namespaceSelector:
-    labelSelectors:
-      - matchLabels:
-          team: team1
-```
-
-You can use the more complex `matchExpressions` to match labels based on an expression. This example matches namespaces that have label `team: team1` or `team: team2`:
-
-```yaml
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedSecretStore
-metadata:
-  name: my-secret-store
-spec:
-  namespaceSelector:
-    labelSelectors:
-      - matchExpressions:
-        - { key: team, operator: In, values: [team1,team2] }
-```
-
-You can also specify the names of namespaces directly:
-
-```yaml
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedSecretStore
-metadata:
-  name: my-secret-store
-spec:
-  namespaceSelector:
-    names:
-      - team1-namespace
-      - team2-namespace
-```
-
-## Configure secrets directly in a control plane
-
-
-The sections above explain how to use group-scoped resources to project secrets into multiple control planes. You can also use ESO API types directly in a control plane, as you would in standalone Crossplane or Kubernetes.
-
-
-See the [ESO documentation][eso-documentation] for a full guide on using the API types.
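-
-As an illustration (a minimal sketch, not from the original guide), the ESO `ExternalSecret` below could be applied inside a single control plane to sync one secret from a store that already exists there, such as the `aws-secrets` ClusterSecretStore created earlier:
-
-```yaml
-apiVersion: external-secrets.io/v1beta1
-kind: ExternalSecret
-metadata:
-  name: db-password
-  namespace: default
-spec:
-  refreshInterval: 1h
-  secretStoreRef:
-    name: aws-secrets
-    kind: ClusterSecretStore
-  target:
-    # Name of the Kubernetes Secret that ESO creates in this namespace
-    name: db-password
-  data:
-    - secretKey: password
-      remoteRef:
-        key: prod/database/credentials
-        property: password
-```
-
-Because this resource lives in one control plane, it doesn't benefit from group-level propagation; it suits one-off secrets that only a single control plane needs.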
-
-## Best practices
-
-When you configure secrets management in your Upbound environment, keep the
-following best practices in mind:
-
-**Use consistent labeling schemes** across your control planes for predictable
-and manageable secret distribution.
-
-**Organize your secrets** in your external provider using a hierarchical
-structure that mirrors your control plane organization.
-
-**Set appropriate refresh intervals** based on your security requirements and the
-nature of the secrets.
-
-**Use namespace selection sparingly** to limit secret distribution to only the
-namespaces that need them.
-
-**Use separate tokens for each environment.** Keep them in distinct
-SharedSecretStores. Users could bypass SharedExternalSecret selectors by
-creating ClusterExternalSecrets directly in control planes. This grants access to all
-secrets available to that token.
-
-**Document your secret management architecture**, including which control planes
-should receive which secrets.
-
-[control-plane-selection]: #control-plane-selection
-[namespace-selection]: #namespace-selection
-[control-plane-selection-1]: #control-plane-selection
-[namespace-selection-2]: #namespace-selection
-
-[external-secrets-operator-eso]: https://external-secrets.io
-[workload-identity-enabled-aks-cluster]: https://learn.microsoft.com/en-us/azure/aks/workload-identity-deploy-cluster
-[eso-provider-api]: https://external-secrets.io/latest/provider/google-secrets-manager/
-[gcp-cli]: https://cloud.google.com/iam/docs/creating-managing-service-account-keys
-[workload-identity-federation-enabled]: https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity#enable_on_clusters_and_node_pools
-[gcp-cli-1]: https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity#kubernetes-sa-to-iam
-[gcp-iam-policy-binding]: https://cloud.google.com/sdk/gcloud/reference/iam/service-accounts/add-iam-policy-binding
-[eso-documentation]: https://external-secrets.io/latest/introduction/getting-started/
diff --git a/spaces_versioned_docs/version-v1.13/howtos/self-hosted/_category_.json b/spaces_versioned_docs/version-v1.13/howtos/self-hosted/_category_.json
deleted file mode 100644
index 5bf23bb0a..000000000
--- a/spaces_versioned_docs/version-v1.13/howtos/self-hosted/_category_.json
+++ /dev/null
@@ -1,11 +0,0 @@
-{
-  "label": "Self-Hosted Spaces",
-  "position": 2,
-  "collapsed": true,
-  "customProps": {
-    "plan": "business"
-  }
-}
-
-
diff --git a/spaces_versioned_docs/version-v1.13/howtos/self-hosted/administer-features.md b/spaces_versioned_docs/version-v1.13/howtos/self-hosted/administer-features.md
deleted file mode 100644
index ce878014e..000000000
--- a/spaces_versioned_docs/version-v1.13/howtos/self-hosted/administer-features.md
+++ /dev/null
@@ -1,121 +0,0 @@
----
-title: Administer features
-sidebar_position: 12
-description: Enable and disable features in Spaces
----
-
-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Feature flags and their availability may vary by version.
-:::
-
-This guide shows how to enable or disable features in your self-hosted Space.
-
-## Shared secrets
-
-**Status:** Preview
-
-This feature is enabled by default in Cloud Spaces.
-
-To enable this feature in a self-hosted Space, set
-`features.alpha.sharedSecrets.enabled=true` when installing the Space:
-
-```bash
-up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
-  ... \
- --set "features.alpha.sharedSecrets.enabled=true" \ -``` - - -## Observability - -**Status:** GA -**Available from:** Spaces v1.13+ - -This feature is enabled by default in Cloud Spaces. - - - -To enable this feature in a self-hosted Space, set -`observability.enabled=true` (`features.alpha.observability.enabled=true` before `v1.14.0`) when installing the Space: - -```bash -up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \ - ... - --set "observability.enabled=true" \ -``` - -The observability feature collects telemetry data from user-facing control -plane workloads like: - -* Crossplane -* Providers -* Functions - -Self-hosted Spaces users can add control plane system workloads such as the -`api-server`, `etcd` by setting the -`observability.collectors.includeSystemTelemetry` Helm flag to true. - -### Sensitive data - -To avoid exposing sensitive data in the `SharedTelemetryConfig` resource, use -Kubernetes secrets to store the sensitive data and reference the secret in the -`SharedTelemetryConfig` resource. - -Create the secret in the same namespace/group as the `SharedTelemetryConfig` -resource. The example below uses `kubectl create secret` to create a new secret: - -```bash -kubectl create secret generic sensitive -n \ - --from-literal=apiKey='YOUR_API_KEY' -``` - -Next, reference the secret in the `SharedTelemetryConfig` resource: - -```yaml -apiVersion: observability.spaces.upbound.io/v1alpha1 -kind: SharedTelemetryConfig -metadata: - name: newrelic -spec: - configPatchSecretRefs: - - name: sensitive - key: apiKey - path: exporters.otlphttp.headers.api-key - controlPlaneSelector: - labelSelectors: - - matchLabels: - org: foo - exporters: - otlphttp: - endpoint: https://otlp.nr-data.net - headers: - api-key: dummy # This value is replaced by the secret value, can be omitted - exportPipeline: - metrics: [otlphttp] - traces: [otlphttp] - logs: [otlphttp] -``` - -The `configPatchSecretRefs` field in the `spec` specifies the secret `name`, -`key`, and `path` values to inject the secret value in the -`SharedTelemetryConfig` resource. - -## Shared backups - -As of Spaces `v.12.0`, this feature is enabled by default. - -To disable in a self-hosted Space, pass the `features.alpha.sharedBackup.enabled=false` as a Helm chart value. -`--set "features.alpha.sharedBackup.enabled=false"` - -## Query API - -**Status:** Preview -The Query API is available in the Cloud Space offering and enabled by default. - -Query API is required for self-hosted deployments with connected Spaces. See the -related [documentation][documentation] -to enable this feature. - -[documentation]: /spaces/howtos/query-api/ diff --git a/spaces_versioned_docs/version-v1.13/howtos/self-hosted/attach-detach.md b/spaces_versioned_docs/version-v1.13/howtos/self-hosted/attach-detach.md deleted file mode 100644 index 1465921cf..000000000 --- a/spaces_versioned_docs/version-v1.13/howtos/self-hosted/attach-detach.md +++ /dev/null @@ -1,198 +0,0 @@ ---- -title: Connect or disconnect a Space -sidebar_position: 12 -description: Enable and connect self-hosted Spaces to the Upbound console ---- -:::info API Version Information -This guide applies to **Spaces v1.8.0 and later** (Self-Hosted only). Connection to Upbound console requires Query API and RBAC to be enabled. - -For version-specific features and requirements, see the . Query API setup details, see [Deploy Query API infrastructure](./query-api.md). -::: - -:::important -This feature is in preview. 
-Starting in Spaces `v1.8.0`, you must
-deploy and [enable the Query API][enable-the-query-api] and [enable Upbound
-RBAC][enable-upbound-rbac] to connect a Space to Upbound.
-:::
-
-[Upbound][upbound] allows you to connect self-hosted Spaces and enables a streamlined operations and debugging experience in your Console.
-
-## Usage
-
-### Connect
-
-Before you begin, make sure you have:
-
-- An existing Upbound [organization][organization] in Upbound SaaS.
-- The `up` CLI installed and logged into your organization.
-- `kubectl` installed with the kubecontext of your self-hosted Space cluster.
-- A `token.json` license, provided by your Upbound account representative.
-- The [Query API][query-api] enabled in the self-hosted Space.
-
-Create a new `UPBOUND_SPACE_NAME`. If you don't create a name, `up` automatically generates one for you:
-
-```ini
-export UPBOUND_SPACE_NAME=your-self-hosted-space
-```
-
-#### With up CLI
-
-:::tip
-The command tries to connect the Space to the org account context pointed at by your `up` CLI profile. Make sure you've logged into Upbound SaaS with `up login -a <org-name>` before trying to connect the Space.
-:::
-
-Connect the Space to the Console:
-
-```bash
-up space connect "${UPBOUND_SPACE_NAME}"
-```
-
-This command installs a Connect agent in the `upbound-system` namespace of your Space, creates a service account, and configures permissions in your Upbound cloud organization.
-
-#### With Helm
-
-Export your Upbound org account name to an environment variable called `UPBOUND_ORG_NAME`. You can see this value by running `up org list` after logging on to Upbound.
-
-```ini
-export UPBOUND_ORG_NAME=your-org-name
-```
-
-Create a new robot token and export it to an environment variable called `UPBOUND_TOKEN`:
-
-```bash
-up robot create "${UPBOUND_SPACE_NAME}" --description="Robot used for authenticating Space '${UPBOUND_SPACE_NAME}' with Upbound Connect"
-export UPBOUND_TOKEN=$(up robot token create "$UPBOUND_SPACE_NAME" "$UPBOUND_SPACE_NAME" --file - | jq -r '.token')
-```
-
-:::note
-Follow the [`jq` installation guide][jq-install] if your machine doesn't include
-it by default.
-:::
-
-Create a secret containing the robot token:

-```bash
-kubectl create secret -n upbound-system generic connect-token --from-literal=token=${UPBOUND_TOKEN}
-```
-
-Specify your username and password for the helm OCI registry:
-
-```bash
-jq -r .token $SPACES_TOKEN_PATH | helm registry login xpkg.upbound.io -u $(jq -r .accessId $SPACES_TOKEN_PATH) --password-stdin
-```
-
-In the same cluster where you installed the Spaces software, install the Upbound Connect agent with your token secret.
-
-```bash
-helm -n upbound-system upgrade --install agent \
-  oci://xpkg.upbound.io/spaces-artifacts/agent \
-  --version "0.0.0-441.g68777b9" \
-  --set "image.repository=xpkg.upbound.io/spaces-artifacts/agent" \
-  --set "registration.image.repository=xpkg.upbound.io/spaces-artifacts/register-init" \
-  --set "imagePullSecrets[0].name=upbound-pull-secret" \
-  --set "registration.enabled=true" \
-  --set "space=${UPBOUND_SPACE_NAME}" \
-  --set "organization=${UPBOUND_ORG_NAME}" \
-  --set "tokenSecret=connect-token" \
-  --wait
-```
-
-
-#### View your Space in the Console
-
-
-Go to the [Upbound Console][upbound-console], log in, and choose the newly connected Space from the Space selector dropdown.
-
-![A screenshot of the Upbound Console space selector dropdown](/img/attached-space.png)
-
-:::note
-You can only connect a self-hosted Space to a single organization at a time.
-:::
-
-### Disconnect
-
-#### With up CLI
-
-To disconnect a self-hosted Space or a deleted self-hosted Space, run the following command:
-
-```bash
-up space disconnect "${UPBOUND_SPACE_NAME}"
-```
-
-If the Space still exists, this command uninstalls the Connect agent and deletes the associated service account and permissions.
-
-#### With Helm
-
-To disconnect a self-hosted Space or a deleted self-hosted Space, run the following command:
-
-```bash
-helm delete -n upbound-system agent
-```
-
-Clean up the robot token you created for this self-hosted Space:
-
-```bash
-up robot delete "${UPBOUND_SPACE_NAME}" --force
-```
-
-## Security model
-
-### Architecture
-
-![An architectural diagram of a self-hosted Space attached to Upbound](/img/console-attach-architecture.jpg)
-
-:::note
-This diagram illustrates a self-hosted Space running in AWS connected to the global Upbound Console. The same model applies to a Space running in AKS, GKE, or other Kubernetes environments.
-:::
-
-### Data path
-
-Upbound uses a Pub/Sub model over TLS to communicate between Upbound's global
-console and your self-hosted Space. A self-hosted Space establishes a secure
-connection with `connect.upbound.io`, `api.upbound.io`, and `auth.upbound.io` and subscribes to an
-endpoint.
-
-:::important
-Add `connect.upbound.io`, `api.upbound.io`, and `auth.upbound.io` to your organization's list of
-allowed endpoints.
-:::
-
-The
-Upbound Console communicates with the Space through that endpoint. The data flow
-is:
-
-1. Users sign in to the Upbound Console, redirecting to authenticate with an organization's configured Identity Provider via SSO.
-2. Once authenticated, actions in the Console, like listing control planes or specific resource types from a control plane, generate requests. These requests post as messages to the Upbound Connect service.
-3. A user's self-hosted Space polls the Upbound Connect service periodically for new messages, verifies the authenticity of each message, and fulfills the request it contains.
-4. A user's self-hosted Space returns the results of the request to the Upbound Connect service and the Console renders the results in the user's browser session.
-
-**Upbound never stores data originating from a self-hosted Space.** The data is transient and only exposed in the user's browser session. The Console needs this data to render your resources and control planes in the UI.
-
-### Data transmitted
-
-Users interact with the Upbound Console to generate request queries to the Upbound Connect Service while exploring, managing, or debugging a self-hosted Space. These requests send data back to the user's browser session in the Console, including:
-
-* Metadata for the Space
-* Metadata for control planes in the Space
-* Configuration manifests for various resource types within your Space: Crossplane managed resources, composite resources, composite resource claims, Upbound shared secrets, Upbound shared backups, Crossplane providers, ProviderConfigs, Configurations, and Crossplane Composite Functions.
-
-:::important
-This data only concerns resource configuration. The data _inside_ the managed
-resources in your Space isn't visible at any point.
-:::
-
-**Upbound can't see your data.** Upbound doesn't have access to session-based data rendered for your users in the Upbound Console. Upbound has no information about your self-hosted Space, other than that you've connected a self-hosted Space.
-
-### Threat vectors
-
-Only users with editor or administrative permissions can make changes through the Console, such as creating or deleting control planes or groups.
-
-
-[enable-the-query-api]: /spaces/howtos/self-hosted/query-api
-[enable-upbound-rbac]: /manuals/platform/concepts/authorization/upbound-rbac
-[upbound]: /manuals/console/upbound-console
-[organization]: /manuals/platform/concepts/identity-management/organizations
-[query-api]: /spaces/howtos/self-hosted/query-api
-[jq-install]: https://jqlang.org/download/
-
-[upbound-console]: https://console.upbound.io
diff --git a/spaces_versioned_docs/version-v1.13/howtos/self-hosted/billing.md b/spaces_versioned_docs/version-v1.13/howtos/self-hosted/billing.md
deleted file mode 100644
index 145ff9f03..000000000
--- a/spaces_versioned_docs/version-v1.13/howtos/self-hosted/billing.md
+++ /dev/null
@@ -1,307 +0,0 @@
----
-title: Self-Hosted Space Billing
-sidebar_position: 50
-description: A guide for how billing works in an Upbound Space
----
-
-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Billing models may evolve between versions; see Capacity Licensing for alternative models.
-
-For version-specific features and capacity-based licensing reference specifications, see [Capacity Licensing](/spaces/howtos/self-hosted/capacity-licensing).
-:::
-
-Spaces are a self-hosting feature of Upbound's [flagship product][flagship-product] for platform teams to deploy control planes in their self-managed environments. You can install Spaces into any Kubernetes cluster in your own cloud account, on-premises data center, or on the edge. The pricing is usage-based and requires an Upbound account and subscription. The billing unit is a `Loop`.
-
-
-:::info
-This guide describes the traditional usage-based billing model using object storage. For disconnected or air-gapped environments, consider [Capacity Licensing](/spaces/howtos/self-hosted/capacity-licensing), which provides a simpler fixed-capacity model with local usage tracking.
-:::
-
-## Billing details
-
-Spaces **aren't connected** to Upbound's global service. To enable proper billing, the Spaces software ships a controller whose responsibility is to collect billing data from your Spaces deployment. The collection and storage of your billing data happens entirely within your environment; no data is automatically emitted back to Upbound's global service. This data gets written to object storage of your choice. AWS, Azure, and GCP are currently supported. The Spaces software exports billing usage data every ~15 seconds.
-
-Spaces customers must periodically provide the billing data to Upbound. Contact your Upbound sales representative to learn more.
-
-
-
-## AWS S3
-
-
-
-Configure billing to write to an S3 bucket by providing the following values at install-time. Create an S3 bucket if you don't already have one.
-
-### IAM policy
-
-You must create an IAM policy and attach it to the IAM user (for static credentials) or IAM role (for assumed
-roles).
-
-The policy example below enables the necessary S3 permissions:
-
-```json
-{
-  "Version": "2012-10-17",
-  "Statement": [
-    {
-      "Sid": "EnableS3Permissions",
-      "Effect": "Allow",
-      "Action": [
-        "s3:PutObject",
-        "s3:GetObject",
-        "s3:ListBucket",
-        "s3:DeleteObject"
-      ],
-      "Resource": [
-        "arn:aws:s3:::your-bucket-name/*",
-        "arn:aws:s3:::your-bucket-name"
-      ]
-    },
-    {
-      "Sid": "ListBuckets",
-      "Effect": "Allow",
-      "Action": "s3:ListAllMyBuckets",
-      "Resource": "*"
-    }
-  ]
-}
-```
-
-### Authentication with static credentials
-
-In your Spaces install cluster, create a secret in the `upbound-system`
-namespace. This secret must contain keys `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`.
-
-```bash
-kubectl create secret generic billing-credentials -n upbound-system \
-  --from-literal=AWS_ACCESS_KEY_ID=<access-key-id> \
-  --from-literal=AWS_SECRET_ACCESS_KEY=<secret-access-key>
-```
-
-Install the Space software, providing the billing details to the other required values.
-
-
-
-
-
-```bash {hl_lines="2-6"}
-helm -n upbound-system upgrade --install spaces ... \
-  --set "billing.enabled=true" \
-  --set "billing.storage.provider=aws" \
-  --set "billing.storage.aws.region=<bucket-region>" \
-  --set "billing.storage.aws.bucket=<bucket-name>" \
-  --set "billing.storage.secretRef.name=billing-credentials"
-  ...
-```
-
-
-
-
-```bash {hl_lines="2-6"}
-up space init ... \
-  --set "billing.enabled=true" \
-  --set "billing.storage.provider=aws" \
-  --set "billing.storage.aws.region=<bucket-region>" \
-  --set "billing.storage.aws.bucket=<bucket-name>" \
-  --set "billing.storage.secretRef.name=billing-credentials"
-  ...
-```
-
-
-
-
-
-
-### Authentication with an IAM role
-
-
-To use short-lived credentials with an assumed IAM role, create an IAM role with
-established trust to the `vector` service account in all `mxp-*-system`
-namespaces.
-
-```json
-{
-  "Version": "2012-10-17",
-  "Statement": [
-    {
-      "Effect": "Allow",
-      "Principal": {
-        "Federated": "arn:aws:iam::12345678912:oidc-provider/oidc.eks.eu-west-2.amazonaws.com/id/YOUROIDCPROVIDERID"
-      },
-      "Action": "sts:AssumeRoleWithWebIdentity",
-      "Condition": {
-        "StringLike": {
-          "oidc.eks.eu-west-2.amazonaws.com/id/YOUROIDCPROVIDERID:sub": "system:serviceaccount:mxp-*-system:vector"
-        }
-      }
-    }
-  ]
-}
-```
-
-For more information about workload identities, review the [Workload-identity
-Configuration documentation][workload-identity-configuration-documentation].
-
-
-
-
-
-
-```bash {hl_lines="2-7"}
-helm -n upbound-system upgrade --install spaces ... \
-  --set "billing.enabled=true" \
-  --set "billing.storage.provider=aws" \
-  --set "billing.storage.aws.region=<bucket-region>" \
-  --set "billing.storage.aws.bucket=<bucket-name>" \
-  --set "billing.storage.secretRef.name=" \
-  --set "controlPlanes.vector.serviceAccount.customAnnotations[eks.amazonaws.com/role-arn]=<role-arn>"
-  ...
-```
-
-
-
-
-```bash {hl_lines="2-7"}
-up space init ... \
-  --set "billing.enabled=true" \
-  --set "billing.storage.provider=aws" \
-  --set "billing.storage.aws.region=<bucket-region>" \
-  --set "billing.storage.aws.bucket=<bucket-name>" \
-  --set "billing.storage.secretRef.name=" \
-  --set "controlPlanes.vector.serviceAccount.customAnnotations[eks.amazonaws.com/role-arn]=<role-arn>"
-  ...
-```
-
-
-
-
-
-*Note*: You must set `billing.storage.secretRef.name` to an empty string when using an assumed role.
-
-
-## Azure blob storage
-
-Configure billing to write to a blob in Azure by providing the following values at install-time. Create a storage account and container if you don't already have one.
-
-Then, on the cluster where you installed the Spaces software, create a secret in `upbound-system`.
-This secret must contain keys `AZURE_TENANT_ID`, `AZURE_CLIENT_ID`, and `AZURE_CLIENT_SECRET`. Make sure to replace the values with details generated from your Azure account.
-
-```bash
-kubectl create secret generic billing-credentials -n upbound-system \
-  --from-literal=AZURE_TENANT_ID=<tenant-id> \
-  --from-literal=AZURE_CLIENT_ID=<client-id> \
-  --from-literal=AZURE_CLIENT_SECRET=<client-secret>
-```
-
-Install the Space software, providing the billing details to the other required values.
-
-
-
-
-
-```bash {hl_lines="2-6"}
-helm -n upbound-system upgrade --install spaces ... \
-  --set "billing.enabled=true" \
-  --set "billing.storage.provider=azure" \
-  --set "billing.storage.azure.storageAccount=<storage-account-name>" \
-  --set "billing.storage.azure.container=<container-name>" \
-  --set "billing.storage.secretRef.name=billing-credentials"
-  ...
-```
-
-
-
-
-```bash {hl_lines="2-6"}
-up space init ... \
-  --set "billing.enabled=true" \
-  --set "billing.storage.provider=azure" \
-  --set "billing.storage.azure.storageAccount=<storage-account-name>" \
-  --set "billing.storage.azure.container=<container-name>" \
-  --set "billing.storage.secretRef.name=billing-credentials"
-  ...
-```
-
-
-
-
-
-
-## GCP Cloud Storage Buckets
-
-
-Configure billing to write to a Cloud Storage bucket in GCP by providing the following values at install-time. Create a bucket if you don't already have one.
-
-Then, on the cluster where you installed the Spaces software, create a secret in `upbound-system`. This secret must contain the key `google_application_credentials`. Make sure to replace the value with a GCP service account key JSON generated from your GCP account.
-
-```bash
-kubectl create secret generic billing-credentials -n upbound-system \
-  --from-literal=google_application_credentials=<service-account-key-json>
-```
-
-Install the Space software, providing the billing details to the other required values.
-
-
-
-
-
-```bash {hl_lines="2-5"}
-helm -n upbound-system upgrade --install spaces ... \
-  --set "billing.enabled=true" \
-  --set "billing.storage.provider=gcp" \
-  --set "billing.storage.gcp.bucket=<bucket-name>" \
-  --set "billing.storage.secretRef.name=billing-credentials"
-  ...
-```
-
-
-
-
-```bash {hl_lines="2-5"}
-up space init ... \
-  --set "billing.enabled=true" \
-  --set "billing.storage.provider=gcp" \
-  --set "billing.storage.gcp.bucket=<bucket-name>" \
-  --set "billing.storage.secretRef.name=billing-credentials"
-  ...
-```
-
-
-
-
-
-## Export billing data to send to Upbound
-
-To prepare the billing data to send to Upbound, do the following:
-
-Ensure the current context of your kubeconfig points at the Spaces cluster. Then, run the [export][export] command.
- - -[export]: /reference/cli-reference -[cli-reference]: /reference/cli-reference -[flagship-product]: https://www.upbound.io/platform -[workload-identity-configuration-documentation]: https://docs.upbound.io/operate/accounts/authentication/oidc-configuration diff --git a/spaces_versioned_docs/version-v1.13/howtos/self-hosted/capacity-licensing.md b/spaces_versioned_docs/version-v1.13/howtos/self-hosted/capacity-licensing.md deleted file mode 100644 index a1dc6c101..000000000 --- a/spaces_versioned_docs/version-v1.13/howtos/self-hosted/capacity-licensing.md +++ /dev/null @@ -1,591 +0,0 @@ ---- -title: Capacity Licensing -sidebar_position: 60 -description: A guide for capacity-based licensing in self-hosted Spaces -plan: "enterprise" ---- - - - - - -This guide explains how to configure and monitor capacity-based licensing in -self-hosted Upbound Spaces. Capacity licensing provides a simplified billing -model for disconnected or air-gapped environments where automated usage -reporting isn't possible. - -:::info -Spaces `v1.15` and later support Capacity Licensing as an -alternative to the traditional usage-based billing model described in the -[Self-Hosted Space Billing][space-billing] guide. -::: - -## Overview - -Capacity licensing allows organizations to purchase a fixed capacity of -resources upfront. The Spaces software tracks usage locally and provides -visibility into consumption against your purchased capacity, all without -requiring external connectivity to Upbound's services. - -### Key concepts - -- **Resource Hours**: The primary billing unit representing all resources - managed by Crossplane over time. This includes managed resources, - composites (XRs), claims (XRCs), and all composed resources - essentially - everything Crossplane manages. The system aggregates resource counts over each - hour using trapezoidal integration to accurately account for changes in - resource count throughout the hour. -- **Operations**: The number of Operations invoked by Crossplane. -- **License Capacity**: The total amount of resource hours and operations included in your license. -- **Usage Tracking**: Continuous monitoring of consumption with real-time utilization percentages. - -### How it works - -1. Upbound provides you with a license file containing your purchased capacity -2. You configure a `SpaceLicense` in your Spaces cluster -3. The metering system automatically: - - Collects measurements from all control planes every minute - - Aggregates usage data into hourly intervals - - Stores usage data in a local PostgreSQL database - - Updates the `SpaceLicense` status with current consumption - -## Prerequisites - -### PostgreSQL database - -Capacity licensing requires a PostgreSQL database to store usage measurements. You can use: - -- An existing PostgreSQL instance -- A managed PostgreSQL service (AWS RDS, Azure Database, Google Cloud SQL) -- A PostgreSQL instance deployed in your cluster - -The database must be: - -- Accessible from the Spaces cluster -- Configured with a dedicated database and credentials - -#### Example: Deploy PostgreSQL with CloudNativePG - -If you don't have an existing PostgreSQL instance, you can deploy one in your -cluster using [CloudNativePG] (CNPG). CNPG is a Kubernetes operator that -manages PostgreSQL clusters. - -1. Install the CloudNativePG operator: - -```bash -kubectl apply -f https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/release-1.24/releases/cnpg-1.24.1.yaml -``` - -2. 
 Create a PostgreSQL cluster for metering:
-
-```yaml
-apiVersion: postgresql.cnpg.io/v1
-kind: Cluster
-metadata:
-  name: metering-postgres
-  namespace: upbound-system
-spec:
-  instances: 1
-  imageName: ghcr.io/cloudnative-pg/postgresql:16
-  bootstrap:
-    initdb:
-      database: metering
-      owner: metering
-      postInitApplicationSQL:
-        - ALTER ROLE "metering" CREATEROLE;
-  storage:
-    size: 5Gi
-  # Optional: Configure resources for production use
-  # resources:
-  #   requests:
-  #     memory: "512Mi"
-  #     cpu: "500m"
-  #   limits:
-  #     memory: "1Gi"
-  #     cpu: "1000m"
----
-apiVersion: v1
-kind: Secret
-metadata:
-  name: metering-postgres-app
-  namespace: upbound-system
-  labels:
-    cnpg.io/reload: "true"
-stringData:
-  username: metering
-  password: "your-secure-password-here"
-type: kubernetes.io/basic-auth
-```
-
-```bash
-kubectl apply -f metering-postgres.yaml
-```
-
-3. Wait for the cluster to be ready:
-
-```bash
-kubectl wait --for=condition=ready cluster/metering-postgres -n upbound-system --timeout=5m
-```
-
-4. You can access the PostgreSQL cluster at `metering-postgres-rw.upbound-system.svc.cluster.local:5432`.
-
-:::tip
-For production deployments, consider:
-- Increasing `instances` to 3 for high availability
-- Configuring [backups] to object storage
-- Setting appropriate resource requests and limits
-- Using a dedicated storage class with good I/O performance
-:::
-
-### License file
-
-Contact your Upbound sales representative to obtain a license file for your organization. The license file contains:
-- Your unique license ID
-- Purchased capacity (resource hours and operations)
-- License validity period
-- Any usage restrictions (such as cluster UUID pinning)
-
-## Configuration
-
-### Step 1: Create database credentials secret
-
-Create a Kubernetes secret containing your PostgreSQL password using the pgpass format:
-
-```bash
-# Create a pgpass file with format: hostname:port:database:username:password
-# Note: The database name and username must be 'metering'
-# For CNPG clusters, use the read-write service endpoint: <cluster-name>-rw.<namespace>.svc.cluster.local
-echo "metering-postgres-rw.upbound-system.svc.cluster.local:5432:metering:metering:your-secure-password-here" > pgpass
-
-# Create the secret
-kubectl create secret generic metering-postgres-credentials \
-  -n upbound-system \
-  --from-file=pgpass=pgpass
-
-# Clean up the pgpass file
-rm pgpass
-```
-
-The secret must contain a single key:
-- **`pgpass`**: PostgreSQL password file in the format `hostname:port:metering:metering:password`
-
-:::note
-The database name and username are fixed as `metering`. Ensure your PostgreSQL instance has a database named `metering` with a user `metering` that has appropriate permissions.
-
-If you deployed PostgreSQL using CNPG as shown in the example above, the password should match what you set in the `metering-postgres-app` secret.
-:::
-
-:::tip
-For production environments, consider using external secret management solutions:
-- [External Secrets Operator][eso]
-- Cloud-specific secret managers (AWS Secrets Manager, Azure Key Vault, GCP Secret Manager)
-:::
-
-### Step 2: Enable metering in Spaces
-
-Enable the metering feature when installing or upgrading Spaces:
-
-```bash {hl_lines="2-7"}
-helm -n upbound-system upgrade --install spaces ... 
\ - --set "metering.enabled=true" \ - --set "metering.storage.postgres.connection.url=metering-postgres-rw.upbound-system.svc.cluster.local:5432" \ - --set "metering.storage.postgres.connection.credentials.secret.name=metering-postgres-credentials" \ - --set "metering.interval=1m" \ - --set "metering.workerCount=10" \ - --set "metering.aggregationInterval=1h" \ - --set "metering.measurementRetentionDays=30" - ... -``` - - - - - -```bash {hl_lines="2-7"} -up space init ... \ - --set "metering.enabled=true" \ - --set "metering.storage.postgres.connection.url=metering-postgres-rw.upbound-system.svc.cluster.local:5432" \ - --set "metering.storage.postgres.connection.credentials.secret.name=metering-postgres-credentials" \ - --set "metering.interval=1m" \ - --set "metering.workerCount=10" \ - --set "metering.aggregationInterval=1h" \ - --set "metering.measurementRetentionDays=30" - ... -``` - - - - - -#### Configuration options - -| Option | Default | Description | -|--------|---------|-------------| -| `metering.enabled` | `false` | Enable the metering feature | -| `metering.storage.postgres.connection.url` | - | PostgreSQL host and port (format: `host:port`, required) | -| `metering.storage.postgres.connection.credentials.secret.name` | - | Name of the secret containing PostgreSQL credentials (required) | -| `metering.storage.postgres.connection.sslmode` | `require` | SSL mode for PostgreSQL connection (`disable`, `allow`, `prefer`, `require`, `verify-ca`, `verify-full`) | -| `metering.storage.postgres.connection.ca.name` | - | Name of the secret containing CA certificate for TLS connections (optional) | -| `metering.interval` | `1m` | How often to collect measurements from control planes | -| `metering.workerCount` | `10` | Number of parallel workers for measurement collection | -| `metering.aggregationInterval` | `1h` | How often to aggregate measurements into hourly usage data | -| `metering.measurementRetentionDays` | `30` | Days to retain raw measurements (0 = indefinite) | - - -#### Database sizing and retention - -The metering system uses two PostgreSQL tables to track usage: - -**Raw measurements table** (`measurements`): -- Stores point-in-time snapshots collected every measurement interval (default: 1 minute) -- One row per control plane per interval -- Affected by the `measurementRetentionDays` setting -- Used for detailed auditing and troubleshooting - -**Aggregated usage table** (`hourly_usage`): -- Stores hourly aggregated resource hours and operations per license -- One row per hour per license -- Never deleted (required for accurate license tracking) -- Grows much slower than raw measurements - -##### Storage sizing guidelines - -Estimate your PostgreSQL storage needs based on these factors: - - -| Deployment Size | Control Planes | Measurement Interval | Retention Days | Raw Measurements | Indexes & Overhead | Total Storage | -|----------------|----------------|---------------------|----------------|------------------|-------------------|---------------| -| Small | 10 | 1m | 30 | ~85 MB | ~40 MB | **~125 MB** | -| Medium | 50 | 1m | 30 | ~430 MB | ~215 MB | **~645 MB** | -| Large | 200 | 1m | 30 | ~1.7 GB | ~850 MB | **~2.5 GB** | -| Large (90-day retention) | 200 | 1m | 90 | ~5.2 GB | ~2.6 GB | **~7.8 GB** | - -The aggregated hourly usage table adds minimal overhead (~50 KB per year per license). 
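-
-To check actual on-disk usage of these tables, you can query PostgreSQL directly. A minimal sketch, assuming the CNPG example deployment from this guide (pod `metering-postgres-1` in `upbound-system`; adjust the pod name and authentication for your own PostgreSQL setup):
-
-```bash
-# Show the metering tables and their total size, largest first
-kubectl exec -it metering-postgres-1 -n upbound-system -- \
-  psql -U postgres -d metering -c \
-  "SELECT relname, pg_size_pretty(pg_total_relation_size(relid)) AS total_size
-     FROM pg_catalog.pg_statio_user_tables
-     ORDER BY pg_total_relation_size(relid) DESC;"
-```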
- -**Formula for custom calculations**: -``` -Daily measurements per control plane = (24 * 60) / interval_minutes -Total rows = control_planes × daily_measurements × retention_days -Storage (MB) ≈ (total_rows × 200 bytes) / 1,048,576 × 1.5 (with indexes) -``` - -##### Retention behavior - -The `measurementRetentionDays` setting controls retention of raw measurement data: - -- **Default: 30 days** - Balances audit capabilities with storage efficiency -- **Set to 0**: Disables cleanup, retains all raw measurements indefinitely -- **Cleanup runs**: Every aggregation interval (default: hourly) -- **What's kept forever**: Aggregated hourly usage data (needed for license tracking) -- **What's cleaned up**: Raw point-in-time measurements older than retention period - -**Recommendations**: -- **30 days**: For most troubleshooting and short-term auditing -- **60 to 90 days**: For environments requiring extended audit trails -- **Unlimited (0)**: Only for environments with ample storage or specific compliance requirements - -:::note -Increasing retention period linearly increases storage requirements for raw measurements. The aggregated hourly data is always retained regardless of this setting. -::: - -### Step 3: Apply your license - -Use the `up` CLI to apply your license file: - -```bash -up space license apply /path/to/license.json -``` - -This command automatically: -- Creates a secret containing your license file in the `upbound-system` namespace -- Creates the `SpaceLicense` resource configured to use that secret - -:::tip -You can specify a different namespace for the license secret using the `--namespace` flag: -```bash -up space license apply /path/to/license.json --namespace my-namespace -``` -::: - -
-Alternative: Manual kubectl approach - -If you prefer not to use the `up` CLI, you can manually create the resources: - -1. Create the license secret: - -```bash -kubectl create secret generic space-license \ - -n upbound-system \ - --from-file=license.json=/path/to/license.json -``` - -2. Create the SpaceLicense resource: - -```yaml -apiVersion: admin.spaces.upbound.io/v1alpha1 -kind: SpaceLicense -metadata: - name: space -spec: - secretRef: - name: space-license - namespace: upbound-system - key: license.json -``` - -```bash -kubectl apply -f spacelicense.yaml -``` - -:::important -You **must** name the `SpaceLicense` resource `space`. This resource is a singleton and only one can exist in the cluster. -::: - -
- -## Monitoring usage - -### Check license status - -Use the `up` CLI to view your license details and current usage: - -```bash -up space license show -``` - -Example output: - -``` -Spaces License Status: Valid (License is valid) - -Created: 2024-01-01T00:00:00Z -Expires: 2025-01-01T00:00:00Z - -Plan: enterprise - -Resource Hour Limit: 1000000 -Operation Limit: 500000 - -Enabled Features: -- spaces -- query-api -- backup-restore -``` - -The output shows: -- License validity status and any validation messages -- Creation and expiration dates -- Your commercial plan tier -- Capacity limits for resource hours and operations -- Enabled features in your license -- Any restrictions (such as cluster UUID pinning) - -
-Alternative: View detailed status with kubectl - -For detailed information including usage statistics, use kubectl: - -```bash -kubectl get spacelicense space -o yaml -``` - -Example output showing usage data: - -```yaml -apiVersion: admin.spaces.upbound.io/v1alpha1 -kind: SpaceLicense -metadata: - name: space -spec: - secretRef: - name: space-license - namespace: upbound-system -status: - conditions: - - type: LicenseValid - status: "True" - reason: Valid - message: "License is valid" - id: "lic_abc123xyz" - plan: "enterprise" - capacity: - resourceHours: 1000000 - operations: 500000 - usage: - resourceHours: 245680 - operations: 12543 - resourceHoursUtilization: "24.57%" - operationsUtilization: "2.51%" - firstMeasurement: "2024-01-15T10:00:00Z" - lastMeasurement: "2024-02-10T14:30:00Z" - createdAt: "2024-01-01T00:00:00Z" - expiresAt: "2025-01-01T00:00:00Z" - enabledFeatures: - - "spaces" - - "query-api" - - "backup-restore" -``` - -
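-
-The usage data in this status is easy to script against. For example, a quick utilization check with `kubectl` (a sketch using the status fields shown above):
-
-```bash
-# Print the current resource-hour and operation utilization percentages
-kubectl get spacelicense space \
-  -o jsonpath='{.status.usage.resourceHoursUtilization}{"\n"}{.status.usage.operationsUtilization}{"\n"}'
-```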
-
-### Understanding the status fields
-
-| Field | Description |
-|-------|-------------|
-| `status.id` | Unique license identifier |
-| `status.plan` | Your commercial plan (community, standard, enterprise) |
-| `status.capacity` | Total capacity included in your license |
-| `status.usage.resourceHours` | Total resource hours consumed |
-| `status.usage.operations` | Total operations performed |
-| `status.usage.resourceHoursUtilization` | Percentage of resource hours capacity used |
-| `status.usage.operationsUtilization` | Percentage of operations capacity used |
-| `status.usage.firstMeasurement` | When usage tracking began |
-| `status.usage.lastMeasurement` | Most recent usage update |
-| `status.expiresAt` | License expiration date |
-
-### Monitor with kubectl
-
-Watch your license utilization in real-time:
-
-```bash
-kubectl get spacelicense space -w
-```
-
-Short output format:
-
-```
-NAME    PLAN         VALID   REASON   AGE
-space   enterprise   True    Valid    45d
-```
-
-## Managing licenses
-
-### Updating your license
-
-To update your license with a new license file (for example, when renewing or upgrading capacity), apply the new license:
-
-```bash
-up space license apply /path/to/new-license.json
-```
-
-This command replaces the existing license secret and updates the SpaceLicense resource.
-
-### Removing a license
-
-To remove a license:
-
-```bash
-up space license remove
-```
-
-This command:
-- Prompts for confirmation before proceeding
-- Removes the license secret
-
-To skip the confirmation prompt, use the `--force` flag:
-
-```bash
-up space license remove --force
-```
-
-## Troubleshooting
-
-### License not updating
-
-If the license status doesn't update with usage data:
-
-1. **Check metering controller logs**:
-   ```bash
-   kubectl logs -n upbound-system deployment/spaces-controller -c metering
-   ```
-
-2. **Check if the system captures your measurements**:
-
-   ```bash
-   # Connect to PostgreSQL and query the measurements table
-   kubectl exec -it <postgres-pod> -- psql -U metering -d metering \
-     -c "SELECT COUNT(*) FROM measurements WHERE timestamp > NOW() - INTERVAL '1 hour';"
-   ```
-
-### High utilization warnings
-
-If you're approaching your capacity limits:
-
-1. **Review resource usage** by control plane to identify high consumers
-2. **Contact your Upbound sales representative** to discuss capacity expansion
-3. **Optimize managed resources** by cleaning up unused resources
-
-### License validation failures
-
-If your license shows as invalid:
-
-1. **Check expiration date**: `kubectl get spacelicense space -o jsonpath='{.status.expiresAt}'`
-2. **Verify license file integrity**: Ensure the secret contains valid JSON
-3. **Check for cluster UUID restrictions**: Upbound pins some licenses to
-   specific clusters
-4. **Review controller logs** for detailed error messages
-
-## Differences from traditional billing
-
-### Capacity licensing
-
-- ✅ Works in disconnected environments
-- ✅ Provides real-time usage visibility
-- ✅ No manual data export required
-- ❌ Requires PostgreSQL database
-- ✅ Fixed capacity model
-
-### Traditional billing (object storage)
-
-- ❌ Requires periodic manual export
-- ❌ Delayed visibility into usage
-- ✅ Works with S3/Azure Blob/GCS
-- ❌ Requires cloud storage access
-- ✅ Pay-as-you-go model
-
-## Best practices
-
-### Database management
-
-1. **Regular backups**: Back up your metering database regularly to preserve usage history
-2. **Monitor database size**: Set appropriate retention periods to manage storage growth
-3.
 **Use managed databases**: Consider managed PostgreSQL services for production
-4. **Connection pooling**: Use connection pooling for better performance at scale
-
-### License management
-
-1. **Monitor utilization**: Set up alerts before reaching 80% capacity
-2. **Plan renewals early**: Start renewal discussions 60 days before expiration
-3. **Track grace periods**: Note the `gracePeriodEndsAt` date for planning
-4. **Secure license files**: Treat license files as sensitive credentials
-
-### Operational monitoring
-
-1. **Set up dashboards**: Create Grafana dashboards for usage trends
-2. **Enable alerting**: Configure alerts for high utilization and expiration
-3. **Regular audits**: Periodically review usage patterns across control planes
-4. **Capacity planning**: Use historical data to predict future capacity needs
-
-## Next steps
-
-- Learn about [Observability] to monitor your Spaces deployment
-- Explore [Backup and Restore][backup-restore] to protect your control plane data
-- Review [Self-Hosted Space Billing][space-billing] for the traditional billing model
-- Contact [Upbound Sales][sales] to discuss capacity licensing options
-
-[space-billing]: /spaces/howtos/self-hosted/billing
-[CloudNativePG]: https://cloudnative-pg.io/
-[backups]: https://cloudnative-pg.io/documentation/current/backup_recovery/
-[backup-restore]: /spaces/howtos/backup-and-restore
-[sales]: https://www.upbound.io/contact
-[eso]: https://external-secrets.io/
-[Observability]: /spaces/howtos/observability
-
diff --git a/spaces_versioned_docs/version-v1.13/howtos/self-hosted/certs.md b/spaces_versioned_docs/version-v1.13/howtos/self-hosted/certs.md
deleted file mode 100644
index e517c250e..000000000
--- a/spaces_versioned_docs/version-v1.13/howtos/self-hosted/certs.md
+++ /dev/null
@@ -1,274 +0,0 @@
----
-title: Istio Ingress Gateway With Custom Certificates
-sidebar_position: 20
-description: Install a self-hosted Space using the Istio ingress gateway in a kind cluster
----
-
-:::important
-Prerequisites
-
-- Spaces Token available in a file
-- `docker login xpkg.upbound.io -u <access-id> -p <token>`
-- [`istioctl`][istioctl] installation
-- `jq` installation
-:::
-
-This document describes the installation of a self-hosted Space on an example `kind`
-cluster along with an Istio ingress gateway and certificates. The service mesh and certificate
-installation is transferable to self-hosted Spaces in arbitrary clouds.
-
-## Create a kind cluster
-
-```shell
-# Example kind configuration: exposes ports 80/443 on the host and labels the
-# node ingress-ready for the Istio ingress gateway configured below.
-cat <<EOF | kind create cluster --config=-
-kind: Cluster
-apiVersion: kind.x-k8s.io/v1alpha4
-nodes:
-- role: control-plane
-  kubeadmConfigPatches:
-  - |
-    kind: InitConfiguration
-    nodeRegistration:
-      kubeletExtraArgs:
-        node-labels: "ingress-ready=true"
-  extraPortMappings:
-  - containerPort: 80
-    hostPort: 80
-    protocol: TCP
-  - containerPort: 443
-    hostPort: 443
-    protocol: TCP
-EOF
-```
-
-## Install Istio
-
-:::important
-This is an example and not recommended for use in production.
-:::
-
-1. Create the `istio-values.yaml` file
-
-```shell
-cat > istio-values.yaml << 'EOF'
-apiVersion: install.istio.io/v1alpha1
-kind: IstioOperator
-spec:
-  hub: gcr.io/istio-release
-  components:
-    ingressGateways:
-    - enabled: true
-      name: istio-ingressgateway
-      k8s:
-        nodeSelector:
-          ingress-ready: "true"
-        overlays:
-        - apiVersion: apps/v1
-          kind: Deployment
-          name: istio-ingressgateway
-          patches:
-          - path: spec.template.spec.containers.[name:istio-proxy].ports
-            value:
-            - containerPort: 8080
-              hostPort: 80
-            - containerPort: 8443
-              hostPort: 443
-EOF
-```
-
-2. Install istio via `istioctl`
-
-```shell
-istioctl install -f istio-values.yaml
-```
-
-## Create a self-signed Certificate via cert-manager
-
-:::important
-This Certificate manifest creates a self-signed certificate for a proof of concept
-environment and isn't recommended for production use cases.
-:::
-
-1.
 Create the upbound-system namespace
-
-```shell
-kubectl create namespace upbound-system
-```
-
-2. Create a self-signed certificate
-
-```shell
-# Example self-signed Issuer and Certificate. The Certificate writes the
-# example-tls-secret consumed by the Spaces install below; adjust names and
-# DNS names for your environment.
-cat <<EOF | kubectl apply -f -
-apiVersion: cert-manager.io/v1
-kind: Issuer
-metadata:
-  name: selfsigned-issuer
-  namespace: upbound-system
-spec:
-  selfSigned: {}
----
-apiVersion: cert-manager.io/v1
-kind: Certificate
-metadata:
-  name: example-tls-cert
-  namespace: upbound-system
-spec:
-  secretName: example-tls-secret
-  commonName: proxy.upbound-127.0.0.1.nip.io
-  dnsNames:
-  - proxy.upbound-127.0.0.1.nip.io
-  issuerRef:
-    name: selfsigned-issuer
-    kind: Issuer
-EOF
-```
-
-## Create an Istio Gateway and VirtualService
-
-Configure an Istio Gateway and VirtualService to use TLS passthrough.
-
-```shell
-# Example Gateway and VirtualService for TLS passthrough. Names here are
-# illustrative, and the spaces-router service port can differ per Spaces version.
-cat <<EOF | kubectl apply -f -
-apiVersion: networking.istio.io/v1beta1
-kind: Gateway
-metadata:
-  name: spaces-gateway
-  namespace: istio-system
-spec:
-  selector:
-    istio: ingressgateway
-  servers:
-  - port:
-      number: 443
-      name: tls
-      protocol: TLS
-    tls:
-      mode: PASSTHROUGH
-    hosts:
-    - "proxy.upbound-127.0.0.1.nip.io"
----
-apiVersion: networking.istio.io/v1beta1
-kind: VirtualService
-metadata:
-  name: spaces-router
-  namespace: istio-system
-spec:
-  hosts:
-  - "proxy.upbound-127.0.0.1.nip.io"
-  gateways:
-  - spaces-gateway
-  tls:
-  - match:
-    - port: 443
-      sniHosts:
-      - "proxy.upbound-127.0.0.1.nip.io"
-    route:
-    - destination:
-        host: spaces-router.upbound-system.svc.cluster.local
-        port:
-          number: 8443
-EOF
-```
-
-## Install the Spaces software
-
-1. Create the `spaces-values.yaml` file
-
-```shell
-cat > spaces-values.yaml << 'EOF'
-# Configure spaces-router to use the TLS secret created by cert-manager.
-externalTLS:
-  tlsSecret:
-    name: example-tls-secret
-  caBundleSecret:
-    name: example-tls-secret
-    key: ca.crt
-ingress:
-  provision: false
-  # Allow Istio Ingress Gateway to communicate to the spaces-router
-  namespaceLabels:
-    kubernetes.io/metadata.name: istio-system
-  podLabels:
-    app: istio-ingressgateway
-    istio: ingressgateway
-EOF
-```
-
-2. Set the required environment variables
-
-```shell
-# Update these according to your account/token file
-export SPACES_TOKEN_PATH=<path-to-spaces-token-file>
-export UPBOUND_ACCOUNT=<your-upbound-account>
-# Replace SPACES_ROUTER_HOST with your Spaces ingress hostname
-export SPACES_ROUTER_HOST="proxy.upbound-127.0.0.1.nip.io"
-export SPACES_VERSION="1.14.1"
-```
-
-3. Create an image pull secret for Spaces
-
-```shell
-kubectl -n upbound-system create secret docker-registry upbound-pull-secret \
-  --docker-server=https://xpkg.upbound.io \
-  --docker-username="$(jq -r .accessId $SPACES_TOKEN_PATH)" \
-  --docker-password="$(jq -r .token $SPACES_TOKEN_PATH)"
-```
-
-4. Install the Spaces helm chart
-
-```shell
-# Login to xpkg.upbound.io
-jq -r .token $SPACES_TOKEN_PATH | helm registry login xpkg.upbound.io -u $(jq -r .accessId $SPACES_TOKEN_PATH) --password-stdin
-
-# Install spaces helm chart
-helm -n upbound-system upgrade --install spaces \
-  oci://xpkg.upbound.io/spaces-artifacts/spaces \
-  --version "${SPACES_VERSION}" \
-  --set "ingress.host=${SPACES_ROUTER_HOST}" \
-  --set "account=${UPBOUND_ACCOUNT}" \
-  --set "authentication.hubIdentities=true" \
-  --set "authorization.hubRBAC=true" \
-  --wait -f spaces-values.yaml
-```
-
-## Validate the installation
-
-If the `up` CLI can interact with your self-hosted Space, the certificate
-installation is working.
-
-- `up ctx .`
-
-You can also issue control plane creation, list and deletion commands.
-
-- `up ctp create cert-test`
-- `up ctp list`
-- `up ctx disconnected/kind-kind/default/cert-test && kubectl get namespace`
-- `up ctp delete cert-test`
-
-:::note
-If `up` can't connect to your control plane, follow [this guide to create a new profile][up-profile].
-:::
-
-## Troubleshooting
-
-Examine your certificate with `openssl`:
-
-```shell
-openssl s_client -connect proxy.upbound-127.0.0.1.nip.io:443 -showcerts
-```
-
-[istioctl]: https://istio.io/latest/docs/ops/diagnostic-tools/istioctl/
-[up-profile]: /manuals/cli/howtos/profile-config/
diff --git a/spaces_versioned_docs/version-v1.13/howtos/self-hosted/configure-ha.md b/spaces_versioned_docs/version-v1.13/howtos/self-hosted/configure-ha.md
deleted file mode 100644
index ddf36c55e..000000000
--- a/spaces_versioned_docs/version-v1.13/howtos/self-hosted/configure-ha.md
+++ /dev/null
@@ -1,450 +0,0 @@
----
-title: Production Scaling and High Availability
-description: Configure your Self-Hosted Space for production
-sidebar_position: 5
----
-
-This guide explains how to configure an existing Upbound Space deployment for
-production operation at scale.
-
-Use this guide when you're ready to deploy production scaling, high availability,
-and monitoring in your Space.
-
-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments.
-
-For API specifications on ControlPlane resources and configurations, see the [Spaces API Reference](../../../../../reference/apis/spaces-api/).
-:::
-
-## Prerequisites
-
-Before you begin scaling your Spaces deployment, make sure you have:
-
-* A working Space deployment
-* Cluster administrator access
-* An understanding of load patterns and growth in your organization
-* A familiarity with node affinity, tainting, and Horizontal Pod Autoscaling
-  (HPA)
-
-## Production scaling strategy
-
-In this guide, you will:
-
-* Create dedicated node pools for different component types
-* Configure high availability to ensure there are no single points of failure
-* Set dynamic scaling for variable workloads
-* Optimize your storage and component operations
-* Monitor your deployment health and performance
-
-## Spaces architecture
-
-The basic Spaces workflow follows the pattern below:
-
-![Spaces workflow][spaces-workflow]
-
-## Node architecture
-
-You can mitigate resource contention and improve reliability by separating system
-components into dedicated node pools.
-
-### `etcd` dedicated nodes
-
-`etcd` performance directly impacts your entire Space, so isolate it for
-consistent performance.
-
-1. Create a dedicated `etcd` node pool
-
-   **Requirements:**
-   - **Minimum**: 3 nodes for HA
-   - **Instance type**: General purpose with high network throughput/low latency
-   - **Storage**: High performance storage (`etcd` is I/O sensitive)
-
-2. Taint `etcd` nodes to reserve them
-
-   ```bash
-   kubectl taint nodes <node-name> target=etcd:NoSchedule
-   ```
-
-3. Configure `etcd` storage
-
-   `etcd` is sensitive to storage I/O performance. Review the [`etcd` scaling
-   documentation][scaling]
-   for specific storage guidance.
-
-### API server dedicated nodes
-
-API servers handle all control plane requests and should run on dedicated
-infrastructure.
-
-1. Create dedicated API server nodes
-
-   **Requirements:**
-   - **Minimum**: 2 nodes for HA
-   - **Instance type**: Compute-optimized, memory-optimized, or general-purpose
-   - **Scaling**: Scale vertically based on API server load patterns
-
-2. Taint API server nodes
-
-   ```bash
-   kubectl taint nodes <node-name> target=apiserver:NoSchedule
-   ```
-
-### Configure cluster autoscaling
-
-Enable cluster autoscaling for all node pools.
-
-For AWS EKS clusters, Upbound recommends using [`Karpenter`][karpenter] for
-improved bin-packing and instance type selection.
-
-For GCP GKE clusters, follow the [GKE autoscaling][gke-autoscaling] guide.
-
-For Azure AKS clusters, follow the [AKS autoscaling][aks-autoscaling] guide.
-
-## Configure high availability
-
-Ensure control plane components can survive node and zone failures.
-
-### Enable high availability mode
-
-1. Configure control planes for high availability
-
-   ```yaml
-   controlPlanes:
-     ha:
-       enabled: true
-   ```
-
-   This configures control plane pods to run with multiple replicas and
-   associated pod disruption budgets.
-
-### Configure component distribution
-
-1.
Set up API server pod distribution - - ```yaml - controlPlanes: - vcluster: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: target - operator: In - values: - - apiserver - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - key: app - operator: In - values: - - vcluster - topologyKey: "kubernetes.io/hostname" - preferredDuringSchedulingIgnoredDuringExecution: - - podAffinityTerm: - labelSelector: - matchExpressions: - - key: app - operator: In - values: - - vcluster - topologyKey: topology.kubernetes.io/zone - weight: 100 - ``` - -2. Configure `etcd` pod distribution - - ```yaml - controlPlanes: - etcd: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: target - operator: In - values: - - etcd - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - key: app - operator: In - values: - - vcluster-etcd - topologyKey: "kubernetes.io/hostname" - preferredDuringSchedulingIgnoredDuringExecution: - - podAffinityTerm: - labelSelector: - matchExpressions: - - key: app - operator: In - values: - - vcluster-etcd - topologyKey: topology.kubernetes.io/zone - weight: 100 - ``` - -### Configure tolerations - -Allow control plane pods to schedule on the tainted dedicated nodes (available -in Spaces v1.14+). - -1. Add tolerations for `etcd` pods - - ```yaml - controlPlanes: - etcd: - tolerations: - - key: "target" - operator: "Equal" - value: "etcd" - effect: "NoSchedule" - ``` - -2. Add tolerations for API server pods - - ```yaml - controlPlanes: - vcluster: - tolerations: - - key: "target" - operator: "Equal" - value: "apiserver" - effect: "NoSchedule" - ``` - - -## Configure autoscaling for Spaces components - - -Set up the Spaces system components to handle variable load automatically. - -### Scale API and `apollo` services - -1. Configure minimum replicas for availability - - ```yaml - api: - replicaCount: 2 - - features: - alpha: - apollo: - enabled: true - replicaCount: 2 - ``` - - Both services support horizontal and vertical scaling based on load patterns. - -### Configure router autoscaling - -The `spaces-router` is the entry point for all traffic and needs intelligent -scaling. - - -1. Enable Horizontal Pod Autoscaler - - ```yaml - router: - hpa: - enabled: true - minReplicas: 2 - maxReplicas: 8 - targetCPUUtilizationPercentage: 80 - targetMemoryUtilizationPercentage: 80 - ``` - -2. Monitor scaling factors - - **Router scaling behavior:** - - **Vertical scaling**: Scales based on number of control planes - - **Horizontal scaling**: Scales based on request volume - - **Resource monitoring**: Monitor CPU and memory usage - - - -### Configure controller scaling - -The `spaces-controller` manages Space-level resources and requires vertical -scaling. - -1. Configure adequate resources with headroom - - ```yaml - controller: - resources: - requests: - cpu: "500m" - memory: "1Gi" - limits: - cpu: "2000m" - memory: "4Gi" - ``` - - **Important**: The controller can spike when reconciling large numbers of - control planes, so provide adequate headroom for resource spikes. - -## Set up production storage - - -### Configure Query API database - - -1. 
 Use a managed PostgreSQL database
-
-   **Recommended services:**
-   - [AWS RDS][rds]
-   - [Google Cloud SQL][gke-sql]
-   - [Azure Database for PostgreSQL][aks-sql]
-
-   **Requirements:**
-   - Minimum 400 IOPS performance
-
-## Monitoring
-
-Monitor key metrics to ensure healthy scaling and identify issues quickly.
-
-### Control plane health
-
-Track these `spaces-controller` metrics:
-
-1. **Total control planes**
-
-   ```
-   spaces_control_plane_exists
-   ```
-
-   Tracks the total number of control planes in the system.
-
-2. **Degraded control planes**
-
-   ```
-   spaces_control_plane_degraded
-   ```
-
-   Tracks control planes that aren't in a `Synced`, `Ready`, and
-   `Healthy` state.
-
-3. **Stuck control planes**
-
-   ```
-   spaces_control_plane_stuck
-   ```
-
-   Control planes stuck in a provisioning state.
-
-4. **Deletion issues**
-
-   ```
-   spaces_control_plane_deletion_stuck
-   ```
-
-   Control planes stuck during deletion.
-
-### Alerting
-
-Configure alerts for critical scaling and health metrics:
-
-- **High error rates**: Alert when 4xx/5xx response rates exceed thresholds
-- **Control plane health**: Alert when degraded or stuck control planes exceed acceptable counts
-
-## Architecture overview
-
-**Spaces System Components:**
-
-- **`spaces-router`**: Entry point for all endpoints, dynamically builds routes to control plane API servers
-- **`spaces-controller`**: Reconciles Space-level resources, serves webhooks, works with `mxp-controller` for provisioning
-- **`spaces-api`**: API for managing groups, control planes, shared secrets, and telemetry objects (accessed only through spaces-router)
-- **`spaces-apollo`**: Hosts the Query API, connects to PostgreSQL database populated by `apollo-syncer` pods
-
-**Control Plane Components (per control plane):**
-- **`mxp-controller`**: Handles provisioning tasks, serves webhooks, installs UXP and `XGQL`
-- **`XGQL`**: GraphQL API powering console views
-- **`kube-state-metrics`**: Collects usage metrics for billing (updated by `mxp-controller` when CRDs change)
-- **`vector`**: Works with `kube-state-metrics` to send usage data to external storage for billing
-- **`apollo syncer`**: Syncs `etcd` data into PostgreSQL for the Query API
-
-### `up ctx` workflow
-
-![up ctx workflow diagram][up-ctx-workflow]
-
-### Access a control plane API server via kubectl
-
-![kubectl workflow diagram][kubectl]
-
-### Query API/Apollo
-
-![query API workflow diagram][query-api]
-
-## See also
-
-* [Upbound Spaces deployment requirements][deployment]
-* [Upbound `etcd` scaling resources][scaling]
-
-[up-ctx-workflow]: /img/up-ctx-workflow.png
-[kubectl]: /img/kubectl-workflow.png
-[query-api]: /img/query-api-workflow.png
-[spaces-workflow]: /img/up-basic-flow.png
-[rds]: https://aws.amazon.com/rds/postgresql/
-[gke-sql]: https://cloud.google.com/kubernetes-engine/docs/tutorials/stateful-workloads/postgresql
-[aks-sql]: https://learn.microsoft.com/en-us/azure/aks/deploy-postgresql-ha?tabs=azuredisk
-[deployment]: https://docs.upbound.io/spaces/howtos/self-hosted/deployment-reqs/
-[karpenter]: https://docs.aws.amazon.com/eks/latest/best-practices/karpenter.html
-[gke-autoscaling]: https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-autoscaler
-[aks-autoscaling]: https://learn.microsoft.com/en-us/azure/aks/cluster-autoscaler-overview
-[scaling]: https://docs.upbound.io/deploy/self-hosted-spaces/scaling-resources#scaling-etcd-storage
diff --git a/spaces_versioned_docs/version-v1.13/howtos/self-hosted/controllers.md
b/spaces_versioned_docs/version-v1.13/howtos/self-hosted/controllers.md
deleted file mode 100644
index 692740638..000000000
--- a/spaces_versioned_docs/version-v1.13/howtos/self-hosted/controllers.md
+++ /dev/null
@@ -1,389 +0,0 @@
----
-title: Controllers
-weight: 250
-description: A guide to how to wrap and deploy an Upbound controller into control planes on Upbound.
----
-
-:::important
-This feature is in private preview for select customers in Upbound Spaces. If you're interested in this feature, please [contact us](https://www.upbound.io/contact-us).
-:::
-
-Upbound's _Controllers_ feature lets you build and deploy control plane software from the Kubernetes ecosystem. With the _Controllers_ feature, you're not limited to just managing resource types defined by Crossplane. Now you can create resources from _CustomResourceDefinitions_ defined by other Kubernetes ecosystem tooling.
-
-This guide explains how to bundle and deploy control plane software from the Kubernetes ecosystem on a control plane in Upbound.
-
-## Benefits
-
-The Controllers feature provides the following benefits:
-
-* Deploy control plane software from the Kubernetes ecosystem.
-* Use your control plane's package manager to handle the lifecycle of the control plane software and define dependencies between packages.
-* Build powerful compositions that combine both Crossplane and Kubernetes _CustomResources_.
-
-## How it works
-
-A _Controller_ is a package type that bundles control plane software from the Kubernetes ecosystem. Examples of such software include:
-
-- Kubernetes policy engines
-- CI/CD tooling
-- Your own private custom controllers defined by your organization
-
-You build a _Controller_ package by wrapping a helm chart along with its requisite _CustomResourceDefinitions_. Your _Controller_ package gets pushed to an OCI registry, and from there you can apply it to a control plane like you would any other Crossplane package. Your control plane's package manager is responsible for managing the lifecycle of the software once applied.
-
-## Prerequisites
-
-Enable the Controllers feature in the Space you plan to run your control plane in:
-
-- Cloud Spaces: Not available yet
-- Connected Spaces: Space administrator must enable this feature
-- Disconnected Spaces: Space administrator must enable this feature
-
-Packaging a _Controller_ requires [up CLI][cli] `v0.39.0` or later.
-
-## Build a _Controller_ package
-
-_Controllers_ are a package type that get administered by your control plane's package manager.
-
-### Prepare the package
-
-To define a _Controller_, you need a Helm chart. This guide assumes the control plane software you want to build into a _Controller_ already has a Helm chart available.
-
-Start by making a working directory to assemble the necessary parts:
-
-```ini
-mkdir controller-package
-cd controller-package
-```
-
-Inside the working directory, pull the Helm chart:
-
-```shell
-export CHART_REPOSITORY=<chart-repository>
-export CHART_NAME=<chart-name>
-export CHART_VERSION=<chart-version>
-
-helm pull $CHART_NAME --repo $CHART_REPOSITORY --version $CHART_VERSION
-```
-
-Be sure to update the Helm chart repository, name, and version with your own.
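-
-For example, to pull the chart for Argo CD, the software used in the example later in this guide (a sketch assuming the upstream Argo Helm repository):
-
-```shell
-export CHART_REPOSITORY=https://argoproj.github.io/argo-helm
-export CHART_NAME=argo-cd
-export CHART_VERSION=7.8.8
-
-helm pull $CHART_NAME --repo $CHART_REPOSITORY --version $CHART_VERSION
-```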
-
-Move the Helm chart into its own folder:
-
-```ini
-mkdir helm
-mv $CHART_NAME-$CHART_VERSION.tgz helm/chart.tgz
-```
-
-Unpack the CRDs from the Helm chart into their own directory:
-
-```shell
-export RELEASE_NAME=<release-name>
-export RELEASE_NAMESPACE=<release-namespace>
-
-mkdir crds
-helm template $RELEASE_NAME helm/chart.tgz -n $RELEASE_NAMESPACE --include-crds | \
-  yq e 'select(.kind == "CustomResourceDefinition")' - | \
-  yq -s '("crds/" + .metadata.name + ".yaml")' -
-```
-Be sure to update the Helm release name and namespace with your own.
-
-:::info
-The instructions above assume your CRDs get deployed as part of your Helm chart. If they're deployed another way, you need to manually copy your CRDs instead.
-:::
-
-Create a `crossplane.yaml` with your controller metadata:
-
-```yaml
-cat <<EOF > crossplane.yaml
-apiVersion: meta.pkg.upbound.io/v1alpha1
-kind: Controller
-metadata:
-  annotations:
-    friendly-name.meta.crossplane.io: Controller <name>
-    meta.crossplane.io/description: |
-      A brief description of what the controller does.
-    meta.crossplane.io/license: Apache-2.0
-    meta.crossplane.io/maintainer: <maintainer>
-    meta.crossplane.io/readme: |
-      An explanation of your controller.
-    meta.crossplane.io/source: <source-repository>
-  name: <controller-name>
-spec:
-  packagingType: Helm
-  helm:
-    releaseName: <release-name>
-    releaseNamespace: <release-namespace>
-    # Value overrides for the helm release can be provided below.
-    # values:
-    #   foo: bar
-EOF
-```
-
-Your controller's file structure should look like this:
-
-```ini
-.
-├── crds
-│   ├── your-crd.yaml
-│   ├── second-crd.yaml
-│   └── another-crd.yaml
-├── crossplane.yaml
-└── helm
-    └── chart.tgz
-```
-
-### Package and push the _Controller_
-
-At the root of your controller's working directory, build the contents into an xpkg:
-
-```ini
-up xpkg build
-```
-
-This causes an xpkg to get saved to your current directory with a name like `controller-f7091386b4c0.xpkg`.
-
-Push the package to your desired OCI registry:
-
-```shell
-export UPBOUND_ACCOUNT=<your-upbound-account>
-export CONTROLLER_NAME=<controller-name>
-export CONTROLLER_VERSION=<controller-version>
-export XPKG_FILENAME=<xpkg-filename>
-
-up xpkg push xpkg.upbound.io/$UPBOUND_ACCOUNT/$CONTROLLER_NAME:$CONTROLLER_VERSION -f $XPKG_FILENAME
-```
-
-## Deploy a _Controller_ package
-
-:::important
-_Controllers_ are only installable on control planes running Crossplane `v1.19.0` or later.
-:::
-
-Set your kubecontext to the desired control plane in Upbound. Change the package path to the OCI registry you pushed it to. Then, deploy the _Controller_ directly:
-
-```shell
-export CONTROLLER_NAME=<controller-name>
-export CONTROLLER_VERSION=<controller-version>
-
-# The Controller package resource below is an illustrative sketch; verify the
-# API group and version against your Spaces release.
-cat <<EOF | kubectl apply -f -
-apiVersion: pkg.upbound.io/v1alpha1
-kind: Controller
-metadata:
-  name: $CONTROLLER_NAME
-spec:
-  package: xpkg.upbound.io/$UPBOUND_ACCOUNT/$CONTROLLER_NAME:$CONTROLLER_VERSION
-EOF
-```
-
-## Example: Package Argo CD as a _Controller_
-
-The rest of this guide walks through the preceding steps with [Argo CD](https://github.com/argoproj/argo-cd) as a concrete example.
-
-Create a `crossplane.yaml` with the Argo CD controller metadata:
-
-```yaml
-cat <<EOF > crossplane.yaml
-apiVersion: meta.pkg.upbound.io/v1alpha1
-kind: Controller
-metadata:
-  annotations:
-    friendly-name.meta.crossplane.io: Controller ArgoCD
-    meta.crossplane.io/description: |
-      The ArgoCD Controller enables continuous delivery and declarative configuration
-      management for Kubernetes applications using GitOps principles.
-    meta.crossplane.io/license: Apache-2.0
-    meta.crossplane.io/maintainer: Upbound Maintainers
-    meta.crossplane.io/readme: |
-      ArgoCD is a declarative GitOps continuous delivery tool for Kubernetes that
-      follows the GitOps methodology to manage infrastructure and application
-      configurations.
-    meta.crossplane.io/source: https://github.com/argoproj/argo-cd
-  name: argocd
-spec:
-  packagingType: Helm
-  helm:
-    releaseName: argo-cd
-    releaseNamespace: argo-system
-    # values:
-    #   foo: bar
-EOF
-```
-
-Your controller's file structure should look like this:
-
-```ini
-.
-├── crds
-│   ├── applications.argoproj.io.yaml
-│   ├── applicationsets.argoproj.io.yaml
-│   └── appprojects.argoproj.io.yaml
-├── crossplane.yaml
-└── helm
-    └── chart.tgz
-```
-
-### Package and push controller-argocd
-
-At the root of your controller's working directory, build the contents into an xpkg:
-
-```ini
-up xpkg build
-```
-
-This causes an xpkg to get saved to your current directory with a name like `argocd-f7091386b4c0.xpkg`.
-
-Push the package to your desired OCI registry:
-
-```shell
-export UPBOUND_ACCOUNT=<your-upbound-account>
-export CONTROLLER_NAME=controller-argocd
-export CONTROLLER_VERSION=v7.8.8
-export XPKG_FILENAME=<xpkg-filename>
-
-up xpkg push --create xpkg.upbound.io/$UPBOUND_ACCOUNT/$CONTROLLER_NAME:$CONTROLLER_VERSION -f $XPKG_FILENAME
-```
-
-### Deploy controller-argocd to a control plane
-
-Set your kubecontext to the desired control plane in Upbound. Change the package path to the OCI registry you pushed it to. Then, deploy the _Controller_ directly:
-
-```ini
-# Apply the Controller package (API group and version as assumed earlier).
-cat <<EOF | kubectl apply -f -
-apiVersion: pkg.upbound.io/v1alpha1
-kind: Controller
-metadata:
-  name: controller-argocd
-spec:
-  package: xpkg.upbound.io/$UPBOUND_ACCOUNT/controller-argocd:v7.8.8
-EOF
-```
-
-## Frequently asked questions
-
-Can I package any software or are there any prerequisites to be a Controller?
-
-We define a *Controller* as software that has at least one Custom Resource Definition (CRD) and a Kubernetes controller for that CRD. This is the minimum requirement to be a *Controller*. We have some checks to enforce this at packaging time.
-
- -
-How can I package my software as a Controller?
-
-Currently, we support Helm charts as the underlying package format for *Controllers*. As long as you have a Helm chart, you can package it as a *Controller*; if you don't have one, you can't deploy the software. We may extend this to support other packaging formats like Kustomize in the future.
-
- -
-Can I package Crossplane XRDs/Compositions as a Helm chart to deploy as a Controller?
-
-This is not recommended. For packaging Crossplane XRDs and Compositions, we recommend using the `Configuration` package format. A Helm chart containing only Crossplane XRDs/Compositions doesn't qualify as a *Controller*.
-
- -
-How can I override the Helm values when deploying a Controller? - -Overriding the Helm values is possible at two levels: -- During packaging time, in the package manifest file. -- At runtime, using a `ControllerRuntimeConfig` resource (similar to Crossplane `DeploymentRuntimeConfig`). - -
- -
-How can I configure the helm release name and namespace for the controller?
-
-Right now, it's not possible to configure this at runtime. The package author sets the release name and namespace during packaging, so they're hardcoded inside the package. Unlike a regular application deployed by a Helm chart, a *Controller* can only be deployed once in a given control plane, so relying on predefined release names and namespaces is generally safe. We may consider exposing these in `ControllerRuntimeConfig` later, but we'd like to keep this opinionated unless there are strong reasons to do so.
-
- -
-Can I deploy more than one instance of a Controller package? - -No, this is not possible. Remember, a *Controller* package introduces CRDs which are cluster-scoped objects. Just like one cannot deploy more than one instance of the same Crossplane Provider package today, it is not possible to deploy more than one instance of a *Controller*. - -
- -
-Do I need a specific Crossplane version to run Controllers? - -Yes, you need to use Crossplane v1.19.0 or later to use *Controllers*. This is because of the changes in the Crossplane codebase to support third-party package formats in dependencies. - -Spaces `v1.12.0` supports Crossplane `v1.19` in the *Rapid* release channel. - -
- -
-Can I deploy Controllers outside of an Upbound control plane? With UXP? - -No, *Controllers* are a proprietary package format and are only available for control planes running in Spaces hosting environments in Upbound. - -
-
-
-[cli]: /manuals/uxp/overview
-
diff --git a/spaces_versioned_docs/version-v1.13/howtos/self-hosted/ctp-audit-logs.md b/spaces_versioned_docs/version-v1.13/howtos/self-hosted/ctp-audit-logs.md
deleted file mode 100644
index 52f52c776..000000000
--- a/spaces_versioned_docs/version-v1.13/howtos/self-hosted/ctp-audit-logs.md
+++ /dev/null
@@ -1,549 +0,0 @@
----
-title: Control plane audit logging
----
-
-This guide explains how to enable and configure audit logging for control planes
-in Self-Hosted Upbound Spaces.
-
-Starting in Spaces `v1.14.0`, each control plane contains an API server that
-supports audit log collection. You can use audit logging to track creation,
-updates, and deletions of Crossplane resources. Control plane audit logs
-use observability features to collect audit logs with `SharedTelemetryConfig` and
-send logs to an OpenTelemetry (`OTEL`) collector.
-
-:::info API Version Information
-This guide applies to **Spaces v1.14.0 and later** (Self-Hosted only). Audit logging is not available in earlier versions.
-
-For API specifications on observability resources, see the [Spaces API Reference - v1.14](../../../../../1.14/reference/apis/spaces-api/).
-:::
-
-## Prerequisites
-
-Before you begin, make sure you have:
-
-* Spaces `v1.14.0` or greater
-* Admin access to your Spaces host cluster
-* `kubectl` configured to access the host cluster
-* `helm` installed
-* `yq` installed
-* `up` CLI installed and logged in to your organization
-
-## Enable observability
-
-Observability graduated to General Availability in `v1.14.0` but is disabled by
-default.
-
-### Before `v1.14`
-
-To enable the GA Observability feature, upgrade your Spaces installation to `v1.14.0`
-or later and update your installation setting to the new flag:
-
-```diff
-helm upgrade spaces upbound/spaces -n upbound-system \
-- --set "features.alpha.observability.enabled=true"
-+ --set "observability.enabled=true"
-```
-
-### After `v1.14`
-
-To enable the GA Observability feature for `v1.14.0` and later, pass the feature
-flag:
-
-```sh
-helm upgrade spaces upbound/spaces -n upbound-system \
-  --set "observability.enabled=true"
-```
-
-To confirm Observability is enabled, run the `helm get values` command:
-
-```shell
-helm get values --namespace upbound-system spaces | yq .observability
-```
-
-Your output should return:
-
-```shell-noCopy
-enabled: true
-```
-
-## Install an observability backend
-
-:::note
-If you already have an observability backend in your environment, skip to the
-next section.
-:::
-
-For this guide, you'll use Grafana's `docker-otel-lgtm` bundle to validate audit log
-generation. For production environments, configure a dedicated observability
-backend like Datadog, Splunk, or an enterprise-grade Grafana stack.
-
-First, make sure your `kubectl` context points to your Spaces host cluster:
-
-```shell
-kubectl config current-context
-```
-
-The output should return your cluster name.
-
-Next, install `docker-otel-lgtm` as a deployment using port-forwarding to
-connect to Grafana. 
Create a manifest file and paste the
-following configuration:
-
-```yaml title="otel-lgtm.yaml"
-apiVersion: v1
-kind: Namespace
-metadata:
-  name: observability
----
-apiVersion: v1
-kind: Service
-metadata:
-  labels:
-    app: otel-lgtm
-  name: otel-lgtm
-  namespace: observability
-spec:
-  ports:
-  - name: grpc
-    port: 4317
-    protocol: TCP
-    targetPort: 4317
-  - name: http
-    port: 4318
-    protocol: TCP
-    targetPort: 4318
-  - name: grafana
-    port: 3000
-    protocol: TCP
-    targetPort: 3000
-  selector:
-    app: otel-lgtm
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: otel-lgtm
-  labels:
-    app: otel-lgtm
-  namespace: observability
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: otel-lgtm
-  template:
-    metadata:
-      labels:
-        app: otel-lgtm
-    spec:
-      containers:
-      - name: otel-lgtm
-        image: grafana/otel-lgtm
-        ports:
-        - containerPort: 4317
-        - containerPort: 4318
-        - containerPort: 3000
-```
-
-Next, apply the manifest:
-
-```shell
-kubectl apply --filename otel-lgtm.yaml
-```
-
-Your output should return the resources:
-
-```shell
-namespace/observability created
-service/otel-lgtm created
-deployment.apps/otel-lgtm created
-```
-
-To verify the resources deployed, use `kubectl get` and confirm they report an
-`ACTIVE` or `READY` status.
-
-Next, forward the Grafana port:
-
-```shell
-kubectl port-forward svc/otel-lgtm --namespace observability 3000:3000
-```
-
-Now you can access the Grafana UI at http://localhost:3000.
-
-## Create an audit-enabled control plane
-
-To enable audit logging for a control plane, you need to label it so the
-`SharedTelemetryConfig` can identify and apply audit settings. This section
-creates a new control plane with the `audit-enabled: "true"` label. The
-`SharedTelemetryConfig` (created in the next section) finds control planes with
-this label and enables audit logging on them.
-
-Create a new manifest file and paste the configuration below:
-
-```yaml title="ctp-audit.yaml" -apiVersion: v1 -kind: Namespace -metadata: - name: audit-test ---- -apiVersion: spaces.upbound.io/v1beta1 -kind: ControlPlane -metadata: - labels: - audit-enabled: "true" - name: ctp1 - namespace: audit-test -spec: - writeConnectionSecretToRef: - name: kubeconfig-ctp1 - namespace: audit-test -``` -
-
-The `metadata.labels` section contains the `audit-enabled` setting.
-
-Apply the manifest:
-
-```shell
-kubectl apply --filename ctp-audit.yaml
-```
-
-Confirm your control plane reaches the `READY` status:
-
-```shell
-kubectl get --filename ctp-audit.yaml
-```
-
-## Create a `SharedTelemetryConfig`
-
-The `SharedTelemetryConfig` applies to all control plane objects in a namespace,
-enabling audit logging and routing logs to your `OTEL` endpoint.
-
-Create a `SharedTelemetryConfig` manifest file and paste the configuration
-below:
-
-```yaml title="sharedtelemetryconfig.yaml" -apiVersion: observability.spaces.upbound.io/v1alpha1 -kind: SharedTelemetryConfig -metadata: - name: apiserver-audit - namespace: audit-test -spec: - apiServer: - audit: - enabled: true - exporters: - otlphttp: - endpoint: http://otel-lgtm.observability:4318 - exportPipeline: - logs: [otlphttp] - controlPlaneSelector: - labelSelectors: - - matchLabels: - audit-enabled: "true" -``` -
-
-This configuration:
-
-* Sets `apiServer.audit.enabled` to `true`
-* Configures the `otlphttp` exporter to point to the `docker-otel-lgtm` service
-* Uses `controlPlaneSelector` to match any control plane in the namespace with the `audit-enabled` label set to `true`
-
-:::note
-You can configure the `SharedTelemetryConfig` to select control planes in
-several ways. For more information on control plane selection, see the [control
-plane selection][ctp-selection] documentation.
-:::
-
-Apply the `SharedTelemetryConfig`:
-
-```shell
-kubectl apply --filename sharedtelemetryconfig.yaml
-```
-
-Confirm the configuration selected the control plane:
-
-```shell
-kubectl get --filename sharedtelemetryconfig.yaml
-```
-
-The output should return `SELECTED` as `1` and `VALIDATED` as `TRUE`.
-
-For more detailed status information, use `kubectl get`:
-
-```shell
-kubectl get --filename sharedtelemetryconfig.yaml --output yaml | yq .status
-```
-
-## Generate and monitor audit events
-
-You enabled telemetry on your new control plane and can now generate events to
-test the audit logging. This guide uses the `nop-provider` to simulate resource
-operations.
-
-Switch your `up` context to the new control plane:
-
-```shell
-up ctx <organization>/<space>/<group>/<control-plane>
-```
-
-Create a new Provider manifest:
-
-```yaml title="provider-nop.yaml"
-apiVersion: pkg.crossplane.io/v1
-kind: Provider
-metadata:
-  name: crossplane-contrib-provider-nop
-spec:
-  package: xpkg.upbound.io/crossplane-contrib/provider-nop:v0.4.0
-```
-
-Apply the provider manifest:
-
-```shell
-kubectl apply --filename provider-nop.yaml
-```
-
-Verify the provider installed and its `HEALTHY` status is `True`.
-
-Apply an example resource to kick off event generation:
-
-```shell
-kubectl apply --filename https://raw.githubusercontent.com/crossplane-contrib/provider-nop/refs/heads/main/examples/nopresource.yaml
-```
-
-In your Grafana dashboard, navigate to **Drilldown** > **Logs** under the
-Grafana menu.
-
-Filter for `controlplane-audit` log messages.
-
-Create a query to find `create` events on `nopresources` by filtering:
-
-* The `verb` field for `create` events
-* The `objectRef_resource` field to match the Kind `nopresources`
-
-Review the audit log results. The log stream displays:
-
-* The client applying the create operation
-* The resource kind
-* Client details
-* The response code
-
-Expand the example below for an audit log entry:
-
- Audit log entry - -```json -{ - "level": "Metadata", - "auditID": "51bbe609-14ad-4874-be78-1289c10d506a", - "stage": "ResponseComplete", - "requestURI": "/apis/nop.crossplane.io/v1alpha1/nopresources?fieldManager=kubectl-client-side-apply&fieldValidation=Strict", - "verb": "create", - "user": { - "username": "kubernetes-admin", - "groups": ["system:masters", "system:authenticated"] - }, - "impersonatedUser": { - "username": "upbound:spaces:host:masterclient", - "groups": [ - "system:authenticated", - "upbound:controlplane:admin", - "upbound:spaces:host:system:masters" - ] - }, - "sourceIPs": ["10.244.0.135", "127.0.0.1"], - "userAgent": "kubectl/v1.32.2 (darwin/arm64) kubernetes/67a30c0", - "objectRef": { - "resource": "nopresources", - "name": "example", - "apiGroup": "nop.crossplane.io", - "apiVersion": "v1alpha1" - }, - "responseStatus": { "metadata": {}, "code": 201 }, - "requestReceivedTimestamp": "2025-09-19T23:03:24.540067Z", - "stageTimestamp": "2025-09-19T23:03:24.557583Z", - "annotations": { - "authorization.k8s.io/decision": "allow", - "authorization.k8s.io/reason": "RBAC: allowed by ClusterRoleBinding \"controlplane-admin\" of ClusterRole \"controlplane-admin\" to Group \"upbound:controlplane:admin\"" - } - } -``` -
-
-## Customize the audit policy
-
-Spaces `v1.14.0` includes a default audit policy. You can customize this policy
-by creating a configuration file and passing the values to
-`observability.controlPlanes.apiServer.auditPolicy` in the helm values file.
-
-An example custom audit policy:
-
-```yaml
-observability:
-  controlPlanes:
-    apiServer:
-      auditPolicy: |
-        apiVersion: audit.k8s.io/v1
-        kind: Policy
-        rules:
-          # ============================================================================
-          # RULE 1: Exclude health check and version endpoints
-          # ============================================================================
-          - level: None
-            nonResourceURLs:
-              - '/healthz*'
-              - '/readyz*'
-              - /version
-          # ============================================================================
-          # RULE 2: ConfigMaps - Write operations only
-          # ============================================================================
-          - level: Metadata
-            resources:
-              - group: ""
-                resources:
-                  - configmaps
-            verbs:
-              - create
-              - update
-              - patch
-              - delete
-            omitStages:
-              - RequestReceived
-              - ResponseStarted
-          # ============================================================================
-          # RULE 3: Secrets - ALL operations
-          # ============================================================================
-          - level: Metadata
-            resources:
-              - group: ""
-                resources:
-                  - secrets
-            verbs:
-              - get
-              - list
-              - watch
-              - create
-              - update
-              - patch
-              - delete
-            omitStages:
-              - RequestReceived
-              - ResponseStarted
-          # ============================================================================
-          # RULE 4: Global exclusion of read-only operations
-          # ============================================================================
-          - level: None
-            verbs:
-              - get
-              - list
-              - watch
-          # ==========================================================================
-          # RULE 5: Exclude standard Kubernetes resources from write operation logging
-          # ==========================================================================
-          - level: None
-            resources:
-              - group: ""
-              - group: "apps"
-              - group: "networking.k8s.io"
-              - group: "policy"
-              - group: "rbac.authorization.k8s.io"
-              - group: "storage.k8s.io"
-              - group: "batch"
-              - group: "autoscaling"
-              - group: "metrics.k8s.io"
-              - group: "node.k8s.io"
-              - group: "scheduling.k8s.io"
-              - group: "coordination.k8s.io"
-              - group: "discovery.k8s.io"
-              - group: "events.k8s.io"
-              - group: "flowcontrol.apiserver.k8s.io"
-              - group: "internal.apiserver.k8s.io"
-              - group: "authentication.k8s.io"
-              - group: "authorization.k8s.io"
-              - group: "admissionregistration.k8s.io"
-            verbs:
-              - create
-              - update
-              - patch
-              - delete
-          # ============================================================================
-          # RULE 6: Catch-all for ALL custom resources and any missed resources
-          # ============================================================================
-          - level: Metadata
-            verbs:
-              - create
-              - update
-              - patch
-              - delete
-            omitStages:
-              - RequestReceived
-              - ResponseStarted
-          # ============================================================================
-          # RULE 7: Final catch-all - exclude everything else
-          # ============================================================================
-          - level: None
-            omitStages:
-              - RequestReceived
-              - ResponseStarted
-```
-You can apply this policy during Spaces installation or upgrade using the helm values file.
-
-Audit policies use rules evaluated in order from top to bottom where the first
-matching rule applies. 
Control plane audit policies follow Kubernetes conventions and use the
-following logging levels:
-
-* **None** - Don't log events matching this rule
-* **Metadata** - Log request metadata (user, timestamp, resource, verb) but not request or response bodies
-* **Request** - Log metadata and request body but not response body
-* **RequestResponse** - Log metadata, request body, and response body
-
-For more information, review the Kubernetes [Auditing] documentation.
-
-## Disable audit logging
-
-You can disable audit logging on a control plane by removing it from the
-`SharedTelemetryConfig` selector or by deleting the `SharedTelemetryConfig`.
-
-### Disable for specific control planes
-
-Remove the `audit-enabled` label from control planes that should stop sending audit logs:
-
-```bash
-kubectl label controlplane <control-plane-name> --namespace <namespace> audit-enabled-
-```
-
-The `SharedTelemetryConfig` no longer selects this control plane, and audit log collection stops.
-
-### Disable for all control planes
-
-Delete the `SharedTelemetryConfig` to stop audit logging for all control planes it manages:
-
-```bash
-kubectl delete sharedtelemetryconfig <name> --namespace <namespace>
-```
-
-[ctp-selection]: /spaces/howtos/observability/#control-plane-selection
-[Auditing]: https://kubernetes.io/docs/tasks/debug/debug-cluster/audit/
diff --git a/spaces_versioned_docs/version-v1.13/howtos/self-hosted/declarative-ctps.md b/spaces_versioned_docs/version-v1.13/howtos/self-hosted/declarative-ctps.md
deleted file mode 100644
index 2c3e5331b..000000000
--- a/spaces_versioned_docs/version-v1.13/howtos/self-hosted/declarative-ctps.md
+++ /dev/null
@@ -1,110 +0,0 @@
----
-title: Declaratively create control planes
-sidebar_position: 99
-description: A tutorial to configure a Space with Argo to declaratively create and
-  manage control planes
----
-
-In this tutorial, you learn how to configure [Argo CD][argo-cd] to communicate with a self-hosted Space. This flow allows you to declaratively create and manage control planes from Git. Argo CD is a continuous delivery tool for Kubernetes that you can use to drive GitOps flows for your control plane infrastructure.
-
-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments.
-
-For API specifications on ControlPlane resources and their declarative creation, see the [Spaces API Reference](../../../../../reference/apis/spaces-api/).
-:::
-
-## Prerequisites
-
-To complete this tutorial, you need the following:
-
-- Have already deployed an Upbound Space.
-- Have already deployed an instance of Argo CD on a Kubernetes cluster.
-
-## Connect your Space to Argo CD
-
-Fetch the kubeconfig for the Space cluster, the Kubernetes cluster where you installed the Upbound Spaces software. You must add the Space cluster as a context to Argo.
-
-```ini
-export SPACES_CLUSTER_SERVER="https://url"
-export SPACES_CLUSTER_NAME="cluster"
-```
-
-Switch contexts to the Kubernetes cluster where you've installed Argo. Create a secret on the Argo cluster whose data contains the connection details of the Space cluster.
-
-:::important
-Make sure the following commands are executed against your **Argo** cluster, not your Space cluster.
-:::
-
-Run the following command in a terminal:
-
-```yaml
-# Example Argo CD cluster secret (a sketch): adjust the namespace and the
-# config stanza for how Argo CD authenticates to your Space cluster.
-cat <<EOF | kubectl apply -f -
-apiVersion: v1
-kind: Secret
-metadata:
-  name: ${SPACES_CLUSTER_NAME}
-  namespace: argocd
-  labels:
-    argocd.argoproj.io/secret-type: cluster
-type: Opaque
-stringData:
-  name: ${SPACES_CLUSTER_NAME}
-  server: ${SPACES_CLUSTER_SERVER}
-  config: |
-    {
-      "tlsClientConfig": {
-        "insecure": false,
-        "caData": "<base64-encoded-ca-certificate>"
-      }
-    }
-EOF
-```
-
-When you install a Crossplane provider on a control plane, memory gets consumed 
-
-:::important
-Be careful not to conflate `managed resource` with `custom resource definition`.
-The former is an "instance" of an external resource in Crossplane, while the
-latter defines the API schema of that resource.
-:::
-
-It's estimated that each custom resource definition consumes ~3 MB of memory.
-The calculation is:
-
-```bash
-number_of_managed_resources_defined_in_provider x 3 MB = memory_required
-```
-
-For example, if you plan to use [provider-aws-ec2][provider-aws-ec2], [provider-aws-s3][provider-aws-s3], and [provider-aws-iam][provider-aws-iam], the resulting calculation is:
-
-```bash
-provider-aws-ec2: 98 x 3 MB = 294 MB
-provider-aws-s3:  23 x 3 MB = 69 MB
-provider-aws-iam: 22 x 3 MB = 66 MB
----
-total memory: 429 MB
-```
-
-In this scenario, you should budget ~430 MB of memory for provider usage on this control plane.
-
-:::tip
-Do this calculation for each provider you plan to install on your control plane.
-Then do this calculation for each control plane you plan to run in your Space.
-:::
-
-#### Total memory usage
-
-Add the memory usage from the previous sections. Given the preceding examples,
-they result in a recommendation to budget ~1 GB memory for each control plane
-you plan to run in the Space.
-
-:::important
-
-The 1 GB recommendation is an example.
-You should input your own provider requirements to arrive at a final number for
-your own deployment.
-
-:::
-
-### CPU considerations
-
-#### Managed resource CPU usage
-
-The number of managed resources under management by a control plane is the largest contributing factor for CPU usage in a Space. CPU usage scales linearly according to the number of managed resources under management by your control plane. In Upbound's testing, CPU usage requirements _do_ vary from provider to provider. Using the Upbound Official Provider families as a baseline:
-
-| Provider | MR create operation (CPU core seconds) | MR update or reconciliation operation (CPU core seconds) |
-| ---- | ---- | ---- |
-| provider-family-aws | 10 | 2 to 3 |
-| provider-family-gcp | 7 | 1.5 |
-| provider-family-azure | 7 to 10 | 1.5 to 3 |
-
-When resources are in a non-ready state, Crossplane providers reconcile often (as fast as every 15 seconds). Once a resource reaches `READY`, each Crossplane provider defaults to a 10 minute poll interval. Given this, a 16-core machine has `16 x 10 x 60 = 9600` CPU core seconds available in each 10-minute poll window. Interpreting this table:
-
-- A single control plane that needs to create 100 AWS MRs concurrently would consume 1000 CPU core seconds, or about 1.7 cores over that window.
-- A single control plane that continuously reconciles 100 AWS MRs once they've reached a `READY` state would consume 300 CPU core seconds, or about half a core.
-
-Since `provider-family-aws` has the highest recorded numbers for CPU time required, you can use that as an upper limit in your calculations.
-
-Using these calculations and extrapolating values, given a 16 core machine, it's recommended you don't exceed a single control plane managing 1000 MRs. Suppose you plan to run 10 control planes, each managing 1000 MRs.
-You want to make sure your node pool has capacity for 160 cores. If you are using a machine type that has 16 cores per machine, that would mean having a node pool of size 10. If you are using a machine type that has 32 cores per machine, that would mean having a node pool of size 5.
-
-#### Cloud API latency
-
-You often use Crossplane providers to talk to external cloud APIs. Those external cloud APIs often have global API rate limits (examples: [Azure limits][azure-limits], [AWS EC2 limits][aws-ec2-limits]).
-
-Crossplane providers built on [Upjet][upjet] (such as Upbound Official Provider families) use Terraform under the covers. They expose some knobs (such as `--max-reconcile-rate`) you can use to tweak reconciliation rates.
-
-### Resource buffers
-
-The guidance in the preceding sections explains how to calculate CPU and memory usage requirements for:
-
-- a set of control planes in a Space
-- tuned to the number of providers you plan to use
-- according to the number of managed resource instances you plan to have managed by your control planes
-
-Upbound recommends budgeting an extra buffer of 20% on top of your resource capacity calculations. The numbers shared in the preceding sections don't account for peaks or surges because they're based on average measurements; the buffer accounts for these.
-
-## Deploying more than one Space
-
-You can deploy more than one Space; you just need a 1:1 mapping of Spaces to Kubernetes clusters. A Space is by nature constrained to a single Kubernetes cluster, which is a regional entity. Offering control planes in multiple cloud environments, or in multiple public clouds entirely, is a common justification for deploying more than one Space.
-
-## Cert-manager
-
-A Spaces deployment uses the [Certificate Custom Resource] from cert-manager to
-provision certificates within the Space. This establishes a clean API boundary
-between what your platform may need and the Certificate requirements of a
-Space.
-
-If you want more control over the issuing Certificate Authority for your
-deployment, or over the deployment of cert-manager itself, this section is for
-you.
-
-### Deploying
-
-An Upbound Space deployment doesn't have any special requirements for the
-cert-manager deployment itself. The only expectation is that cert-manager and
-the corresponding Custom Resources exist in the cluster.
-
-You are free to install cert-manager in the cluster in any way that makes
-sense for your organization. You can find some [installation ideas] in the
-cert-manager docs.
-
-### Issuers
-
-A default Upbound Space install includes a [ClusterIssuer]. This `ClusterIssuer`
-is a `selfSigned` issuer that other certificates are minted from. You have a
-couple of options for changing the default deployment of the Issuer:
-
-1. Changing the issuer name.
-2. Providing your own ClusterIssuer.
-
-#### Changing the issuer name
-
-The `ClusterIssuer` name is controlled by the `certificates.space.clusterIssuer`
-Helm property. You can adjust this during installation by providing the
-following parameter (assuming your new name is 'SpaceClusterIssuer'):
-
-```shell
---set "certificates.space.clusterIssuer=SpaceClusterIssuer"
-```
-
-#### Providing your own ClusterIssuer
-
-To provide your own `ClusterIssuer`, you need to first set up your own
-`ClusterIssuer` in the cluster.
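-For example, a minimal CA-based `ClusterIssuer` is sketched below. It assumes
-you've already created a `my-ca-key-pair` Secret (a hypothetical name) holding
-your CA certificate and key in the namespace cert-manager uses for
-cluster-scoped resources:
-
-```yaml
-apiVersion: cert-manager.io/v1
-kind: ClusterIssuer
-metadata:
-  name: spaces-ca-issuer
-spec:
-  ca:
-    # Secret containing the tls.crt and tls.key of your CA
-    secretName: my-ca-key-pair
-```
-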
The cert-manager docs have a variety of options -for providing your own. See the [Issuer Configuration] docs for more details. - -Once you have your own `ClusterIssuer` set up in the cluster, you need to turn -off the deployment of the `ClusterIssuer` included in the Spaces deployment. -To do that, provide the following parameter during installation: -```shell ---set "certificates.provision=false" -``` - -###### Considerations -If your `ClusterIssuer` has a name that's different from the default name that -the Spaces installation expects ('spaces-selfsigned'), you need to also specify -your `ClusterIssuer` name during install using: -```shell ---set "certificates.space.clusterIssuer=" -``` - -## Ingress - -To route requests from an external client (kubectl, ArgoCD, etc) to a -control plane, a Spaces deployment includes a default [Ingress] manifest. In -order to ease getting started scenarios, the current `Ingress` includes -configurations (properties and annotations) that assume that you installed the -commonly used [ingress-nginx ingress controller] in the cluster. This section -walks you through using a different `Ingress`, if that's something that your -organization needs. - -### Default manifest - -An example of what the current `Ingress` manifest included in a Spaces install -is below: - -```yaml -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: mxe-router-ingress - namespace: upbound-system - annotations: - nginx.ingress.kubernetes.io/use-regex: "true" - nginx.ingress.kubernetes.io/ssl-redirect: "false" - nginx.ingress.kubernetes.io/proxy-send-timeout: "3600" - nginx.ingress.kubernetes.io/proxy-read-timeout: "3600" - nginx.ingress.kubernetes.io/proxy-request-buffering: "off" - nginx.ingress.kubernetes.io/proxy-body-size: "0" - nginx.ingress.kubernetes.io/proxy-http-version: "1.1" - nginx.ingress.kubernetes.io/backend-protocol: "HTTPS" - nginx.ingress.kubernetes.io/proxy-ssl-verify: "on" - nginx.ingress.kubernetes.io/proxy-ssl-secret: "upbound-system/mxp-hostcluster-certs" - nginx.ingress.kubernetes.io/proxy-ssl-name: spaces-router - nginx.ingress.kubernetes.io/configuration-snippet: | - more_set_headers "X-Request-Id: $req_id"; - more_set_headers "Request-Id: $req_id"; - more_set_headers "Audit-Id: $req_id"; -spec: - ingressClassName: nginx - tls: - - hosts: - - {{ .Values.ingress.host }} - secretName: mxe-router-tls - rules: - - host: {{ .Values.ingress.host }} - http: - paths: - - path: "/v1/controlPlanes" - pathType: Prefix - backend: - service: - name: spaces-router - port: - name: http -``` - -The notable pieces are: -1. Namespace - - - -This property represents the namespace that the spaces-router is deployed to. -In most cases this is `upbound-system`. - - - -2. proxy-ssl-* annotations - -The spaces-router pod terminates TLS using certificates located in the -mxp-hostcluster-certs `Secret` located in the `upbound-system` `Namespace`. - -3. proxy-* annotations - -Requests coming into the ingress-controller can be variable depending on what -the client is requesting. For example, `kubectl get crds` has different -requirements for the connection compared to a 'watch', for example -`kubectl get pods -w`. The ingress-controller is configured to be able to -account for either scenario. - - -4. configuration-snippets - -These commands add headers to the incoming requests that help with telemetry -and diagnosing problems within the system. - -5. 
Rules
-
-Requests coming into the control planes use a `/v1/controlPlanes` prefix and
-need to be routed to the spaces-router.
-
-### Using a different ingress manifest
-
-Operators can choose to use an `Ingress` manifest and ingress controller that
-makes the most sense for their organization. To turn off deploying the default
-`Ingress` manifest, provide the following parameter during installation:
-
-```shell
---set "ingress.provision=false"
-```
-
-#### Considerations
-
-Operators need to take the following considerations into account when
-disabling the default `Ingress` deployment.
-
-1. Ensure the custom `Ingress` manifest is placed in the same namespace as the
-`spaces-router` pod.
-2. Ensure that the ingress is configured to use the `spaces-router` service as a secure
-backend and that it uses the `mxp-hostcluster-certs` secret.
-3. Ensure that the ingress is configured to handle long-lived connections.
-4. Ensure that the routing rule sends requests prefixed with
-`/v1/controlPlanes` to the `spaces-router` using the `http` port.
-
-[cert-manager]: https://cert-manager.io/
-[Certificate Custom Resource]: https://cert-manager.io/docs/usage/certificate/
-[ClusterIssuer]: https://cert-manager.io/docs/concepts/issuer/
-[ingress-nginx ingress controller]: https://kubernetes.github.io/ingress-nginx/deploy/
-[installation ideas]: https://cert-manager.io/docs/installation/
-[Ingress]: https://kubernetes.io/docs/concepts/services-networking/ingress/
-[Issuer Configuration]: https://cert-manager.io/docs/configuration/
-[official-provider-families]: /manuals/packages/providers/provider-families
-[aws-eks]: https://aws.amazon.com/eks/
-[google-cloud-gke]: https://cloud.google.com/kubernetes-engine
-[microsoft-aks]: https://azure.microsoft.com/en-us/products/kubernetes-service
-[upbound-account]: https://www.upbound.io/register/?utm_source=docs&utm_medium=cta&utm_campaign=docs_spaces
-[provider-aws-ec2]: https://marketplace.upbound.io/providers/upbound/provider-aws-ec2
-[provider-aws-s3]: https://marketplace.upbound.io/providers/upbound/provider-aws-s3
-[provider-aws-iam]: https://marketplace.upbound.io/providers/upbound/provider-aws-iam
-[azure-limits]: https://learn.microsoft.com/en-us/azure/azure-resource-manager/management/request-limits-and-throttling
-[aws-ec2-limits]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/throttling.html#throttling-limits-rate-based
-[upjet]: https://github.com/upbound/upjet
diff --git a/spaces_versioned_docs/version-v1.13/howtos/self-hosted/dr.md b/spaces_versioned_docs/version-v1.13/howtos/self-hosted/dr.md
deleted file mode 100644
index 67ecbfecf..000000000
--- a/spaces_versioned_docs/version-v1.13/howtos/self-hosted/dr.md
+++ /dev/null
@@ -1,412 +0,0 @@
----
-title: Disaster Recovery
-sidebar_position: 13
-description: Configure Space-wide backups for disaster recovery.
----
-
-:::info API Version Information
-This guide applies to **Spaces v1.9.0 and later** (Self-Hosted only). Space Backups is an alpha feature that Spaces enables by default starting in v1.14.0.
-
-- **v1.9.0-v1.13.x**: Alpha feature (requires manual enablement)
-- **v1.14.0+**: Enabled by default
-
-For version-specific backup resources, see the [Spaces API documentation][spaces-api-documentation]. For control-plane backups, see [Backup and Restore](../backup-and-restore.md).
-:::
-
-:::important
-For Connected and Disconnected Spaces, this feature requires Spaces `v1.9.0` and, starting with `v1.14.0`, Spaces enables it by default.
-
-To enable it on versions earlier than `v1.14.0`, set `features.alpha.spaceBackup.enabled=true` when you install Spaces.
-
-```bash
-up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
-  ...
-  --set "features.alpha.spaceBackup.enabled=true"
-```
-:::
-
-Upbound's _Space Backups_ is a built-in Space-wide backup and restore feature. This guide explains how to configure Space Backups and how to restore from one of them in case of disaster recovery.
-
-This feature is meant for Space administrators. Group or Control Plane users can leverage [Shared Backups][shared-backups] to back up and restore their ControlPlanes.
-
-## Benefits
-
-The Space Backups feature provides the following benefits:
-
-* Automatic backups for all resources in a Space and all resources in control planes, without any operational overhead.
-* Backup schedules.
-* Selectors to specify resources to back up.
-
-## Prerequisites
-
-Enable the Space Backups feature in the Space:
-
-- Cloud Spaces: Not accessible to users.
-- Connected Spaces: Space administrator must enable this feature.
-- Disconnected Spaces: Space administrator must enable this feature.
-
-## Configure a Space Backup Config
-
-[SpaceBackupConfig][spacebackupconfig] is a cluster-scoped resource. This resource configures the storage details and provider. Whenever a backup executes (either scheduled or manually initiated), it references a SpaceBackupConfig to tell it where to store the snapshot.
-
-### Backup config provider
-
-The `spec.objectStorage.provider` and `spec.objectStorage.config` fields configure:
-
-* The object storage provider
-* The path to the provider
-* The credentials needed to communicate with the provider
-
-You can only set one provider. Upbound currently supports AWS, Azure, and GCP as providers.
-
-`spec.objectStorage.config` is a freeform map of configuration options for the object storage provider. See [Thanos object storage][thanos-object-storage] for more information on the formats for each supported cloud provider. `spec.bucket` and `spec.provider` override the required values in the config.
-
-#### AWS as a storage provider
-
-This example demonstrates how to use AWS as a storage provider for your backups:
-
-```yaml
-apiVersion: admin.spaces.upbound.io/v1alpha1
-kind: SpaceBackupConfig
-metadata:
-  name: default
-spec:
-  objectStorage:
-    provider: AWS
-    bucket: spaces-backup-bucket
-    config:
-      endpoint: s3.eu-west-2.amazonaws.com
-      region: eu-west-2
-    credentials:
-      source: Secret
-      secretRef:
-        name: bucket-creds
-        namespace: upbound-system
-        key: creds
-```
-
-This example assumes you've already created an S3 bucket called
-`spaces-backup-bucket` in the `eu-west-2` AWS region. To access the bucket,
-define the account credentials as a Secret in the specified Namespace
-(`upbound-system` in this example).
-
-#### Azure as a storage provider
-
-This example demonstrates how to use Azure as a storage provider for your backups:
-
-```yaml
-apiVersion: admin.spaces.upbound.io/v1alpha1
-kind: SpaceBackupConfig
-metadata:
-  name: default
-spec:
-  objectStorage:
-    provider: Azure
-    bucket: upbound-backups
-    config:
-      storage_account: upbackupstore
-      container: upbound-backups
-      endpoint: blob.core.windows.net
-    credentials:
-      source: Secret
-      secretRef:
-        name: bucket-creds
-        namespace: upbound-system
-        key: creds
-```
-
-This example assumes you've already created an Azure storage account called
-`upbackupstore` and blob `upbound-backups`.
-To access the blob, define the account credentials as a Secret in the
-specified Namespace (`upbound-system` in this example).
-
-#### GCP as a storage provider
-
-This example demonstrates how to use Google Cloud Storage as a storage provider for your backups:
-
-```yaml
-apiVersion: admin.spaces.upbound.io/v1alpha1
-kind: SpaceBackupConfig
-metadata:
-  name: default
-spec:
-  objectStorage:
-    provider: GCP
-    bucket: spaces-backup-bucket
-    credentials:
-      source: Secret
-      secretRef:
-        name: bucket-creds
-        namespace: upbound-system
-        key: creds
-```
-
-This example assumes you've already created a Cloud Storage bucket called
-`spaces-backup-bucket` and a service account with access to this bucket. Define
-the key file as a Secret in the specified Namespace (`upbound-system` in this
-example).
-
-## Configure a Space Backup Schedule
-
-[SpaceBackupSchedule][spacebackupschedule] is a cluster-scoped resource. This resource defines a backup schedule for the whole Space.
-
-Below is an example of a Space Backup Schedule that runs every day. It backs up all groups with the `environment: production` label and, within those groups, all control planes with the `backup: please` label.
-
-```yaml
-apiVersion: admin.spaces.upbound.io/v1alpha1
-kind: SpaceBackupSchedule
-metadata:
-  name: daily-schedule
-spec:
-  schedule: "@daily"
-  configRef:
-    kind: SpaceBackupConfig
-    name: default
-  match:
-    groups:
-      labelSelectors:
-      - matchLabels:
-          environment: production
-    controlPlanes:
-      labelSelectors:
-      - matchLabels:
-          backup: please
-```
-
-### Define a schedule
-
-The `spec.schedule` field is a [Cron-formatted][cron-formatted] string. Some common examples are below:
-
-| Entry | Description |
-| ----------------- | ------------------------------------------------------------------------------------------------- |
-| `@hourly` | Run once an hour. |
-| `@daily` | Run once a day. |
-| `@weekly` | Run once a week. |
-| `0 0/4 * * *` | Run every 4 hours. |
-| `0/15 * * * 1-5` | Run every 15 minutes, Monday through Friday. |
-| `@every 1h30m10s` | Run every 1 hour, 30 minutes, and 10 seconds. Hours are the largest unit of time `@every` accepts. |
-
-### Suspend a schedule
-
-Use the `spec.suspend` field to suspend the schedule. A suspended schedule creates no new backups but allows running backups to complete.
-
-```yaml
-apiVersion: admin.spaces.upbound.io/v1alpha1
-kind: SpaceBackupSchedule
-metadata:
-  name: daily-schedule
-spec:
-  suspend: true
-...
-```
-
-### Garbage collect backups when the schedule gets deleted
-
-Set the `spec.useOwnerReferencesInBackup` field to `true` to garbage collect the associated `SpaceBackup` resources when their `SpaceBackupSchedule` gets deleted.
-
-### Set the time to live
-
-Set the `spec.ttl` field to define the time to live for the backup. After this time, the backup is eligible for garbage collection. If this field isn't set, the backup isn't garbage collected.
-
-The time to live is a duration, for example, `168h` for 7 days.
-
-```yaml
-apiVersion: admin.spaces.upbound.io/v1alpha1
-kind: SpaceBackupSchedule
-metadata:
-  name: daily-schedule
-spec:
-  ttl: 168h # Backup is garbage collected after 7 days
-...
-```
-
-## Selecting space resources to back up
-
-By default, a SpaceBackup selects all groups and, for each of them, all control planes, secrets, and any other group-scoped resources.
-
-By setting `spec.match`, you can include only specific groups, control planes, secrets, or other Space resources in the backup.
-
-By setting `spec.exclude`, you can filter out some matched Space API resources from the backup.
-
-### Including space resources in a backup
-
-Different fields are available to include resources based on labels or names:
-
-- `spec.match.groups` to include only some groups in the backup.
-- `spec.match.controlPlanes` to include only some control planes in the backup.
-- `spec.match.secrets` to include only some secrets in the backup.
-- `spec.match.extras` to include only some extra resources in the backup.
-
-```yaml
-apiVersion: admin.spaces.upbound.io/v1alpha1
-kind: SpaceBackup
-metadata:
-  name: my-backup
-spec:
-  configRef:
-    kind: SpaceBackupConfig
-    name: default
-  match:
-    groups:
-      labelSelectors:
-      - matchLabels:
-          environment: production
-    controlPlanes:
-      labelSelectors:
-      - matchLabels:
-          backup: please
-    secrets:
-      names:
-      - my-secret
-    extras:
-    - apiGroup: "spaces.upbound.io"
-      kind: "SharedBackupConfig"
-      names:
-      - my-shared-backup
-```
-
-### Excluding Space resources from the backup
-
-Use the `spec.exclude` field to exclude matched Space API resources from the backup.
-
-Different fields are available to exclude resources based on labels or names:
-
-- `spec.exclude.groups` to exclude some groups from the backup.
-- `spec.exclude.controlPlanes` to exclude some control planes from the backup.
-- `spec.exclude.secrets` to exclude some secrets from the backup.
-- `spec.exclude.extras` to exclude some extra resources from the backup.
-
-```yaml
-apiVersion: admin.spaces.upbound.io/v1alpha1
-kind: SpaceBackup
-metadata:
-  name: my-backup
-spec:
-  ttl: 168h # Backup is garbage collected after 7 days
-  configRef:
-    kind: SpaceBackupConfig
-    name: default
-  match:
-    groups:
-      labelSelectors:
-      - matchLabels:
-          environment: production
-  exclude:
-    groups:
-      names:
-      - not-this-one-please
-```
-
-### Exclude resources in control planes' backups
-
-By default, a backup includes all resources in each selected control plane.
-
-Use the `spec.controlPlaneBackups.excludedResources` field to exclude resources from control planes' backups.
-
-```yaml
-apiVersion: admin.spaces.upbound.io/v1alpha1
-kind: SpaceBackup
-metadata:
-  name: my-backup
-spec:
-  ttl: 168h # Backup is garbage collected after 7 days
-  configRef:
-    kind: SpaceBackupConfig
-    name: default
-  controlPlaneBackups:
-    excludedResources:
-    - secrets
-    - buckets.s3.aws.upbound.io
-```
-
-## Create a manual backup
-
-[SpaceBackup][spacebackup] is a cluster-scoped resource that causes a single backup to occur for the whole Space.
-
-Below is an example of a manual SpaceBackup:
-
-```yaml
-apiVersion: admin.spaces.upbound.io/v1alpha1
-kind: SpaceBackup
-metadata:
-  name: my-backup
-spec:
-  configRef:
-    kind: SpaceBackupConfig
-    name: default
-  deletionPolicy: Delete
-```
-
-The `spec.deletionPolicy` field defines what happens when you delete the
-backup, including whether the backup file gets deleted from the bucket. The
-value defaults to `Orphan`. Set it to `Delete` to remove the uploaded files
-from the bucket.
-For more information on the backup and restore process, review the [Spaces API documentation][spaces-api-documentation].
-
-### Set the time to live
-
-Set the `spec.ttl` field to define the time to live for the backup. After this time, the backup is eligible for garbage collection. If this field isn't set, the backup isn't garbage collected. The time to live is a duration, for example, `168h` for 7 days.
-
-```yaml
-apiVersion: admin.spaces.upbound.io/v1alpha1
-kind: SpaceBackup
-metadata:
-  name: my-backup
-spec:
-  ttl: 168h # Backup is garbage collected after 7 days
-...
-```
-
-## Restore from a space backup
-
-Space Backup and Restore focuses only on disaster recovery. The restore procedure assumes a new Space installation with no existing resources. The restore procedure is idempotent, so you can safely run it multiple times if a run fails.
-
-To restore a Space from an existing Space Backup, follow these steps:
-
-1. Install Spaces from scratch as needed.
-2. Create a `SpaceBackupConfig` as needed to access the SpaceBackup from the object storage, for example named `my-backup-config`.
-3. Select the backup you want to restore from, for example `my-backup`.
-4. Run the following command to restore the Space:
-
-```shell
-export SPACE_BACKUP_CONFIG=my-backup-config
-export SPACE_BACKUP=my-backup
-kubectl exec -ti -n upbound-system deployments/spaces-controller -c spaces -- hyperspace restore $SPACE_BACKUP $SPACE_BACKUP_CONFIG
-```
-
-### Restore specific control planes
-
-:::important
-This feature is available from Spaces v1.11.
-:::
-
-Instead of restoring the whole Space, you can restore specific control planes
-from a backup using the `--controlplanes` flag. You can also use
-the `--skip-space-restore` flag to skip restoring Space objects. Together,
-these flags let Spaces admins restore individual control planes without
-restoring the entire Space.
-
-```shell
-export SPACE_BACKUP_CONFIG=my-backup-config
-export SPACE_BACKUP=my-backup
-kubectl exec -ti -n upbound-system deployments/spaces-controller -c spaces -- hyperspace restore $SPACE_BACKUP $SPACE_BACKUP_CONFIG --controlplanes default/ctp1,default/ctp2 --skip-space-restore
-```
-
-[shared-backups]: /spaces/howtos/self-hosted/workload-id/backup-restore-config/
-[spacebackupconfig]: /reference/apis/spaces-api/v1_9
-[thanos-object-storage]: https://thanos.io/tip/thanos/storage.md/
-[spacebackupschedule]: /reference/apis/spaces-api/v1_9
-[cron-formatted]: https://en.wikipedia.org/wiki/Cron
-[spacebackup]: /reference/apis/spaces-api/v1_9
-[spaces-api-documentation]: /reference/apis/spaces-api/v1_9
-
diff --git a/spaces_versioned_docs/version-v1.13/howtos/self-hosted/gitops-with-argocd.md b/spaces_versioned_docs/version-v1.13/howtos/self-hosted/gitops-with-argocd.md
deleted file mode 100644
index 004247a10..000000000
--- a/spaces_versioned_docs/version-v1.13/howtos/self-hosted/gitops-with-argocd.md
+++ /dev/null
@@ -1,142 +0,0 @@
----
-title: GitOps with ArgoCD in Self-Hosted Spaces
-sidebar_position: 80
-description: Set up GitOps workflows with Argo CD in self-hosted Spaces
-plan: "business"
----
-
-:::info Deployment Model
-This guide applies to **self-hosted Spaces** deployments. For Upbound Cloud Spaces, see [GitOps with Upbound Control Planes](/spaces/howtos/cloud-spaces/gitops-on-upbound/).
-:::
-
-GitOps is an approach for managing a system by declaratively describing desired resources' configurations in Git and using controllers to realize the desired state. Upbound control planes are compatible with this pattern, and Upbound strongly recommends integrating GitOps into the platforms you build on Upbound.
-
-## Integrate with Argo CD
-
-[Argo CD][argo-cd] is a project in the Kubernetes ecosystem commonly used for
-GitOps. You can use it in tandem with Upbound control planes to achieve GitOps
-flows. The sections below explain how to integrate these tools with Upbound.
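-As a preview of where this integration lands: once you register a control
-plane as an Argo CD cluster context (shown in the sections below), deploying
-to it is an ordinary Argo CD `Application`. In this sketch, the repository
-URL, path, and destination cluster name are placeholders:
-
-```yaml
-apiVersion: argoproj.io/v1alpha1
-kind: Application
-metadata:
-  name: ctp1-claims
-  namespace: argocd
-spec:
-  project: default
-  source:
-    repoURL: https://github.com/example-org/platform-claims.git
-    targetRevision: main
-    path: claims
-  destination:
-    # The cluster context name registered for the control plane
-    name: ctp1
-    namespace: default
-  syncPolicy:
-    automated:
-      prune: true
-      selfHeal: true
-```
-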
-
-### Configure connection secrets for control planes
-
-You can configure control planes to write their connection details to a secret.
-Do this by setting the
-[`spec.writeConnectionSecretToRef`][spec-writeconnectionsecrettoref] field in a
-control plane manifest. For example:
-
-```yaml
-apiVersion: spaces.upbound.io/v1beta1
-kind: ControlPlane
-metadata:
-  name: ctp1
-  namespace: default
-spec:
-  writeConnectionSecretToRef:
-    name: kubeconfig-ctp1
-    namespace: default
-```
-
-### Configure Argo CD
-
-To configure Argo CD for annotation resource tracking, edit the Argo CD
-ConfigMap in the Argo CD namespace. Add `application.resourceTrackingMethod:
-annotation` to the data section as below.
-
-Next, configure the [auto respect RBAC for the Argo CD
-controller][auto-respect-rbac-for-the-argo-cd-controller-1]. By default, Argo CD
-attempts to discover some Kubernetes resource types that don't exist in a
-control plane. You must configure Argo CD to respect the cluster's RBAC rules so
-that Argo CD can sync. Add `resource.respectRBAC: normal` to the data section as
-below.
-
-```yaml
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: argocd-cm
-data:
-  ...
-  application.resourceTrackingMethod: annotation
-  resource.respectRBAC: normal
-```
-
-:::tip
-The `resource.respectRBAC` configuration above tells Argo to respect RBAC for
-_all_ cluster contexts. If you're using an Argo CD instance to manage more than
-only control planes, consider changing the `clusters` string match
-for the configuration to apply only to control planes. For example, if every
-control plane context name followed the convention of being named
-`controlplane-<name>`, you could set the string match to `controlplane-*`.
-:::
-
-### Create a cluster context definition
-
-Once the control plane is ready, extract the following values from the secret
-containing the kubeconfig:
-
-```bash
-kubeconfig_content=$(kubectl get secrets kubeconfig-ctp1 -n default -o jsonpath='{.data.kubeconfig}' | base64 -d)
-server=$(echo "$kubeconfig_content" | grep 'server:' | awk '{print $2}')
-bearer_token=$(echo "$kubeconfig_content" | grep 'token:' | awk '{print $2}')
-ca_data=$(echo "$kubeconfig_content" | grep 'certificate-authority-data:' | awk '{print $2}')
-```
-
-Generate a new secret in the cluster where you installed Argo, using the prior
-values extracted:
-
-```yaml
-cat <
-
-import GlobalLanguageSelector, { CodeBlock } from '@site/src/components/GlobalLanguageSelector';
-
-
-
-:::important
-This feature is only available for select Business Critical customers. You can't
-set up your own Managed Space without the assistance of Upbound. If you're
-interested in this deployment mode, please [contact us][contact].
-:::
-
-
-
-A Managed Space deployed on AWS is a single-tenant deployment of a control plane
-space in your AWS organization in an isolated sub-account. With Managed Spaces,
-you can use the same API, CLI, and Console that Upbound offers, with the benefit
-of running entirely in a cloud account that you own and Upbound manages for you.
-
-The following guide walks you through setting up a Managed Space in your AWS
-organization. If you have any questions while working through this guide,
-contact your Upbound Account Representative for help.
-
-
-
-
-
-A Managed Space deployed on GCP is a single-tenant deployment of a control plane
-space in your GCP organization in an isolated project.
-With Managed Spaces, you
-can use the same API, CLI, and Console that Upbound offers, with the benefit of
-running entirely in a cloud account that you own and Upbound manages for you.
-
-The following guide walks you through setting up a Managed Space in your GCP
-organization. If you have any questions while working through this guide,
-contact your Upbound Account Representative for help.
-
-
-
-## Managed Space on your cloud architecture
-
-
-
-A Managed Space is a deployment of the Upbound Spaces software inside an
-Upbound-controlled sub-account in your AWS cloud environment. The Spaces
-software runs in this sub-account, orchestrated by Kubernetes. Backups and
-billing data get stored inside bucket or blob storage in the same sub-account.
-The control planes deployed and controlled by the Spaces software run on the
-Kubernetes cluster deployed into the sub-account.
-
-The diagram below illustrates the high-level architecture of Upbound Managed Spaces:
-
-![Upbound Managed Spaces arch](/img/managed-arch-aws.png)
-
-The Spaces software gets deployed on an EKS Cluster in the region of your
-choice. This EKS cluster is where your control planes ultimately run.
-Upbound also deploys two buckets: one for the collection of billing data and
-one for control plane backups.
-
-Upbound doesn't have access to other sub-accounts nor your organization-level
-settings in your cloud environment. Outside of your cloud organization, Upbound
-runs the Upbound Console, which includes the Upbound API and web application,
-including the dashboard you see at `console.upbound.io`. By default, all
-connections are encrypted, but public. Optionally, you can use private network
-connectivity through [AWS PrivateLink][aws-privatelink].
-
-
-
-
-
-A Managed Space is a deployment of the Upbound Spaces software inside an
-Upbound-controlled project in your GCP cloud environment. The Spaces software
-runs in this project, orchestrated by Kubernetes. Backups and billing data get
-stored inside bucket or blob storage in the same project. The control planes
-deployed and controlled by the Spaces software run on the Kubernetes cluster
-deployed into the project.
-
-The diagram below illustrates the high-level architecture of Upbound Managed Spaces:
-
-![Upbound Managed Spaces arch](/img/managed-arch-gcp.png)
-
-The Spaces software gets deployed on a GKE Cluster in the region of your choice.
-This GKE cluster is where your control planes ultimately run. Upbound also
-deploys two cloud buckets: one for the collection of billing data and one for
-control plane backups.
-
-Upbound doesn't have access to other projects nor your organization-level
-settings in your cloud environment. Outside of your cloud organization, Upbound
-runs the Upbound Console, which includes the Upbound API and web application,
-including the dashboard you see at `console.upbound.io`. By default, all
-connections are encrypted, but public. Optionally, you can use private network
-connectivity through [GCP Private Service
-Connect][gcp-private-service-connect].
-
-
-
-## Prerequisites
-
-- An organization created on Upbound
-
-
-
-- You should have a preexisting AWS organization to complete this guide.
-- You must create a new AWS sub-account. Read the [AWS documentation][aws-documentation] to learn how to create a new sub-account in an existing organization on AWS.
- -After the sub-account information gets provided to Upbound, **don't change it -any further.** Any changes made to the sub-account or the resources created by -Upbound for the purposes of the Managed Space deployments voids the SLA you have -with Upbound. If you want to make configuration changes, contact your Upbound -Solutions Architect. - - - - - -- You should have a preexisting GCP organization with an active Cloud Billing account to complete this guide. -- You must create a new GCP project. Read the [GCP documentation][gcp-documentation] to learn how to create a new project in an existing organization on GCP. - -After the project information gets provided to Upbound, **don't change it any -further.** Any changes made to the project or the resources created by Upbound -for the purposes of the Managed Space deployments voids the SLA you have with -Upbound. If you want to make configuration changes, contact your Upbound -Solutions Architect. - - - - - -## Set up cross-account management - -Upbound supports using AWS Key Management Service with cross-account IAM -permissions. This enables the isolation of keys so the infrastructure operated -by Upbound has limited access to symmetric keys. - -In the KMS key's account, apply the baseline key policy: - -```json -{ - "Sid": "Allow Upbound to use this key", - "Effect": "Allow", - "Principal": { - "AWS": ["[Managed Space sub-account ID]"] - }, - "Action": ["kms:Encrypt", "kms:Decrypt", "kms:ReEncrypt*", "kms:GenerateDataKey*", "kms:DescribeKey"], - "Resource": "*" -} -``` - -You need another key policy to let the sub-account create persistent resources -with the KMS key: - -```json -{ - "Sid": "Allow attachment of persistent resources for an Upbound Managed Space", - "Effect": "Allow", - "Principal": { - "AWS": "[Managed Space sub-account ID]" - }, - "Action": ["kms:CreateGrant", "kms:ListGrants", "kms:RevokeGrant"], - "Resource": "*", - "Condition": { - "Bool": { - "kms:GrantIsForAWSResource": "true" - } - } -} -``` - -### Configure PrivateLink - -By default, all connections to the Upbound Console are encrypted, but public. -AWS PrivateLink is a feature that allows VPC peering whereby your traffic -doesn't traverse the public internet. To have this configured, contact your -Upbound Account Representative. - - - - - -## Enable APIs - -Enable the following APIs in the new project: - -- Kubernetes Engine API -- Cloud Resource Manager API -- Compute Engine API -- Cloud DNS API - -:::tip -Read how to enable APIs in a GCP project [here][here]. -::: - -## Create a service account - -Create a service account in the new project. Name the service account, -upbound-sa. Give the service account the following roles: - -- Compute Admin -- Project IAM Admin -- Service Account Admin -- DNS Administrator -- Editor - -Select the service account you just created. Select keys. Add a new key and -select JSON. The key gets downloaded to your machine. Save this for later. - -## Create a DNS Zone - -Create a DNS Zone, set the **Zone type** to `Public`. - -### Configure Private Service Connect - -By default, all connections to the Upbound Console are encrypted, but public. -GCP Private Service Connect is a feature that allows VPC peering whereby your -traffic doesn't traverse the public internet. To have this configured, contact -your Upbound Account Representative. - - - -## Provide information to Upbound - -Once these policies get attached to the key, tell your Upbound Account -Representative, providing them the following: - - - -- the full ARN of the KMS key. 
-
-- the name of the organization that you created in Upbound. Use the up CLI command `up org list` to see this information.
-- Confirmation of which region in AWS you want the deployment to target.
-
-
-
-
-
-- The service account JSON key
-- The NS records associated with the DNS name created in the last step.
-- the name of the organization that you created in Upbound. Use the up CLI command `up org list` to see this information.
-- Confirmation of which region in GCP you want the deployment to target.
-
-
-
-Once Upbound has this information, the request gets processed within one business day.
-
-## Use your Managed Space
-
-Once the Managed Space gets deployed, you can see it in the Space selector when browsing your environment on [`console.upbound.io`][console-upbound-io].
-
-
-
-
-[contact]: https://www.upbound.io/contact-us
-[aws-privatelink]: #configure-privatelink
-[aws-documentation]: https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_create.html#orgs_manage_accounts_create-new
-[gcp-private-service-connect]: #configure-private-service-connect
-[gcp-documentation]: https://cloud.google.com/resource-manager/docs/creating-managing-organization
-[here]: https://cloud.google.com/apis/docs/getting-started#enabling_apis
-[console-upbound-io]: https://console.upbound.io/
diff --git a/spaces_versioned_docs/version-v1.13/howtos/self-hosted/oidc-configuration.md b/spaces_versioned_docs/version-v1.13/howtos/self-hosted/oidc-configuration.md
deleted file mode 100644
index cbef4dc42..000000000
--- a/spaces_versioned_docs/version-v1.13/howtos/self-hosted/oidc-configuration.md
+++ /dev/null
@@ -1,289 +0,0 @@
----
-title: Configure OIDC
-sidebar_position: 20
-description: Configure OIDC in your Space
----
-:::important
-This guide is only applicable for administrators who've deployed self-hosted Spaces. For general RBAC in Upbound, read [Upbound RBAC][upbound-rbac].
-:::
-
-Upbound uses the Kubernetes [Structured Authentication Configuration][structured-auth-config] to validate OIDC tokens sent to the API. Upbound stores this
-configuration as a `ConfigMap` that you reference in the Upbound router
-component during installation with Helm.
-
-This guide walks you through how to create and apply an authentication
-configuration to validate Upbound with an external identity provider. Each
-section focuses on a specific part of the configuration file.
-
-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments.
-
-For details on authentication and access control across versions, and for related platform authentication features, see the [Platform manual](../../../../platform/).
-:::
-
-## Creating the `AuthenticationConfiguration` file
-
-First, create a file called `config.yaml` with an `AuthenticationConfiguration`
-kind. The `AuthenticationConfiguration` is the initial authentication structure
-necessary for Upbound to communicate with your chosen identity provider.
-
-```yaml
-apiVersion: apiserver.config.k8s.io/v1beta1
-kind: AuthenticationConfiguration
-jwt:
-- issuer:
-    url: oidc-issuer-url
-    audiences:
-    - oidc-client-id
-  claimMappings: # optional
-    username:
-      claim: oidc-username-claim
-      prefix: oidc-username-prefix
-    groups:
-      claim: oidc-groups-claim
-      prefix: oidc-groups-prefix
-```
-
-For detailed configuration options, including the CEL-based token validation,
-review the feature [documentation][structured-auth-config].
-
-The `AuthenticationConfiguration` allows you to configure multiple JWT
-authenticators as separate issuers.
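-For example, this sketch validates tokens from two hypothetical identity
-providers, each with its own client ID and username prefix:
-
-```yaml
-apiVersion: apiserver.config.k8s.io/v1beta1
-kind: AuthenticationConfiguration
-jwt:
-- issuer:
-    url: https://corp-idp.example.com
-    audiences:
-    - spaces-cli
-  claimMappings:
-    username:
-      claim: email
-      prefix: "corp:"
-- issuer:
-    url: https://partner-idp.example.com
-    audiences:
-    - partner-portal
-  claimMappings:
-    username:
-      claim: sub
-      prefix: "partner:"
-```
-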
-
-### Configure an issuer
-
-The `jwt` array requires an `issuer` specification and typically contains:
-
-- A `username` claim mapping
-- A `groups` claim mapping
-
-Optionally, the configuration may also include:
-
-- A set of claim validation rules
-- A set of user validation rules
-
-The `issuer` URL must be unique across all configured authenticators.
-
-```yaml
-issuer:
-  url: https://example.com
-  discoveryUrl: https://discovery.example.com/.well-known/openid-configuration
-  certificateAuthority: |-
-    <PEM-encoded CA certificate>
-  audiences:
-  - client-id-a
-  - client-id-b
-  audienceMatchPolicy: MatchAny
-```
-
-By default, the authenticator assumes the OIDC Discovery URL is
-`{issuer.url}/.well-known/openid-configuration`. Most identity providers follow
-this structure, and you can omit the `discoveryUrl` field. To use a separate
-discovery service, specify the full path to the discovery endpoint in this
-field.
-
-If the CA for the Issuer isn't public, provide the PEM-encoded CA for the Discovery URL.
-
-At least one of the `audiences` entries must match the `aud` claim in the JWT.
-For OIDC tokens, this is the Client ID of the application attempting to access
-the Upbound API. Having multiple values set allows the same configuration to
-apply to multiple client applications, for example the `kubectl` CLI and an
-Internal Developer Portal.
-
-If you specify multiple `audiences`, set `audienceMatchPolicy` to `MatchAny`.
-
-### Configure `claimMappings`
-
-#### Username claim mapping
-
-By default, the authenticator uses the `sub` claim as the user name. To override this, either:
-
-- specify *both* `claim` and `prefix`. `prefix` may be explicitly set to the empty string.
-
-or
-
-- specify a CEL `expression` to calculate the user name.
-
-```yaml
-claimMappings:
-  username:
-    claim: "sub"
-    prefix: "keycloak"
-    # - expression: 'claims.username + ":external-user"'
-```
-
-#### Groups claim mapping
-
-By default, this configuration doesn't map groups, unless you either:
-
-- specify both `claim` and `prefix`. `prefix` may be explicitly set to the empty string.
-
-or
-
-- specify a CEL `expression` that returns a string or list of strings.
-
-```yaml
-claimMappings:
-  groups:
-    claim: "groups"
-    prefix: ""
-    # - expression: 'claims.roles.split(",")'
-```
-
-### Validation rules
-
-Validation rules are outside the scope of this document. Review the
-[documentation][structured-auth-config] for more information. Examples include
-using CEL expressions to validate authentication such as:
-
-- Validating that a token claim has a specific value
-- Validating that a token has a limited lifetime
-- Ensuring usernames and groups don't contain reserved prefixes
-
-## Required claims
-
-To interact with Space and ControlPlane APIs, users must have the `upbound.io/aud` claim set to one of the following:
-
-| Upbound.io Audience | Notes |
-| -------------------------------------------------------- | -------------------------------------------------------------------- |
-| `[]` | No access to Space-level or ControlPlane APIs |
-| `['upbound:spaces:api']` | This identity is only for Space-level APIs |
-| `['upbound:spaces:controlplanes']` | This identity is only for ControlPlane APIs |
-| `['upbound:spaces:api', 'upbound:spaces:controlplanes']` | This identity is for both Space-level and ControlPlane APIs |
-
-You can set this claim in two ways:
-
-- Map it in the ID token in the identity provider.
-- Inject it in the authenticator with the `jwt.claimMappings.extra` array.
-
-For example:
-
-```yaml
-apiVersion: apiserver.config.k8s.io/v1beta1
-kind: AuthenticationConfiguration
-jwt:
-- issuer:
-    url: https://keycloak:8443/realms/master
-    certificateAuthority: |-
-      <PEM-encoded CA certificate>
-    audiences:
-    - master-realm
-    audienceMatchPolicy: MatchAny
-  claimMappings:
-    username:
-      claim: "preferred_username"
-      prefix: "keycloak:"
-    groups:
-      claim: "groups"
-      prefix: ""
-    extra:
-    - key: 'upbound.io/aud'
-      valueExpression: "['upbound:spaces:controlplanes', 'upbound:spaces:api']"
-```
-
-## Install the `AuthenticationConfiguration`
-
-Once you create an `AuthenticationConfiguration` file, specify this file as a
-`ConfigMap` in the host cluster for the Upbound Space.
-
-```sh
-kubectl create configmap <configmap-name> -n upbound-system --from-file=config.yaml=./path/to/config.yaml
-```
-
-To enable OIDC authentication and disable Upbound IAM when installing the Space,
-reference the configuration and pass an empty value to the Upbound IAM issuer
-parameter:
-
-```sh
-up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
-  ...
-  --set "authentication.structuredConfig=<configmap-name>" \
-  --set "router.controlPlane.extraArgs[0]=--upbound-iam-issuer-url="
-```
-
-## Configure RBAC
-
-In this scenario, the external identity provider handles authentication, but
-permissions for Spaces and ControlPlane APIs use standard RBAC objects.
-
-### Spaces APIs
-
-The Spaces APIs include:
-
-```yaml
-- apiGroups:
-  - spaces.upbound.io
-  resources:
-  - controlplanes
-  - sharedexternalsecrets
-  - sharedsecretstores
-  - backups
-  - backupschedules
-  - sharedbackups
-  - sharedbackupconfigs
-  - sharedbackupschedules
-- apiGroups:
-  - observability.spaces.upbound.io
-  resources:
-  - sharedtelemetryconfigs
-```
-
-### ControlPlane APIs
-
-Crossplane specifies three [roles][crossplane-managed-clusterroles] for a
-ControlPlane: admin, editor, and viewer. These map to the verbs `admin`, `edit`,
-and `view` on the `controlplanes/k8s` resource in the `spaces.upbound.io` API
-group.
-
-### Control access
-
-The `groups` claim in the `AuthenticationConfiguration` allows you to control
-resource access when you create a `ClusterRoleBinding`. A `ClusterRole` defines
-the role's permissions, and a `ClusterRoleBinding` binds the role to subjects
-such as groups.
- -The example below allows `admin` permissions for all ControlPlanes to members of -the `ctp-admins` group: - -```yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: allow-ctp-admin -rules: -- apiGroups: - - spaces.upbound.io - resources: - - controlplanes/k8s - verbs: - - admin -``` - -ctp-admins ClusterRoleBinding -```yaml -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: allow-ctp-admin -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: allow-ctp-admin -subjects: -- apiGroup: rbac.authorization.k8s.io - kind: Group - name: ctp-admins -``` - -[structured-auth-config]: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#using-authentication-configuration -[crossplane-managed-clusterroles]: https://github.com/crossplane/crossplane/blob/master/design/design-doc-rbac-manager.md#managed-rbac-clusterroles -[upbound-rbac]: /manuals/platform/concepts/authorization/upbound-rbac diff --git a/spaces_versioned_docs/version-v1.13/howtos/self-hosted/proxies-config.md b/spaces_versioned_docs/version-v1.13/howtos/self-hosted/proxies-config.md deleted file mode 100644 index 3802e4cb0..000000000 --- a/spaces_versioned_docs/version-v1.13/howtos/self-hosted/proxies-config.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: Proxied configuration -sidebar_position: 20 -description: Configure Upbound within a proxied environment ---- - -:::info API Version Information -This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments in proxied environments. Proxy configuration procedures are consistent across versions. - -For version-specific deployment considerations, see the . -::: - - - -When you install Upbound with Helm in a proxied environment, please update the specified registry with your internal registry. - - - -```bash -helm -n upbound-system upgrade --install spaces \ - oci://xpkg.upbound.io/spaces-artifacts/spaces \ - --version "${SPACES_VERSION}" \ - --set "ingress.host=${SPACES_ROUTER_HOST}" \ - --set "account=${UPBOUND_ACCOUNT}" \ - --set "authentication.hubIdentities=true" \ - --set "authorization.hubRBAC=true" \ - --set "registry=registry.company.corp/spaces" \ - --set "controlPlanes.uxp.registryOverride=registry.company.corp/xpkg.upbound.io" \ - --set "controlPlanes.uxp.repository=registry.company.corp/spaces" \ - --wait -``` diff --git a/spaces_versioned_docs/version-v1.13/howtos/self-hosted/query-api.md b/spaces_versioned_docs/version-v1.13/howtos/self-hosted/query-api.md deleted file mode 100644 index c112e9001..000000000 --- a/spaces_versioned_docs/version-v1.13/howtos/self-hosted/query-api.md +++ /dev/null @@ -1,396 +0,0 @@ ---- -title: Deploy Query API infrastructure -weight: 130 -description: Query API -aliases: - - /all-spaces/self-hosted-spaces/query-api - - /self-hosted-spaces/query-api - - all-spaces/self-hosted-spaces/query-api ---- - - - - -:::info API Version Information -This guide applies to all supported versions (v1.9-v1.14+). Query API has evolved across versions: - -- **Cloud Spaces**: Available since v1.6 (enabled by default) -- **Self-Hosted**: Available since v1.8 (requires manual enablement) - -For details on Query API availability across versions, see the . -::: - -:::important - -This feature is in preview. The Query API is available in the Cloud Space offering in `v1.6` and enabled by default. - -This is a requirement to be able to connect a Space since `v1.8.0`, and is off by default, see below to enable it. 
-
-:::
-
-Upbound's Query API allows users to inspect objects and resources within their control planes. The read-only `up alpha query` and `up alpha get` CLI commands let you gather information on your control planes quickly and efficiently. These commands follow the [`kubectl` conventions][kubectl-conventions] for filtering, sorting, and retrieving information from your Space.
-
-Query API requires a PostgreSQL database to store the data. You can use the default PostgreSQL instance provided by Upbound or bring your own PostgreSQL instance.
-
-## Managed setup
-
-:::tip
-If you don't have specific requirements for your setup, Upbound recommends following this approach.
-:::
-
-To enable this feature, set `features.alpha.apollo.enabled=true` and `apollo.apollo.storage.postgres.create=true` when installing Spaces.
-
-However, you need to install CloudNativePG (`CNPG`) to provide the PostgreSQL instance. You can let the `up` CLI do this for you, or install it manually.
-
-For more customization, see the [Helm chart reference][helm-chart-reference]. You can modify the number
-of PostgreSQL instances, pooling instances, storage size, and more.
-
-If you have specific requirements not addressed in the Helm chart, see below for more information on how to bring your own [PostgreSQL setup][postgresql-setup].
-
-### Using the up CLI
-
-Before you begin, make sure you have the most recent version of the [`up` CLI installed][up-cli-installed].
-
-To enable this feature, set `features.alpha.apollo.enabled=true` and `apollo.apollo.storage.postgres.create=true` when installing Spaces:
-
-```bash
-up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
-  ...
-  --set "features.alpha.apollo.enabled=true" \
-  --set "apollo.apollo.storage.postgres.create=true"
-```
-
-`up space init` and `up space upgrade` install CloudNativePG automatically, if needed.
-
-### Helm chart
-
-If you are installing the Helm chart in some other way, you can manually install CloudNativePG in one of the [supported ways][supported-ways], for example:
-
-```shell
-kubectl apply --server-side -f \
-  https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.24.1/cnpg-1.24.1.yaml
-kubectl rollout status -n cnpg-system deployment cnpg-controller-manager -w --timeout 120s
-```
-
-Next, install the Spaces Helm chart with the necessary values, for example:
-
-```shell
-helm -n upbound-system upgrade --install spaces \
-  oci://xpkg.upbound.io/spaces-artifacts/spaces \
-  --version "${SPACES_VERSION}" \
-  ...
-  --set "features.alpha.apollo.enabled=true" \
-  --set "apollo.apollo.storage.postgres.create=true" \
-  --wait
-```
-
-## Self-hosted PostgreSQL configuration
-
-If your workflow requires more customization, you can provide your own
-PostgreSQL instance and configure credentials manually.
-
-Using your own PostgreSQL instance requires careful architecture consideration.
-Review the architecture and requirements guidelines.
-
-### Architecture
-
-The Query API architecture uses two components besides the PostgreSQL database:
-
-* **Apollo Syncers**: Watch etcd for changes and sync them to PostgreSQL. One or more per control plane.
-* **Apollo Server**: Serves the Query API from the data in PostgreSQL. One or more per Space.
-
-The default setup also uses the `PgBouncer` connection pooler to manage connections from the syncers.
-```mermaid -graph LR - User[User] - - subgraph Cluster["Cluster (Spaces)"] - direction TB - Apollo[apollo] - - subgraph ControlPlanes["Control Planes"] - APIServer[API Server] - Syncer[apollo-syncer] - end - end - - PostgreSQL[(PostgreSQL)] - - User -->|requests| Apollo - - Apollo -->|connects| PostgreSQL - Apollo -->|creates schemas & users| PostgreSQL - - Syncer -->|watches| APIServer - Syncer -->|writes| PostgreSQL - - PostgreSQL -->|data| Apollo - - style PostgreSQL fill:#e1f5ff,stroke:#333,stroke-width:2px,color:#000 - style Apollo fill:#ffe1e1,stroke:#333,stroke-width:2px,color:#000 - style Cluster fill:#f0f0f0,stroke:#333,stroke-width:2px,color:#000 - style ControlPlanes fill:#fff,stroke:#666,stroke-width:1px,stroke-dasharray: 5 5,color:#000 -``` - - -Each component needs to connect to the PostgreSQL database. - -In the event of database issues, you can provide a new database and the syncers -automatically repopulate the data. - -### Requirements - -* A PostgreSQL 16 instance or cluster. -* A database, for example named `upbound`. -* **Optional**: A dedicated user for the Apollo Syncers, otherwise the Spaces Controller generates a dedicated set of credentials per syncer with the necessary permissions, for example named `syncer`. -* A dedicated **superuser or admin account** for the Apollo Server. -* **Optional**: A connection pooler, like PgBouncer, to manage connections from the Apollo Syncers. If you didn't provide the optional users, you might have to configure the pooler to allow users to connect using the same credentials as PostgreSQL. -* **Optional**: A read replica for the Apollo Syncers to connect to, to reduce load on the primary database, this might cause a slight delay in the data being available through the Query API. - -Below you can find examples of setups to get you started, you can mix and match the examples to suit your needs. - -### In-cluster setup - -:::tip - -If you don't have strong opinions on your setup, but still want full control on -the resources created for some unsupported customizations, Upbound recommends -the in-cluster setup. - -::: - -For more customization than the managed setup, you can use CloudNativePG for -PostgreSQL in the same cluster. - -For in-cluster setup, manually deploy the operator in one of the [supported ways][supported-ways-1], for example: - -```shell -kubectl apply --server-side -f \ - https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.24.1/cnpg-1.24.1.yaml -kubectl rollout status -n cnpg-system deployment cnpg-controller-manager -w --timeout 120s -``` - -Then create a `Cluster` and `Pooler` in the `upbound-system` namespace, for example: - -```shell -kubectl create ns upbound-system - -kubectl apply -f - < - -### External setup - - -:::tip - -If you want to run your PostgreSQL instance outside the cluster, but are fine with credentials being managed by the `apollo` user, this is the suggested way to proceed. - -::: - -When using this setup, you must manually create the required Secrets in the -`upbound-system` namespace. The `apollo` user must have permissions to create -schemas and users. 
-
-```shell
-
-kubectl create ns upbound-system
-
-# A Secret containing the necessary credentials to connect to the PostgreSQL instance
-kubectl create secret generic spaces-apollo-pg-app -n upbound-system \
-  --from-literal=password=supersecret
-
-# A Secret containing the necessary CA certificate to verify the connection to the PostgreSQL instance
-kubectl create secret generic spaces-apollo-pg-ca -n upbound-system \
-  --from-file=ca.crt=/path/to/ca.crt
-```
-
-Next, install Spaces with the necessary settings:
-
-```shell
-export PG_URL=your-postgres-host:5432
-export PG_POOLED_URL=your-pgbouncer-host:5432 # this can be the same as PG_URL
-
-helm upgrade --install ... \
-  --set "features.alpha.apollo.enabled=true" \
-  --set "apollo.apollo.storage.postgres.create=false" \
-  --set "apollo.apollo.storage.postgres.connection.url=$PG_URL" \
-  --set "apollo.apollo.storage.postgres.connection.credentials.secret.name=spaces-apollo-pg-app" \
-  --set "apollo.apollo.storage.postgres.connection.credentials.format=basicauth" \
-  --set "apollo.apollo.storage.postgres.connection.ca.name=spaces-apollo-pg-ca" \
-  --set "apollo.apollo.storage.postgres.connection.syncer.url=$PG_POOLED_URL"
-```
-
-### External setup with all custom credentials
-
-For custom credentials with the Apollo Syncers or Server, create the required secrets in the
-`upbound-system` namespace:
-
-```shell
-export APOLLO_SYNCER_USER=syncer
-export APOLLO_SERVER_USER=apollo
-
-kubectl create ns upbound-system
-
-# A Secret containing the necessary credentials to connect to the PostgreSQL instance
-kubectl create secret generic spaces-apollo-pg-app -n upbound-system \
-  --from-literal=password=supersecret
-
-# A Secret containing the necessary CA certificate to verify the connection to the PostgreSQL instance
-kubectl create secret generic spaces-apollo-pg-ca -n upbound-system \
-  --from-file=ca.crt=/path/to/ca.crt
-
-# A Secret containing the necessary credentials for the Apollo Syncers to connect to the PostgreSQL instance.
-# All Syncers in the Space use these credentials.
-kubectl create secret generic spaces-apollo-pg-syncer -n upbound-system \
-  --from-literal=username=$APOLLO_SYNCER_USER \
-  --from-literal=password=supersecret
-
-# A Secret containing the necessary credentials for the Apollo Server to connect to the PostgreSQL instance.
-kubectl create secret generic spaces-apollo-pg-apollo -n upbound-system \
-  --from-literal=username=$APOLLO_SERVER_USER \
-  --from-literal=password=supersecret
-```
-
-Next, install Spaces with the necessary settings:
-
-```shell
-export PG_URL=your-postgres-host:5432
-export PG_POOLED_URL=your-pgbouncer-host:5432 # this can be the same as PG_URL
-
-helm ... \
-  --set "features.alpha.apollo.enabled=true" \
-  --set "apollo.apollo.storage.postgres.create=false" \
-  --set "apollo.apollo.storage.postgres.connection.url=$PG_URL" \
-  --set "apollo.apollo.storage.postgres.connection.credentials.secret.name=spaces-apollo-pg-app" \
-  --set "apollo.apollo.storage.postgres.connection.credentials.format=basicauth" \
-  --set "apollo.apollo.storage.postgres.connection.ca.name=spaces-apollo-pg-ca" \
-  --set "apollo.apollo.storage.postgres.connection.syncer.url=$PG_POOLED_URL" \
-
-  # For the syncers
-  --set "apollo.apollo.storage.postgres.connection.syncer.credentials.format=basicauth" \
-  --set "apollo.apollo.storage.postgres.connection.syncer.credentials.user=$APOLLO_SYNCER_USER" \
-  --set "apollo.apollo.storage.postgres.connection.syncer.credentials.secret.name=spaces-apollo-pg-syncer" \
-
-  # For the server
-  --set "apollo.apollo.storage.postgres.connection.apollo.credentials.format=basicauth" \
-  --set "apollo.apollo.storage.postgres.connection.apollo.credentials.user=$APOLLO_SERVER_USER" \
-  --set "apollo.apollo.storage.postgres.connection.apollo.credentials.secret.name=spaces-apollo-pg-apollo" \
-  --set "apollo.apollo.storage.postgres.connection.apollo.url=$PG_POOLED_URL"
-```
-
-
-## Using the Query API
-
-
-See the [Query API documentation][query-api-documentation] for more information on how to use the Query API.
-
-
-
-
-[postgresql-setup]: #self-hosted-postgresql-configuration
-[up-cli-installed]: /manuals/cli/overview
-[query-api-documentation]: /spaces/howtos/query-api
-
-[helm-chart-reference]: /reference/helm-reference
-[kubectl-conventions]: https://kubernetes.io/docs/reference/kubectl/generated/kubectl_get/
-[supported-ways]: https://cloudnative-pg.io/documentation/current/installation_upgrade/
-[supported-ways-1]: https://cloudnative-pg.io/documentation/current/installation_upgrade/
-[cloudnativepg-documentation]: https://cloudnative-pg.io/documentation/1.24/storage/#configuration-via-a-pvc-template
-[postgresql-cluster]: https://cloudnative-pg.io/documentation/1.24/resource_management/
-[pooler]: https://cloudnative-pg.io/documentation/1.24/connection_pooling/#pod-templates
-[postgresql-cluster-2]: https://cloudnative-pg.io/documentation/1.24/replication/
-[pooler-3]: https://cloudnative-pg.io/documentation/1.24/connection_pooling/#high-availability-ha
-[postgresql-cluster-4]: https://cloudnative-pg.io/documentation/1.24/operator_capability_levels/#override-of-operand-images-through-the-crd
-[pooler-5]: https://cloudnative-pg.io/documentation/1.24/connection_pooling/#pod-templates
-[cloudnativepg-documentation-6]: https://cloudnative-pg.io/documentation/1.24/postgresql_conf/
diff --git a/spaces_versioned_docs/version-v1.13/howtos/self-hosted/scaling-resources.md b/spaces_versioned_docs/version-v1.13/howtos/self-hosted/scaling-resources.md
deleted file mode 100644
index 7bb04d2c2..000000000
--- a/spaces_versioned_docs/version-v1.13/howtos/self-hosted/scaling-resources.md
+++ /dev/null
@@ -1,184 +0,0 @@
----
-title: Scaling vCluster and etcd Resources
-weight: 950
-description: A guide for scaling vCluster and etcd resources in self-hosted Spaces
-aliases:
-  - /all-spaces/self-hosted-spaces/scaling-resources
-  - /spaces/scaling-resources
----
-
-In large workloads or control plane migrations, you may encounter
-performance-impacting resource constraints. This guide explains how to scale
-vCluster and `etcd` resources for optimal performance in your self-hosted Space.
-
-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Scaling procedures are consistent across versions.
-
-For version-specific resource requirements and capacity planning, see the .
-:::
-
-## Signs of resource constraints
-
-You may need to scale your vCluster or `etcd` resources if you observe:
-
-- API server timeout errors such as `http: Handler timeout`
-- Error messages about `too many requests` and requests to `try again later`
-- Operations like provider installation failing with errors like `cannot apply provider package secret`
-- vCluster pods experiencing continuous restarts
-- API performance degrading with high resource volume
-
-
-## Scaling vCluster resources
-
-
-The vCluster component handles Kubernetes API requests for your control planes.
-Deployments with multiple control planes or providers may exceed default resource allocations.
-
-```yaml
-# Default settings
-controlPlanes.vcluster.resources.limits.cpu: "3000m"
-controlPlanes.vcluster.resources.limits.memory: "3960Mi"
-controlPlanes.vcluster.resources.requests.cpu: "170m"
-controlPlanes.vcluster.resources.requests.memory: "1320Mi"
-```
-
-For larger workloads, like migrating from an existing control plane with several
-providers, increase these resource limits in your Spaces `values.yaml` file.
-
-```yaml
-controlPlanes:
-  vcluster:
-    resources:
-      limits:
-        cpu: "4000m"    # Increase to 4 cores
-        memory: "6Gi"   # Increase to 6GB memory
-      requests:
-        cpu: "500m"     # Increase baseline CPU request
-        memory: "2Gi"   # Increase baseline memory request
-```
-
-## Scaling `etcd` storage
-
-Kubernetes API server performance depends heavily on `etcd`, which can run into
-IOPS (input/output operations per second) bottlenecks. Upbound allocates `50Gi`
-volumes for `etcd` in cloud environments to ensure adequate IOPS performance.
-
-```yaml
-# Default setting
-controlPlanes.etcd.persistence.size: "5Gi"
-```
-
-For production environments or when migrating large control planes, increase
-`etcd` volume size and specify an appropriate storage class:
-
-```yaml
-controlPlanes:
-  etcd:
-    persistence:
-      size: "50Gi"                  # Recommended for production
-      storageClassName: "fast-ssd"  # Use a high-performance storage class
-```
-
-### Storage class considerations
-
-For AWS:
-- Use GP3 volumes with adequate IOPS
-- For AWS GP3 volumes, IOPS scale with volume size (3000 IOPS baseline)
-- For optimal performance, provision at least 32Gi to support up to 16,000 IOPS
-
-For GCP and Azure:
-- Use SSD-based persistent disk types for optimal performance
-- Consider premium storage options for high-throughput workloads
-
-## Scaling Crossplane resources
-
-Crossplane manages provider resources in your control planes. You may need to increase provider resources for larger deployments:
-
-```yaml
-# Default settings
-controlPlanes.uxp.resourcesCrossplane.requests.cpu: "370m"
-controlPlanes.uxp.resourcesCrossplane.requests.memory: "400Mi"
-```
-
-
-For environments with many providers or managed resources:
-
-
-```yaml
-controlPlanes:
-  uxp:
-    resourcesCrossplane:
-      limits:
-        cpu: "1000m"    # Add CPU limit
-        memory: "1Gi"   # Add memory limit
-      requests:
-        cpu: "500m"     # Increase CPU request
-        memory: "512Mi" # Increase memory request
-```
-
-## High availability configuration
-
-For production environments, enable High Availability mode to ensure resilience:
-
-```yaml
-controlPlanes:
-  ha:
-    enabled: true
-```
-
-## Best practices for migration scenarios
-
-When migrating from existing control planes into a self-hosted Space:
-
-1. **Pre-scale resources**: Scale up resources before performing the migration
-2. **Monitor resource usage**: Watch resource consumption during and after migration with `kubectl top pods` (see the sketch after this list)
-3. **Scale incrementally**: If issues persist, increase resources incrementally until performance stabilizes
-4. **Consider storage performance**: `etcd` is sensitive to storage I/O performance
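-
-A quick way to do that monitoring, assuming the `mxp-<GUID>-system` host namespace naming that Spaces uses for control planes:
-
-```bash
-# Per-container CPU and memory for one control plane's host namespace
-kubectl top pods -n mxp-<GUID>-system --containers
-
-# Or watch every control plane namespace at once
-kubectl top pods -A --containers | grep '^mxp-'
-```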
-
-## Helm values configuration
-
-Apply these settings through your Spaces Helm values file:
-
-```yaml
-controlPlanes:
-  vcluster:
-    resources:
-      limits:
-        cpu: "4000m"
-        memory: "6Gi"
-      requests:
-        cpu: "500m"
-        memory: "2Gi"
-  etcd:
-    persistence:
-      size: "50Gi"
-      storageClassName: "gp3"  # Use your cloud provider's fast storage class
-  uxp:
-    resourcesCrossplane:
-      limits:
-        cpu: "1000m"
-        memory: "1Gi"
-      requests:
-        cpu: "500m"
-        memory: "512Mi"
-  ha:
-    enabled: true  # For production environments
-```
-
-Apply the configuration using Helm:
-
-```bash
-helm upgrade --install spaces oci://xpkg.upbound.io/spaces-artifacts/spaces \
-  -f values.yaml \
-  -n upbound-system
-```
-
-## Considerations
-
-- **Provider count**: Each provider adds resource overhead; consider using provider families to optimize resource usage
-- **Managed resources**: The number of managed resources impacts CPU usage more than memory
-- **Vertical pod autoscaling**: Consider using vertical pod autoscaling in Kubernetes to automatically adjust resources based on usage
-- **Storage performance**: Storage performance is as important as capacity for etcd
-- **Network latency**: Low-latency connections between components improve performance
-
-
diff --git a/spaces_versioned_docs/version-v1.13/howtos/self-hosted/self-hosted-spaces-deployment.md b/spaces_versioned_docs/version-v1.13/howtos/self-hosted/self-hosted-spaces-deployment.md
deleted file mode 100644
index e549e3939..000000000
--- a/spaces_versioned_docs/version-v1.13/howtos/self-hosted/self-hosted-spaces-deployment.md
+++ /dev/null
@@ -1,461 +0,0 @@
----
-title: Deployment Workflow
-sidebar_position: 3
-description: A quickstart guide for Upbound Spaces
-tier: "business"
----
-import GlobalLanguageSelector, { CodeBlock } from '@site/src/components/GlobalLanguageSelector';
-
-
-
-
-
-This guide deploys a self-hosted Upbound cluster in AWS.
-
-
-
-
-
-This guide deploys a self-hosted Upbound cluster in Azure.
-
-
-
-
-
-This guide deploys a self-hosted Upbound cluster in GCP.
-
-
-
-Disconnected Spaces allows you to host control planes in your preferred environment.
-
-## Prerequisites
-
-To get started deploying your own Disconnected Space, you need:
-
-- An Upbound organization account string, provided by your Upbound account representative
-- A `token.json` license, provided by your Upbound account representative
-
-
-
-- An AWS account and the AWS CLI
-
-
-
-
-
-- An Azure account and the Azure CLI
-
-
-
-
-
-- A GCP account and the gcloud CLI
-
-
-
-:::important
-Disconnected Spaces are a business-critical feature of Upbound and require a license token to successfully complete the installation. [Contact Upbound][contact-upbound] if you want to try out Upbound with Disconnected Spaces.
-:::
-
-## Provision the hosting environment
-
-### Create a cluster
-
-
-
-Configure the name and target region you want the EKS cluster deployed to.
-
-```ini
-export SPACES_CLUSTER_NAME=upbound-space-quickstart
-export SPACES_REGION=us-east-1
-```
-
-Provision a 3-node cluster using eksctl. The following `ClusterConfig` is a minimal sketch; the node group name and instance type are illustrative, so adjust them for your workload:
-
-```bash
-cat <<EOF | eksctl create cluster -f -
-apiVersion: eksctl.io/v1alpha5
-kind: ClusterConfig
-metadata:
-  name: ${SPACES_CLUSTER_NAME}
-  region: ${SPACES_REGION}
-managedNodeGroups:
-  - name: spaces-nodes
    instanceType: m5.xlarge
-    desiredCapacity: 3
-EOF
-```
-
-
-
-
-Configure the name and target region you want the AKS cluster deployed to.
-
-```ini
-export SPACES_RESOURCE_GROUP_NAME=upbound-space-quickstart
-export SPACES_CLUSTER_NAME=upbound-space-quickstart
-export SPACES_LOCATION=westus
-```
-
-Provision a new Azure resource group.
-
-```bash
-az group create --name ${SPACES_RESOURCE_GROUP_NAME} --location ${SPACES_LOCATION}
-```
-
-Provision a 3-node cluster.
-
-```bash
-az aks create -g ${SPACES_RESOURCE_GROUP_NAME} -n ${SPACES_CLUSTER_NAME} \
-  --enable-managed-identity \
-  --node-count 3 \
-  --node-vm-size Standard_D4s_v4 \
-  --enable-addons monitoring \
-  --enable-msi-auth-for-monitoring \
-  --generate-ssh-keys \
-  --network-plugin kubenet \
-  --network-policy calico
-```
-
-Get the kubeconfig of your AKS cluster.
-
-```bash
-az aks get-credentials --resource-group ${SPACES_RESOURCE_GROUP_NAME} --name ${SPACES_CLUSTER_NAME}
-```
-
-
-
-
-
-Configure the name and target region you want the GKE cluster deployed to.
-
-```ini
-export SPACES_PROJECT_NAME=upbound-spaces-project
-export SPACES_CLUSTER_NAME=upbound-spaces-quickstart
-export SPACES_LOCATION=us-west1-a
-```
-
-Create a new project and set it as the current project.
-
-```bash
-gcloud projects create ${SPACES_PROJECT_NAME}
-gcloud config set project ${SPACES_PROJECT_NAME}
-```
-
-Provision a 3-node cluster.
-
-```bash
-gcloud container clusters create ${SPACES_CLUSTER_NAME} \
-  --enable-network-policy \
-  --num-nodes=3 \
-  --zone=${SPACES_LOCATION} \
-  --machine-type=e2-standard-4
-```
-
-Get the kubeconfig of your GKE cluster.
-
-```bash
-gcloud container clusters get-credentials ${SPACES_CLUSTER_NAME} --zone=${SPACES_LOCATION}
-```
-
-
-
-## Configure the pre-install
-
-### Set your Upbound organization account details
-
-Set your Upbound organization account string as an environment variable for use in future steps.
-
-```ini
-export UPBOUND_ACCOUNT=
-```
-
-### Set up pre-install configurations
-
-Export the path of the license token JSON file provided by your Upbound account representative.
-
-```ini {copy-lines="2"}
-# Change the path to where you saved the token.
-export SPACES_TOKEN_PATH="/path/to/token.json"
-```
-
-Set the version of Spaces software you want to install.
-
-```ini
-export SPACES_VERSION=
-```
-
-Set the router host. The `SPACES_ROUTER_HOST` is the domain name that's used to access the control plane instances. It's used by the ingress controller to route requests.
-
-```ini
-export SPACES_ROUTER_HOST="proxy.upbound-127.0.0.1.nip.io"
-```
-
-:::important
-Make sure to replace the placeholder text in `SPACES_ROUTER_HOST` and provide a real domain that you own.
-:::
-
-
-## Install the Spaces software
-
-
-### Install cert-manager
-
-Install cert-manager.
-
-```bash
-kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.11.0/cert-manager.yaml
-kubectl wait deployment -n cert-manager cert-manager-webhook --for condition=Available=True --timeout=360s
-```
-
-
-
-### Install the AWS Load Balancer Controller
-
-```bash
-helm install aws-load-balancer-controller aws-load-balancer-controller --namespace kube-system \
-  --repo https://aws.github.io/eks-charts \
-  --set clusterName=${SPACES_CLUSTER_NAME} \
-  --set serviceAccount.create=false \
-  --set serviceAccount.name=aws-load-balancer-controller \
-  --wait
-```
-
-
-
-### Install ingress-nginx
-
-Starting with Spaces v1.10.0, you need to configure the ingress-nginx
-controller to allow SSL-passthrough mode. You can do so by passing the
-`--enable-ssl-passthrough=true` command-line option to the controller.
-The following Helm install command enables this with the `controller.extraArgs` -parameter: - - - -```bash -helm upgrade --install ingress-nginx ingress-nginx \ - --create-namespace --namespace ingress-nginx \ - --repo https://kubernetes.github.io/ingress-nginx \ - --version 4.12.1 \ - --set 'controller.service.type=LoadBalancer' \ - --set 'controller.extraArgs.enable-ssl-passthrough=true' \ - --set 'controller.service.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-type=external' \ - --set 'controller.service.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-scheme=internet-facing' \ - --set 'controller.service.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-nlb-target-type=ip' \ - --set 'controller.service.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-healthcheck-protocol=http' \ - --set 'controller.service.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-healthcheck-path=/healthz' \ - --set 'controller.service.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-healthcheck-port=10254' \ - --wait -``` - - - - - -```bash -helm upgrade --install ingress-nginx ingress-nginx \ - --create-namespace --namespace ingress-nginx \ - --repo https://kubernetes.github.io/ingress-nginx \ - --version 4.12.1 \ - --set 'controller.service.type=LoadBalancer' \ - --set 'controller.extraArgs.enable-ssl-passthrough=true' \ - --set 'controller.service.annotations.service\.beta\.kubernetes\.io/azure-load-balancer-health-probe-request-path=/healthz' \ - --wait -``` - - - - - -```bash -helm upgrade --install ingress-nginx ingress-nginx \ - --create-namespace --namespace ingress-nginx \ - --repo https://kubernetes.github.io/ingress-nginx \ - --version 4.12.1 \ - --set 'controller.service.type=LoadBalancer' \ - --set 'controller.extraArgs.enable-ssl-passthrough=true' \ - --wait -``` - - - -### Install Upbound Spaces software - -Create an image pull secret so that the cluster can pull Upbound Spaces images. - -```bash -kubectl create ns upbound-system -kubectl -n upbound-system create secret docker-registry upbound-pull-secret \ - --docker-server=https://xpkg.upbound.io \ - --docker-username="$(jq -r .accessId $SPACES_TOKEN_PATH)" \ - --docker-password="$(jq -r .token $SPACES_TOKEN_PATH)" -``` - -Log in with Helm to be able to pull chart images for the installation commands. - -```bash -jq -r .token $SPACES_TOKEN_PATH | helm registry login xpkg.upbound.io -u $(jq -r .accessId $SPACES_TOKEN_PATH) --password-stdin -``` - -Install the Spaces software. - -```bash -helm -n upbound-system upgrade --install spaces \ - oci://xpkg.upbound.io/spaces-artifacts/spaces \ - --version "${SPACES_VERSION}" \ - --set "ingress.host=${SPACES_ROUTER_HOST}" \ - --set "account=${UPBOUND_ACCOUNT}" \ - --set "authentication.hubIdentities=true" \ - --set "authorization.hubRBAC=true" \ - --wait -``` - -### Create a DNS record - -:::important -If you chose to create a public ingress, you also need to create a DNS record for the load balancer of the public facing ingress. Do this before you create your first control plane. -::: - -Create a DNS record for the load balancer of the public facing ingress. 
To get the address for the Ingress, run the following:
-
-
-
-```bash
-kubectl get ingress \
-  -n upbound-system mxe-router-ingress \
-  -o jsonpath='{.status.loadBalancer.ingress[0].hostname}'
-```
-
-
-
-
-
-```bash
-kubectl get ingress \
-  -n upbound-system mxe-router-ingress \
-  -o jsonpath='{.status.loadBalancer.ingress[0].ip}'
-```
-
-
-
-
-
-```bash
-kubectl get ingress \
-  -n upbound-system mxe-router-ingress \
-  -o jsonpath='{.status.loadBalancer.ingress[0].ip}'
-```
-
-
-
-If the preceding command doesn't return a load balancer address, your provider may not have allocated it yet. Once it's available, add a DNS record for the `ROUTER_HOST` to point to the given load balancer address. If it's an IPv4 address, add an A record. If it's a domain name, add a CNAME record.
-
-## Configure the up CLI
-
-With your kubeconfig pointed at the Kubernetes cluster where you installed
-Upbound Spaces, create a new profile in the `up` CLI. This profile interacts
-with your Space:
-
-```bash
-up profile create --use ${SPACES_CLUSTER_NAME} --type=disconnected --organization ${UPBOUND_ACCOUNT}
-```
-
-Optionally, log in to your Upbound account using the new profile so you can use the Upbound Marketplace with this profile as well:
-
-```bash
-up login
-```
-
-
-## Connect to your Space
-
-
-Use `up ctx` to create a kubeconfig context pointed at your new Space:
-
-```bash
-up ctx disconnected/$(kubectl config current-context)
-```
-
-## Create your first control plane
-
-You can now create a control plane with the `up` CLI:
-
-```bash
-up ctp create ctp1
-```
-
-You can also create a control plane with kubectl; this manifest mirrors the `ControlPlane` example used throughout these docs:
-
-```yaml
-cat <<EOF | kubectl apply -f -
-apiVersion: spaces.upbound.io/v1beta1
-kind: ControlPlane
-metadata:
-  name: ctp1
-  namespace: default
-EOF
-```
-
-```yaml
-observability:
-  spacesCollector:
-    env:
-      - name: API_KEY
-        valueFrom:
-          secretKeyRef:
-            name: my-secret
-            key: api-key
-    config:
-      exporters:
-        otlphttp:
-          endpoint: ""
-          headers:
-            api-key: ${env:API_KEY}
-      exportPipeline:
-        logs:
-          - otlphttp
-        metrics:
-          - otlphttp
-        traces:
-          - otlphttp
-```
-
-
-You can export metrics, logs, and traces from your Crossplane installation, Spaces
-infrastructure (controller, API, router, etc.), provider-helm, and
-provider-kubernetes.
-
-### Router metrics
-
-The Spaces router component uses Envoy as a reverse proxy and exposes detailed
-metrics about request handling, circuit breakers, and connection pooling.
-Upbound collects these metrics in your Space after you enable Space-level
-observability.
-
-Envoy metrics in Upbound include:
-
-- **Upstream cluster metrics** - Request status codes, timeouts, retries, and latency for traffic to control planes and services
-- **Circuit breaker metrics** - Connection and request circuit breaker state for both `DEFAULT` and `HIGH` priority levels
-- **Downstream listener metrics** - Client connections and requests received
-- **HTTP connection manager metrics** - End-to-end HTTP request processing and latency
-
-For a complete list of available router metrics and example PromQL queries, see the [Router metrics reference][router-ref].
-
-### Router tracing
-
-The Spaces router generates distributed traces through OpenTelemetry integration,
-providing end-to-end visibility into request flow across the system. Use these
-traces to debug latency issues, understand request paths, and correlate errors
-across services.
-
-The router uses:
-
-- **Protocol**: OTLP (OpenTelemetry Protocol) over gRPC
-- **Service name**: `spaces-router`
-- **Transport**: TLS-encrypted connection to telemetry collector
-
-#### Trace configuration
-
-Enable tracing and configure the sampling rate with the following Helm values:
-
-```yaml
-observability:
-  enabled: true
-  tracing:
-    enabled: true
-    sampling:
-      rate: 0.1 # Sample 10% of new traces (0.0-1.0)
-```
-
-The sampling behavior depends on whether a parent trace context exists:
-
-- **With parent context**: If a `traceparent` header is present, the parent's
-  sampling decision is respected, enabling proper distributed tracing across services.
-- **Root spans**: For new traces without a parent, Envoy samples based on
-  `x-request-id` hashing. The default sampling rate is 10%.
-
-#### TLS configuration for external collectors
-
-To send traces to an external OTLP collector, configure the endpoint and TLS settings:
-
-```yaml
-observability:
-  enabled: true
-  tracing:
-    enabled: true
-    endpoint: "otlp-gateway.example.com"
-    port: 443
-    tls:
-      caBundleSecretRef: "custom-ca-secret"
-```
-
-If `caBundleSecretRef` is set, the router uses the CA bundle from the referenced
-Kubernetes secret. The secret must contain a key named `ca.crt` with the
-PEM-encoded CA bundle. If not set, the router uses the Spaces CA for the
-in-cluster collector.
-
-#### Custom trace tags
-
-The router adds custom tags to every span to enable filtering and grouping by
-control plane:
-
-| Tag | Source | Description |
-|-----|--------|-------------|
-| `controlplane.id` | `x-upbound-mxp-id` header | Control plane UUID |
-| `controlplane.name` | `x-upbound-mxp-host` header | Internal vcluster hostname |
-| `hostcluster.id` | `x-upbound-hostcluster-id` header | Host cluster identifier |
-
-These tags enable queries like "show all slow requests to control plane X" or
-"find errors for control planes in host cluster Y."
-
-#### Example trace
-
-The following example shows the attributes from a successful GET request:
-
-```text
-Span: ingress
-├─ Service: spaces-router
-├─ Duration: 8.025ms
-├─ Attributes:
-│  ├─ http.method: GET
-│  ├─ http.status_code: 200
-│  ├─ upstream_cluster: ctp-b2b37aaa-ee55-492c-ba0c-4d561a6325fa-api-cluster
-│  ├─ controlplane.id: b2b37aaa-ee55-492c-ba0c-4d561a6325fa
-│  ├─ controlplane.name: vcluster.mxp-b2b37aaa-ee55-492c-ba0c-4d561a6325fa-system
-│  └─ response_size: 1827
-```
-
-## Available metrics
-
-Space-level observability collects metrics from multiple infrastructure components:
-
-### Infrastructure component metrics
-
-- Crossplane controller metrics
-- Spaces controller, API, and router metrics
-- Provider metrics (provider-helm, provider-kubernetes)
-
-### Router metrics
-
-The router component exposes Envoy proxy metrics for monitoring traffic flow and
-service health.
Key metric categories include:
-
-- `envoy_cluster_upstream_rq_*` - Upstream request metrics (status codes, timeouts, retries, latency)
-- `envoy_cluster_circuit_breakers_*` - Circuit breaker state and capacity
-- `envoy_listener_downstream_*` - Client connection and request metrics
-- `envoy_http_downstream_*` - HTTP request processing metrics
-
-Example query to monitor total request rate:
-
-```promql
-sum(rate(envoy_cluster_upstream_rq_total{job="spaces-router-envoy"}[5m]))
-```
-
-Example query for P95 latency:
-
-```promql
-histogram_quantile(
-  0.95,
-  sum by (le) (
-    rate(envoy_cluster_upstream_rq_time_bucket{job="spaces-router-envoy"}[5m])
-  )
-)
-```
-
-For detailed router metrics documentation and more query examples, see the [Router metrics reference][router-ref].
-
-
-## OpenTelemetryCollector image
-
-
-Control plane (`SharedTelemetry`) and Space observability deploy the same custom
-OpenTelemetry Collector image. The OpenTelemetry Collector image supports the
-`otlphttp`, `datadog`, and `debug` exporters.
-
-For more information on observability configuration, review the [Helm chart reference][helm-chart-reference].
-
-## Observability in control planes
-
-Read the [observability documentation][observability-documentation] to learn
-about the features Upbound offers for collecting telemetry from control planes.
-
-
-## Router metrics reference {#router-ref}
-
-To avoid overwhelming observability tools with hundreds of Envoy metrics, an
-allow-list filters metrics to only the following metric families.
-
-### Upstream cluster metrics
-
-Metrics tracking requests sent from Envoy to configured upstream clusters.
-Individual control planes, spaces-api, and other services are each considered
-an upstream cluster. Use these metrics to monitor service health, identify
-upstream errors, and measure backend latency.
-
-| Metric | Description |
-|--------|-------------|
-| `envoy_cluster_upstream_rq_xx_total` | HTTP status codes (2xx, 3xx, 4xx, 5xx) with label `envoy_response_code_class` |
-| `envoy_cluster_upstream_rq_timeout_total` | Requests that timed out waiting for upstream |
-| `envoy_cluster_upstream_rq_retry_limit_exceeded_total` | Requests that exhausted retry attempts |
-| `envoy_cluster_upstream_rq_total` | Total upstream requests |
-| `envoy_cluster_upstream_rq_time_bucket` | Latency histogram (for P50/P95/P99 calculations) |
-| `envoy_cluster_upstream_rq_time_sum` | Sum of request durations |
-| `envoy_cluster_upstream_rq_time_count` | Count of requests |
-
-### Circuit breaker metrics
-
-
-
-Metrics tracking circuit breaker state and remaining capacity. Circuit breakers
-prevent cascading failures by limiting connections and concurrent requests to
-unhealthy upstreams. Two priority levels exist: `DEFAULT` for watch requests and
-`HIGH` for API requests.
- - -| Name | Description | -|--------|-------------| -| `envoy_cluster_circuit_breakers_default_cx_open` | `DEFAULT` priority connection circuit breaker open (gauge) | -| `envoy_cluster_circuit_breakers_default_rq_open` | `DEFAULT` priority request circuit breaker open (gauge) | -| `envoy_cluster_circuit_breakers_default_remaining_cx` | Available `DEFAULT` priority connections (gauge) | -| `envoy_cluster_circuit_breakers_default_remaining_rq` | Available `DEFAULT` priority request slots (gauge) | -| `envoy_cluster_circuit_breakers_high_cx_open` | `HIGH` priority connection circuit breaker open (gauge) | -| `envoy_cluster_circuit_breakers_high_rq_open` | `HIGH` priority request circuit breaker open (gauge) | -| `envoy_cluster_circuit_breakers_high_remaining_cx` | Available `HIGH` priority connections (gauge) | -| `envoy_cluster_circuit_breakers_high_remaining_rq` | Available `HIGH` priority request slots (gauge) | - -### Downstream listener metrics - -Metrics tracking requests received from clients such as kubectl and API consumers. -Use these metrics to monitor client connection patterns, overall request volume, -and responses sent to external users. - -| Name | Description | -|--------|-------------| -| `envoy_listener_downstream_rq_xx_total` | HTTP status codes for responses sent to clients | -| `envoy_listener_downstream_rq_total` | Total requests received from clients | -| `envoy_listener_downstream_cx_total` | Total connections from clients | -| `envoy_listener_downstream_cx_active` | Currently active client connections (gauge) | - - - -### HTTP connection manager metrics - - -Metrics from Envoy's HTTP connection manager tracking end-to-end request -processing. These metrics provide a comprehensive view of the HTTP request -lifecycle including status codes and client-perceived latency. - -| Name | Description | -|--------|-------------| -| `envoy_http_downstream_rq_xx` | HTTP status codes (note: no `_total` suffix for this metric family) | -| `envoy_http_downstream_rq_total` | Total HTTP requests received | -| `envoy_http_downstream_rq_time_bucket` | Downstream request latency histogram | -| `envoy_http_downstream_rq_time_sum` | Sum of downstream request durations | -| `envoy_http_downstream_rq_time_count` | Count of downstream requests | - -[router-ref]: #router-ref -[observability-documentation]: /spaces/howtos/observability -[opentelemetry-collector]: https://opentelemetry.io/docs/collector/ -[opentelemetry-operator]: https://opentelemetry.io/docs/kubernetes/operator/ -[helm-chart-reference]: /reference/helm-reference diff --git a/spaces_versioned_docs/version-v1.13/howtos/self-hosted/spaces-management.md b/spaces_versioned_docs/version-v1.13/howtos/self-hosted/spaces-management.md deleted file mode 100644 index 3df61c306..000000000 --- a/spaces_versioned_docs/version-v1.13/howtos/self-hosted/spaces-management.md +++ /dev/null @@ -1,219 +0,0 @@ ---- -title: Interacting with Disconnected Spaces -sidebar_position: 10 -description: Common operations in Spaces ---- - -:::info API Version Information -This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Spaces management procedures are consistent across versions. - -For version compatibility details, see the . -::: - -## Spaces management - -### Create a Space - -To install an Upbound Space into a cluster, it's recommended you dedicate an entire Kubernetes cluster for the Space. You can use [up space init][up-space-init] to install an Upbound Space. 
Below is an example:
-
-```bash
-up space init "v1.9.0"
-```
-:::tip
-For a full guide to get started with Spaces, read the [quickstart][quickstart] guide.
-:::
-
-You can also install the helm chart for Spaces directly. In order for a Spaces install to succeed, you must install some prerequisites first and configure them. This includes:
-
-- UXP
-- provider-helm and provider-kubernetes
-- cert-manager
-
-Furthermore, the Spaces chart requires a pull secret, which Upbound must provide to you.
-
-```bash
-helm -n upbound-system upgrade --install spaces \
-  oci://xpkg.upbound.io/spaces-artifacts/spaces \
-  --version "v1.9.0" \
-  --set "ingress.host=your-host.com" \
-  --set "clusterType=eks" \
-  --set "account=your-upbound-account" \
-  --wait
-```
-For a complete tutorial of the helm install, read one of the deployment guides for [AWS][aws], [Azure][azure], or [GCP][gcp], which cover the step-by-step process.
-
-### Upgrade a Space
-
-To upgrade a Space from one version to the next, use [up space upgrade][up-space-upgrade]. Spaces supports upgrading from version `ver x.N.*` to version `ver x.N+1.*`.
-
-```bash
-up space upgrade "v1.9.0"
-```
-
-You can also upgrade a Space by manually bumping the Helm chart version. Before
-upgrading, review the release notes for any breaking changes or
-special requirements:
-
-1. Review the release notes for the target version in the [Spaces Release Notes][spaces-release-notes]
-2. Upgrade the Space by updating the helm chart version:
-
-```bash
-helm -n upbound-system upgrade spaces \
-  oci://xpkg.upbound.io/spaces-artifacts/spaces \
-  --version "v1.9.0" \
-  --reuse-values \
-  --wait
-```
-
-For major version upgrades or configuration changes, extract your current values
-and adjust:
-
-```bash
-# Extract current values to a file
-helm -n upbound-system get values spaces > spaces-values.yaml
-
-# Upgrade with modified values
-helm -n upbound-system upgrade spaces \
-  oci://xpkg.upbound.io/spaces-artifacts/spaces \
-  --version "v1.9.0" \
-  -f spaces-values.yaml \
-  --wait
-```
-
-### Downgrade a Space
-
-To roll back a Space from one version to the previous, use [up space upgrade][up-space-upgrade-1]. Spaces supports downgrading from version `ver x.N.*` to version `ver x.N-1.*`.
-
-```bash
-up space upgrade --rollback
-```
-
-You can also downgrade a Space manually using Helm by specifying an earlier version:
-
-```bash
-helm -n upbound-system upgrade spaces \
-  oci://xpkg.upbound.io/spaces-artifacts/spaces \
-  --version "v1.8.0" \
-  --reuse-values \
-  --wait
-```
-
-When downgrading, make sure to:
-1. Check the [release notes][release-notes] for specific downgrade instructions
-2. Verify compatibility between the downgraded Space and any control planes
-3. Back up any critical data before proceeding
-
-### Uninstall a Space
-
-To uninstall a Space from a Kubernetes cluster, use [up space destroy][up-space-destroy]. A destroy operation uninstalls core components and orphans control planes and their associated resources.
-
-```bash
-up space destroy
-```
-
-## Control plane management
-
-You can manage control planes in a Space via the [up CLI][up-cli] or the Spaces-local Kubernetes API. When you install a Space, it defines a new API type, `kind: ControlPlane`, that you can use to create and manage control planes in the Space.
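-
-A quick way to confirm the new API type is available, with your kubeconfig pointed at the Spaces cluster:
-
-```bash
-# List the API types served under the spaces.upbound.io group, including ControlPlane
-kubectl api-resources --api-group=spaces.upbound.io
-```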
-
-### Create a control plane
-
-To create a control plane in a Space using `up`, run the following:
-
-```bash
-up ctp create ctp1
-```
-
-You can also declare a new control plane like the example below and apply it to your Spaces cluster:
-
-```yaml
-apiVersion: spaces.upbound.io/v1beta1
-kind: ControlPlane
-metadata:
-  name: ctp1
-  namespace: default
-spec:
-  writeConnectionSecretToRef:
-    name: kubeconfig-ctp1
-    namespace: default
-```
-
-This manifest:
-
-- Creates a new control plane in the Space, called `ctp1`.
-- Publishes the kubeconfig for connecting to the control plane to a secret in the Spaces cluster, called `kubeconfig-ctp1`.
-
-### Connect to a control plane
-
-To connect to a control plane in a Space using `up`, run the following:
-
-```bash
-up ctp connect new-control-plane
-```
-
-The command changes your kubeconfig's current context to the control plane you specify. If you want to change your kubeconfig back to a previous context, run:
-
-```bash
-up ctp disconnect
-```
-
-If you configured your control plane to publish connection details, you can also access it this way. Once the control plane is ready, use the secret (containing connection details) to connect to the API server of your control plane. For the `ctp1` example above, that secret is `kubeconfig-ctp1`:
-
-```bash
-kubectl get secret kubeconfig-ctp1 -n default -o jsonpath='{.data.kubeconfig}' | base64 -d > /tmp/ctp1.yaml
-```
-
-Reference the kubeconfig whenever you want to interact directly with the API server of the control plane (vs the Space's API server):
-
-```bash
-kubectl get providers --kubeconfig=/tmp/ctp1.yaml
-```
-
-### Configure a control plane
-
-Spaces offers a built-in feature that allows you to connect a control plane to a Git source. This mirrors the experience of running a control plane in [Upbound's SaaS environment][upbound-s-saas-environment]. Upbound recommends using the built-in Git integration to drive configuration of your control planes in a Space.
-
-Learn more in the [Spaces Git integration][spaces-git-integration] documentation.
-
-### List control planes
-
-To list all control planes in a Space using `up`, run the following:
-
-```bash
-up ctp list
-```
-
-Or you can use Kubernetes-style semantics to list the control planes:
-
-```bash
-kubectl get controlplanes
-```
-
-
-### Delete a control plane
-
-To delete a control plane in a Space using `up`, run the following:
-
-```bash
-up ctp delete ctp1
-```
-
-Or you can use Kubernetes-style semantics to delete the control plane:
-
-```bash
-kubectl delete controlplane ctp1
-```
-
-
-[up-space-init]: /reference/cli-reference
-[quickstart]: /
-[aws]: /spaces/howtos/self-hosted/self-hosted-spaces-deployment
-[azure]: /spaces/howtos/self-hosted/self-hosted-spaces-deployment
-[gcp]: /spaces/howtos/self-hosted/self-hosted-spaces-deployment
-[up-space-upgrade]: /reference/cli-reference
-[spaces-release-notes]: /reference/release-notes/spaces
-[up-space-upgrade-1]: /reference/cli-reference
-[release-notes]: /reference/release-notes/spaces
-[up-space-destroy]: /reference/cli-reference
-[up-cli]: /reference/cli-reference
-[upbound-s-saas-environment]: /spaces/howtos/self-hosted/spaces-management
-[spaces-git-integration]: /spaces/howtos/self-hosted/gitops
diff --git a/spaces_versioned_docs/version-v1.13/howtos/self-hosted/troubleshooting.md b/spaces_versioned_docs/version-v1.13/howtos/self-hosted/troubleshooting.md
deleted file mode 100644
index 8d1ca6517..000000000
--- a/spaces_versioned_docs/version-v1.13/howtos/self-hosted/troubleshooting.md
+++ /dev/null
@@ -1,132 +0,0 @@
----
-title: Troubleshooting
-sidebar_position: 100
-description: A guide for troubleshooting an issue that occurs in a Space
----
-
-Use the guidance below to find solutions for issues you encounter when deploying and using an Upbound Space. These tips supplement the observability metrics discussed in the [Observability][observability] page.
-
-## General tips
-
-Most issues fall into two general categories:
-
-1. issues with the Spaces management plane
-2. issues on a control plane
-
-If your control plane doesn't reach a `Ready` state, it's indicative of the former. If your control plane is in a created and running state, but resources aren't reconciling, it's indicative of the latter.
-
-### Spaces component layout
-
-Run `kubectl get pods -A` against the cluster hosting a Space. You should see a variety of pods across several namespaces.
It should look something like this: - -```bash -NAMESPACE NAME READY STATUS RESTARTS AGE -cert-manager cert-manager-6d6769565c-mc5df 1/1 Running 0 25m -cert-manager cert-manager-cainjector-744bb89575-nw4fg 1/1 Running 0 25m -cert-manager cert-manager-webhook-759d6dcbf7-ps4mq 1/1 Running 0 25m -ingress-nginx ingress-nginx-controller-7f8ccfccc6-6szlp 1/1 Running 0 25m -kube-system coredns-5d78c9869d-4p477 1/1 Running 0 26m -kube-system coredns-5d78c9869d-pdxt6 1/1 Running 0 26m -kube-system etcd-kind-control-plane 1/1 Running 0 26m -kube-system kindnet-8s7pq 1/1 Running 0 26m -kube-system kube-apiserver-kind-control-plane 1/1 Running 0 26m -kube-system kube-controller-manager-kind-control-plane 1/1 Running 0 26m -kube-system kube-proxy-l68r8 1/1 Running 0 26m -kube-system kube-scheduler-kind-control-plane 1/1 Running 0 26m -local-path-storage local-path-provisioner-6bc4bddd6b-qsdjt 1/1 Running 0 26m -mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system coredns-5dc69d6447-f56rh-x-kube-system-x-vcluster 1/1 Running 0 21m -mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system crossplane-6b6d67bc66-6b8nx-x-upbound-system-x-vcluster 1/1 Running 0 20m -mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system crossplane-rbac-manager-78f6fc7cb4-pjkhc-x-upbound-s-12253c3c4e 1/1 Running 0 20m -mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system kube-state-metrics-7f8f4dcc5b-8p8c4 1/1 Running 0 22m -mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system mxp-gateway-68f546b9c8-xnz5j-x-upbound-system-x-vcluster 1/1 Running 0 20m -mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system mxp-ksm-config-54655667bb-hv9br 1/1 Running 0 22m -mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system mxp-readyz-5f7f97d967-b98bw 1/1 Running 0 22m -mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system otlp-collector-56d7d46c8d-g5sh5-x-upbound-system-x-vcluster 1/1 Running 0 20m -mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system vcluster-67c9fb8959-ppb2m 1/1 Running 0 22m -mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system vcluster-api-6bfbccc49d-ffgpj 1/1 Running 0 22m -mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system vcluster-controller-7cc6855656-8c46b 1/1 Running 0 22m -mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system vcluster-etcd-0 1/1 Running 0 22m -mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system vector-754b494b84-wljw4 1/1 Running 0 22m -mxp-system mxp-charts-chartmuseum-7587f77558-8tltb 1/1 Running 0 23m -upbound-system crossplane-b4dc7b4c9-6hjh5 1/1 Running 0 25m -upbound-system crossplane-contrib-provider-helm-ce18dd03e6e4-7945d8985-4gcwr 1/1 Running 0 24m -upbound-system crossplane-contrib-provider-kubernetes-1f1e32c1957d-577756gs2x4 1/1 Running 0 24m -upbound-system crossplane-rbac-manager-d8cb49cbc-gbvvf 1/1 Running 0 25m -upbound-system spaces-controller-6647677cf9-5zl5q 1/1 Running 0 24m -upbound-system spaces-router-bc78c96d7-kzts2 2/2 Running 0 24m -``` - -What you are seeing is: - -- Pods in the `upbound-system` namespace are components required to run the management plane of the Space. This includes the `spaces-controller`, `spaces-router`, and install of UXP. -- Pods in the `mxp-{GUID}-system` namespace are components that collectively power a control plane. Notable call outs include pod names that look like `vcluster-api-{GUID}` and `vcluster-controller-{GUID}`, which are integral components of a control plane. -- Pods in other notable namespaces, including `cert-manager` and `ingress-nginx`, are prerequisite components that support a Space's successful operation. 
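-
-To quickly map control planes to their host namespaces, list the `mxp-` prefixed namespaces; this assumes the default naming shown in the output above:
-
-```bash
-# Each control plane gets its own mxp-<GUID>-system namespace on the host cluster
-kubectl get namespaces | grep '^mxp-'
-```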
-
-
-
-### Troubleshooting tips for the Spaces management plane
-
-Start by getting the status of all the pods in a Space:
-
-1. Make sure the current context of your kubeconfig points at the Kubernetes cluster hosting your Space
-2. Get the status of all the pods in the Space:
-```bash
-kubectl get pods -A
-```
-3. Scan the `Status` column to see if any of the pods report a status besides `Running`.
-4. Scan the `Restarts` column to see if any of the pods have restarted.
-5. If you notice a Status other than `Running` or see pods that restarted, you should investigate their events by running
-```bash
-kubectl describe pod <pod-name> -n <namespace>
-```
-
-Next, inspect the status of objects and releases:
-
-1. Make sure the current context of your kubeconfig points at the Kubernetes cluster hosting your Space
-2. Inspect the objects in your Space. If any are unhealthy, describe those objects to get the events:
-```bash
-kubectl get objects
-```
-3. Inspect the releases in your Space. If any are unhealthy, describe those releases to get the events:
-```bash
-kubectl get releases
-```
-
-### Troubleshooting tips for control planes in a Space
-
-General troubleshooting in a control plane starts by fetching the events of the control plane:
-
-1. Make sure the current context of your kubeconfig points at the Kubernetes cluster hosting your Space
-2. Run the following to fetch your control planes.
-```bash
-kubectl get ctp
-```
-3. Describe the control plane by providing its name, found in the preceding instruction.
-```bash
-kubectl describe controlplanes.spaces.upbound.io <control-plane-name>
-```
-
-## Issues
-
-
-### Your control plane is stuck in a 'creating' state
-
-#### Error: unknown field "ports" in io.k8s.api.networking.v1.NetworkPolicySpec
-
-This error occurs when the Spaces software attempts to install a Helm release named `control-plane-host-policies`. The full error is:
-
-_CannotCreateExternalResource failed to install release: unable to build kubernetes objects from release manifest: error validating "": error validating data: ValidationError(NetworkPolicy.spec): unknown field "ports" in io.k8s.api.networking.v1.NetworkPolicySpec_
-
-This error may be caused by running a Space on an earlier version of Kubernetes than is supported (`v1.26` or later). To resolve this issue, upgrade the host Kubernetes cluster to v1.26 or later.
-
-### Your Spaces install fails
-
-#### Error: You tried to install a Space on a previous Crossplane installation
-
-If you try to install a Space on an existing cluster that previously had Crossplane or UXP on it, you may encounter errors. Due to how the Spaces installer tests for the presence of UXP, it may detect orphaned CRDs that weren't cleaned up by the previous uninstall of Crossplane. You may need to manually [remove old Crossplane CRDs][remove-old-crossplane-crds] for the installer to properly detect the UXP prerequisite.
-
-
-
-
-[observability]: /spaces/howtos/observability
-[remove-old-crossplane-crds]: https://docs.crossplane.io/latest/guides/uninstall-crossplane/
diff --git a/spaces_versioned_docs/version-v1.13/howtos/self-hosted/use-argo.md b/spaces_versioned_docs/version-v1.13/howtos/self-hosted/use-argo.md
deleted file mode 100644
index d58f7db44..000000000
--- a/spaces_versioned_docs/version-v1.13/howtos/self-hosted/use-argo.md
+++ /dev/null
@@ -1,228 +0,0 @@
----
-title: Use ArgoCD Plugin
-sidebar_position: 15
-description: A guide for integrating Argo with control planes in a Space.
-aliases:
-  - /all-spaces/self-hosted-spaces/use-argo
-  - /deploy/disconnected-spaces/use-argo-flux
-  - /all-spaces/self-hosted-spaces/use-argo-flux
-  - /connect/use-argo
----
-
-
-:::info API Version Information
-This guide covers the ArgoCD Plugin, a preview feature available in all supported versions (v1.9-v1.14+) for self-hosted deployments.
-
-For details on GitOps patterns and related features across versions, see [Automation and GitOps Overview](/spaces/howtos/automation-and-gitops/overview/).
-:::
-
-:::important
-This feature is in preview and is off by default. To enable it, set `features.alpha.argocdPlugin.enabled=true` when installing Spaces:
-
-```bash
-up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
-  ...
-  --set "features.alpha.argocdPlugin.enabled=true"
-```
-:::
-
-Spaces provides an optional plugin to assist with integrating a control plane in a Space with Argo CD. You must enable the plugin for the entire Space at Spaces install or upgrade time. The plugin's job is to propagate the connection details of each control plane in a Space to Argo CD. By default, Upbound stores these connection details in a Kubernetes secret named after the control plane. To run Argo CD across multiple namespaces, Upbound recommends enabling the `features.alpha.argocdPlugin.useUIDFormatForCTPSecrets` flag to use a UID-based format for secret names to avoid conflicts.
-
-:::tip
-For general guidance on integrating Upbound with GitOps flows, see [GitOps with Control Planes][gitops-with-control-planes].
-:::
-
-## On-cluster Argo CD
-
-If you are running Argo CD on the same cluster as the Space, run the following to enable the plugin:
-
-
-
-
-
-
-```bash {hl_lines="3-4"}
-up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
-  --set "account=${UPBOUND_ACCOUNT}" \
-  --set "features.alpha.argocdPlugin.enabled=true" \
-  --set "features.alpha.argocdPlugin.useUIDFormatForCTPSecrets=true" \
-  --set "features.alpha.argocdPlugin.target.secretNamespace=argocd"
-```
-
-
-
-
-
-```bash {hl_lines="7-8"}
-helm -n upbound-system upgrade --install spaces \
-  oci://xpkg.upbound.io/spaces-artifacts/spaces \
-  --version "${SPACES_VERSION}" \
-  --set "ingress.host=${SPACES_ROUTER_HOST}" \
-  --set "account=${UPBOUND_ACCOUNT}" \
-  --set "features.alpha.argocdPlugin.enabled=true" \
-  --set "features.alpha.argocdPlugin.useUIDFormatForCTPSecrets=true" \
-  --set "features.alpha.argocdPlugin.target.secretNamespace=argocd" \
-  --wait
-```
-
-
-
-
-
-
-The important flags are:
-
-- `features.alpha.argocdPlugin.enabled=true`
-- `features.alpha.argocdPlugin.useUIDFormatForCTPSecrets=true`
-- `features.alpha.argocdPlugin.target.secretNamespace=argocd`
-
-The first flag enables the feature, the second switches the control plane connection secrets to a UID-based naming format, and the third indicates the namespace on the cluster where you installed Argo CD.
-
-Be sure to [configure Argo][configure-argo] after it's installed.
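-
-To confirm the plugin is working, you can look for the propagated connection secrets after creating a control plane. This check assumes the plugin registers control planes using Argo CD's standard cluster-secret label:
-
-```bash
-# Control plane connection details should appear as Argo CD cluster secrets
-kubectl get secrets -n argocd -l argocd.argoproj.io/secret-type=cluster
-```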
-
-## External cluster Argo CD
-
-If you are running Argo CD on an external cluster from where you installed your Space, you need to provide some extra flags:
-
-
-
-
-
-
-```bash {hl_lines="3-7"}
-up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
-  --set "account=${UPBOUND_ACCOUNT}" \
-  --set "features.alpha.argocdPlugin.enabled=true" \
-  --set "features.alpha.argocdPlugin.useUIDFormatForCTPSecrets=true" \
-  --set "features.alpha.argocdPlugin.target.secretNamespace=argocd" \
-  --set "features.alpha.argocdPlugin.target.externalCluster.enabled=true" \
-  --set "features.alpha.argocdPlugin.target.externalCluster.secret.name=my-argo-cluster" \
-  --set "features.alpha.argocdPlugin.target.externalCluster.secret.key=kubeconfig"
-```
-
-
-
-
-
-```bash {hl_lines="7-11"}
-helm -n upbound-system upgrade --install spaces \
-  oci://xpkg.upbound.io/spaces-artifacts/spaces \
-  --version "${SPACES_VERSION}" \
-  --set "ingress.host=${SPACES_ROUTER_HOST}" \
-  --set "account=${UPBOUND_ACCOUNT}" \
-  --set "features.alpha.argocdPlugin.enabled=true" \
-  --set "features.alpha.argocdPlugin.useUIDFormatForCTPSecrets=true" \
-  --set "features.alpha.argocdPlugin.target.secretNamespace=argocd" \
-  --set "features.alpha.argocdPlugin.target.externalCluster.enabled=true" \
-  --set "features.alpha.argocdPlugin.target.externalCluster.secret.name=my-argo-cluster" \
-  --set "features.alpha.argocdPlugin.target.externalCluster.secret.key=kubeconfig" \
-  --wait
-```
-
-
-
-
-
-```bash
-helm -n upbound-system upgrade --install spaces \
-  oci://xpkg.upbound.io/spaces-artifacts/spaces \
-  --version "${SPACES_VERSION}" \
-  --set "ingress.host=${SPACES_ROUTER_HOST}" \
-  --set "account=${UPBOUND_ACCOUNT}" \
-  --set "features.alpha.argocdPlugin.enabled=true" \
-  --set "features.alpha.argocdPlugin.useUIDFormatForCTPSecrets=true" \
-  --set "features.alpha.argocdPlugin.target.secretNamespace=argocd" \
-  --set "features.alpha.argocdPlugin.target.externalCluster.enabled=true" \
-  --set "features.alpha.argocdPlugin.target.externalCluster.secret.name=my-argo-cluster" \
-  --set "features.alpha.argocdPlugin.target.externalCluster.secret.key=kubeconfig" \
-  --wait
-```
-
-The extra flags are:
-
-- `features.alpha.argocdPlugin.target.externalCluster.enabled=true`
-- `features.alpha.argocdPlugin.useUIDFormatForCTPSecrets=true`
-- `features.alpha.argocdPlugin.target.externalCluster.secret.name=my-argo-cluster`
-- `features.alpha.argocdPlugin.target.externalCluster.secret.key=kubeconfig`
-
-These flags tell the plugin (running in Spaces) where your Argo CD instance is. After you've done this at install time, you also need to create a `Secret` on the Spaces cluster. This secret must contain a kubeconfig pointing to your Argo CD instance. The secret needs to be in the same namespace as the `spaces-controller`, which is `upbound-system`.
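-
-A minimal sketch of that secret, reusing the `my-argo-cluster` name and `kubeconfig` key from the flags above (the kubeconfig path is a placeholder):
-
-```bash
-kubectl -n upbound-system create secret generic my-argo-cluster \
-  --from-file=kubeconfig=/path/to/argocd-cluster-kubeconfig.yaml
-```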
-
-Once you enable and configure the plugin, it automatically propagates connection details for your control planes to your Argo CD instance. You can then target the control plane and use Argo to sync Crossplane-related objects to it.
-
-Be sure to [configure Argo][configure-argo-1] after it's installed.
-
-## Configure Argo
-
-Argo's default configuration causes it to try to query for resource kinds that don't exist in control planes. You should configure Argo's [general configmap][general-configmap] to include the resource group/kinds which make sense in the context of control planes. For example, the concept of `nodes` isn't exposed in control planes.
-
-To configure Argo CD, connect to the cluster where you've installed it and edit the configmap:
-
-```bash
-kubectl edit configmap argocd-cm -n argocd
-```
-
-Adjust the resource inclusions and exclusions under the `data` field of the configmap:
-
-```yaml
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: argocd-cm
-  namespace: argocd
-data:
-  resource.exclusions: |
-    - apiGroups:
-      - "*"
-      kinds:
-      - "*"
-      clusters:
-      - "*"
-  resource.inclusions: |
-    - apiGroups:
-      - "*"
-      kinds:
-      - Provider
-      - Configuration
-      clusters:
-      - "*"
-```
-
-The preceding configuration causes Argo to exclude syncing **all** resource group/kinds for **all** control planes, except Crossplane `providers` and `configurations`. You're encouraged to adjust the `resource.inclusions` to include the types that make sense for your control plane, such as an `XRD` you've built with Crossplane. You're also encouraged to customize the `clusters` pattern to selectively apply these exclusions/inclusions to control planes (for example, `control-plane-prod-*`).
-
-## Control plane connection secrets
-
-To deploy control planes through Argo CD, you need to configure the `writeConnectionSecretToRef` field in your control plane spec. This field specifies where to store the control plane's `kubeconfig` and makes connection details available to Argo CD.
-
-### Basic Configuration
-
-In your control plane manifest, include the `writeConnectionSecretToRef` field:
-
-```yaml
-apiVersion: spaces.upbound.io/v1beta1
-kind: ControlPlane
-metadata:
-  name: my-control-plane
-  namespace: my-control-plane-group
-spec:
-  writeConnectionSecretToRef:
-    name: kubeconfig-my-control-plane
-    namespace: my-control-plane-group
-  # ... other control plane configuration
-```
-
-### Parameters
-
-The `writeConnectionSecretToRef` field requires two parameters:
-
-- `name`: A unique name for the secret containing the kubeconfig (`kubeconfig-my-control-plane`)
-- `namespace`: The Kubernetes namespace where you store the secret, which must match the metadata namespace. The system copies it into the `argocd` namespace when you set the `features.alpha.argocdPlugin.target.secretNamespace=argocd` configuration parameter.
-
-Control plane labels automatically propagate to the connection secret, which allows you to use label selectors in Argo CD for automated discovery and management.
-
-This configuration enables Argo CD to automatically discover and manage resources on your control planes.
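-
-As a sketch of the end-to-end flow, an Argo CD `Application` can then target a control plane by the cluster name under which the plugin registered it. The repository URL, paths, and names below are placeholders:
-
-```yaml
-apiVersion: argoproj.io/v1alpha1
-kind: Application
-metadata:
-  name: ctp1-resources
-  namespace: argocd
-spec:
-  project: default
-  source:
-    # Placeholder repository holding your Crossplane manifests
-    repoURL: https://github.com/my-org/control-plane-config.git
-    targetRevision: main
-    path: .
-  destination:
-    # Control plane cluster name as registered by the plugin (assumed)
-    name: ctp1
-  syncPolicy:
-    automated: {}
-```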
-
-
-[gitops-with-control-planes]: /spaces/howtos/cloud-spaces/gitops
-[configure-argo]: #configure-argo
-[configure-argo-1]: #configure-argo
-[general-configmap]: https://argo-cd.readthedocs.io/en/stable/operator-manual/argocd-cm-yaml/
diff --git a/spaces_versioned_docs/version-v1.13/howtos/self-hosted/workload-id/_category_.json b/spaces_versioned_docs/version-v1.13/howtos/self-hosted/workload-id/_category_.json
deleted file mode 100644
index c5ecc93f6..000000000
--- a/spaces_versioned_docs/version-v1.13/howtos/self-hosted/workload-id/_category_.json
+++ /dev/null
@@ -1,11 +0,0 @@
-{
-  "label": "Workload Identity Configuration",
-  "position": 2,
-  "collapsed": true,
-  "customProps": {
-    "plan": "business"
-  }
-
-}
-
-
diff --git a/spaces_versioned_docs/version-v1.13/howtos/self-hosted/workload-id/backup-restore-config.md b/spaces_versioned_docs/version-v1.13/howtos/self-hosted/workload-id/backup-restore-config.md
deleted file mode 100644
index 935ca69ec..000000000
--- a/spaces_versioned_docs/version-v1.13/howtos/self-hosted/workload-id/backup-restore-config.md
+++ /dev/null
@@ -1,384 +0,0 @@
----
-title: Backup and Restore Workload ID
-weight: 1
-description: Configure workload identity for Spaces Backup and Restore
----
-import GlobalLanguageSelector, { CodeBlock } from '@site/src/components/GlobalLanguageSelector';
-
-
-
-
-
-
-
-Workload-identity authentication lets you use access policies to grant temporary
-AWS credentials to a Kubernetes pod through its service account. Associating IAM
-roles with service accounts allows the pod to assume the IAM role dynamically,
-which is much more secure than static credentials.
-
-This guide walks you through creating an IAM trust role policy and applying it
-to your EKS cluster to handle backup and restore storage.
-
-
-
-
-
-Workload-identity authentication lets you use access policies to grant your
-self-hosted Space cluster access to your cloud providers. Workload identity
-authentication grants temporary Azure credentials to a Kubernetes pod based on
-its service account. Associating managed identities with service accounts allows
-the pod to authenticate with Azure resources dynamically, which is much more
-secure than static credentials.
-
-This guide walks you through creating a managed identity and federated credential for your AKS
-cluster to handle backup and restore storage.
-
-
-
-
-
-Workload-identity authentication lets you use access policies to grant your
-self-hosted Space cluster access to your cloud providers. Workload identity
-authentication grants temporary GCP credentials to a Kubernetes pod based on
-its service account. Associating IAM roles with service accounts allows the pod to
-access cloud resources dynamically, which is much more secure than static credentials.
-
-This guide walks you through configuring workload identity for your GKE
-cluster to handle backup and restore storage.
-
-
-
-## Prerequisites
-
-
-To set up workload identity, you'll need:
-
-
-- A self-hosted Space cluster
-- Administrator access in your cloud provider
-- Helm and `kubectl`
-
-## About the backup and restore component
-
-The `mxp-controller` component handles backup and restore workloads. It needs to
-access your cloud storage to store and retrieve backups. By default, this
-component runs in each control plane's host namespace.
-
-## Configuration
-
-
-
-Upbound supports workload-identity configurations in AWS with IAM Roles for
-Service Accounts and EKS pod identity association.
-
-#### IAM Roles for Service Accounts (IRSA)
-
-With IRSA, you can associate a Kubernetes service account in an EKS cluster with
-an AWS IAM role. Upbound authenticates workloads with that service account as
-the IAM role using temporary credentials instead of static role credentials.
-IRSA relies on the AWS `AssumeRoleWithWebIdentity` `STS` action to exchange OIDC ID tokens for
-the IAM role's temporary credentials. IRSA uses the `eks.amazonaws.com/role-arn`
-annotation to link the service account and the IAM role.
-
-First, create an IAM role with appropriate permissions to access your S3 bucket:
-
-```json
-{
-  "Version": "2012-10-17",
-  "Statement": [
-    {
-      "Effect": "Allow",
-      "Action": [
-        "s3:GetObject",
-        "s3:PutObject",
-        "s3:ListBucket",
-        "s3:DeleteObject"
-      ],
-      "Resource": [
-        "arn:aws:s3:::${YOUR_BACKUP_BUCKET}",
-        "arn:aws:s3:::${YOUR_BACKUP_BUCKET}/*"
-      ]
-    }
-  ]
-}
-```
-
-Next, ensure your EKS cluster has an OIDC identity provider:
-
-```shell
-eksctl utils associate-iam-oidc-provider --cluster ${YOUR_CLUSTER_NAME} --approve
-```
-
-Configure the IAM role trust policy with the namespace for each
-provisioned control plane.
-
-```json
-{
-  "Version": "2012-10-17",
-  "Statement": [
-    {
-      "Effect": "Allow",
-      "Principal": {
-        "Federated": "arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:oidc-provider/${YOUR_OIDC_PROVIDER}"
-      },
-      "Action": "sts:AssumeRoleWithWebIdentity",
-      "Condition": {
-        "StringEquals": {
-          "${YOUR_OIDC_PROVIDER}:aud": "sts.amazonaws.com",
-          "${YOUR_OIDC_PROVIDER}:sub": "system:serviceaccount:${YOUR_NAMESPACE}:mxp-controller"
-        }
-      }
-    }
-  ]
-}
-```
-
-In your control plane, pass the `--set` flag with the Spaces Helm chart
-parameters for the Backup and Restore component:
-
-```shell
---set controlPlanes.mxpController.serviceAccount.annotations."eks\.amazonaws\.com/role-arn"="${SPACES_BR_IAM_ROLE_ARN}"
-```
-
-This command allows the backup and restore component to authenticate with your
-dedicated IAM role in your EKS cluster environment.
-
-#### EKS pod identities
-
-Upbound also supports EKS Pod Identity configuration. EKS Pod Identities allow
-you to create a pod identity association with your Kubernetes namespace, a
-service account, and an IAM role, which allows the EKS control plane to
-automatically handle the credential exchange.
-
-First, create an IAM role with appropriate permissions to access your S3 bucket:
-
-```json
-{
-  "Version": "2012-10-17",
-  "Statement": [
-    {
-      "Effect": "Allow",
-      "Action": [
-        "s3:GetObject",
-        "s3:PutObject",
-        "s3:ListBucket",
-        "s3:DeleteObject"
-      ],
-      "Resource": [
-        "arn:aws:s3:::${YOUR_BACKUP_BUCKET}",
-        "arn:aws:s3:::${YOUR_BACKUP_BUCKET}/*"
-      ]
-    }
-  ]
-}
-```
-
-When you install or upgrade your Space with Helm, add the backup/restore values:
-
-```shell
-helm upgrade spaces spaces-helm-chart \
-  --set "backup.enabled=true" \
-  --set "backup.storage.provider=aws" \
-  --set "backup.storage.aws.region=${YOUR_AWS_REGION}" \
-  --set "backup.storage.aws.bucket=${YOUR_BACKUP_BUCKET}"
-```
-
-After Upbound provisions your control plane, create a Pod Identity Association
-with the `aws` CLI:
-
-```shell
-aws eks create-pod-identity-association \
-  --cluster-name ${YOUR_CLUSTER_NAME} \
-  --namespace ${YOUR_CONTROL_PLANE_NAMESPACE} \
-  --service-account mxp-controller \
-  --role-arn arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:role/backup-restore-role
-```
-
-
-
-Upbound supports workload-identity configurations in Azure with Azure's built-in
-workload identity feature.
- -#### Prepare your cluster - -First, enable the OIDC issuer and workload identity in your AKS cluster: - -```shell -az aks update --resource-group ${YOUR_RESOURCE_GROUP} --name ${YOUR_AKS_CLUSTER_NAME} --enable-oidc-issuer --enable-workload-identity -``` - -Next, find and store the OIDC issuer URL as an environment variable: - -```shell -export AKS_OIDC_ISSUER="$(az aks show --name ${YOUR_AKS_CLUSTER_NAME} --resource-group ${YOUR_RESOURCE_GROUP} --query "oidcIssuerProfile.issuerUrl" --output tsv)" -``` - -#### Create a User-Assigned Managed Identity - -Create a new managed identity to associate with the backup and restore component: - -```shell -az identity create --name backup-restore-identity --resource-group ${YOUR_RESOURCE_GROUP} --location ${YOUR_LOCATION} -``` - -Retrieve the client ID and store it as an environment variable: - -```shell -export USER_ASSIGNED_CLIENT_ID="$(az identity show --name backup-restore-identity --resource-group ${YOUR_RESOURCE_GROUP} --query clientId -otsv)" -``` - -Grant the managed identity you created to access your Azure Storage account: - -```shell -az role assignment create \ - --role "Storage Blob Data Contributor" \ - --assignee ${USER_ASSIGNED_CLIENT_ID} \ - --scope /subscriptions/${YOUR_SUBSCRIPTION_ID}/resourceGroups/${YOUR_RESOURCE_GROUP}/providers/Microsoft.Storage/storageAccounts/${YOUR_STORAGE_ACCOUNT} -``` - -#### Apply the managed identity role - -In your control plane, pass the `--set` flag with the Spaces Helm chart -parameters for the backup and restore component: - -```shell ---set controlPlanes.mxpController.serviceAccount.annotations."azure\.workload\.identity/client-id"="${YOUR_USER_ASSIGNED_CLIENT_ID}" ---set controlPlanes.mxpController.pod.customLabels."azure\.workload\.identity/use"="true" -``` - -#### Create a Federated Identity credential - -```shell -az identity federated-credential create \ - --name backup-restore-federated-identity \ - --identity-name backup-restore-identity \ - --resource-group ${YOUR_RESOURCE_GROUP} \ - --issuer ${AKS_OIDC_ISSUER} \ - --subject system:serviceaccount:${YOUR_CONTROL_PLANE_NAMESPACE}:mxp-controller -``` - - - - - -Upbound supports workload-identity configurations in GCP with IAM principal -identifiers and service account impersonation. 
- -#### Prepare your cluster - -First, enable Workload Identity Federation on your GKE cluster: - -```shell -gcloud container clusters update ${YOUR_CLUSTER_NAME} \ - --workload-pool=${YOUR_PROJECT_ID}.svc.id.goog \ - --region=${YOUR_REGION} -``` - -#### Create a Google Service Account - -Create a service account for the backup and restore component: - -```shell -gcloud iam service-accounts create backup-restore-sa \ - --display-name "Backup Restore Service Account" \ - --project ${YOUR_PROJECT_ID} -``` - -Grant the service account access to your Google Cloud Storage bucket: - -```shell -gcloud projects add-iam-policy-binding ${YOUR_PROJECT_ID} \ - --member "serviceAccount:backup-restore-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com" \ - --role "roles/storage.objectAdmin" -``` - -#### Configure Workload Identity - -Create an IAM binding to grant the Kubernetes service account access to the Google service account: - -```shell -gcloud iam service-accounts add-iam-policy-binding \ - backup-restore-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com \ - --role roles/iam.workloadIdentityUser \ - --member "serviceAccount:${YOUR_PROJECT_ID}.svc.id.goog[${YOUR_CONTROL_PLANE_NAMESPACE}/mxp-controller]" -``` - -#### Apply the service account configuration - -In your control plane, pass the `--set` flag with the Spaces Helm chart -parameters for the backup and restore component: - -```shell ---set controlPlanes.mxpController.serviceAccount.annotations."iam\.gke\.io/gcp-service-account"="backup-restore-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com" -``` - - - -## Verify your configuration - -After you apply the configuration use `kubectl` to verify the service account -has the correct annotation: - -```shell -kubectl get serviceaccount mxp-controller -n ${YOUR_CONTROL_PLANE_NAMESPACE} -o yaml -``` - -Verify the `mxp-controller` pod is running: - -```shell -kubectl get pods -n ${YOUR_CONTROL_PLANE_NAMESPACE} | grep mxp-controller -``` - -## Restart workload - -You must manually restart a workload's pod when you add the workload identity annotations to the running pod's service account. - - - -This restart enables the EKS pod identity webhook to inject the necessary -environment for using IRSA. - - - - - -This restart enables the workload identity webhook to inject the necessary -environment for using Azure workload identity. - - - - - -This restart enables the workload identity webhook to inject the necessary -environment for using GCP workload identity. - - - -```shell -kubectl rollout restart deployment mxp-controller -n ${YOUR_CONTROL_PLANE_NAMESPACE} -``` - -## Use cases - - -Configuring backup and restore with workload identity eliminates the need for -static credentials in your cluster and the overhead of credential rotation. -These benefits are helpful in: - -* Disaster recovery scenarios -* Control plane migration -* Compliance requirements -* Rollbacks after unsuccessful upgrades - -## Next steps - -Now that you have a workload identity configured for the backup and restore -component, visit the [Backup Configuration][backup-restore-guide] documentation. 
-
-Other workload identity guides are:
-* [Billing][billing]
-* [Shared Secrets][secrets]
-
-[backup-restore-guide]: /spaces/howtos/backup-and-restore
-[billing]: /spaces/howtos/self-hosted/workload-id/billing-config
-[secrets]: /spaces/howtos/self-hosted/workload-id/eso-config
diff --git a/spaces_versioned_docs/version-v1.13/howtos/self-hosted/workload-id/billing-config.md b/spaces_versioned_docs/version-v1.13/howtos/self-hosted/workload-id/billing-config.md
deleted file mode 100644
index 323a6122f..000000000
--- a/spaces_versioned_docs/version-v1.13/howtos/self-hosted/workload-id/billing-config.md
+++ /dev/null
@@ -1,454 +0,0 @@
----
-title: Billing Workload ID
-weight: 1
-description: Configure workload identity for Spaces Billing
----
-import GlobalLanguageSelector, { CodeBlock } from '@site/src/components/GlobalLanguageSelector';
-
-
-
-
-Workload-identity authentication lets you use access policies to grant your
-self-hosted Space cluster access to your cloud providers. Workload identity
-authentication grants temporary AWS credentials to your Kubernetes pod based on
-a service account. Assigning IAM roles and service accounts allows the pod to
-assume the IAM role dynamically and much more securely than static credentials.
-
-This guide walks you through creating an IAM trust role policy and applying it to your EKS
-cluster for billing in your Space cluster.
-
-
-
-Workload-identity authentication lets you use access policies to grant your
-self-hosted Space cluster access to your cloud providers. Workload identity
-authentication grants temporary Azure credentials to your Kubernetes pod based on
-a service account. Assigning managed identities and service accounts allows the pod to
-authenticate with Azure resources dynamically and much more securely than static credentials.
-
-This guide walks you through creating a managed identity and federated credential for your AKS
-cluster for billing in your Space cluster.
-
-
-
-Workload-identity authentication lets you use access policies to grant your
-self-hosted Space cluster access to your cloud providers. Workload identity
-authentication grants temporary GCP credentials to your Kubernetes pod based on
-a service account. Assigning IAM roles and service accounts allows the pod to
-access cloud resources dynamically and much more securely than static
-credentials.
-
-This guide walks you through configuring workload identity for your GKE
-cluster's billing component.
-
-
-
-## Prerequisites
-
-To set up a workload-identity, you'll need:
-
-- A self-hosted Space cluster
-- Administrator access in your cloud provider
-- Helm and `kubectl`
-
-## About the billing component
-
-The `vector.dev` component handles billing metrics collection in Spaces. It
-stores account data in your cloud storage. By default, this component runs in
-each control plane's host namespace.
-
-## Configuration
-
-
-
-Upbound supports workload-identity configurations in AWS with IAM Roles for
-Service Accounts and EKS pod identity association.
-
-#### IAM Roles for Service Accounts (IRSA)
-
-With IRSA, you can associate a Kubernetes service account in an EKS cluster with
-an AWS IAM role. Upbound authenticates workloads with that service account as
-the IAM role using temporary credentials instead of static role credentials.
-IRSA relies on the AWS `AssumeRoleWithWebIdentity` `STS` action to exchange OIDC ID tokens for
-the IAM role's temporary credentials. IRSA uses the `eks.amazonaws.com/role-arn`
-annotation to link the service account and the IAM role.
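-
-As a reference point for the steps below, this is a sketch of what the billing
-component's service account looks like once IRSA is wired up. It reuses the
-`${YOUR_BILLING_ROLE_NAME}` placeholder from this guide; the exact set of
-annotations Helm renders may differ:
-
-```yaml
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: vector
-  namespace: ${YOUR_CONTROL_PLANE_NAMESPACE}
-  annotations:
-    # Links the service account to the IAM role. EKS exchanges the pod's
-    # OIDC token for temporary credentials for this role.
-    eks.amazonaws.com/role-arn: arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:role/${YOUR_BILLING_ROLE_NAME}
-```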
-
-**Create an IAM role and trust policy**
-
-First, create an IAM role with appropriate permissions to access your S3 bucket:
-
-```json
-{
-  "Version": "2012-10-17",
-  "Statement": [
-    {
-      "Effect": "Allow",
-      "Action": [
-        "s3:GetObject",
-        "s3:PutObject",
-        "s3:ListBucket",
-        "s3:DeleteObject"
-      ],
-      "Resource": [
-        "arn:aws:s3:::${YOUR_BILLING_BUCKET}",
-        "arn:aws:s3:::${YOUR_BILLING_BUCKET}/*"
-      ]
-    }
-  ]
-}
-```
-
-You must configure the IAM role trust policy with the exact match for each
-provisioned control plane. An example of a trust policy for a single control
-plane is below:
-
-```json
-{
-  "Version": "2012-10-17",
-  "Statement": [
-    {
-      "Effect": "Allow",
-      "Principal": {
-        "Federated": "arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:oidc-provider/${YOUR_OIDC_PROVIDER}"
-      },
-      "Action": "sts:AssumeRoleWithWebIdentity",
-      "Condition": {
-        "StringEquals": {
-          "${YOUR_OIDC_PROVIDER}:aud": "sts.amazonaws.com",
-          "${YOUR_OIDC_PROVIDER}:sub": "system:serviceaccount:${YOUR_NAMESPACE}:vector"
-        }
-      }
-    }
-  ]
-}
-```
-
-**Configure the EKS OIDC provider**
-
-Next, ensure your EKS cluster has an OIDC identity provider:
-
-```shell
-eksctl utils associate-iam-oidc-provider --cluster ${YOUR_CLUSTER_NAME} --approve
-```
-
-**Apply the IAM role**
-
-In your control plane, pass the `--set` flag with the Spaces Helm chart
-parameters for the Billing component:
-
-```shell
---set "billing.enabled=true"
---set "billing.storage.provider=aws"
---set "billing.storage.aws.region=${YOUR_AWS_REGION}"
---set "billing.storage.aws.bucket=${YOUR_BILLING_BUCKET}"
---set "billing.storage.secretRef.name="
---set controlPlanes.vector.serviceAccount.customAnnotations."eks\.amazonaws\.com/role-arn"="arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:role/${YOUR_BILLING_ROLE_NAME}"
-```
-
-:::important
-You **must** set the `billing.storage.secretRef.name` to an empty string to
-enable workload identity for the billing component.
-:::
-
-#### EKS pod identities
-
-Upbound also supports EKS Pod Identity configuration. EKS Pod Identities allow
-you to create a pod identity association with your Kubernetes namespace, a
-service account, and an IAM role, which allows the EKS control plane to
-automatically handle the credential exchange.
-
-**Create an IAM role**
-
-First, create an IAM role with appropriate permissions to access your S3 bucket:
-
-```json
-{
-  "Version": "2012-10-17",
-  "Statement": [
-    {
-      "Effect": "Allow",
-      "Action": [
-        "s3:GetObject",
-        "s3:PutObject",
-        "s3:ListBucket"
-      ],
-      "Resource": [
-        "arn:aws:s3:::${YOUR_BILLING_BUCKET}",
-        "arn:aws:s3:::${YOUR_BILLING_BUCKET}/*"
-      ]
-    }
-  ]
-}
-```
-
-**Configure your Space with Helm**
-
-When you install or upgrade your Space with Helm, add the billing values:
-
-```shell
-helm upgrade spaces spaces-helm-chart \
-  --set "billing.enabled=true" \
-  --set "billing.storage.provider=aws" \
-  --set "billing.storage.aws.region=${YOUR_AWS_REGION}" \
-  --set "billing.storage.aws.bucket=${YOUR_BILLING_BUCKET}" \
-  --set "billing.storage.secretRef.name="
-```
-
-**Create a Pod Identity Association**
-
-After Upbound provisions your control plane, create a Pod Identity Association
-with the `aws` CLI:
-
-```shell
-aws eks create-pod-identity-association \
-  --cluster-name ${YOUR_CLUSTER_NAME} \
-  --namespace ${YOUR_CONTROL_PLANE_NAMESPACE} \
-  --service-account vector \
-  --role-arn arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:role/${YOUR_BILLING_ROLE_NAME}
-```
-
-
-
-Upbound supports workload-identity configurations in Azure with Azure's built-in
-workload identity feature.
- -First, enable the OIDC issuer and workload identity in your AKS cluster: - -```shell -az aks update --resource-group ${YOUR_RESOURCE_GROUP} --name ${YOUR_AKS_CLUSTER_NAME} --enable-oidc-issuer --enable-workload-identity -``` - -Next, find and store the OIDC issuer URL as an environment variable: - -```shell -export AKS_OIDC_ISSUER="$(az aks show --name ${YOUR_AKS_CLUSTER_NAME} --resource-group ${YOUR_RESOURCE_GROUP} --query "oidcIssuerProfile.issuerUrl" --output tsv)" -``` - -Create a new managed identity to associate with the billing component: - -```shell -az identity create --name billing-identity --resource-group ${YOUR_RESOURCE_GROUP} --location ${YOUR_LOCATION} -``` - -Retrieve the client ID and store it as an environment variable: - -```shell -export USER_ASSIGNED_CLIENT_ID="$(az identity show --name billing-identity --resource-group ${YOUR_RESOURCE_GROUP} --query clientId -otsv)" -``` - -Grant the managed identity you created to access your Azure Storage account: - -```shell -az role assignment create --role "Storage Blob Data Contributor" --assignee $USER_ASSIGNED_CLIENT_ID --scope /subscriptions/${YOUR_SUBSCRIPTION_ID}/resourceGroups/${YOUR_RESOURCE_GROUP}/providers/Microsoft.Storage/storageAccounts/${YOUR_STORAGE_ACCOUNT} -``` - -In your control plane, pass the `--set` flag with the Spaces Helm chart -parameters for the billing component: - -```shell ---set "billing.enabled=true" ---set "billing.storage.provider=azure" ---set "billing.storage.azure.storageAccount=${SPACES_BILLING_STORAGE_ACCOUNT}" ---set "billing.storage.azure.container=${SPACES_BILLING_STORAGE_CONTAINER}" ---set "billing.storage.secretRef.name=" ---set controlPlanes.vector.serviceAccount.customAnnotations."azure\.workload\.identity/client-id"="${SPACES_BILLING_APP_ID}" ---set controlPlanes.vector.pod.customLabels."azure\.workload\.identity/use"="true" -``` - -Create a federated credential to establish trust between the managed identity -and your AKS OIDC provider: - -```shell -az identity federated-credential create \ - --name billing-federated-identity \ - --identity-name billing-identity \ - --resource-group ${YOUR_RESOURCE_GROUP} \ - --issuer ${AKS_OIDC_ISSUER} \ - --subject system:serviceaccount:${YOUR_CONTROL_PLANE_NAMESPACE}:vector -``` - - - - - -Upbound supports workload-identity configurations in GCP with IAM principal -identifiers or service account impersonation. - -#### IAM principal identifiers - -IAM principal identifiers allow you to grant permissions directly to -Kubernetes service accounts without additional annotation. Upbound recommends -this approach for ease-of-use and flexibility. - -First, enable Workload Identity Federation on your GKE cluster: - -```shell -gcloud container clusters update ${YOUR_CLUSTER_NAME} \ - --workload-pool=${YOUR_PROJECT_ID}.svc.id.goog \ - --region=${YOUR_REGION} -``` - -Next, configure your Spaces installation with the Spaces Helm chart parameters: - -```shell ---set "billing.enabled=true" ---set "billing.storage.provider=gcp" ---set "billing.storage.gcp.bucket=${YOUR_BILLING_BUCKET}" ---set "billing.storage.secretRef.name=" -``` - -:::important -You **must** set the `billing.storage.secretRef.name` to an empty string to -enable workload identity for the billing component. 
-::: - -Grant the necessary permissions to your Kubernetes service account: - -```shell -gcloud projects add-iam-policy-binding ${YOUR_PROJECT_ID} \ - --member="principalSet://iam.googleapis.com/projects/${YOUR_PROJECT_NUMBER}/locations/global/workloadIdentityPools/${YOUR_PROJECT_ID}.svc.id.goog/attribute.kubernetes_namespace/${YOUR_CONTROL_PLANE_NAMESPACE}/attribute.kubernetes_service_account/vector" \ - --role="roles/storage.objectAdmin" -``` - -Enable uniform bucket-level access on your storage bucket: - -```shell -gcloud storage buckets update gs://${YOUR_BILLING_BUCKET} --uniform-bucket-level-access -``` - -#### Service account impersonation - -Service account impersonation allows you to link a Kubernetes service account to -a GCP service account. The Kubernetes service account assumes the permissions of -the GCP service account you specify. - -Enable workload id federation on your GKE cluster: - -```shell -gcloud container clusters update ${YOUR_CLUSTER_NAME} \ - --workload-pool=${YOUR_PROJECT_ID}.svc.id.goog \ - --region=${YOUR_REGION} -``` - -Next, create a dedicated service account for your billing operations: - -```shell -gcloud iam service-accounts create billing-sa \ - --project=${YOUR_PROJECT_ID} -``` - -Grant storage permissions to the service account you created: - -```shell -gcloud projects add-iam-policy-binding ${YOUR_PROJECT_ID} \ - --member="serviceAccount:billing-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com" \ - --role="roles/storage.objectAdmin" -``` - -Link the Kubernetes service account to the GCP service account: - -```shell -gcloud iam service-accounts add-iam-policy-binding \ - billing-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com \ - --role="roles/iam.workloadIdentityUser" \ - --member="serviceAccount:${YOUR_PROJECT_ID}.svc.id.goog[${YOUR_CONTROL_PLANE_NAMESPACE}/vector]" -``` - -In your control plane, pass the `--set` flag with the Spaces Helm chart -parameters for the billing component: - -```shell ---set "billing.enabled=true" ---set "billing.storage.provider=gcp" ---set "billing.storage.gcp.bucket=${YOUR_BILLING_BUCKET}" ---set "billing.storage.secretRef.name=" ---set controlPlanes.vector.serviceAccount.customAnnotations."iam\.gke\.io/gcp-service-account"="billing-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com" -``` - - - -## Verify your configuration - -After you apply the configuration use `kubectl` to verify the service account -has the correct annotation: - -```shell -kubectl get serviceaccount vector -n ${YOUR_CONTROL_PLANE_NAMESPACE} -o yaml -``` - -Verify the `vector` pod is running: - -```shell -kubectl get pods -n ${YOUR_CONTROL_PLANE_NAMESPACE} | grep vector -``` - -## Restart workload - - - -You must manually restart a workload's pod when you add the -`eks.amazonaws.com/role-arn key` annotation to the running pod's service -account. - -This restart enables the EKS pod identity webhook to inject the necessary -environment for using IRSA. - - - - - -You must manually restart a workload's pod when you add the workload identity annotations to the running pod's service account. - -This restart enables the workload identity webhook to inject the necessary -environment for using Azure workload identity. - - - - - -GCP workload identity doesn't require pod restarts after configuration changes. 
-If you do need to restart the workload, use the `kubectl` command to force the -component restart: - - - -```shell -kubectl rollout restart deployment vector -``` - - -## Use cases - - -Using workload identity authentication for billing eliminates the need for static -credentials in your cluster as well as the overhead of credential rotation. -These benefits are helpful in: - -* Resource usage tracking across teams/projects -* Cost allocation for multi-tenant environments -* Financial auditing requirements -* Capacity billing and resource optimization -* Automated billing workflows - -## Next steps - -Now that you have workload identity configured for the billing component, visit -the [Billing guide][billing-guide] for more information. - -Other workload identity guides are: -* [Backup and restore][backuprestore] -* [Shared Secrets][secrets] - -[billing-guide]: /spaces/howtos/self-hosted/billing -[backuprestore]: /spaces/howtos/self-hosted/workload-id/backup-restore-config -[secrets]: /spaces/howtos/self-hosted/workload-id/eso-config diff --git a/spaces_versioned_docs/version-v1.13/howtos/self-hosted/workload-id/eso-config.md b/spaces_versioned_docs/version-v1.13/howtos/self-hosted/workload-id/eso-config.md deleted file mode 100644 index c1418c171..000000000 --- a/spaces_versioned_docs/version-v1.13/howtos/self-hosted/workload-id/eso-config.md +++ /dev/null @@ -1,503 +0,0 @@ ---- -title: Shared Secrets Workload ID -weight: 1 -description: Configure workload identity for Spaces Shared Secrets ---- -import GlobalLanguageSelector, { CodeBlock } from '@site/src/components/GlobalLanguageSelector'; - - - - - - - -Workload-identity authentication lets you use access policies to grant your -self-hosted Space cluster access to your cloud providers. Workload identity -authentication grants temporary AWS credentials to your Kubernetes pod based on -a service account. Assigning IAM roles and service accounts allows the pod to -assume the IAM role dynamically and much more securely than static credentials. - -This guide walks you through creating an IAM trust role policy and applying it to your EKS -cluster for secret sharing with Kubernetes. - - - - - -Workload-identity authentication lets you use access policies to grant your -self-hosted Space cluster access to your cloud providers. Workload identity -authentication grants temporary Azure credentials to your Kubernetes pod based on -a service account. Assigning managed identities and service accounts allows the pod to -authenticate with Azure resources dynamically and much more securely than static credentials. - -This guide walks you through creating a managed identity and federated credential for your AKS -cluster for shared secrets in your Space cluster. - - - - - -Workload-identity authentication lets you use access policies to grant your -self-hosted Space cluster access to your cloud providers. Workload identity -authentication grants temporary GCP credentials to your Kubernetes pod based on -a service account. Assigning IAM roles and service accounts allows the pod to -access cloud resources dynamically and much more securely than static -credentials. - -This guide walks you through configuring workload identity for your GKE -cluster's Shared Secrets component. 
- - - -## Prerequisites - - -To set up a workload-identity, you'll need: - - -- A self-hosted Space cluster -- Administrator access in your cloud provider -- Helm and `kubectl` - - -## About the Shared Secrets component - - - - -The External Secrets Operator (ESO) runs in each control plane's host namespace as `external-secrets-controller`. It needs to access -your external secrets management service like AWS Secrets Manager. - -To configure your shared secrets workflow controller, you must: - -* Annotate the Kubernetes service account to associate it with a cloud-side - principal (such as an IAM role, service account, or enterprise application). The workload must then - use this service account. -* Label the workload (pod) to allow the injection of a temporary credential set, - enabling authentication. - - - - - -The External Secrets Operator (ESO) component runs in each control plane's host -namespace as `external-secrets-controller`. It synchronizes secrets from -external APIs into Kubernetes secrets. Shared secrets allow you to manage -credentials outside your Kubernetes cluster while making them available to your -application - - - - - -The External Secrets Operator (ESO) component runs in each control plane's host -namespace as `external-secrets-controller`. It synchronizes secrets from -external APIs into Kubernetes secrets. Shared secrets allow you to manage -credentials outside your Kubernetes cluster while making them available to your -application - - - -## Configuration - - - -Upbound supports workload-identity configurations in AWS with IAM Roles for -Service Accounts or EKS pod identity association. - -#### IAM Roles for Service Accounts (IRSA) - -With IRSA, you can associate a Kubernetes service account in an EKS cluster with -an AWS IAM role. Upbound authenticates workloads with that service account as -the IAM role using temporary credentials instead of static role credentials. -IRSA relies on AWS `AssumeRoleWithWebIdentity` `STS` to exchange OIDC ID tokens with -the IAM role's temporary credentials. IRSA uses the `eks.amazon.aws/role-arn` -annotation to link the service account and the IAM role. - -**Create an IAM role and trust policy** - -First, create an IAM role with appropriate permissions to access AWS Secrets Manager: - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "secretsmanager:GetSecretValue", - "secretsmanager:DescribeSecret", - "ssm:GetParameter" - ], - "Resource": [ - "arn:aws:secretsmanager:${YOUR_REGION}:${YOUR_AWS_ACCOUNT_ID}:secret:${YOUR_SECRET_PREFIX}*", - "arn:aws:ssm:${YOUR_REGION}:${YOUR_AWS_ACCOUNT_ID}:parameter/${YOUR_PARAMETER_PREFIX}*" - ] - } - ] -} -``` - -You must configure the IAM role trust policy with the exact match for each -provisioned control plane. 
An example of a trust policy for a single control plane is below:
-
-```json
-{
-  "Version": "2012-10-17",
-  "Statement": [
-    {
-      "Effect": "Allow",
-      "Principal": {
-        "Federated": "arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:oidc-provider/${YOUR_OIDC_PROVIDER}"
-      },
-      "Action": "sts:AssumeRoleWithWebIdentity",
-      "Condition": {
-        "StringEquals": {
-          "${YOUR_OIDC_PROVIDER}:aud": "sts.amazonaws.com"
-        },
-        "StringLike": {
-          "${YOUR_OIDC_PROVIDER}:sub": "system:serviceaccount:*:external-secrets-controller"
-        }
-      }
-    }
-  ]
-}
-```
-
-**Configure the EKS OIDC provider**
-
-Next, ensure your EKS cluster has an OIDC identity provider:
-
-```shell
-eksctl utils associate-iam-oidc-provider --cluster ${YOUR_CLUSTER_NAME} --approve
-```
-
-**Apply the IAM role**
-
-In your control plane, pass the `--set` flag with the Spaces Helm chart
-parameters for the shared secrets component:
-
-```shell
---set controlPlanes.sharedSecrets.serviceAccount.customAnnotations."eks\.amazonaws\.com/role-arn"="arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:role/${YOUR_ESO_ROLE_NAME}"
-```
-
-This command allows the shared secrets component to authenticate with your
-dedicated IAM role in your EKS cluster environment.
-
-#### EKS pod identities
-
-Upbound also supports EKS Pod Identity configuration. EKS Pod Identities allow
-you to create a pod identity association with your Kubernetes namespace, a
-service account, and an IAM role, which allows the EKS control plane to
-automatically handle the credential exchange.
-
-**Create an IAM role**
-
-First, create an IAM role with appropriate permissions to access AWS Secrets Manager:
-
-```json
-{
-  "Version": "2012-10-17",
-  "Statement": [
-    {
-      "Effect": "Allow",
-      "Action": [
-        "secretsmanager:GetSecretValue",
-        "secretsmanager:DescribeSecret",
-        "ssm:GetParameter"
-      ],
-      "Resource": [
-        "arn:aws:secretsmanager:${YOUR_AWS_REGION}:${YOUR_AWS_ACCOUNT_ID}:secret:${YOUR_SECRET_PREFIX}*",
-        "arn:aws:ssm:${YOUR_AWS_REGION}:${YOUR_AWS_ACCOUNT_ID}:parameter/${YOUR_PARAMETER_PREFIX}*"
-      ]
-    }
-  ]
-}
-```
-
-**Configure your Space with Helm**
-
-When you install or upgrade your Space with Helm, add the shared secrets value:
-
-```shell
-helm upgrade spaces spaces-helm-chart \
-  --set "sharedSecrets.enabled=true"
-```
-
-**Create a Pod Identity Association**
-
-After Upbound provisions your control plane, create a Pod Identity Association
-with the `aws` CLI:
-
-```shell
-aws eks create-pod-identity-association \
-  --cluster-name ${YOUR_CLUSTER_NAME} \
-  --namespace ${YOUR_CONTROL_PLANE_NAMESPACE} \
-  --service-account external-secrets-controller \
-  --role-arn arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:role/${YOUR_ROLE_NAME}
-```
-
-
-
-Upbound supports workload-identity configurations in Azure with Azure's built-in
-workload identity feature.
- -First, enable the OIDC issuer and workload identity in your AKS cluster: - -```shell -az aks update --resource-group ${YOUR_RESOURCE_GROUP} --name ${YOUR_AKS_CLUSTER_NAME} --enable-oidc-issuer --enable-workload-identity -``` - -Next, find and store the OIDC issuer URL as an environment variable: - -```shell -export AKS_OIDC_ISSUER="$(az aks show --name ${YOUR_AKS_CLUSTER_NAME} --resource-group ${YOUR_RESOURCE_GROUP} --query "oidcIssuerProfile.issuerUrl" --output tsv)" -``` - -Create a new managed identity to associate with the shared secrets component: - -```shell -az identity create --name secrets-identity --resource-group ${YOUR_RESOURCE_GROUP} --location ${YOUR_LOCATION} -``` - -Retrieve the client ID and store it as an environment variable: - -```shell -export USER_ASSIGNED_CLIENT_ID="$(az identity show --name secrets-identity --resource-group ${YOUR_RESOURCE_GROUP} --query clientId -otsv)" -``` - -Grant the managed identity you created to access your Azure Storage account: - -```shell -az keyvault set-policy --name ${YOUR_KEY_VAULT_NAME} \ - --resource-group ${YOUR_RESOURCE_GROUP} \ - --object-id $(az identity show --name secrets-identity --resource-group ${YOUR_RESOURCE_GROUP} --query principalId -otsv) \ - --secret-permissions get list -``` - -In your control plane, pass the `--set` flag with the Spaces Helm chart -parameters for the shared secrets component: - -```shell ---set controlPlanes.sharedSecrets.serviceAccount.customAnnotations."azure\.workload\.identity/client-id"="${USER_ASSIGNED_CLIENT_ID}" ---set controlPlanes.sharedSecrets.pod.customLabels."azure\.workload\.identity/use"="true" -``` - -Next, create a federated credential to establish trust between the managed identity -and your AKS OIDC provider: - -```shell -az identity federated-credential create \ - --name secrets-federated-identity \ - --identity-name secrets-identity \ - --resource-group ${YOUR_RESOURCE_GROUP} \ - --issuer ${AKS_OIDC_ISSUER} \ - --subject system:serviceaccount:${YOUR_CONTROL_PLANE_NAMESPACE}:external-secrets-controller -``` - - - - - -Upbound supports workload-identity configurations in GCP with IAM principal -identifiers or service account impersonation. - -#### IAM principal identifiers - -IAM principal identifiers allow you to grant permissions directly to -Kubernetes service accounts without additional annotation. Upbound recommends -this approach for ease-of-use and flexibility. - -First, enable Workload Identity Federation on your GKE cluster: - -```shell -gcloud container clusters update ${YOUR_CLUSTER_NAME} \ - --workload-pool=${YOUR_PROJECT_ID}.svc.id.goog \ - --region=${YOUR_REGION} -``` - -Next, grant the necessary permissions to your Kubernetes service account: - -```shell -gcloud projects add-iam-policy-binding ${YOUR_PROJECT_ID} \ - --member="principalSet://iam.googleapis.com/projects/${YOUR_PROJECT_NUMBER}/locations/global/workloadIdentityPools/${YOUR_PROJECT_ID}.svc.id.goog/attribute.kubernetes_namespace/${YOUR_CONTROL_PLANE_NAMESPACE}/attribute.kubernetes_service_account/external-secrets-controller" \ - --role="roles/secretmanager.secretAccessor" -``` - -#### Service account impersonation - -Service account impersonation allows you to link a Kubernetes service account to -a GCP service account. The Kubernetes service account assumes the permissions of -the GCP service account you specify. 
- -Enable workload id federation on your GKE cluster: - -```shell -gcloud container clusters update ${YOUR_CLUSTER_NAME} \ - --workload-pool=${YOUR_PROJECT_ID}.svc.id.goog \ - --region=${YOUR_REGION} -``` - -Next, create a dedicated service account for your secrets operations: - -```shell -gcloud iam service-accounts create secrets-sa \ - --project=${YOUR_PROJECT_ID} -``` - -Grant secret access permissions to the service account you created: - -```shell -gcloud projects add-iam-policy-binding ${YOUR_PROJECT_ID} \ - --member="serviceAccount:secrets-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com" \ - --role="roles/secretmanager.secretAccessor" -``` - -Link the Kubernetes service account to the GCP service account: - -```shell -gcloud iam service-accounts add-iam-policy-binding \ - secrets-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com \ - --role="roles/iam.workloadIdentityUser" \ - --member="serviceAccount:${YOUR_PROJECT_ID}.svc.id.goog[${YOUR_CONTROL_PLANE_NAMESPACE}/external-secrets-controller]" -``` - -In your control plane, pass the `--set` flag with the Spaces Helm chart -parameters for the shared secrets component: - -```shell ---set controlPlanes.sharedSecrets.serviceAccount.customAnnotations."iam\.gke\.io/gcp-service-account"="secrets-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com" -``` - - - -## Verify your configuration - -After you apply the configuration use `kubectl` to verify the service account -has the correct annotation: - -```shell -kubectl get serviceaccount external-secrets-controller -n ${YOUR_CONTROL_PLANE_NAMESPACE} -o yaml -``` - - - -Verify the `external-secrets` pod is running correctly: - -```shell -kubectl get pods -n ${YOUR_CONTROL_PLANE_NAMESPACE} | grep external-secrets -``` - - - - - -Verify the External Secrets Operator pod is running correctly: - -```shell -kubectl get pods -n ${YOUR_CONTROL_PLANE_NAMESPACE} | grep external-secrets -``` - - - - - -Verify the `external-secrets` pod is running correctly: - -```shell -kubectl get pods -n ${YOUR_CONTROL_PLANE_NAMESPACE} | grep external-secrets -``` - - - -## Restart workload - - - -You must manually restart a workload's pod when you add the -`eks.amazonaws.com/role-arn key` annotation to the running pod's service -account. - -This restart enables the EKS pod identity webhook to inject the necessary -environment for using IRSA. - - - - - -You must manually restart a workload's pod when you add the workload identity annotations to the running pod's service account. - -This restart enables the workload identity webhook to inject the necessary -environment for using Azure workload identity. - - - - - -GCP workload identity doesn't require pod restarts after configuration changes. -If you do need to restart the workload, use the `kubectl` command to force the -component restart: - - - -```shell -kubectl rollout restart deployment external-secrets -``` - -## Use cases - - - - -Shared secrets with workload identity eliminates the need for static credentials -in your cluster. These benefits are particularly helpful in: - -* Secure application credentials management -* Database connection string storage -* API token management -* Compliance with secret rotation security standards -* Multi-environment configuration with centralized secret management - - - - - -Using workload identity authentication for shared secrets eliminates the need for static -credentials in your cluster as well as the overhead of credential rotation. 
-These benefits are particularly helpful in:
-
-* Secure application credentials management
-* Database connection string storage
-* API token management
-* Compliance with secret rotation security standards
-
-
-
-
-Configuring the external secrets operator with workload identity eliminates the need for
-static credentials in your cluster and the overhead of credential rotation.
-These benefits are particularly helpful in:
-
-* Secure application credentials management
-* Database connection string storage
-* API token management
-* Compliance with secret rotation security standards
-
-
-
-## Next steps
-
-Now that you have workload identity configured for the shared secrets component, visit
-the [Shared Secrets][eso-guide] guide for more information.
-
-Other workload identity guides are:
-* [Backup and restore][backuprestore]
-* [Billing][billing]
-
-[eso-guide]: /spaces/howtos/secrets-management
-[backuprestore]: /spaces/howtos/self-hosted/workload-id/backup-restore-config
-[billing]: /spaces/howtos/self-hosted/workload-id/billing-config
diff --git a/spaces_versioned_docs/version-v1.13/howtos/simulations.md b/spaces_versioned_docs/version-v1.13/howtos/simulations.md
deleted file mode 100644
index 26cb0e657..000000000
--- a/spaces_versioned_docs/version-v1.13/howtos/simulations.md
+++ /dev/null
@@ -1,110 +0,0 @@
----
-title: Simulate changes to your Control Plane Projects
-sidebar_position: 100
-description: Use the Up CLI to mock operations before deploying to your environments.
----
-
-:::info API Version Information
-This guide covers Simulations, available in v1.10+ (GA since v1.13).
-
-For API specifications on the Simulations CRD, see the [Spaces API Reference](../../../reference/apis/spaces-api/).
-:::
-
-:::important
-The Simulations feature is in private preview. For more information, [reach out to Upbound][reach-out-to-upbound].
-:::
-
-Control plane simulations allow you to preview changes to your resources before
-applying them to your control planes. Like a plan or dry-run operation,
-simulations expose the impact of updates to compositions or claims without
-changing your actual resources.
-
-A control plane simulation creates a temporary copy of your control plane and
-returns a preview of the desired changes. The simulation change plan helps you
-reduce the risk of unexpected behavior based on your changes.
-
-## Simulation benefits
-
-Control planes are dynamic systems that automatically reconcile resources to
-match your desired state. Simulations provide visibility into this
-reconciliation process by showing:
-
-* New resources to create
-* Existing resources to change
-* Existing resources to delete
-* How configuration changes propagate through the system
-
-These insights are crucial when planning complex changes or upgrading Crossplane
-packages.
-
-## Requirements
-
-Simulations are available to select customers on Upbound Cloud with Team
-Tier or higher. For more information, [reach out to Upbound][reach-out-to-upbound-1].
-
-## How to simulate your control planes
-
-Before you start a simulation, build your project and use the `up project run`
-command to run your control plane.
-
-Use the `up project simulate` command with your control plane name to start the
-simulation:
-
-```shell {copy-lines="all"}
-up project simulate --complete-after=60s --terminate-on-finish
-```
-
-The `complete-after` flag determines how long to run the simulation before it
-completes and calculates the results.
-Depending on the change, a simulation may not complete within your defined
-interval, leaving unaffected resources marked as `unchanged`.
-
-The `terminate-on-finish` flag terminates the simulation after the time
-you set, deleting the control plane that ran the simulation.
-
-At the end of your simulation, your CLI returns:
-* A summary of the resources created, modified, or deleted
-* Diffs for each resource affected
-
-## View your simulation in the Upbound Console
-You can also view your simulation results in the Upbound Console:
-
-1. Navigate to your base control plane in the Upbound Console
-2. Select the "Simulations" tab in the menu
-3. Select a simulation object to see a detailed list of all
-   affected resources.
-
-The Console provides visual indications of changes:
-
-- Created Resources: Marked with green
-- Modified Resources: Marked with yellow
-- Deleted Resources: Marked with red
-- Unchanged Resources: Displayed in gray
-
-![Upbound Console Simulation](/img/simulations.png)
-
-## Considerations
-
-Simulations is a **private preview** feature.
-
-Be aware of the following limitations:
-
-- Simulations can't predict the exact behavior of external systems due to the
-  complexity and non-deterministic reconciliation pattern in Crossplane.
-
-- The only completion criterion for a simulation is time. Your simulation may not
-  receive a conclusive result within that interval. Upbound recommends the
-  default `60s` value.
-
-- Providers don't run in simulations. Simulations can't compose resources that
-  rely on the status of Managed Resources.
-
-The Upbound team is working to address these limitations. Your feedback is always appreciated.
-
-## Next steps
-
-For more information, follow the [tutorial][tutorial] on Simulations.
-
-[tutorial]: /manuals/cli/howtos/simulations
-[reach-out-to-upbound]: https://www.upbound.io/contact-us
-[reach-out-to-upbound-1]: https://www.upbound.io/contact-us
diff --git a/spaces_versioned_docs/version-v1.13/overview/_category_.json b/spaces_versioned_docs/version-v1.13/overview/_category_.json
deleted file mode 100644
index 54bb16430..000000000
--- a/spaces_versioned_docs/version-v1.13/overview/_category_.json
+++ /dev/null
@@ -1,4 +0,0 @@
-{
-  "label": "Overview",
-  "position": 0
-}
diff --git a/spaces_versioned_docs/version-v1.13/overview/index.md b/spaces_versioned_docs/version-v1.13/overview/index.md
deleted file mode 100644
index 7b79f6e44..000000000
--- a/spaces_versioned_docs/version-v1.13/overview/index.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-title: Spaces Overview
-sidebar_position: 0
----
-
-# Upbound Spaces
-
-Welcome to the Upbound Spaces documentation. This section contains comprehensive
-documentation for the Spaces API and Spaces operations across all supported
-versions (v1.9 through v1.15).
- -## Get Started - -- **[Concepts](/spaces/concepts/control-planes)** - Core concepts for Spaces -- **[How-To Guides](/spaces/howtos/auto-upgrade)** - Step-by-step guides for operating Spaces -- **[API Reference](/spaces/reference/)** - API specifications and resources diff --git a/spaces_versioned_docs/version-v1.13/reference/_category_.json b/spaces_versioned_docs/version-v1.13/reference/_category_.json deleted file mode 100644 index 4a6a139c4..000000000 --- a/spaces_versioned_docs/version-v1.13/reference/_category_.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "label": "Spaces API", - "position": 1, - "collapsed": true -} diff --git a/spaces_versioned_docs/version-v1.13/reference/index.md b/spaces_versioned_docs/version-v1.13/reference/index.md deleted file mode 100644 index 5e68b0768..000000000 --- a/spaces_versioned_docs/version-v1.13/reference/index.md +++ /dev/null @@ -1,72 +0,0 @@ ---- -title: Spaces API Reference -description: Documentation for the Spaces API resources (v1.15 - Latest) -sidebar_position: 1 ---- -import CrdDocViewer from '@site/src/components/CrdViewer'; - - -This page documents the Custom Resource Definitions (CRDs) for the Spaces API. - - -## Control Planes -### Control Planes - - -## Observability -### Shared Telemetry Configs - - -## `pkg` -### Controller Revisions - - -### Controller Runtime Configs - - -### Controllers - - -### Remote Configuration Revisions - - -### Remote Configurations - - -## Policy -### Shared Upbound Policies - - -## References -### Referenced Objects - - -## Scheduling -### Environments - - -## Secrets -### Shared External Secrets - - -### Shared Secret Stores - - -## Simulations - - -## Spaces Backups -### Backups - - -### Backup Schedules - - -### Shared Backup Configs - - -### Shared Backups - - -### Shared Backup Schedules - diff --git a/spaces_versioned_docs/version-v1.14/concepts/_category_.json b/spaces_versioned_docs/version-v1.14/concepts/_category_.json deleted file mode 100644 index 4b8667e29..000000000 --- a/spaces_versioned_docs/version-v1.14/concepts/_category_.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "label": "Concepts", - "position": 2, - "collapsed": true -} - - diff --git a/spaces_versioned_docs/version-v1.14/concepts/control-planes.md b/spaces_versioned_docs/version-v1.14/concepts/control-planes.md deleted file mode 100644 index 7066343de..000000000 --- a/spaces_versioned_docs/version-v1.14/concepts/control-planes.md +++ /dev/null @@ -1,227 +0,0 @@ ---- -title: Control Planes -weight: 1 -description: An overview of control planes in Upbound ---- - - -Control planes in Upbound are fully isolated Crossplane control plane instances that Upbound manages for you. This means: - -- the underlying lifecycle of infrastructure (compute, memory, and storage) required to power your instance. -- scaling of the infrastructure. -- the maintenance of the core Crossplane components that make up a control plane. - -This lets users focus on building their APIs and operating their control planes, while Upbound handles the rest. Each control plane has its own dedicated API server connecting users to their control plane. - -:::info API Version Information -This guide applies to all supported versions (v1.9-v1.14+). - -For detailed information about Control Plane API specifications and CRD fields, see the [Spaces API Reference](../../../reference/apis/spaces-api/). version compatibility details, see the . 
-:::
-
-## Control plane architecture
-
-![Managed Control Plane Architecture](/img/mcp.png)
-
-Along with underlying infrastructure, Upbound manages the Crossplane system components. You don't need to manage the Crossplane API server or core resource controllers because Upbound manages your control plane lifecycle from creation to deletion.
-
-### Crossplane API
-
-Each control plane offers a unified endpoint. You interact with your control plane through Kubernetes and Crossplane API calls. Each control plane runs a Kubernetes API server to handle API requests. You can make API calls in the following ways:
-
-- Direct calls: HTTP/gRPC
-- Indirect calls: the up CLI, Kubernetes clients such as kubectl, or the Upbound Console.
-
-Like in Kubernetes, the API server is the hub for all communication for the control plane. All internal components such as system processes and provider controllers act as clients of the API server.
-
-Your API requests tell Crossplane your desired state for the resources your control plane manages. Crossplane attempts to constantly maintain that state. Crossplane lets you configure objects in the API either imperatively or declaratively.
-
-### Crossplane versions and features
-
-Upbound automatically upgrades Crossplane system components on control planes to new Crossplane versions for updated features and improvements in the open source project. With [automatic upgrades][automatic-upgrades], you choose the cadence at which Upbound automatically upgrades the system components in your control plane. You can also choose to manually upgrade your control plane to a different Crossplane version.
-
-For detailed information on versions and upgrades, refer to the [release notes][release-notes] and the automatic upgrade documentation. If you don't enroll a control plane in a release channel, Upbound doesn't apply automatic upgrades.
-
-Features considered "alpha" in Crossplane are by default not supported in a control plane unless otherwise specified.
-
-### Hosting environments
-
-Every control plane in Upbound belongs to a [control plane group][control-plane-group]. Control plane groups are a logical grouping of one or more control planes with shared objects (such as secrets or backup configuration). Every group resides in a [Space][space] in Upbound; Spaces are hosting environments for control planes.
-
-Think of a Space as being conceptually the same as an AWS, Azure, or GCP region. Regardless of the Space type you run a control plane in, the core experience is identical.
-
-## Management
-
-### Create a control plane
-
-You can create a new control plane from the Upbound Console, [up CLI][up-cli], or with Kubernetes clients such as `kubectl`.
-
-
-
-To use the CLI, run the following:
-
-```shell
-up ctp create
-```
-
-To learn more about control plane-related commands in `up`, go to the [CLI reference][cli-reference] documentation.
-
-
-
-You can create and manage control planes declaratively in Upbound. Before you
-begin, ensure you're logged into Upbound and set the correct context:
-
-```bash
-up login
-# Example: acmeco/upbound-gcp-us-west-1/default
-up ctx ${yourOrganization}/${yourSpace}/${yourGroup}
-```
-
-```yaml
-#controlplane-a.yaml
-apiVersion: spaces.upbound.io/v1beta1
-kind: ControlPlane
-metadata:
-  name: controlplane-a
-spec:
-  crossplane:
-    autoUpgrade:
-      channel: Rapid
-```
-
-```bash
-kubectl apply -f controlplane-a.yaml
-```
-
-
-
-### Connect directly to your control plane
-
-Each control plane offers a unified endpoint.
-You interact with your control plane through Kubernetes and Crossplane API calls. Each control plane runs a Kubernetes API server to handle API requests.
-
-You can connect to a control plane's API server directly via the up CLI. Use the [`up ctx`][up-ctx] command to set your kubeconfig's current context to a control plane:
-
-```shell
-# Example: acmeco/upbound-gcp-us-west-1/default/ctp1
-up ctx ${yourOrganization}/${yourSpace}/${yourGroup}/${yourControlPlane}
-```
-
-To disconnect from your control plane and revert your kubeconfig's current context to the previous entry, run the following:
-
-```shell
-up ctx ..
-```
-
-You can also generate a `kubeconfig` file for a control plane with [`up ctx -f`][up-ctx-f].
-
-```shell
-up ctx ${yourOrganization}/${yourSpace}/${yourGroup}/${yourControlPlane} -f - > ctp-kubeconfig.yaml
-```
-
-:::tip
-To learn more about how to use `up ctx` to navigate different contexts in Upbound, read the [CLI documentation][cli-documentation].
-:::
-
-## Configuration
-
-When you create a new control plane, Upbound provides you with a fully isolated instance of Crossplane. Configure your control plane by installing packages that extend its capabilities, such as creating and managing the lifecycle of new types of infrastructure resources.
-
-You're encouraged to install any Crossplane package type (Providers, Configurations, Functions) available in the [Upbound Marketplace][upbound-marketplace] on your control planes.
-
-### Install packages
-
-Below are a couple of ways to install Crossplane packages on your control plane.
-
-
-
-Use the `up` CLI to install Crossplane packages from the [Upbound Marketplace][upbound-marketplace-1] on your control planes. Connect directly to your control plane via `up ctx`. Then, to install a provider:
-
-```shell
-up ctp provider install xpkg.upbound.io/upbound/provider-family-aws
-```
-
-To install a Configuration:
-
-```shell
-up ctp configuration install xpkg.upbound.io/upbound/platform-ref-aws
-```
-
-To install a Function:
-
-```shell
-up ctp function install xpkg.upbound.io/crossplane-contrib/function-kcl
-```
-
-You can use kubectl to directly apply any Crossplane manifest. Below is an example for installing a Crossplane provider:
-
-```yaml
-cat <<EOF | kubectl apply -f -
-apiVersion: pkg.crossplane.io/v1
-kind: Provider
-metadata:
-  name: provider-family-aws
-spec:
-  package: xpkg.upbound.io/upbound/provider-family-aws
-EOF
-```
-
-
-
-For production-grade scenarios, it's recommended you configure your control plane declaratively via Git plus a Continuous Delivery (CD) engine such as Argo. For guidance on this topic, read [GitOps with control planes][gitops-with-control-planes].
-
-
-
-### Configure Crossplane ProviderConfigs
-
-#### ProviderConfigs with OpenID Connect
-
-Use OpenID Connect (`OIDC`) to authenticate to Upbound control planes without credentials. OIDC lets your control plane exchange short-lived tokens directly with your cloud provider. Read how to [connect control planes to external services][connect-control-planes-to-external-services] to learn more.
-
-#### Generic ProviderConfigs
-
-The Upbound Console doesn't allow direct editing of ProviderConfigs that don't support `Upbound` authentication. To edit these ProviderConfigs on your control plane, connect to the control plane directly by following the instructions in the previous section and using `kubectl`; a short sketch follows after the next section.
-
-### Configure secrets
-
-Upbound gives users the ability to configure the synchronization of secrets from external stores into control planes. Configure this capability at the group-level, explained in the [Spaces documentation][spaces-documentation].
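-
-Returning to the generic ProviderConfig workflow above: as a minimal sketch,
-assuming your kubeconfig context already points at a control plane (via `up ctx`),
-you can list and edit a provider family's ProviderConfigs by their fully
-qualified resource name. The `aws.upbound.io` API group and the `default` name
-below are illustrative examples:
-
-```shell
-# List ProviderConfigs for the AWS provider family (example API group)
-kubectl get providerconfigs.aws.upbound.io
-
-# Open one for editing; 'default' is a common but illustrative name
-kubectl edit providerconfigs.aws.upbound.io default
-```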
-
-### Configure backups
-
-Upbound gives users the ability to configure backup schedules, take impromptu backups, and conduct self-service restore operations. Configure this capability at the group-level, explained in the [Spaces documentation][spaces-documentation-1].
-
-### Configure telemetry
-
-Upbound gives users the ability to configure the collection of telemetry (logs, metrics, and traces) in their control planes. Using Upbound's built-in [OTEL][otel] support, you can stream this data out to your preferred observability solution. Configure this capability at the group-level, explained in the [Spaces documentation][spaces-documentation-2].
-
-[automatic-upgrades]: /spaces/howtos/auto-upgrade
-[release-notes]: https://github.com/upbound/universal-crossplane/releases
-[control-plane-group]: /spaces/concepts/groups
-[space]: /spaces/overview
-[up-cli]: /reference/cli-reference
-[cli-reference]: /reference/cli-reference
-[up-ctx]: /reference/cli-reference
-[up-ctx-f]: /reference/cli-reference
-[cli-documentation]: /manuals/cli/concepts/contexts
-[upbound-marketplace]: https://marketplace.upbound.io
-[upbound-marketplace-1]: https://marketplace.upbound.io
-[gitops-with-control-planes]: /spaces/howtos/cloud-spaces/gitops
-[connect-control-planes-to-external-services]: /manuals/platform/howtos/oidc
-[spaces-documentation]: /spaces/howtos/secrets-management
-[spaces-documentation-1]: /spaces/howtos/backup-and-restore
-[otel]: https://opentelemetry.io
-[spaces-documentation-2]: /spaces/howtos/observability
diff --git a/spaces_versioned_docs/version-v1.14/concepts/deployment-modes.md b/spaces_versioned_docs/version-v1.14/concepts/deployment-modes.md
deleted file mode 100644
index f5e718f88..000000000
--- a/spaces_versioned_docs/version-v1.14/concepts/deployment-modes.md
+++ /dev/null
@@ -1,53 +0,0 @@
----
-title: Deployment Modes
-sidebar_position: 10
-description: An overview of deployment modes for Spaces
----
-
-Upbound Spaces can be deployed and used in a variety of modes:
-
-- **Cloud Spaces:** Multi-tenant Upbound-hosted, Upbound-managed Space environment. Cloud Spaces provide a typical SaaS experience.
-- **[Dedicated Spaces][dedicated-spaces]:** Single-tenant Upbound-hosted, Upbound-managed Space environment. Dedicated Spaces provide a SaaS experience, with additional isolation guarantees that your workloads run in a fully isolated context.
-- **[Managed Spaces][managed-spaces]:** Single-tenant customer-hosted, Upbound-managed Space environment. Managed Spaces provide a SaaS-like experience, with additional guarantees of all hosting infrastructure being served from your own cloud account.
-- **[Self-Hosted Spaces][self-hosted-spaces]:** Single-tenant customer-hosted, customer-managed Space environment. This is a fully self-hosted, self-managed software experience for using Spaces. Upbound delivers the Spaces software and you run it yourself.
-
-The Upbound platform uses a federated model to connect each Space back to a
-central service called the [Upbound Console][console], which is deployed and
-managed by Upbound.
-
-By default, customers have access to a set of Cloud Spaces.
-
-## Supported clouds
-
-You can host Upbound Spaces on Amazon Web Services (AWS), Microsoft Azure,
-and Google Cloud Platform (GCP). Regardless of the hosting platform, you can use
-Spaces to deploy control planes that manage the lifecycle of your resources.
-
-## Supported regions
-
-This table lists the cloud service provider regions supported by Upbound.
 - -### GCP - -| Region | Location | -| --- | --- | -| `us-west-1` | Western US (Oregon) | -| `us-central-1` | Central US (Iowa) | -| `eu-west-3` | Eastern Europe (Frankfurt) | - -### AWS - -| Region | Location | -| --- | --- | -| `us-east-1` | Eastern US (Northern Virginia) | - -### Azure - -| Region | Location | -| --- | --- | -| `us-east-1` | Eastern US (Iowa) | - -[dedicated-spaces]: /spaces/howtos/cloud-spaces/dedicated-spaces-deployment -[managed-spaces]: /spaces/howtos/self-hosted/managed-spaces-deployment -[self-hosted-spaces]: /spaces/howtos/self-hosted/self-hosted-spaces-deployment -[console]: /manuals/console/upbound-console/ diff --git a/spaces_versioned_docs/version-v1.14/concepts/groups.md b/spaces_versioned_docs/version-v1.14/concepts/groups.md deleted file mode 100644 index d2ccacdb3..000000000 --- a/spaces_versioned_docs/version-v1.14/concepts/groups.md +++ /dev/null @@ -1,115 +0,0 @@ ---- -title: Control Plane Groups -sidebar_position: 2 -description: An introduction to the Control Plane Groups in Upbound -plan: "enterprise" ---- - - - -In Upbound, Control Plane Groups (or just 'groups') are a logical grouping of one or more control planes with shared resources like [secrets][secrets] or [backups][backups]. It's a mechanism for isolating these groups of resources within a single [Space][space]. All role-based access control in Upbound happens at the control plane group level. - -## When to use multiple groups - -You should use groups in environments where there's a need to have Crossplane manage infrastructure across multiple cloud accounts or projects. If you only need to deploy and manage resources in a couple of cloud accounts, you shouldn't need to think about groups at all. - -Groups are a way to divide access in Upbound between multiple teams. Think of a group as being analogous to a Kubernetes _namespace_. - -## The 'default' group - -Every Cloud Space in Upbound has a group named _default_ available. - -## Working with groups - -### View groups - -You can list groups in a Space using: - -```shell -up group list -``` - -If you're operating in a single-tenant Space and have access to the underlying cluster, you can list namespaces that have the group label: - -```shell -kubectl get namespaces -l spaces.upbound.io/group=true -``` - -### Set the group for a request - -Several commands in _up_ have a group context. To set the group for a request, use the `--group` flag: - -```shell -up ctp list --group=team1 -``` -```shell -up ctp create new-ctp --group=team2 -``` - -### Set the group preference - -The _up_ CLI operates upon a single [Upbound context][upbound-context]. Whatever context gets set is then used as the preference for other commands. An Upbound context is capable of pointing at a variety of altitudes: - -1. A Space in Upbound -2. A group within a Space -3. A control plane within a group - -To set the group preference, use `up ctx` to choose a group as your preferred Upbound context.
For example: - -```shell -# This sets the context for the up CLI to the default group in an Upbound-managed Cloud Space (gcp-us-west-1) for an organization called 'acmeco' -up ctx acmeco/upbound-gcp-us-west-1/default/ -``` - -### Create a group - -To create a group, log in to Upbound and set your context to your desired Space: - -```shell -up login -up ctx '<organization>/<space>' -# Example: up ctx acmeco/upbound-gcp-us-west-1 -``` - - -Create a group: - -```shell -up group create my-new-group -``` - -### Delete a group - -To delete a group, log in to Upbound and set your context to your desired Space: - -```shell -up login -up ctx '<organization>/<space>' -# Example: up ctx acmeco/upbound-gcp-us-west-1 -``` - -Delete a group: - -```shell -up group delete my-new-group -``` - -### Protected groups - -Once a control plane gets created in a group, Upbound enforces a protection policy on the group that prevents accidental deletion. To delete a group that has control planes in it, you should first delete all control planes in the group. - -## Groups in the context of single-tenant Spaces - -Upbound offers a variety of deployment models to use the product. If you deploy your own single-tenant Upbound Space (whether connected or disconnected), you're self-hosting Upbound software in a Kubernetes cluster. In these environments, a control plane group maps to a corresponding namespace in the cluster which hosts the Space. - -Most Kubernetes clusters come with some set of predefined namespaces. Because a group maps to a corresponding Kubernetes namespace, whenever a group gets created, a corresponding Kubernetes namespace must exist as well. When the Spaces software is newly installed, no groups exist. You _can_ elevate a Kubernetes namespace to become a group by doing the following: - -1. Creating a group with the same name as a preexisting Kubernetes namespace -2. Creating a control plane in a preexisting Kubernetes namespace -3. Labeling a Kubernetes namespace with the label `spaces.upbound.io/group=true` - - -[secrets]: /spaces/howtos/secrets-management -[backups]: /spaces/howtos/self-hosted/workload-id/backup-restore-config/ -[space]: /spaces/overview -[upbound-context]: /manuals/cli/concepts/contexts diff --git a/spaces_versioned_docs/version-v1.14/howtos/_category_.json b/spaces_versioned_docs/version-v1.14/howtos/_category_.json deleted file mode 100644 index d3a8547aa..000000000 --- a/spaces_versioned_docs/version-v1.14/howtos/_category_.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "label": "How-tos", - "position": 3, - "collapsed": true -} - - diff --git a/spaces_versioned_docs/version-v1.14/howtos/api-connector.md b/spaces_versioned_docs/version-v1.14/howtos/api-connector.md deleted file mode 100644 index a14468f52..000000000 --- a/spaces_versioned_docs/version-v1.14/howtos/api-connector.md +++ /dev/null @@ -1,413 +0,0 @@ ---- -title: API Connector -weight: 90 -description: Connect Kubernetes clusters to remote Crossplane control planes for resource synchronization -aliases: - - /api-connector - - /concepts/api-connector ---- -:::info API Version Information -This guide covers API Connector, a preview feature available in all supported versions (v1.9-v1.14+). - -For related API specifications and available resources, see the [Spaces API Reference](../../../reference/apis/spaces-api/). For version compatibility details, see the . -::: - -:::warning -API Connector is currently in **Preview**. The feature is under active -development and subject to breaking changes. Use it for testing and evaluation -purposes only.
 -::: - -API Connector enables seamless integration between Kubernetes application -clusters consuming APIs and remote Crossplane control planes providing and -reconciling APIs. - -You can use the API Connector to decouple where Crossplane is running (for -example in an Upbound control plane), and where APIs are consumed -(for example in an existing Kubernetes cluster). This gives you flexibility and -consistency in your control plane operations. - - - -Unlike the [Control Plane Connector](ctp-connector.md), which offers only -coarse-grained connectivity between app clusters and a control plane, API -connector offers fine-grained configuration of which APIs get offered along with -multi-cluster connectivity. - -## Architecture overview - -![API Connector Architecture](/img/api-connector.png) - -API Connector uses a **provider-consumer** model: - -- **Provider control plane**: The Upbound control plane that provides APIs and manages infrastructure. -- **Consumer cluster**: Any Kubernetes cluster whose users want to use APIs provided by the provider control plane without having to run Crossplane. API connector gets installed in the consumer cluster, and bidirectionally syncs API objects to the provider. - -### Key components - -**Custom Resource Definitions (CRDs)**: - - -- `ClusterConnection`: Establishes a connection from the consumer to the provider cluster. Pulls bindable CRD APIs from the provider into the consumer cluster for use. - -- `ClusterAPIBinding`: Instructs API connector to sync all API objects cluster-wide with a given API group to a given provider cluster. -- `APIBinding`: Namespaced version of `ClusterAPIBinding`. Instructs API connector to sync API objects within a given namespace and with a given API group to a given provider cluster. - - -## Prerequisites - -Before using API Connector, ensure: - -1. **Consumer cluster** has network access to the provider control plane -1. You have a license to use API connector. If you are unsure, [contact Upbound][contact] or your sales representative. - -This guide walks through how to automate connecting your cluster to an Upbound -control plane. You can also manually configure the API Connector. - -## Publishing APIs in the provider cluster - - - - -First, log in to your provider control plane, and choose which CRD APIs you want -to make accessible to the consumer cluster. API connector only syncs -these "bindable" CRDs. - - - - - - -Use the `up` CLI to log in: - -```bash -up login -``` - -Connect to your control plane: - -```bash -up ctx -``` - -Check what CRDs are available: - -```bash -kubectl get crds -``` - - -Label all CRDs you want to publish with the bindable label: - - -```bash -kubectl label crd 'connect.upbound.io/bindable'='true' --overwrite -``` - - - - -Change context to the provider cluster: -```bash -kubectl config use-context -``` - -Check what CRDs are available: -```bash -kubectl get crds -``` - - -Label all CRDs you want to publish with the bindable label: - -```bash -kubectl label crd 'connect.upbound.io/bindable'='true' --overwrite -``` - - - -## Installation - - - - -The up CLI provides the simplest installation method with automatic -configuration: - -Make sure the current Kubeconfig context is set to the **provider control plane**: -```bash -up ctx - -up controlplane api-connector install --consumer-kubeconfig [OPTIONS] -``` - -The command: -1. Creates a Robot account (named ``) in the Upbound Cloud organization ``, -1. 
Gives the created robot account `admin` permissions to the provider control plane `` -1. Generates a JWT token for the robot account, and stores it in a Kubernetes Secret in the consumer cluster. -1. Installs the API connector Helm chart in the consumer cluster. -1. Creates a `ClusterConnection` object in the consumer cluster, referring to the newly generated Secret, so that API connector can authenticate successfully to the provider control plane. -1. API connector pulls all published CRDs from the previous step into the consumer cluster. - -**Example**: -```bash -up controlplane api-connector install \ - --consumer-kubeconfig ~/.kube/config \ - --consumer-context my-cluster \ - --upbound-token -``` - -This command uses the provided token to authenticate with the **Provider control plane** -and creates a `ClusterConnection` resource in the **Consumer cluster** to connect to the -**Provider control plane**. - -**Key Options**: -- `--consumer-kubeconfig`: Path to consumer cluster kubeconfig (required) -- `--consumer-context`: Context name for consumer cluster (required) -- `--name`: Custom name for connection resources (optional) -- `--upbound-token`: API token for authentication (optional) -- `--upgrade`: Upgrade existing installation (optional) -- `--version`: Specific version to install (optional) - - - - -For manual installation or custom configurations: - -```bash -helm upgrade --install api-connector oci://xpkg.upbound.io/spaces-artifacts/api-connector \ - --namespace upbound-system \ - --create-namespace \ - --version \ - --set consumerClusterDisplayName= -``` - -### Authentication methods - -API Connector supports two authentication methods: - - - - -For Upbound Spaces integration: - -```yaml -apiVersion: v1 -kind: Secret -metadata: - name: spaces-secret - namespace: upbound-system -type: Opaque -stringData: - token: - organization: - spacesBaseURL: - controlPlaneGroupName: - controlPlaneName: -``` - - - -For direct cluster access: - -```yaml -apiVersion: v1 -kind: Secret -metadata: - name: provider-kubeconfig - namespace: upbound-system -type: Opaque -data: - kubeconfig: -``` - - - - -### Connection setup - -Create a `ClusterConnection` to establish connectivity: - - - - -```yaml -apiVersion: connect.upbound.io/v1alpha1 -kind: ClusterConnection -metadata: - name: spaces-connection - namespace: upbound-system -spec: - secretRef: - kind: UpboundRobotToken - name: spaces-secret - namespace: upbound-system - crdManagement: - pullBehavior: Pull -``` - - - - -```yaml -apiVersion: connect.upbound.io/v1alpha1 -kind: ClusterConnection -metadata: - name: provider-connection - namespace: upbound-system -spec: - secretRef: - kind: KubeConfig - name: provider-kubeconfig - namespace: upbound-system - crdManagement: - pullBehavior: Pull -``` - - - - - - - -### Configuration - -Bind APIs to make them available in your consumer cluster: - -```yaml -apiVersion: connect.upbound.io/v1alpha1 -kind: ClusterAPIBinding -metadata: - name: -spec: - connectionRef: - kind: ClusterConnection - name: # Or --name value -``` - - - 
- - - - -## Usage example - -After configuration, you can create API objects (in the consumer cluster) that -will be synchronized to the provider cluster: - -```yaml -apiVersion: nop.example.org/v1alpha1 -kind: NopResource -metadata: - name: my-resource - namespace: default -spec: - coolField: "Synchronized resource" - compositeDeletePolicy: Foreground -``` - -Verify the resource status: - -```bash -kubectl get nopresource my-resource -o yaml - -``` -When the `APIBound=True` condition is present, it means that the API object has -been synced to the provider cluster, and is being reconciled there. Whenever the -API object in the provider cluster gets status updates (for example -`Ready=True`), that status is synced back to the consumer cluster. - -Switch contexts to the provider cluster to see the API object being created: - -```bash -up ctx -# or kubectl config set-context -``` - -```bash -kubectl get nopresource my-resource -o yaml -``` - -Note that in the provider cluster, the API object is labeled with information on -where the API object originates from, and `connect.upbound.io/managed=true`. - -## Monitoring and troubleshooting - -### Check connection status - -```bash -kubectl get clusterconnection -``` - -Expected output: -``` -NAME STATUS MESSAGE -spaces-connection Ready Provider controlplane is available -``` - -### View available APIs - -```bash -kubectl get clusterconnection spaces-connection -o jsonpath='{.status.offeredAPIs[*].name}' -``` - -### Check API binding status - -```bash -kubectl get clusterapibinding -``` - -### Debug resource synchronization - -```bash -kubectl describe -``` - -## Removal - -### Using the up CLI - -```bash -up controlplane api-connector uninstall \ - --consumer-kubeconfig ~/.kube/config \ - --all -``` - -The `--all` flag removes all resources including connections and secrets. -Without the flag, only runtime related resources won't be removed. - -:::note -Uninstall doesn't remove any API objects in the provider control plane. If you -want to clean up all API objects there, delete all API objects from the consumer -cluster before API connector uninstallation, and wait for the objects to get -deleted. -::: - - -### Using Helm - -```bash -helm uninstall api-connector -n upbound-system -``` - -## Limitations - -- **Preview feature**: Subject to breaking changes. Not yet production grade. -- **CRD updates**: CRDs are pulled once but not automatically updated. If multiple Crossplane clusters offer the same CRD API, API changes must be synchronized out of band, for example using a [Crossplane Configuration](https://docs.crossplane.io/latest/packages/). -- **Network requirements**: Consumer cluster must have direct network access to provider cluster. -- **Wide permissions needed in consumer cluster**: Because the API connector doesn't know up front the names of the APIs it needs to reconcile, it currently runs with full "root" privileges in the consumer cluster. - -- **Connector polling**: API Connector checks for drift between the consumer and provider cluster - periodically through polling. The poll interval can be changed with the `pollInterval` Helm value. - - -## Advanced configuration - -### Multiple connections - -You can connect to multiple provider clusters simultaneously by creating multiple `ClusterConnection` resources with different names and configurations. 
 - -[contact]: https://www.upbound.io/contact-us diff --git a/spaces_versioned_docs/version-v1.14/howtos/auto-upgrade.md b/spaces_versioned_docs/version-v1.14/howtos/auto-upgrade.md deleted file mode 100644 index 249056fb4..000000000 --- a/spaces_versioned_docs/version-v1.14/howtos/auto-upgrade.md +++ /dev/null @@ -1,131 +0,0 @@ ---- -title: Automatically upgrade control planes -sidebar_position: 50 -description: How to configure automatic upgrades of Crossplane in a control plane -plan: "standard" ---- - - - -Upbound Spaces can automatically upgrade the version of Upbound Crossplane in managed control plane instances. You can edit the `spec.crossplane.autoUpgrade` field in your `ControlPlane` specification with the available release channels below. - -:::info API Version Information -This guide applies to all supported versions (v1.9-v1.14+). The `spec.crossplane.autoUpgrade` field has been available since v1.9. - -For ControlPlane API specifications, see the [Spaces API Reference](../../../reference/apis/spaces-api/). For version compatibility details, see the . -::: - -| Channel | Description | Example | -|------------|-----------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------| -| **None** | Disables auto upgrades. | _Uses version specified in `spec.crossplane.version`._ | -| **Patch** | Upgrades to the latest supported patch release. | _Control plane version 1.12.2-up.2 auto upgrades to 1.12.3-up.1 upon release._ | -| **Stable** | Default setting. Upgrades to the latest supported patch release on minor version _N-1_ where N is the latest supported minor version. | _If the latest supported minor version is 1.14, auto upgrades to the latest patch of 1.13, for example 1.13.2-up.3._ | -| **Rapid** | Upgrades to the latest supported patch release on the latest supported minor version. | _If the latest supported minor version is 1.14, auto upgrades to the latest patch of 1.14, for example 1.14.5-up.1._ | - - -:::warning - -The `Rapid` channel is only recommended for users willing to accept the risk of new features and potentially breaking changes. - -::: - -## Examples - -The specs below are examples of how to edit the `autoUpgrade` channel in your `ControlPlane` specification. - -To run a control plane with the `Rapid` auto upgrade channel, your spec should look like this: - -```yaml -apiVersion: spaces.upbound.io/v1beta1 -kind: ControlPlane -metadata: - name: example-ctp -spec: - crossplane: - autoUpgrade: - channel: Rapid - writeConnectionSecretToRef: - name: kubeconfig-example-ctp -``` - -To run a control plane with a pinned version of Crossplane, specify in the `version` field: - -```yaml -apiVersion: spaces.upbound.io/v1beta1 -kind: ControlPlane -metadata: - name: example-ctp -spec: - crossplane: - version: 1.14.3-up.1 - autoUpgrade: - channel: None - writeConnectionSecretToRef: - name: kubeconfig-example-ctp -``` - -## Supported Crossplane versions - -Spaces supports the latest minor version plus the two [preceding minor versions][preceding-minor-versions]. For example, if the latest supported minor version is `1.14`, minor versions `1.13` and `1.12` are also supported. Versions older than the three most recent minor versions aren't supported. Only supported Crossplane versions are valid specifications for new control planes.
 - -Current Crossplane version support by Spaces version: - -| Spaces Version | Crossplane Version Min | Crossplane Version Max | -|:--------------:|:----------------------:|:----------------------:| -| 1.2 | 1.13 | 1.15 | -| 1.3 | 1.13 | 1.15 | -| 1.4 | 1.14 | 1.16 | -| 1.5 | 1.14 | 1.16 | -| 1.6 | 1.14 | 1.16 | -| 1.7 | 1.14 | 1.16 | -| 1.8 | 1.15 | 1.17 | -| 1.9 | 1.16 | 1.18 | -| 1.10 | 1.16 | 1.18 | -| 1.11 | 1.16 | 1.18 | -| 1.12 | 1.17 | 1.19 | - - -Upbound offers extended support for all installed Crossplane versions released within a 12-month window since the last Spaces release. Contact your Upbound sales representative for more information on version support. - - -:::warning - -If the auto upgrade channel is `Stable` or `Rapid`, the Crossplane version will always stay within the support window after auto upgrade. If set to `Patch` or `None`, the minor version may be outside the support window. You are responsible for upgrading to a supported version. - -::: - -To view the support status of a control plane instance, use `kubectl get ctp`. - -```bash -kubectl get ctp -NAME CROSSPLANE VERSION SUPPORTED READY MESSAGE AGE -example-ctp 1.13.2-up.3 True True 31m - -``` - -Unsupported versions return `SUPPORTED: False`. - -```bash -kubectl get ctp -NAME CROSSPLANE VERSION SUPPORTED READY MESSAGE AGE -example-ctp 1.11.5-up.1 False True 31m - -``` - -Use the `-o yaml` flag to return more information. - -```bash -kubectl get controlplanes.spaces.upbound.io example-ctp -o yaml -status: - conditions: - ... - - lastTransitionTime: "2024-01-23T06:36:10Z" - message: Crossplane version 1.11.5-up.1 is outside of the support window. - Oldest supported minor version is 1.12. - reason: UnsupportedCrossplaneVersion - status: "False" - type: Supported -``` - - -[preceding-minor-versions]: /reference/usage/lifecycle/#maintenance-and-updates diff --git a/spaces_versioned_docs/version-v1.14/howtos/automation-and-gitops/_category_.json b/spaces_versioned_docs/version-v1.14/howtos/automation-and-gitops/_category_.json deleted file mode 100644 index b65481af6..000000000 --- a/spaces_versioned_docs/version-v1.14/howtos/automation-and-gitops/_category_.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "label": "Automation & GitOps", - "position": 11, - "collapsed": true, - "customProps": { - "plan": "business" - } -} diff --git a/spaces_versioned_docs/version-v1.14/howtos/automation-and-gitops/overview.md b/spaces_versioned_docs/version-v1.14/howtos/automation-and-gitops/overview.md deleted file mode 100644 index 57eeb15fc..000000000 --- a/spaces_versioned_docs/version-v1.14/howtos/automation-and-gitops/overview.md +++ /dev/null @@ -1,138 +0,0 @@ ---- -title: Automation and GitOps Overview -sidebar_label: Overview -sidebar_position: 1 -description: Guide to automating control plane deployments with GitOps and Argo CD -plan: "business" ---- - -Automating control plane deployments with GitOps enables declarative, version-controlled infrastructure management. This section covers integrating GitOps workflows with Upbound control planes using Argo CD and related tools. - -:::info API Version Information -This guide applies to all supported versions (v1.9-v1.14+). GitOps patterns are compatible across all versions of Upbound Spaces. - -For details on deployment models and their differences, see the [Deployment Modes](../../concepts/deployment-modes.md) concept guide. For version-specific features, see the . -::: - -## What is GitOps?
 - -GitOps is an approach for managing infrastructure by: -- **Declaratively describing** desired system state in Git -- **Using controllers** to continuously reconcile actual state with desired state -- **Treating Git as the source of truth** for all configuration and deployments - -Upbound control planes are fully compatible with GitOps patterns and we strongly recommend integrating GitOps in the platforms you build on Upbound. - -## Key Concepts - -### Argo CD -[Argo CD](https://argo-cd.readthedocs.io/) is a popular Kubernetes-native GitOps controller. It continuously monitors Git repositories and automatically applies changes to your infrastructure when commits are detected. - -### Deployment Models - -The way you configure GitOps depends on your deployment model: - -| Aspect | Cloud Spaces | Self-Hosted Spaces | -|--------|--------------|-------------------| -| **Access Method** | Upbound API with tokens | Kubernetes native (secrets/kubeconfig) | -| **Configuration** | Kubeconfig via `up` CLI | Control plane connection secrets | -| **Setup Complexity** | More involved (API integration) | Simpler (native Kubernetes) | -| **Typical Use Case** | Managing Upbound resources | Managing workloads on control planes | - -## Getting Started - -**Choose your path based on your deployment model:** - -### Cloud Spaces -If you're using Upbound Cloud Spaces (Dedicated or Managed): -1. Start with [GitOps with Upbound Control Planes](../cloud-spaces/gitops-on-upbound.md) -2. Learn how to integrate Argo CD with Cloud Spaces -3. Manage both control plane infrastructure and Upbound resources declaratively - -### Self-Hosted Spaces -If you're running self-hosted Spaces: -1. Start with [GitOps with ArgoCD in Self-Hosted Spaces](../self-hosted/gitops-with-argocd.md) -2. Learn how to configure control plane connection secrets -3. Manage workloads deployed to your control planes - -## Common Workflows - -### Workflow 1: Managing Control Planes with GitOps -Create and manage control planes themselves declaratively using provider-kubernetes: - -```yaml -apiVersion: kubernetes.crossplane.io/v1alpha2 -kind: Object -metadata: - name: my-controlplane -spec: - forProvider: - manifest: - apiVersion: spaces.upbound.io/v1beta1 - kind: ControlPlane - # ... control plane configuration -``` - -### Workflow 2: Managing Workloads on Control Planes -Deploy applications and resources to control planes using standard Kubernetes GitOps patterns: - -```yaml -apiVersion: v1 -kind: Namespace -metadata: - name: my-app ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: my-app - namespace: my-app -# ... deployment configuration -``` - -### Workflow 3: Managing Upbound Resources -Use provider-upbound to manage Upbound IAM and repository resources: - -- Teams -- Robots and their team memberships -- Repositories and permissions - -## Advanced Topics - -### Argo CD Plugin for Upbound -Learn more in the [ArgoCD Plugin guide](../self-hosted/use-argo.md) for enhanced integration with self-hosted Spaces. - -### Declarative Control Plane Creation -See [Declaratively create control planes](../self-hosted/declarative-ctps.md) for advanced automation patterns. - -### Consuming Control Plane APIs -Understand how to [consume control plane APIs in your app cluster](../mcp-connector-guide.md) with Argo CD.
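To make the workflows above concrete, the sketch below shows a minimal Argo CD `Application` that syncs a Git path to a control plane registered with Argo CD as a cluster named `my-control-plane-context`. The repository URL, path, and cluster name are placeholders to adapt to your setup.

```yaml
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: platform-apis
  namespace: argocd
spec:
  project: default
  source:
    repoURL: https://github.com/example-org/platform-config.git  # placeholder repository
    targetRevision: main
    path: control-planes/production
  destination:
    name: my-control-plane-context   # Argo CD cluster context for the control plane
  syncPolicy:
    automated:
      selfHeal: true   # prune is deliberately left unset to avoid pruning Crossplane resources
```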
 - -## Prerequisites - -Before implementing GitOps with control planes, ensure you have: - -**For Cloud Spaces:** -- Access to Upbound Cloud Spaces -- `up` CLI installed and configured -- API token with appropriate permissions -- Argo CD or similar GitOps controller running -- Familiarity with Kubernetes RBAC - -**For Self-Hosted Spaces:** -- Self-hosted Spaces deployed and running -- Argo CD deployed in your infrastructure -- Kubectl access to the cluster hosting Spaces -- Understanding of control plane architecture - -## Next Steps - -1. **Choose your deployment model** above -2. **Review the relevant getting started guide** -3. **Set up your GitOps controller** (Argo CD) -4. **Deploy your first automated control plane** -5. **Explore advanced topics** as needed - -:::tip -Start with simple deployments to test your GitOps workflow before moving to production. Use [simulations](../simulations.md) to preview changes before applying them. -::: diff --git a/spaces_versioned_docs/version-v1.14/howtos/backup-and-restore.md b/spaces_versioned_docs/version-v1.14/howtos/backup-and-restore.md deleted file mode 100644 index 3b8d026cb..000000000 --- a/spaces_versioned_docs/version-v1.14/howtos/backup-and-restore.md +++ /dev/null @@ -1,530 +0,0 @@ ---- -title: Backup and restore -sidebar_position: 13 -description: Configure and manage backups in your Upbound Space. -plan: "enterprise" ---- - - - -_Shared Backups_ is Upbound's built-in backup and restore feature. Shared Backups lets you configure automatic schedules for taking snapshots of your control planes. You can restore data from these backups by creating new control planes. This guide explains how to use Shared Backups for disaster recovery or upgrade scenarios. - -:::info API Version Information & Available Versions -This guide applies to **all supported versions** (v1.9-v1.15+). - -**Select your API version**: -- [Spaces API v1.15 (Latest)](../../../next/reference/apis/spaces-api/) -- [Spaces API v1.14](../../../1.14/reference/apis/spaces-api/) -- [Spaces API v1.13](../../../1.13/reference/apis/spaces-api/) -- [Spaces API v1.12](../../../1.12/reference/apis/spaces-api/) -- [Spaces API v1.11](../../../1.11/reference/apis/spaces-api/) -- [Spaces API v1.10](../../../1.10/reference/apis/spaces-api/) -- [Spaces API v1.9](../../../1.9/reference/apis/spaces-api/) - -For version support policy, see . For version compatibility details, see the . -::: - -## Benefits - -The Shared Backups feature provides the following benefits: - -* Automatic backups for control planes without any operational overhead -* Backup schedules for multiple control planes in a group -* Shared Backups are available across all hosting environments of Upbound (Disconnected, Connected, or Cloud Spaces) - - -## Configure a Shared Backup Config - - -[SharedBackupConfig][sharedbackupconfig] is a [group-scoped][group-scoped] resource. You should create it in a group containing one or more control planes. This resource configures the storage details and provider. Whenever a backup executes (either scheduled or manually initiated), it references a SharedBackupConfig to tell it where to store the snapshot. - - -### Backup config provider - - -The `spec.objectStorage.provider` and `spec.objectStorage.config` fields configure: - -* The object storage provider -* The path to the provider -* The credentials needed to communicate with the provider - -You can only set one provider. Upbound currently supports AWS, Azure, and GCP as providers.
 - - -`spec.objectStorage.config` is a freeform map of configuration options for the object storage provider. See [Thanos object storage][thanos-object-storage] for more information on the formats for each supported cloud provider. `spec.bucket` and `spec.provider` override the required values in the config. - - - -#### AWS as a storage provider - -:::important -For Cloud Spaces, static credentials are currently the only supported auth method. -::: - -This example demonstrates how to use AWS as a storage provider for your backups: - -```yaml -apiVersion: spaces.upbound.io/v1alpha1 -kind: SharedBackupConfig -metadata: - name: default - namespace: default -spec: - objectStorage: - provider: AWS - bucket: spaces-backup-bucket - config: - endpoint: s3.eu-west-2.amazonaws.com - region: eu-west-2 - credentials: - source: Secret - secretRef: - name: bucket-creds - key: creds -``` - - -This example assumes you've already created an S3 bucket called "spaces-backup-bucket" in the AWS `eu-west-2` region. The account credentials to access the bucket should exist in a secret in the same namespace as the Shared Backup Config. - -#### Azure as a storage provider - -:::important -For Cloud Spaces, static credentials are currently the only supported auth method. -::: - -This example demonstrates how to use Azure as a storage provider for your backups: - -```yaml -apiVersion: spaces.upbound.io/v1alpha1 -kind: SharedBackupConfig -metadata: - name: default - namespace: default -spec: - objectStorage: - provider: Azure - bucket: upbound-backups - config: - storage_account: upbackupstore - container: upbound-backups - endpoint: blob.core.windows.net - credentials: - source: Secret - secretRef: - name: bucket-creds - key: creds -``` - - -This example assumes you've already created an Azure storage account called `upbackupstore` and blob `upbound-backups`. The storage account key to access the blob should exist in a secret in the same namespace as the Shared Backup Config. - - -#### GCP as a storage provider - -:::important -For Cloud Spaces, static credentials are currently the only supported auth method. -::: - -This example demonstrates how to use Google Cloud Storage as a storage provider for your backups: - -```yaml -apiVersion: spaces.upbound.io/v1alpha1 -kind: SharedBackupConfig -metadata: - name: default - namespace: default -spec: - objectStorage: - provider: GCP - bucket: spaces-backup-bucket - credentials: - source: Secret - secretRef: - name: bucket-creds - key: creds -``` - - -This example assumes you've already created a Cloud Storage bucket called "spaces-backup-bucket" and a service account with access to this bucket. The key file should exist in a secret in the same namespace as the Shared Backup Config. - - -## Configure a Shared Backup Schedule - - -[SharedBackupSchedule][sharedbackupschedule] is a [group-scoped][group-scoped-1] resource. You should create it in a group containing one or more control planes. This resource defines a backup schedule for control planes within its corresponding group.
 - -Below is an example of a Shared Backup Schedule that takes daily backups of all control planes with the `environment: production` label: - -```yaml -apiVersion: spaces.upbound.io/v1alpha1 -kind: SharedBackupSchedule -metadata: - name: daily-schedule - namespace: default -spec: - schedule: "@daily" - configRef: - kind: SharedBackupConfig - name: default - controlPlaneSelector: - labelSelectors: - - matchLabels: - environment: production -``` - -### Define a schedule - -The `spec.schedule` field is a [Cron-formatted][cron-formatted] string. Some common examples are below: - - -| Entry | Description | -| ----------------- | ------------------------------------------------------------------------------------------------- | -| `@hourly` | Run once an hour. | -| `@daily` | Run once a day. | -| `@weekly` | Run once a week. | -| `0 0/4 * * *` | Run every 4 hours. | -| `0/15 * * * 1-5` | Run every 15 minutes, Monday through Friday. | -| `@every 1h30m10s` | Run every 1 hour, 30 minutes, and 10 seconds. Hour is the largest measurement of time for `@every`. | - - -### Exclude resources from the backup - -The `spec.excludedResources` field is an array of resource names to exclude from each backup. - -```yaml -apiVersion: spaces.upbound.io/v1alpha1 -kind: SharedBackupSchedule -metadata: - name: daily-schedule -spec: - excludedResources: - - "xclusters.aws.platformref.upbound.io" - - "xdatabase.aws.platformref.upbound.io" - - "xrolepolicyattachment.iam.aws.crossplane.io" -``` - -:::warning -You must specify resource names in lowercase "resource.group" format (for example, `xclusters.aws.platformref.upbound.io`). Using only the resource kind (for example, `XCluster`) isn't supported. -::: - -### Suspend a schedule - -Use the `spec.suspend` field to suspend the schedule. It creates no new backups, but allows running backups to complete. - -```yaml -apiVersion: spaces.upbound.io/v1alpha1 -kind: SharedBackupSchedule -metadata: - name: daily-schedule -spec: - suspend: true -``` - -### Set the time to live - -Set the `spec.ttl` field to define the time to live for the backup. After this time, the backup is eligible for garbage collection. If this field isn't set, the backup isn't garbage collected. The time to live is a duration, for example, `168h` for 7 days. - -```yaml -apiVersion: spaces.upbound.io/v1alpha1 -kind: SharedBackupSchedule -metadata: - name: daily-schedule -spec: - ttl: 168h # Backup is garbage collected after 7 days -``` -:::tip -By default, this setting doesn't delete uploaded files. Review the next section to define -the deletion policy. -::: - -### Define the deletion policy - -Set the `spec.deletionPolicy` to define backup deletion actions, including the -deletion of the backup file from the bucket. The `deletionPolicy` value defaults -to `Orphan`. Set it to `Delete` to remove uploaded files in the bucket. For more -information on the backup and restore process, review the [Spaces API -documentation][spaces-api-documentation]. - -```yaml -apiVersion: spaces.upbound.io/v1alpha1 -kind: SharedBackupSchedule -metadata: - name: daily-schedule -spec: - ttl: 168h # Backup is garbage collected after 7 days - deletionPolicy: Delete # Defaults to Orphan -``` - -### Garbage collect backups when the schedule gets deleted - -Set the `spec.useOwnerReferencesInBackup` field to garbage collect associated backups when a shared schedule gets deleted. If set to true, backups are garbage collected when the schedule gets deleted.
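Putting the last few options together, the following sketch (using only the fields documented above) defines a daily schedule whose backups expire after a week, get removed from the bucket, and are garbage collected along with the schedule itself:

```yaml
apiVersion: spaces.upbound.io/v1alpha1
kind: SharedBackupSchedule
metadata:
  name: daily-schedule
  namespace: default
spec:
  schedule: "@daily"
  ttl: 168h                         # backups expire after 7 days
  deletionPolicy: Delete            # remove uploaded files from the bucket
  useOwnerReferencesInBackup: true  # delete backups when this schedule is deleted
  configRef:
    kind: SharedBackupConfig
    name: default
  controlPlaneSelector:
    labelSelectors:
      - matchLabels:
          environment: production
```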
 - -### Control plane selection - -To configure which control planes in a group you want to create a backup schedule for, use the `spec.controlPlaneSelector` field. You can either use `labelSelectors` or the `names` of a control plane directly. A control plane matches if any of the label selectors match. - -This example matches all control planes in the group that have `environment: production` as a label: - -```yaml -apiVersion: spaces.upbound.io/v1alpha1 -kind: SharedBackupSchedule -metadata: - name: my-backup-schedule -spec: - controlPlaneSelector: - labelSelectors: - - matchLabels: - environment: production -``` - -You can use the more complex `matchExpressions` to match labels based on an expression. This example matches control planes that have label `environment: production` or `environment: staging`: - -```yaml -apiVersion: spaces.upbound.io/v1alpha1 -kind: SharedBackupSchedule -metadata: - name: my-backup-schedule -spec: - controlPlaneSelector: - labelSelectors: - - matchExpressions: - - { key: environment, operator: In, values: [production,staging] } -``` - -You can also specify the names of control planes directly: - -```yaml -apiVersion: spaces.upbound.io/v1alpha1 -kind: SharedBackupSchedule -metadata: - name: my-backup-schedule -spec: - controlPlaneSelector: - names: - - controlplane-dev - - controlplane-staging - - controlplane-prod -``` - - -## Configure a Shared Backup - - - -[SharedBackup][sharedbackup] is a [group-scoped][group-scoped-2] resource. You should create it in a group containing one or more control planes. This resource causes backups to occur for control planes within its corresponding group. - -Below is an example of a Shared Backup that takes a backup of all control planes with the `environment: production` label: - -```yaml -apiVersion: spaces.upbound.io/v1alpha1 -kind: SharedBackup -metadata: - name: my-backup - namespace: default -spec: - configRef: - kind: SharedBackupConfig - name: default - controlPlaneSelector: - labelSelectors: - - matchLabels: - environment: production -``` - -### Exclude resources from the backup - -The `spec.excludedResources` field is an array of resource names to exclude from each backup. - -```yaml -apiVersion: spaces.upbound.io/v1alpha1 -kind: SharedBackup -metadata: - name: my-backup -spec: - excludedResources: - - "xclusters.aws.platformref.upbound.io" - - "xdatabase.aws.platformref.upbound.io" - - "xrolepolicyattachment.iam.aws.crossplane.io" -``` - -:::warning -You must specify resource names in lowercase "resource.group" format (for example, `xclusters.aws.platformref.upbound.io`). Using only the resource kind (for example, `XCluster`) isn't supported. -::: - -### Set the time to live - -Set the `spec.ttl` field to define the time to live for the backup. After this time, the backup is eligible for garbage collection. If this field isn't set, the backup isn't garbage collected. The time to live is a duration, for example, `168h` for 7 days. - -```yaml -apiVersion: spaces.upbound.io/v1alpha1 -kind: SharedBackup -metadata: - name: my-backup -spec: - ttl: 168h # Backup is garbage collected after 7 days -``` - - -### Garbage collect backups on Shared Backup deletion - - - -Set the `spec.useOwnerReferencesInBackup` field to define whether to garbage collect associated backups when a shared backup gets deleted. If set to true, backups are garbage collected when the shared backup gets deleted.
 - -### Control plane selection - -To configure which control planes in a group you want to create a backup for, use the `spec.controlPlaneSelector` field. You can either use `labelSelectors` or the `names` of a control plane directly. A control plane matches if any of the label selectors match. - -This example matches all control planes in the group that have `environment: production` as a label: - -```yaml -apiVersion: spaces.upbound.io/v1alpha1 -kind: SharedBackup -metadata: - name: my-backup -spec: - controlPlaneSelector: - labelSelectors: - - matchLabels: - environment: production -``` - -You can use the more complex `matchExpressions` to match labels based on an expression. This example matches control planes that have label `environment: production` or `environment: staging`: - -```yaml -apiVersion: spaces.upbound.io/v1alpha1 -kind: SharedBackup -metadata: - name: my-backup -spec: - controlPlaneSelector: - labelSelectors: - - matchExpressions: - - { key: environment, operator: In, values: [production,staging] } -``` - -You can also specify the names of control planes directly: - -```yaml -apiVersion: spaces.upbound.io/v1alpha1 -kind: SharedBackup -metadata: - name: my-backup -spec: - controlPlaneSelector: - names: - - controlplane-dev - - controlplane-staging - - controlplane-prod -``` - -## Create a manual backup - -[Backup][backup] is a [group-scoped][group-scoped-3] resource that causes a single backup to occur for a control plane in its corresponding group. - -Below is an example of a manual Backup of a control plane: - -```yaml -apiVersion: spaces.upbound.io/v1alpha1 -kind: Backup -metadata: - name: my-backup - namespace: default -spec: - configRef: - kind: SharedBackupConfig - name: default - controlPlane: my-awesome-ctp - deletionPolicy: Delete -``` - -The backup specification's `deletionPolicy` defines backup deletion actions, -including the deletion of the backup file from the bucket. The `deletionPolicy` -value defaults to `Orphan`. Set it to `Delete` to remove uploaded files -in the bucket. -For more information on the backup and restore process, review the [Spaces API documentation][spaces-api-documentation-1]. - - -### Choose a control plane to back up - -The `spec.controlPlane` field defines which control plane to execute a backup against. - -```yaml -apiVersion: spaces.upbound.io/v1alpha1 -kind: Backup -metadata: - name: my-backup - namespace: default -spec: - controlPlane: my-awesome-ctp -``` - -If the control plane doesn't exist, the backup fails after multiple retry attempts. - -### Exclude resources from the backup - -The `spec.excludedResources` field is an array of resource names to exclude from the manual backup. - -```yaml -apiVersion: spaces.upbound.io/v1alpha1 -kind: Backup -metadata: - name: my-backup -spec: - excludedResources: - - "xclusters.aws.platformref.upbound.io" - - "xdatabase.aws.platformref.upbound.io" - - "xrolepolicyattachment.iam.aws.crossplane.io" -``` - -:::warning -You must specify resource names in lowercase "resource.group" format (for example, `xclusters.aws.platformref.upbound.io`). Using only the resource kind (for example, `XCluster`) isn't supported. -::: - -### Set the time to live - -Set the `spec.ttl` field to define the time to live for the backup. After this time, the backup is eligible for garbage collection. If this field isn't set, the backup isn't garbage collected. The time to live is a duration, for example, `168h` for 7 days.
 - -```yaml -apiVersion: spaces.upbound.io/v1alpha1 -kind: Backup -metadata: - name: my-backup -spec: - ttl: 168h # Backup is garbage collected after 7 days -``` - -## Restore a control plane from a backup - -You can restore a control plane's state from a backup. Below is an example of creating a new control plane from a previous backup called `restore-me`: - - -```yaml -apiVersion: spaces.upbound.io/v1beta1 -kind: ControlPlane -metadata: - name: my-awesome-restored-ctp - namespace: default -spec: - restore: - source: - kind: Backup - name: restore-me -``` - - -[group-scoped]: /spaces/concepts/groups -[group-scoped-1]: /spaces/concepts/groups -[group-scoped-2]: /spaces/concepts/groups -[group-scoped-3]: /spaces/concepts/groups -[sharedbackupconfig]: /reference/apis/spaces-api/latest -[thanos-object-storage]: https://thanos.io/tip/thanos/storage.md/ -[sharedbackupschedule]: /reference/apis/spaces-api/latest -[cron-formatted]: https://en.wikipedia.org/wiki/Cron -[spaces-api-documentation]: /reference/apis/spaces-api/v1_9 -[sharedbackup]: /reference/apis/spaces-api/latest -[backup]: /reference/apis/spaces-api/latest -[spaces-api-documentation-1]: /reference/apis/spaces-api/v1_9 - - - diff --git a/spaces_versioned_docs/version-v1.14/howtos/cloud-spaces/_category_.json b/spaces_versioned_docs/version-v1.14/howtos/cloud-spaces/_category_.json deleted file mode 100644 index 1e1869a38..000000000 --- a/spaces_versioned_docs/version-v1.14/howtos/cloud-spaces/_category_.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "label": "Cloud Spaces", - "position": 1, - "collapsed": true, - "customProps": { - "plan": "standard" - } -} - - diff --git a/spaces_versioned_docs/version-v1.14/howtos/cloud-spaces/dedicated-spaces-deployment.md b/spaces_versioned_docs/version-v1.14/howtos/cloud-spaces/dedicated-spaces-deployment.md deleted file mode 100644 index ebad9493e..000000000 --- a/spaces_versioned_docs/version-v1.14/howtos/cloud-spaces/dedicated-spaces-deployment.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -title: Dedicated Spaces -sidebar_position: 4 -description: A guide to Upbound Dedicated Spaces -plan: business ---- - - -## Benefits - -Dedicated Spaces offer the following benefits: - -- **Single-tenancy.** A control plane space where Upbound guarantees you're the only tenant operating in the environment. -- **Connectivity to your private network.** Establish secure network connections between your Dedicated Cloud Space running in Upbound and your own resources behind your private network. -- **Reduced Overhead.** Offload day-to-day operational burdens to Upbound while focusing on your job of building your platform. - -## Architecture - -A Dedicated Space is a deployment of the Upbound Spaces software inside an -Upbound-controlled cloud account and network. The control planes you run in a -Dedicated Space are dedicated solely to your organization. - -The diagram below illustrates the high-level architecture of Upbound Dedicated Spaces: - -![Upbound Dedicated Spaces arch](/img/managed-arch-gcp.png) - -## How to get access to Dedicated Spaces - -If you have an interest in Upbound Dedicated Spaces, contact -[Upbound][contact-us]. We can chat more about your -requirements and see if Dedicated Spaces are a good fit for you.
 - -[contact-us]: https://www.upbound.io/contact-us -[managed-space]: /spaces/howtos/self-hosted/managed-spaces-deployment diff --git a/spaces_versioned_docs/version-v1.14/howtos/cloud-spaces/gitops-on-upbound.md b/spaces_versioned_docs/version-v1.14/howtos/cloud-spaces/gitops-on-upbound.md deleted file mode 100644 index fa59a8dce..000000000 --- a/spaces_versioned_docs/version-v1.14/howtos/cloud-spaces/gitops-on-upbound.md +++ /dev/null @@ -1,318 +0,0 @@ ---- -title: GitOps with Upbound Control Planes -sidebar_position: 80 -description: An introduction to doing GitOps with control planes on Upbound Cloud Spaces -plan: "business" ---- - -:::info Deployment Model -This guide applies to **Upbound Cloud Spaces** (Dedicated and Managed Spaces). For self-hosted Spaces deployments, see [GitOps with ArgoCD in Self-Hosted Spaces](/spaces/howtos/self-hosted/gitops-with-argocd/). -::: - -GitOps is an approach for managing a system by declaratively describing desired resources' configurations in Git and using controllers to realize the desired state. Upbound's control planes are compatible with this pattern and it's strongly recommended you integrate GitOps in the platforms you build on Upbound. - - -## Integrate with Argo CD - - -[Argo CD][argo-cd] is a project in the Kubernetes ecosystem commonly used for GitOps. You can use it in tandem with Upbound control planes to achieve GitOps flows. The sections below explain how to integrate these tools with Upbound. - -### Generate a kubeconfig for your control plane - -Use the up CLI to [generate a kubeconfig][generate-a-kubeconfig] for your control plane. - -```bash -up ctx ${yourOrganization}/${yourSpace}/${yourGroup}/${yourControlPlane} -f - > context.yaml -``` - -### Create an API token - - -You need a personal access token (PAT). You create PATs on a per-user basis in the Upbound Console. Go to [My Account - API tokens][my-account-api-tokens] and select Create New Token. Give the token a name and save the secret value to somewhere safe. - - -### Add the up CLI init container to Argo - -Create a new file called `up-plugin-values.yaml` and paste the following YAML: - -```yaml -controller: - volumes: - - name: up-plugin - emptyDir: {} - - name: up-home - emptyDir: {} - - volumeMounts: - - name: up-plugin - mountPath: /usr/local/bin/up - subPath: up - - name: up-home - mountPath: /home/argocd/.up - - initContainers: - - name: up-plugin - image: xpkg.upbound.io/upbound/up-cli:v0.39.0 - command: ["cp"] - args: - - /usr/local/bin/up - - /plugin/up - volumeMounts: - - name: up-plugin - mountPath: /plugin - -server: - volumes: - - name: up-plugin - emptyDir: {} - - name: up-home - emptyDir: {} - - volumeMounts: - - name: up-plugin - mountPath: /usr/local/bin/up - subPath: up - - name: up-home - mountPath: /home/argocd/.up - - initContainers: - - name: up-plugin - image: xpkg.upbound.io/upbound/up-cli:v0.39.0 - command: ["cp"] - args: - - /usr/local/bin/up - - /plugin/up - volumeMounts: - - name: up-plugin - mountPath: /plugin -``` - -### Install or upgrade Argo using the values file - -Install or upgrade Argo via Helm, including the values from the `up-plugin-values.yaml` file: - -```bash -helm upgrade --install -n argocd -f up-plugin-values.yaml --reuse-values argocd argo/argo-cd -``` - - -### Configure Argo CD - - -To configure Argo CD for Annotation resource tracking, edit the Argo CD ConfigMap in the Argo CD namespace. -Add `application.resourceTrackingMethod: annotation` to the data section as below. -This configuration turns off Argo CD auto pruning, preventing the deletion of Crossplane resources.
 - -Next, configure the [auto respect RBAC for the Argo CD controller][auto-respect-rbac-for-the-argo-cd-controller]. -By default, Argo CD attempts to discover some Kubernetes resource types that don't exist in a control plane. -You must configure Argo CD to respect the cluster's RBAC rules so that Argo CD can sync. -Add `resource.respectRBAC: normal` to the data section as below. - -```yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: argocd-cm -data: - ... - application.resourceTrackingMethod: annotation - resource.respectRBAC: normal -``` - -:::tip -The `resource.respectRBAC` configuration above tells Argo to respect RBAC for _all_ cluster contexts. If you're using an Argo CD instance to manage more than only control planes, you should consider changing the `clusters` string match for the configuration to apply only to control planes. For example, if every control plane context name followed the convention of being named `controlplane-`, you could set the string match to be `controlplane-*`. -::: - - -### Create a cluster context definition - - -Replace the variables and run the following script to configure a new Argo cluster context definition. - -To configure Argo for a control plane in a Connected Space, replace `stringData.server` with the ingress URL of the control plane. This URL is what `up ctx` outputs. - -```yaml -apiVersion: v1 -kind: Secret -metadata: - name: my-control-plane - namespace: argocd - labels: - argocd.argoproj.io/secret-type: cluster -type: Opaque -stringData: - name: my-control-plane-context - server: https://.spaces.upbound.io/apis/spaces.upbound.io/v1beta1/namespaces//controlplanes//k8s - config: | - { - "execProviderConfig": { - "apiVersion": "client.authentication.k8s.io/v1", - "command": "up", - "args": [ "org", "token" ], - "env": { - "ORGANIZATION": "", - "UP_TOKEN": "" - } - }, - "tlsClientConfig": { - "insecure": false, - "caData": "" - } - } -``` - - -## GitOps for Upbound resources - - -Like any other cloud service, you can drive the lifecycle of Upbound Cloud resources with Crossplane. This lets you establish GitOps flows to declaratively create and manage: - -- [control plane groups][control-plane-groups] -- [control planes][control-planes] -- [Upbound IAM resources][upbound-iam-resources] - -Use a control plane installed with [provider-upbound][provider-upbound] and [provider-kubernetes][provider-kubernetes] to achieve this. - -### Provider-upbound - -[Provider-upbound][provider-upbound-2] is a Crossplane provider built by Upbound to interact with Upbound resources. Use _provider-upbound_ to declaratively create and manage the lifecycle of IAM resources and repositories: - -- [Robots][robots] and their membership to teams -- [Teams][teams] -- [Repositories][repositories] and [permissions][permissions] on those repositories. - -:::tip -This provider defines managed resources for control planes, their auth, and permissions. These resources are only applicable to customers who run in Upbound's **Legacy Spaces** control plane hosting environments. Customers should use provider-kubernetes explained below to manage the lifecycle of control planes with Crossplane. -::: - -### Provider-kubernetes - -[Provider-kubernetes][provider-kubernetes-3] is a Crossplane provider that defines an [Object][object] resource. Use _Objects_ as general-purpose resources to wrap _any_ Kubernetes resource for Crossplane to manage. - -Upbound [Space APIs][space-apis] are Kube-like APIs that support most Kubernetes-style API concepts.
You can use kubectl or any other Kubernetes-compatible tooling to interact with the API. This means you can use _provider-kubernetes_ to drive interactions with Space APIs. - -:::warning -When interacting with a Cloud Space's API, the Kubernetes [watch][watch] feature **isn't implemented.** Argo CD requires _watch_ support to function as expected, meaning you can't point Argo directly at a Cloud Space until it's implemented. -::: - -Use _provider-kubernetes_ to declaratively drive interactions with all [Space APIs][space-apis-1]. Wrap the desired API resource in an _Object_. See the example below for a control plane: - -```yaml -apiVersion: kubernetes.crossplane.io/v1alpha2 -kind: Object -metadata: - name: my-controlplane -spec: - forProvider: - manifest: - apiVersion: spaces.upbound.io/v1beta1 - kind: ControlPlane - metadata: - name: my-controlplane - namespace: default - spec: - crossplane: - autoUpgrade: - channel: Rapid -``` - -[Control plane groups][control-plane-groups-2] are a special case because they technically map to an underlying Kubernetes namespace. You should create a `kind: Namespace` with the `spaces.upbound.io/group` label to create a control plane group in a Space. See the example below: - -```yaml -apiVersion: kubernetes.crossplane.io/v1alpha2 -kind: Object -metadata: - name: group1 -spec: - forProvider: - manifest: - apiVersion: v1 - kind: Namespace - metadata: - name: group1 - labels: - spaces.upbound.io/group: "true" - spec: {} -``` - -### Configure auth for provider-kubernetes - -Like any other Crossplane provider, _provider-kubernetes_ requires a valid [ProviderConfig][providerconfig] to authenticate with Upbound before interacting with its APIs. Follow the steps below to configure auth for a ProviderConfig on a control plane that you want to use to interact with Upbound resources. - -1. Define an environment variable for the name of your Upbound org account. Use `up org list` to retrieve this value. -```shell -export UPBOUND_ACCOUNT="" -``` - -2. Create a [personal access token][personal-access-token] and store it as an environment variable. -```shell -export UPBOUND_TOKEN="" -``` - -3. Log in to Upbound. -```shell -up login -``` - -4. Create a kubeconfig for the desired Cloud Space instance you want to interact with. -```shell -export CONTROLPLANE_CONFIG=/tmp/controlplane-kubeconfig -KUBECONFIG=$CONTROLPLANE_CONFIG up ctx $UPBOUND_ACCOUNT/upbound-gcp-us-west-1 # Replace this path with whichever Cloud Space you want to communicate with. -``` - -5. On the control plane you want to use to interact with Upbound resources, create a secret containing the credentials: -```shell -kubectl -n crossplane-system create secret generic cluster-config --from-file=kubeconfig=$CONTROLPLANE_CONFIG -kubectl -n crossplane-system create secret generic upbound-credentials --from-literal=token=$UPBOUND_TOKEN -``` - -6. Create a ProviderConfig that references the credentials created in the prior step. Create this resource in your control plane: -```yaml -apiVersion: kubernetes.crossplane.io/v1alpha1 -kind: ProviderConfig -metadata: - name: default -spec: - credentials: - source: Secret - secretRef: - namespace: crossplane-system - name: cluster-config - key: kubeconfig - identity: - type: UpboundTokens - source: Secret - secretRef: - name: upbound-credentials - namespace: crossplane-system - key: token -``` - -You can now create _Objects_ in the control plane which wrap Space APIs.
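With that ProviderConfig in place, an _Object_ can reference it explicitly through `providerConfigRef`. A minimal sketch, reusing the group example from earlier in this guide (the group name is illustrative):

```yaml
apiVersion: kubernetes.crossplane.io/v1alpha2
kind: Object
metadata:
  name: team-a-group
spec:
  providerConfigRef:
    name: default            # the ProviderConfig created in step 6
  forProvider:
    manifest:
      apiVersion: v1
      kind: Namespace
      metadata:
        name: team-a         # illustrative group name
        labels:
          spaces.upbound.io/group: "true"
```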
-
-[generate-a-kubeconfig]: /manuals/cli/concepts/contexts
-[control-plane-groups]: /spaces/concepts/groups
-[control-planes]: /spaces/concepts/control-planes
-[upbound-iam-resources]: /manuals/platform/concepts/identity-management
-[space-apis]: /reference/apis/spaces-api/v1_9
-[space-apis-1]: /reference/apis/spaces-api/v1_9
-[control-plane-groups-2]: /spaces/concepts/groups
-
-
-[argo-cd]: https://argo-cd.readthedocs.io/en/stable/
-[my-account-api-tokens]: https://accounts.upbound.io/settings/tokens
-[auto-respect-rbac-for-the-argo-cd-controller]: https://argo-cd.readthedocs.io/en/stable/operator-manual/declarative-setup/#auto-respect-rbac-for-controller
-[spec-writeconnectionsecrettoref]: /reference/apis/spaces-api/latest
-[auto-respect-rbac-for-the-argo-cd-controller-1]: https://argo-cd.readthedocs.io/en/stable/operator-manual/declarative-setup/#auto-respect-rbac-for-controller
-[provider-upbound]: https://marketplace.upbound.io/providers/upbound/provider-upbound
-[provider-kubernetes]: https://marketplace.upbound.io/providers/upbound/provider-kubernetes
-[provider-upbound-2]: https://marketplace.upbound.io/providers/upbound/provider-upbound
-[robots]: https://marketplace.upbound.io/providers/upbound/provider-upbound/v0.8.0/resources/iam.upbound.io/Robot/v1alpha1
-[teams]: https://marketplace.upbound.io/providers/upbound/provider-upbound/v0.8.0/resources/iam.upbound.io/Team/v1alpha1
-[repositories]: https://marketplace.upbound.io/providers/upbound/provider-upbound/v0.8.0/resources/repository.upbound.io/Repository/v1alpha1
-[permissions]: https://marketplace.upbound.io/providers/upbound/provider-upbound/v0.8.0/resources/repository.upbound.io/Permission/v1alpha1
-[provider-kubernetes-3]: https://marketplace.upbound.io/providers/upbound/provider-kubernetes
-[object]: https://marketplace.upbound.io/providers/upbound/provider-kubernetes/v0.17.0/resources/kubernetes.crossplane.io/Object/v1alpha2
-[watch]: https://kubernetes.io/docs/reference/using-api/api-concepts/#watch-bookmarks
-[providerconfig]: https://marketplace.upbound.io/providers/upbound/provider-kubernetes/v0.17.0/resources/kubernetes.crossplane.io/ProviderConfig/v1alpha1
-[personal-access-token]: https://accounts.upbound.io/settings/tokens
diff --git a/spaces_versioned_docs/version-v1.14/howtos/control-plane-topologies.md b/spaces_versioned_docs/version-v1.14/howtos/control-plane-topologies.md
deleted file mode 100644
index 9020e5a41..000000000
--- a/spaces_versioned_docs/version-v1.14/howtos/control-plane-topologies.md
+++ /dev/null
@@ -1,566 +0,0 @@
----
-title: Control Plane Topologies
-sidebar_position: 15
-description: Configure scheduling of composites to remote control planes
----
-
-:::info API Version Information
-This guide is for the Control Plane Topology feature, which is in **private preview**. For interested customers with access to this feature, it applies to v1.12+.
-
-For related API specifications, see the [Spaces API Reference](../../../reference/apis/spaces-api/).
-:::
-
-:::important
-This feature is in private preview for select customers in Upbound Spaces. If you're interested in this deployment mode, please [contact us](https://www.upbound.io/support/contact).
-:::
-
-Upbound's _Control Plane Topology_ feature lets you build and deploy a platform
-of multiple control planes. These control planes work together for a unified platform
-experience.
-
-
-With the _Topology_ feature, you can install resource APIs that are
-reconciled by other control planes and configure the routing that occurs between
-control planes. You can also build compositions that reference other resources
-running on your control plane or elsewhere in Upbound.
-
-This guide explains how to use Control Plane Topology APIs to install and configure
-remote APIs, and to build powerful compositions that reference other resources.
-
-## Benefits
-
-The Control Plane Topology feature provides the following benefits:
-
-* Decouple your platform architecture into independent offerings to improve your platform's software development lifecycle.
-* Install composite APIs from Configurations as CRDs which are fulfilled and reconciled by other control planes.
-* Route APIs to other control planes by configuring an _Environment_ resource, which defines a set of routable dimensions.
-
-## How it works
-
-
-Imagine the scenario where you want to let a user reference a subnet when creating a database instance. To your control plane, the `kind: database` and `kind: subnet` are independent resources. To you as the composition author, these resources have an important relationship. It may be that:
-
-- you don't want your user to ever be able to create a database without specifying a subnet.
-- you want to let them create a subnet when they create the database, if it doesn't exist.
-- you want to allow them to reuse a subnet that got created elsewhere or gets shared by another user.
-
-In each of these scenarios, you must resort to writing complex composition logic
-to handle each case. The problem is compounded when the resource exists in a
-context separate from the current control plane's context. Imagine a scenario
-where one control plane manages Database resources and a second control plane
-manages networking resources. With the _Topology_ feature, you can offload these
-concerns to Upbound machinery.
-
-
-![Control Plane Topology feature arch](/img/topology-arch.png)
-
-## Prerequisites
-
-Enable the Control Plane Topology feature in the Space you plan to run your control plane in:
-
-- Cloud Spaces: Not available yet
-- Connected Spaces: Space administrator must enable this feature
-- Disconnected Spaces: Space administrator must enable this feature
-
-
-
-## Compose resources with _ReferencedObjects_
-
-
-
-_ReferencedObject_ is a resource type available in an Upbound control plane that lets you reference other Kubernetes resources in Upbound.
-
-:::tip
-This feature is useful for composing resources that exist in a
-remote context, like another control plane. You can also use
-_ReferencedObjects_ to resolve references to any other Kubernetes object
-in the current control plane context. This could be a secret, another Crossplane
-resource, or more.
-:::
-
-### Declare the resource reference in your XRD
-
-To compose a _ReferencedObject_, you should start by adding a resource reference
-in your Composite Resource Definition (XRD). The convention for the resource
-reference follows the shape shown below:
-
-```yaml
-<kind>Ref:
-  type: object
-  properties:
-    apiVersion:
-      type: string
-      default: "<api-version>"
-      enum: [ "<api-version>" ]
-    kind:
-      type: string
-      default: "<kind>"
-      enum: [ "<kind>" ]
-    grants:
-      type: array
-      default: [ "Observe" ]
-      items:
-        type: string
-        enum: [ "Observe", "Create", "Update", "Delete", "*" ]
-    name:
-      type: string
-    namespace:
-      type: string
-  required:
-  - name
-```
-
-The `<kind>Ref` field name should reflect the kind of resource you want to reference.
The `apiVersion` and `kind` should be the associated API version and kind of the resource you want to reference.
-
-The `name` and `namespace` strings are inputs that let your users specify the resource instance.
-
-#### Grants
-
-The `grants` field is a special array that lets you give users the power to influence the behavior of the referenced resource. You can configure which of the available grants you let your user select and which it defaults to. Similar in behavior to [Crossplane management policies][crossplane-management-policies], each grant value does the following:
-
-- **Observe:** The composite may observe the state of the referenced resource.
-- **Create:** The composite may create the referenced resource if it doesn't exist.
-- **Update:** The composite may update the referenced resource.
-- **Delete:** The composite may delete the referenced resource.
-- **\*:** The composite has full control over the referenced resource.
-
-Here are some examples that show how it looks in practice:
-
- -Show example for defining the reference to another composite resource - -```yaml -apiVersion: apiextensions.crossplane.io/v1 -kind: CompositeResourceDefinition -metadata: - name: xsqlinstances.database.platform.upbound.io -spec: - type: object - properties: - parameters: - type: object - properties: - networkRef: - type: object - properties: - apiVersion: - type: string - default: "networking.platform.upbound.io" - enum: [ "networking.platform.upbound.io" ] - grants: - type: array - default: [ "Observe" ] - items: - type: string - enum: [ "Observe" ] - kind: - type: string - default: "Network" - enum: [ "Network" ] - name: - type: string - namespace: - type: string - required: - - name -``` - -
- - -
-Show example for defining the reference to a secret
-
-```yaml
-apiVersion: apiextensions.crossplane.io/v1
-kind: CompositeResourceDefinition
-metadata:
-  name: xsqlinstances.database.platform.upbound.io
-spec:
-  type: object
-  properties:
-    parameters:
-      type: object
-      properties:
-        secretRef:
-          type: object
-          properties:
-            apiVersion:
-              type: string
-              default: "v1"
-              enum: [ "v1" ]
-            grants:
-              type: array
-              default: [ "Observe" ]
-              items:
-                type: string
-                enum: [ "Observe", "Create", "Update", "Delete", "*" ]
-            kind:
-              type: string
-              default: "Secret"
-              enum: [ "Secret" ]
-            name:
-              type: string
-            namespace:
-              type: string
-          required:
-          - name
-```
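-
-For context, a hypothetical claim that exercises the `secretRef` field from the schema above might look like the following. The API group, version, and claim kind are illustrative assumptions, since the XRD excerpts above don't show `claimNames`:
-
-```yaml
-# Hypothetical claim; the apiVersion and kind are assumed for illustration.
-apiVersion: database.platform.upbound.io/v1alpha1
-kind: SQLInstance
-metadata:
-  name: my-db
-  namespace: default
-spec:
-  parameters:
-    secretRef:
-      name: db-admin-credentials
-      namespace: default
-      grants: [ "Observe" ]
-```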
- -### Manually add the jsonPath - -:::important -This step is a known limitation of the preview. We're working on tooling that -removes the need for authors to do this step. -::: - -During the preview timeframe of this feature, you must add an annotation by hand -to the XRD. In your XRD's `metadata.annotations`, set the -`references.upbound.io/schema` annotation. It should be a JSON string in the -following format: - -```json -{ - "apiVersion": "references.upbound.io/v1alpha1", - "kind": "ReferenceSchema", - "references": [ - { - "jsonPath": ".spec.parameters.secretRef", - "kinds": [ - { - "apiVersion": "v1", - "kind": "Secret" - } - ] - } - ] -} -``` - -Flatten this JSON into a string and set the annotation on your XRD. View the -example below for an illustration: - -
-Show example setting the references.upbound.io/schema annotation
-
-```yaml
-apiVersion: apiextensions.crossplane.io/v1
-kind: CompositeResourceDefinition
-metadata:
-  name: xthings.networking.acme.com
-  annotations:
-    references.upbound.io/schema: '{"apiVersion":"references.upbound.io/v1alpha1","kind":"ReferenceSchema","references":[{"jsonPath":".spec.secretRef","kinds":[{"apiVersion":"v1","kind":"Secret"}]},{"jsonPath":".spec.configMapRef","kinds":[{"apiVersion":"v1","kind":"ConfigMap"}]}]}'
-```
- -
-Show example for setting multiple references in the references.upbound.io/schema annotation
-
-```yaml
-apiVersion: apiextensions.crossplane.io/v1
-kind: CompositeResourceDefinition
-metadata:
-  name: xthings.networking.acme.com
-  annotations:
-    references.upbound.io/schema: '{"apiVersion":"references.upbound.io/v1alpha1","kind":"ReferenceSchema","references":[{"jsonPath":".spec.parameters.secretRef","kinds":[{"apiVersion":"v1","kind":"Secret"}]},{"jsonPath":".spec.parameters.configMapRef","kinds":[{"apiVersion":"v1","kind":"ConfigMap"}]}]}'
-```
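-
-If you'd rather flatten the JSON on the command line, a minimal sketch using `jq` (assuming the ReferenceSchema document lives in a local `schema.json` file) produces the single-line string to paste into the annotation:
-
-```bash
-# -c emits compact, single-line JSON suitable for the annotation value.
-jq -c . schema.json
-```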
-
-
-You can use a VSCode extension like [vscode-pretty-json][vscode-pretty-json] to make this task easier.
-
-
-### Compose a _ReferencedObject_
-
-To pair with the resource reference declared in your XRD, you must compose the referenced resource. Use the _ReferencedObject_ resource type to bring the resource into your composition. _ReferencedObject_ has the following schema:
-
-```yaml
-apiVersion: references.upbound.io/v1alpha1
-kind: ReferencedObject
-spec:
-  managementPolicies:
-  - Observe
-  deletionPolicy: Orphan
-  composite:
-    apiVersion: <composite-api-version>
-    kind: <composite-kind>
-    name: <composite-name>
-    jsonPath: .spec.parameters.secretRef
-```
-
-The `spec.composite.apiVersion` and `spec.composite.kind` should match the API version and kind of the `compositeTypeRef` declared in your composition. The `spec.composite.name` should be the name of the composite resource instance.
-
-The `spec.composite.jsonPath` should be the path to the root of the resource ref you declared in your XRD.
-
-Show example for composing a resource reference to a secret - -```yaml -apiVersion: apiextensions.crossplane.io/v1 -kind: Composition -metadata: - name: demo-composition -spec: - compositeTypeRef: - apiVersion: networking.acme.com/v1alpha1 - kind: XThing - mode: Pipeline - pipeline: - - step: patch-and-transform - functionRef: - name: crossplane-contrib-function-patch-and-transform - input: - apiVersion: pt.fn.crossplane.io/v1beta1 - kind: Resources - resources: - - name: secret-ref-object - base: - apiVersion: references.upbound.io/v1alpha1 - kind: ReferencedObject - spec: - managementPolicies: - - Observe - deletionPolicy: Orphan - composite: - apiVersion: networking.acme.com/v1alpha1 - kind: XThing - name: TO_BE_PATCHED - jsonPath: .spec.parameters.secretRef - patches: - - type: FromCompositeFieldPath - fromFieldPath: metadata.name - toFieldPath: spec.composite.name -``` -
-
-By declaring a resource reference in your XRD, you let Upbound handle resolution of the desired resource.
-
-## Deploy APIs
-
-To configure routing resource requests between control planes, you need to deploy APIs in at least two control planes.
-
-### Deploy into a service-level control plane
-
-Package the APIs you build into a Configuration package and deploy it on a
-control plane in an Upbound Space. In Upbound, it's common to refer to the
-control plane where the Configuration package is deployed as a **service-level
-control plane**. This control plane runs the controllers that process the API
-requests and provision underlying resources. In a later section, you learn how
-you can use _Topology_ features to [configure routing][configure-routing].
-
-### Deploy as Remote APIs on a platform control plane
-
-You should use the same package source as deployed in the **service-level
-control planes**, but this time deploy the Configuration in a separate control
-plane as a _RemoteConfiguration_. The _RemoteConfiguration_ installs Kubernetes
-CustomResourceDefinitions for the APIs defined in the Configuration package, but
-no controllers get deployed.
-
-### Install a _RemoteConfiguration_
-
-_RemoteConfiguration_ is a resource type available in Upbound managed control
-planes that acts like a Crossplane [Configuration][configuration]
-package. Unlike standard Crossplane Configurations, which install XRDs,
-compositions, and functions into a desired control plane, _RemoteConfigurations_
-install only the CRDs for claimable composite resource types.
-
-#### Install directly
-
-Install a _RemoteConfiguration_ by defining the following and applying it to
-your control plane:
-
-```yaml
-apiVersion: pkg.upbound.io/v1alpha1
-kind: RemoteConfiguration
-metadata:
-  name: <name>
-spec:
-  package: <package>
-```
-
-#### Declare as a project dependency
-
-You can declare _RemoteConfigurations_ as dependencies in your control plane's
-[project file][project-file]. Use the up CLI to add the dependency, providing
-the `--remote` flag:
-
-```bash
-up dep add <package> --remote
-```
-
-This command adds a declaration in the `spec.apiDependencies` stanza of your
-project's `upbound.yaml` as demonstrated below:
-
-```yaml
-apiVersion: meta.dev.upbound.io/v1alpha1
-kind: Project
-metadata:
-  name: service-controlplane
-spec:
-  apiDependencies:
-  - configuration: xpkg.upbound.io/upbound/remote-configuration
-    version: '>=v0.0.0'
-  dependsOn:
-  - provider: xpkg.upbound.io/upbound/provider-kubernetes
-    version: '>=v0.0.0'
-```
-
-Like a Configuration, a _RemoteConfigurationRevision_ gets created when the
-package gets installed on a control plane. Unlike Configurations, XRDs and
-compositions **don't** get installed by a _RemoteConfiguration_. Only the CRDs
-for claimable composite types get installed and Crossplane thereafter manages
-their lifecycle. You can tell when a CRD gets installed by a
-_RemoteConfiguration_ because it has the `internal.scheduling.upbound.io/remote:
-true` label:
-
-```yaml
-apiVersion: apiextensions.k8s.io/v1
-kind: CustomResourceDefinition
-metadata:
-  name: things.networking.acme.com
-  labels:
-    internal.scheduling.upbound.io/remote: "true"
-```
-
-## Use an _Environment_ to route resources
-
-_Environment_ is a resource type available in Upbound control planes that works
-in tandem with resources installed by _RemoteConfigurations_. _Environment_ is a
-namespace-scoped resource that lets you configure how to route remote resources
-to other control planes by a set of user-defined dimensions.
-
-### Define a routing dimension
-
-To establish a routing dimension between two control planes, you must do two
-things:
-
-1. Label the service control plane with the name and value of a dimension.
-2. Configure an environment on another control plane with a dimension matching the field and value of the service control plane.
-
-The example below demonstrates the creation of a service control plane with a
-`region` dimension:
-
-```yaml
-apiVersion: spaces.upbound.io/v1beta1
-kind: ControlPlane
-metadata:
-  labels:
-    dimension.scheduling.upbound.io/region: "us-east-1"
-  name: prod-1
-  namespace: default
-spec:
-```
-
-Upbound's Spaces controller keeps an inventory of all declared dimensions and
-listens for control planes to route to them.
-
-### Create an _Environment_
-
-Next, create an _Environment_ on a separate control plane, referencing the
-dimension from before. The example below demonstrates routing all remote
-resource requests in the `default` namespace of the control plane based on a
-single `region` dimension:
-
-```yaml
-apiVersion: scheduling.upbound.io/v1alpha1
-kind: Environment
-metadata:
-  name: default
-  namespace: default
-spec:
-  dimensions:
-    region: us-east-1
-```
-
-You can specify as many dimensions as you want. The example below demonstrates
-multiple dimensions:
-
-```yaml
-apiVersion: scheduling.upbound.io/v1alpha1
-kind: Environment
-metadata:
-  name: default
-  namespace: default
-spec:
-  dimensions:
-    region: us-east-1
-    env: prod
-    offering: databases
-```
-
-For the routing controller to select a given service control plane, _all_
-dimensions must match.
-
-You can specify dimension overrides on a per-resource group basis. This lets you
-configure default routing rules for a given _Environment_ and override routing
-on a per-offering basis.
-
-```yaml
-apiVersion: scheduling.upbound.io/v1alpha1
-kind: Environment
-metadata:
-  name: default
-  namespace: default
-spec:
-  dimensions:
-    region: us-east-1
-  resourceGroups:
-  - name: database.platform.upbound.io # database
-    dimensions:
-      region: "us-east-1"
-      env: "prod"
-      offering: "databases"
-  - name: networking.platform.upbound.io # networks
-    dimensions:
-      region: "us-east-1"
-      env: "prod"
-      offering: "networks"
-```
-
-### Confirm the configured route
-
-After you create an _Environment_ on a control plane, the routes selected get
-reported in the _Environment's_ `.status.resourceGroups`. This is illustrated
-below:
-
-```yaml
-apiVersion: scheduling.upbound.io/v1alpha1
-kind: Environment
-metadata:
-  name: default
-...
-status:
-  resourceGroups:
-  - name: database.platform.upbound.io # database
-    proposed:
-      controlPlane: ctp-1
-      group: default
-      space: upbound-gcp-us-central1
-      dimensions:
-        region: "us-east-1"
-        env: "prod"
-        offering: "databases"
-```
-
-If you don't see a response in the `.status.resourceGroups`, this indicates that
-no match was found or that an error occurred while establishing the route.
-
-:::tip
-There's no limit to the number of control planes you can route to. You can also
-stack routing and form your own topology of control planes, with multiple layers
-of routing.
-:::
-
-### Limitations
-
-
-Routing from one control plane to another is currently scoped to control planes
-that exist in a single Space. You can't route resource requests to control
-planes that exist on a cross-Space boundary.
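-
-To script the route check described above, here's a minimal sketch that assumes the `default` _Environment_ from the earlier examples and that `jq` is installed:
-
-```bash
-# Empty output means no matching service control plane was found.
-kubectl get environment default -n default \
-  -o jsonpath='{.status.resourceGroups}' | jq .
-```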
-
-
-[project-file]: /manuals/cli/howtos/project
-[contact-us]: https://www.upbound.io/usage/support/contact
-[crossplane-management-policies]: https://docs.crossplane.io/latest/managed-resources/managed-resources/#managementpolicies
-[vscode-pretty-json]: https://marketplace.visualstudio.com/items?itemName=chrismeyers.vscode-pretty-json
-[configure-routing]: #use-an-environment-to-route-resources
-[configuration]: https://docs.crossplane.io/latest/packages/providers
diff --git a/spaces_versioned_docs/version-v1.14/howtos/ctp-connector.md b/spaces_versioned_docs/version-v1.14/howtos/ctp-connector.md
deleted file mode 100644
index b2cc48c49..000000000
--- a/spaces_versioned_docs/version-v1.14/howtos/ctp-connector.md
+++ /dev/null
@@ -1,508 +0,0 @@
----
-title: Control Plane Connector
-weight: 80
-description: A guide for how to connect a Kubernetes app cluster to a control plane in Upbound using the Control Plane connector feature
-plan: "standard"
----
-
-
-
-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+). Control Plane Connector functionality is consistent across versions.
-
-For related API specifications and resources, see the [Spaces API Reference](../../../reference/apis/spaces-api/).
-:::
-
-Control Plane Connector connects arbitrary Kubernetes application clusters outside of
-Upbound Spaces to your control planes running in Upbound Spaces.
-This lets you interact with your control plane's API from the app cluster. The claim APIs and the namespaced XR APIs
-you define via CompositeResourceDefinitions (XRDs) in the control plane are available in
-your app cluster alongside Kubernetes workload APIs like Pod. Control Plane Connector
-enables the same experience as a locally installed Crossplane.
-
-![control plane connector operations flow](/img/ConnectorFlow.png)
-
-### Control Plane Connector operations
-
-Control Plane Connector leverages the [Kubernetes API AggregationLayer][kubernetes-api-aggregationlayer]
-to create an extension API server and serve the claim APIs and the namespaced XR APIs in the control plane. It
-discovers the claim APIs and the namespaced XR APIs available in the control plane and registers corresponding
-APIService resources on the app cluster. Those APIService resources refer to the
-extension API server of Control Plane Connector.
-
-The claim APIs and the namespaced XR APIs are available in your Kubernetes cluster, just like all native
-Kubernetes APIs.
-
-The Control Plane Connector processes every request targeting the claim APIs and the namespaced XR APIs and makes the
-relevant requests to the connected control plane.
-
-Only the connected control plane stores and processes all claims and namespaced XRs created in the app
-cluster, eliminating any storage use on the application cluster. The control plane
-connector provisions a target namespace in the control plane for the app cluster and stores
-all claims and namespaced XRs in this target namespace.
-
-For managing the claims and namespaced XRs, the Control Plane Connector creates a unique identifier for a
-resource by combining input parameters from claims, including:
-- `metadata.name`
-- `metadata.namespace`
-- `your cluster name`
-
-
-It employs SHA-256 hashing to generate a hash value and then extracts the first
-16 characters of that hash. This ensures the resulting identifier remains within
-the 64-character limit in Kubernetes.
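-
-The following sketch approximates that naming scheme with standard shell tools. The `-x-` separator and field order are assumptions inferred from the worked example that follows:
-
-```bash
-# Assumed input layout: <claim-name>-x-<namespace>-x-<cluster-id>.
-key="my-bucket-x-test-x-00000000-0000-0000-0000-000000000000"
-# The first 16 hex characters of the SHA-256 digest become the suffix.
-echo "claim-$(printf '%s' "$key" | sha256sum | cut -c1-16)"
-```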
-
-
-For instance, if a claim named `my-bucket` exists in the test namespace in
-`cluster-dev`, the system calculates the SHA-256 hash from
-`my-bucket-x-test-x-00000000-0000-0000-0000-000000000000` and takes the first 16
-characters. The control plane side then names the claim `claim-c603e518969b413e`.
-
-For namespaced XRs, the process is similar, only the prefix is different.
-The name becomes `nxr-c603e518969b413e`.
-
-
-### Installation
-
-
-
-
-
-Log in with the up CLI:
-
-```bash
-up login
-```
-
-Connect your app cluster to a namespace in an Upbound control plane with `up controlplane connector install <control-plane-name> <namespace>`. This command creates a user token and installs the Control Plane Connector to your cluster. It's recommended you create a values file called `connector-values.yaml` with the values below. Select the tab according to which environment your control plane is running in.
-
-
-
-
-
-
-```yaml
-upbound:
-  # This is your org account in Upbound e.g. the name displayed after executing `up org list`
-  account: <your-org>
-  # This is a personal access token generated in the Upbound Console
-  token: <your-token>
-
-spaces:
-  # Use this host if your control plane runs in Upbound's GCP Cloud Space; otherwise use, for example, upbound-aws-us-east-1.spaces.upbound.io
-  host: "upbound-gcp-us-west-1.spaces.upbound.io"
-  insecureSkipTLSVerify: true
-  controlPlane:
-    # The name of the control plane you want the Connector to attach to
-    name: <control-plane-name>
-    # The control plane group the control plane resides in
-    group: <group>
-    # The namespace within the control plane to sync claims from the app cluster to. NOTE: This must be created before you install the connector.
-    claimNamespace: <claim-namespace>
-```
-
-
-
-
-
-1. Create a [kubeconfig][kubeconfig] for the control plane. Update your Upbound context to the path for your desired control plane.
-```shell
-up login
-up ctx <your-org>/upbound-gcp-us-central-1/default/your-control-plane
-up ctx . -f - > context.yaml
-```
-
-2. Write it to a secret in the cluster where you plan to
-install the Control Plane Connector.
-```shell
-kubectl create secret generic my-controlplane-kubeconfig --from-file=context.yaml
-```
-
-3. Reference this secret in the
-`spaces.controlPlane.kubeconfigSecret` field below.
-
-```yaml
-spaces:
-  controlPlane:
-    # The namespace within the control plane to sync claims from the app cluster to. NOTE: This must be created before you install the connector.
-    claimNamespace: <claim-namespace>
-    kubeconfigSecret:
-      name: my-controlplane-kubeconfig
-      key: kubeconfig
-```
-
-
-
-
-
-
-Provide the values file above when you run the CLI command:
-
-
-```bash {copy-lines="3"}
-up controlplane connector install my-control-plane my-app-ns-1 --file=connector-values.yaml
-```
-
-The Claim APIs and the namespaced XR APIs from your control plane are now visible in the cluster.
-You can verify this with `kubectl api-resources`.
-
-```bash
-kubectl api-resources
-```
-
-### Uninstall
-
-Disconnect an app cluster that you previously installed the Control Plane Connector on by
-running the following:
-
-```bash
-up ctp connector uninstall <control-plane-name> <namespace>
-```
-
-This command uninstalls the helm chart for the Control Plane Connector from an app
-cluster. It moves any claims in the app cluster into the control plane
-at the specified namespace.
-
-:::tip
-Make sure your kubeconfig's current context is pointed at the app cluster where
-you want to uninstall Control Plane Connector from.
-:::
-
-
-
-
-It's recommended you create a values file called `connector-values.yaml` with
-the values below.
Select the tab according to which environment your
-control plane is running in.
-
-
-
-
-
-
-```yaml
-upbound:
-  # This is your org account in Upbound e.g. the name displayed after executing `up org list`
-  account: <your-org>
-  # This is a personal access token generated in the Upbound Console
-  token: <your-token>
-
-spaces:
-  # Upbound GCP US-West-1    upbound-gcp-us-west-1.spaces.upbound.io
-  # Upbound AWS US-East-1    upbound-aws-us-east-1.spaces.upbound.io
-  # Upbound GCP US-Central-1 upbound-gcp-us-central-1.spaces.upbound.io
-  host: "<spaces-host>"
-  insecureSkipTLSVerify: true
-  controlPlane:
-    # The name of the control plane you want the Connector to attach to
-    name: <control-plane-name>
-    # The control plane group the control plane resides in
-    group: <group>
-    # The namespace within the control plane to sync claims from the app cluster to.
-    # NOTE: This must be created before you install the connector.
-    claimNamespace: <claim-namespace>
-```
-
-
-
-
-Create a [kubeconfig][kubeconfig-1] for the
-control plane. Write it to a secret in the cluster where you plan to
-install the Control Plane Connector. Reference this secret in the
-`spaces.controlPlane.kubeconfigSecret` field below.
-
-```yaml
-spaces:
-  controlPlane:
-    # The namespace within the control plane to sync claims from the app cluster to. NOTE: This must be created before you install the connector.
-    claimNamespace: <claim-namespace>
-    kubeconfigSecret:
-      name: my-controlplane-kubeconfig
-      key: kubeconfig
-```
-
-
-
-
-
-
-Provide the values file above when you `helm install` the Control Plane Connector:
-
-
-```bash
-helm install --wait mcp-connector oci://xpkg.upbound.io/spaces-artifacts/mcp-connector -n kube-system -f connector-values.yaml
-```
-:::tip
-Create an API token from the Upbound user account settings page in the console by following [these instructions][these-instructions].
-:::
-
-### Uninstall
-
-You can uninstall Control Plane Connector with Helm by running the following:
-
-```bash
-helm uninstall mcp-connector
-```
-
-
-
-
-
-### Example usage
-
-This example creates a control plane using [Configuration
-EKS][configuration-eks]. `KubernetesCluster` is
-available as a claim API in your control plane. The following is [an
-example][an-example]
-object you can create in your control plane.
-
-```yaml
-apiVersion: k8s.starter.org/v1alpha1
-kind: KubernetesCluster
-metadata:
-  name: my-cluster
-  namespace: default
-spec:
-  id: my-cluster
-  parameters:
-    nodes:
-      count: 3
-      size: small
-    services:
-      operators:
-        prometheus:
-          version: "34.5.1"
-  writeConnectionSecretToRef:
-    name: my-cluster-kubeconfig
-```
-
-After connecting your Kubernetes app cluster to the control plane, you
-can create the `KubernetesCluster` object in your app cluster. Although your
-local cluster has an Object, the actual resources live in your managed control
-plane inside Upbound.
-
-```bash {copy-lines="3"}
-# Applying the claim YAML above.
-# kubectl is set up to talk with your Kubernetes cluster.
-kubectl apply -f claim.yaml
-
-
-kubectl get claim -A
-NAME         SYNCED   READY   CONNECTION-SECRET       AGE
-my-cluster   True     True    my-cluster-kubeconfig   2m
-```
-
-Once Kubernetes creates the object, you can view it in the console.
-
-![Claim by connector in console](/img/ClaimInConsole.png)
-
-You can interact with the object through your cluster just as if it
-lived in your cluster.
-
-### Migration to control planes
-
-This guide details the migration of a Crossplane installation to Upbound-managed
-control planes using the Control Plane Connector to manage claims on an application
-cluster.
-
-![migration flow application cluster to control plane](/img/ConnectorMigration.png)
-
-#### Export all resources
-
-Before proceeding, ensure that you have set the correct kubecontext for your application
-cluster.
-
-```bash
-up controlplane migration export --pause-before-export --output=my-export.tar.gz --yes
-```
-
-This command performs the following:
-- Pauses all claim, composite, and managed resources before export.
-- Scans the control plane for resource types.
-- Exports Crossplane and native resources.
-- Archives the exported state into `my-export.tar.gz`.
-
-Example output:
-```bash
-Exporting control plane state...
- ✓ Pausing all claim resources before export... 1 resources paused! ⏸️
- ✓ Pausing all composite resources before export... 7 resources paused! ⏸️
- ✓ Pausing all managed resources before export... 34 resources paused! ⏸️
- ✓ Scanning control plane for types to export... 231 types found! 👀
- ✓ Exporting 231 Crossplane resources...125 resources exported! 📤
- ✓ Exporting 3 native resources...19 resources exported! 📤
- ✓ Archiving exported state... archived to "my-export.tar.gz"! 📦
-
-Successfully exported control plane state!
-```
-
-#### Import all resources
-
-The exported resources get restored into a target control plane, which serves
-as the destination for the Control Plane Connector.
-
-
-Log into Upbound and select the correct context:
-
-```bash
-up login
-up ctx
-up ctp create ctp-a
-```
-
-Output:
-```bash
-ctp-a created
-```
-
-Verify that the core Crossplane version on the application cluster matches the
-version on the new managed control plane.
-
-Use the following command to import the resources:
-```bash
-up controlplane migration import -i my-export.tar.gz \
-  --unpause-after-import \
-  --mcp-connector-cluster-id=my-appcluster \
-  --mcp-connector-claim-namespace=my-appcluster
-```
-
-This command:
-- Restores base resources
-- Waits for XRDs and packages to establish
-- Imports claim and XR resources
-- Finalizes the import and resumes managed resources
-- Note: `--mcp-connector-cluster-id` needs to be unique per application cluster
-- Note: `--mcp-connector-claim-namespace` is the namespace the system creates
-  during the import
-
-Example output:
-```bash
-Importing control plane state...
- ✓ Reading state from the archive... Done! 👀
- ✓ Importing base resources... 56 resources imported!📥
- ✓ Waiting for XRDs... Established! ⏳
- ✓ Waiting for Packages... Installed and Healthy! ⏳
- ✓ Importing remaining resources... 88 resources imported! 📥
- ✓ Finalizing import... Done! 🎉
- ✓ Unpausing managed resources ... Done! ▶️
-
-Successfully imported control plane state!
-```
-
-#### Verify imported claims
-
-
-The Control Plane Connector renames all claims and adds additional labels to them.
-
-```bash
-kubectl get claim -A
-```
-
-Example output:
-```bash
-NAMESPACE       NAME                                                        SYNCED   READY   CONNECTION-SECRET             AGE
-my-appcluster   cluster.aws.platformref.upbound.io/claim-e708ff592b974f51   True     True    platform-ref-aws-kubeconfig   3m17s
-```
-
-Inspect the labels:
-```bash
-kubectl get -n my-appcluster cluster.aws.platformref.upbound.io/claim-e708ff592b974f51 -o yaml | yq .metadata.labels
-```
-
-Example output:
-```bash
-mcp-connector.upbound.io/app-cluster: my-appcluster
-mcp-connector.upbound.io/app-namespace: default
-mcp-connector.upbound.io/app-resource-name: example
-```
-
-#### Clean up the app cluster
-
-Remove all Crossplane-related resources from the application cluster, including:
-
-- Managed Resources
-- Claims
-- Compositions
-- XRDs
-- Packages (Functions, Configurations, Providers)
-- Crossplane and all associated CRDs
-
-
-#### Install Control Plane Connector
-
-
-Follow the preceding installation guide and configure the `connector-values.yaml`:
-
-```yaml
-# NOTE: clusterID needs to match --mcp-connector-cluster-id used in the import on the managed control plane
-clusterID: my-appcluster
-upbound:
-  account: <your-org>
-  token: <your-token>
-
-spaces:
-  host: "<spaces-host>"
-  insecureSkipTLSVerify: true
-  controlPlane:
-    name: <control-plane-name>
-    group: <group>
-    # NOTE: This is the --mcp-connector-claim-namespace used during the import to the control plane
-    claimNamespace: <claim-namespace>
-```
-Once the Control Plane Connector installs, verify that resources exist in the application
-cluster:
-
-```bash
-kubectl api-resources | grep platform
-```
-
-Example output:
-```bash
-awslbcontrollers    aws.platform.upbound.io/v1alpha1        true    AWSLBController
-podidentities       aws.platform.upbound.io/v1alpha1        true    PodIdentity
-sqlinstances        aws.platform.upbound.io/v1alpha1        true    SQLInstance
-clusters            aws.platformref.upbound.io/v1alpha1     true    Cluster
-osss                observe.platform.upbound.io/v1alpha1    true    Oss
-apps                platform.upbound.io/v1alpha1            true    App
-```
-
-The connector restores claims from the control plane to the application cluster:
-
-```bash
-kubectl get claim -A
-```
-
-Example output:
-```bash
-NAMESPACE   NAME                                         SYNCED   READY   CONNECTION-SECRET             AGE
-default     cluster.aws.platformref.upbound.io/example   True     True    platform-ref-aws-kubeconfig   127m
-```
-
-With this guide, you migrated your Crossplane installation to
-Upbound managed control planes. This ensures seamless integration with your
-application cluster using the Control Plane Connector.
-
-### Connect multiple app clusters to a control plane
-
-Claims are stored in a unique namespace in the Upbound control plane.
-Each connected cluster gets its own control plane namespace.
-
-![Multi-cluster architecture with control plane connector](/img/ConnectorMulticlusterArch.png)
-
-There's no limit on the number of clusters connected to a single control plane.
-Control plane operators can see all their infrastructure in a central control
-plane.
-
-Without using control planes and Control Plane Connector, users have to install
-Crossplane and providers for each cluster. Each cluster requires provider
-configuration with the necessary credentials. With a single control plane and
-multiple clusters connected through Upbound tokens, you don't need to give out
-any cloud credentials to the clusters.
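-
-Because every app cluster syncs into its own control plane namespace, you can audit the whole fleet by listing claims per namespace. Here's a minimal sketch, run against the control plane context, with two hypothetical cluster namespaces:
-
-```bash
-# Namespace names match the claimNamespace configured per app cluster.
-kubectl get claim -n app-cluster-1
-kubectl get claim -n app-cluster-2
-```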
-
-
-[kubeconfig]: /manuals/cli/howtos/context-config/#generate-a-kubeconfig-for-a-control-plane-in-a-group
-[kubeconfig-1]: /spaces/concepts/control-planes/#connect-directly-to-your-control-plane
-[these-instructions]: /manuals/console/#create-a-personal-access-token
-[kubernetes-api-aggregationlayer]: https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation/
-[configuration-eks]: https://github.com/upbound/configuration-eks
-[an-example]: https://github.com/upbound/configuration-eks/blob/9f86b6d/.up/examples/cluster.yaml
diff --git a/spaces_versioned_docs/version-v1.14/howtos/debugging-a-ctp.md b/spaces_versioned_docs/version-v1.14/howtos/debugging-a-ctp.md
deleted file mode 100644
index 521271e40..000000000
--- a/spaces_versioned_docs/version-v1.14/howtos/debugging-a-ctp.md
+++ /dev/null
@@ -1,128 +0,0 @@
----
-title: Debugging issues on a control plane
-sidebar_position: 70
-description: A guide for how to debug resources on a control plane running in Upbound.
----
-
-This guide provides troubleshooting guidance for how to identify and fix issues on a control plane.
-
-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+). Debugging methods are consistent across versions.
-
-For related control plane specifications, see the [Spaces API Reference](../../../reference/apis/spaces-api/).
-:::
-
-## Start from Upbound Console
-
-
-The Upbound [Console][console] has a built-in control plane explorer experience
-that surfaces status and events for the resources on your control plane. The
-explorer is claim-based. Resources in this view exist only if they exist in the
-reference chain originating from a claim. This view is a helpful starting point
-if you are attempting to debug an issue originating from a claim.
-
-:::tip
-If you directly create Crossplane Managed Resources (`MR`s) or Composite
-Resources (`XR`s), they won't render in the explorer.
-:::
-
-### Example
-
-The example below uses the control plane explorer view to inspect why a claim for an EKS Cluster isn't healthy.
-
-#### Check the health status of claims
-
-From the API type card, two claims branch from it: one shows a healthy green icon, while the other shows an unhealthy red icon.
-
-![Use control plane explorer view to see status of claims](/img/debug-overview.png)
-
-Select `More details` on the unhealthy claim card and Upbound shows details for the claim.
-
-![Use control plane explorer view to see details of claims](/img/debug-claim-more-details.png)
-
-Looking at the three events for this claim:
-
-- **ConfigureCompositeResource**: this event indicates Upbound created the claimed Composite Resource (`XR`).
-
-- **BindCompositeResource**: this indicates the Composite Resource (`XR`) that's being "claimed" isn't ready yet. A claim doesn't show `HEALTHY` until the XR it references is ready.
-
-- **ConfigureCompositeResource**: the error saying, `cannot apply composite resource...the object has been modified; please apply your changes to the latest version and try again` is a generic event from Crossplane resources. It's safe to ignore this error.
-
-Next, look at the `status` field of the rendered YAML for the resource.
-
-![Use control plane explorer view to see status details of claims](/img/debug-claim-status.png)
-
-The status reports a similar message to the event stream: this claim is waiting for a Composite Resource to be ready.
Based on this, investigate the Composite Resource referenced by this claim next.
-
-#### Check the health status of the Composite Resource
-
-
-The control plane explorer only shows the claim cards by default. Selecting the claim card renders the rest of the Crossplane resource tree associated with the selected claim.
-
-
-The previous claim expands into this screenshot:
-
-![Use control plane explorer view to expand tree of claim](/img/debug-claim-expansion.png)
-
-This renders the XR referenced by the claim (along with all its references). You can see the XR is showing the same unhealthy status icon in its card. Notice the XR itself has two nested XRs. One of the nested XRs shows a healthy green icon on its card, while the other shows an unhealthy red icon. Like the claim, a Composite Resource doesn't show healthy until all referenced resources also show healthy.
-
-#### Inspecting Managed Resources
-
-Selecting `more details` to inspect one of the unhealthy Managed Resources shows the following:
-
-![Use control plane explorer view to view events for an MR](/img/debug-mr-event.png)
-
-This event reveals it's unhealthy because it's waiting on a reference to another Managed Resource. Searching the rendered YAML of the MR for this resource shows the following:
-
-![Use control plane explorer view to view status for an MR](/img/debug-mr-status.png)
-
-The rendered YAML shows this MR is referencing a sibling MR that shares the same controller. The same parent XR created both of these managed resources. Inspect the sibling MR to see what its status is.
-
-![Use control plane explorer view to view status for a sibling MR](/img/debug-mr-dependency-status.png)
-
-The sibling MR event stream shows the Provider processed the resource create request. Ignore the `CannotInitializeManagedResource` event. EKS clusters can take 15 minutes or more to provision in AWS. The root cause is that everything is fine: all the resources are still provisioning. Waiting longer and then looking at the control plane explorer again shows all resources are healthy. For reference, below is an example status field for a resource that's healthy and provisioned.
-
-```yaml
-...
-status:
-  atProvider:
-    id: team-b-app-cluster-bhwfb-hwtgs-20230403135452772300000008
-  conditions:
-  - lastTransitionTime: '2023-04-03T13:56:35Z'
-    reason: Available
-    status: 'True'
-    type: Ready
-  - lastTransitionTime: '2023-04-03T13:54:02Z'
-    reason: ReconcileSuccess
-    status: 'True'
-    type: Synced
-  - lastTransitionTime: '2023-04-03T13:54:53Z'
-    reason: Success
-    status: 'True'
-    type: LastAsyncOperation
-  - lastTransitionTime: '2023-04-03T13:54:53Z'
-    reason: Finished
-    status: 'True'
-    type: AsyncOperation
-```
-
-### Control plane explorer limitations
-
-The control plane explorer view is currently designed around claims (`XRC`s). The control plane explorer doesn't inspect other Crossplane resources. To inspect other Crossplane resources, use the `up` CLI.
-
-Some examples of Crossplane resources that require the `up` CLI:
-
-- Managed Resources that aren't associated with a claim
-- Composite Resources that aren't associated with a claim
-- The status of _deleting_ resources
-- ProviderConfigs
-- Provider events
-
-## Use direct CLI access
-
-If your preference is to use a terminal instead of a GUI, Upbound supports direct access to the API server of the control plane. Use [`up ctx`][up-ctx] to connect directly to your control plane.
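-
-Here's a minimal sketch of that flow, using hypothetical organization, Space, group, and control plane names, that covers the cases the explorer can't show:
-
-```bash
-# Point kubectl at the control plane; the path segments are hypothetical.
-up ctx my-org/my-space/default/my-control-plane
-# Inspect resources the explorer doesn't render, such as unclaimed MRs.
-kubectl get managed
-# Use the fully qualified name if several providers define providerconfigs.
-kubectl get providerconfigs
-```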
-
-
-[console]: /manuals/console/upbound-console
-[up-ctx]: /reference/cli-reference
diff --git a/spaces_versioned_docs/version-v1.14/howtos/managed-service.md b/spaces_versioned_docs/version-v1.14/howtos/managed-service.md
deleted file mode 100644
index 40b983a76..000000000
--- a/spaces_versioned_docs/version-v1.14/howtos/managed-service.md
+++ /dev/null
@@ -1,23 +0,0 @@
----
-title: Managed Upbound control planes
-description: "Learn about the managed service capabilities of a Space"
-sidebar_position: 10
----
-
-Control planes in Upbound are fully isolated [Upbound Crossplane][uxp] instances
-that Upbound manages for you. This means Upbound handles:
-
-- the underlying lifecycle of infrastructure (compute, memory, and storage) required to power your instance.
-- scaling of the infrastructure.
-- the maintenance of the core Upbound Crossplane components that make up a control plane.
-
-This lets users focus on building their APIs and operating their control planes,
-while Upbound handles the rest. Each control plane has its own dedicated API
-server connecting users to their control plane.
-
-## Learn about Upbound control planes
-
-Read the [concept][ctp-concept] documentation to learn about Upbound control planes.
-
-[uxp]: /manuals/uxp/overview
-[ctp-concept]: /spaces/concepts/control-planes
\ No newline at end of file
diff --git a/spaces_versioned_docs/version-v1.14/howtos/mcp-connector-guide.md b/spaces_versioned_docs/version-v1.14/howtos/mcp-connector-guide.md
deleted file mode 100644
index 8a3866d07..000000000
--- a/spaces_versioned_docs/version-v1.14/howtos/mcp-connector-guide.md
+++ /dev/null
@@ -1,169 +0,0 @@
----
-title: Consume control plane APIs in an app cluster with control plane connector
-sidebar_position: 99
-description: A tutorial to configure a Space with Argo to declaratively create and
-  manage control planes
----
-
-In this tutorial, you learn how to configure a Kubernetes app cluster to communicate with a control plane in an Upbound self-hosted Space.
-
-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Control plane connector functionality is consistent across versions.
-
-For related API specifications and resources, see the [Spaces API Reference](../../../../../reference/apis/spaces-api/).
-:::
-
-The [control plane connector][control-plane-connector] bridges your Kubernetes application clusters, running outside of Upbound, to your control planes running in Upbound. This allows you to interact with your control plane's API right from the app cluster. The claim APIs you define via `CompositeResourceDefinitions` are available alongside Kubernetes workload APIs like `Pod`. In effect, control plane connector provides the same experience as a locally installed Crossplane.
-
-## Prerequisites
-
-To complete this tutorial, you need the following:
-
-- Have already deployed an Upbound Space.
-- Have already deployed a Kubernetes cluster (referred to as `app cluster`).
-
-## Create a control plane
-
-Create a new control plane in your self-hosted Space. Run the following command in a terminal:
-
-```bash
-up ctp create my-control-plane
-```
-
-Once the control plane is ready, connect to it.
-
-```bash
-up ctp connect my-control-plane
-```
-
-For convenience, install an Upbound [platform reference Configuration][platform-reference-configuration] from the marketplace. For production scenarios, replace this with your own Crossplane Configurations or compositions.
-
-```bash
-up ctp configuration install xpkg.upbound.io/upbound/platform-ref-aws:v1.4.0
-```
-
-## Fetch the control plane's connection details
-
-Run the following command in a terminal:
-
-```shell
-kubectl get secret kubeconfig-my-control-plane -n default -o jsonpath='{.data.kubeconfig}' | base64 -d > kubeconfig-my-control-plane.yaml
-```
-
-This command saves the kubeconfig for the control plane to a file in your working directory.
-
-## Install control plane connector in your app cluster
-
-Switch contexts to your Kubernetes app cluster. To install the control plane connector in your app cluster, you must first provide a secret containing your control plane's kubeconfig at install-time. Run the following command in a terminal:
-
-:::important
-Make sure the following commands are executed against your **app cluster**, not your control plane.
-:::
-
-```bash
-kubectl create secret generic kubeconfig-my-control-plane -n kube-system --from-file=kubeconfig=./kubeconfig-my-control-plane.yaml
-```
-
-Set the environment variable below to configure which namespace _in your control plane_ you wish to sync the app cluster's claims to.
-
-```shell
-export CONNECTOR_CTP_NAMESPACE=app-cluster-1
-```
-
-Install the Control Plane Connector in the app cluster and point it to your control plane.
-
-```bash
-up ctp connector install my-control-plane $CONNECTOR_CTP_NAMESPACE --control-plane-secret=kubeconfig-my-control-plane
-```
-
-## Inspect your app cluster
-
-After you install Control Plane Connector in the app cluster, you can see APIs that live on the control plane. You can confirm this is the case by running the following command on your app cluster:
-
-```bash {copy-lines="1"}
-kubectl api-resources | grep upbound
-
-# The output should look like this:
-sqlinstances     aws.platform.upbound.io/v1alpha1        true    SQLInstance
-clusters         aws.platformref.upbound.io/v1alpha1     true    Cluster
-osss             observe.platform.upbound.io/v1alpha1    true    Oss
-apps             platform.upbound.io/v1alpha1            true    App
-```
-
-## Claim a database instance on your app cluster
-
-Create a database claim against the `SQLInstance` API and observe resources get created by your control plane. Apply the following resources to your app cluster:
-
-```yaml
-cat < --output
-   ```
-
-   The command exports your existing Crossplane control plane configuration/state into an archive file.
-
-:::note
-By default, the export command doesn't make any changes to your existing Crossplane control plane state, leaving it intact. Use the `--pause-before-export` flag to pause the reconciliation on managed resources before exporting the archive file.
-
-This safety mechanism ensures the control plane you migrate state to doesn't assume ownership of resources before you're ready.
-:::
-
-2. Use the control plane [create command][create-command] to create a managed
-control plane in Upbound:
-
-   ```bash
-   up controlplane create my-controlplane
-   ```
-
-3. Use [`up ctx`][up-ctx] to connect to the control plane created in the previous step:
-
-   ```bash
-   up ctx "<org>/<space>/<group>/my-controlplane"
-   ```
-
-   The command configures your local `kubeconfig` to connect to the control plane.
-
-4. Run the following command to import the archive file into the control plane:
-
-   ```bash
-   up controlplane migration import --input <archive-file>
-   ```
-
-:::note
-By default, the import command leaves the control plane in an inactive state by pausing the reconciliation on managed
-resources. This pause gives you an opportunity to review the imported configuration/state before activating the control plane.
-Use the `--unpause-after-import` flag to change the default behavior and activate the control plane immediately after
-importing the archive file.
-:::
-
-
-
-5. Review and validate the imported configuration/state. When you are ready, activate your managed
-   control plane by running the following command:
-
-   ```bash
-   kubectl annotate managed --all crossplane.io/paused-
-   ```
-
-   At this point, you can delete the source Crossplane control plane.
-
-## CLI options
-
-### Filtering
-
-The migration tool captures the state of a Control Plane. The only filtering
-supported is Kubernetes namespace and Kubernetes resource type filtering.
-
-You can exclude namespaces using the `--exclude-namespaces` CLI option. This can prevent the CLI from including unwanted resources in the export.
-
-```bash
---exclude-namespaces=kube-system,kube-public,kube-node-lease,local-path-storage,...
-
-# A list of specific namespaces to exclude from the export. Defaults to 'kube-system', 'kube-public','kube-node-lease', and 'local-path-storage'.
-```
-
-You can exclude Kubernetes resource types by using the `--exclude-resources` CLI option:
-
-```bash
---exclude-resources=EXCLUDE-RESOURCES,...
-
-# A list of resource types to exclude from the export in "resource.group" format. No resources are excluded by default.
-```
-
-For example, to exclude the CRDs installed by Crossplane functions (since they're not needed):
-
-```bash
-up controlplane migration export \
-  --exclude-resources=gotemplates.gotemplating.fn.crossplane.io,kclinputs.template.fn.crossplane.io
-```
-
-:::warning
-You must specify resource names in lowercase "resource.group" format (for example, `gotemplates.gotemplating.fn.crossplane.io`). Using only the resource kind (for example, `GoTemplate`) isn't supported.
-:::
-
-
-:::tip Function Input CRDs
-
-Exclude function input CRDs (`inputs.template.fn.crossplane.io`, `resources.pt.fn.crossplane.io`, `gotemplates.gotemplating.fn.crossplane.io`, `kclinputs.template.fn.crossplane.io`) from migration exports. Upbound automatically recreates these resources during import. Function input CRDs typically have owner references to function packages and may have restricted RBAC access. Upbound installs these CRDs during the import when function packages are restored.
-
-:::
-
-
-After export, you can also edit the archive file to include only the necessary resources.
-
-### Export non-Crossplane resources
-
-Use the `--include-extra-resources=<resource.group>` CLI option to select other CRD types to include in the export.
-
-### Set the kubecontext
-
-Currently `--context` isn't supported in the migration CLI. You can use the `--kubeconfig` CLI option instead, with a kubeconfig file that's set to the correct context. For example:
-
-```bash
-up controlplane migration export --kubeconfig <kubeconfig-file>
-```
-
-Use this in tandem with `up ctx` to export a control plane's kubeconfig:
-
-```bash
-up ctx --kubeconfig ~/.kube/config
-
-# To list the current context
-up ctx . --kubeconfig ~/.kube/config
-```
-
-## Export archive
-
-The migration CLI exports an archive upon successful completion. Below is an example export of a control plane that excludes several CRD types and skips the confirmation prompt. A file gets written to the working directory, unless you select another output file:
-
- -View the example export - -```bash -$ up controlplane migration export --exclude-resources=gotemplates.gotemplating.fn.crossplane.io,kclinputs.template.fn.crossplane.io --yes -Exporting control plane state... -✓ Scanning control plane for types to export... 121 types found! 👀 -✓ Exporting 121 Crossplane resources...60 resources exported! 📤 -✓ Exporting 3 native resources...8 resources exported! 📤 -✓ Archiving exported state... archived to "xp-state.tar.gz"! 📦 -``` - -
-
-
-When an export occurs, a file named `xp-state.tar.gz` by default gets created in the working directory. You can unzip the file; all the contents of the export are plain-text YAML files.
-
-- Each CRD (for example `vpcs.ec2.aws.upbound.io`) gets its own directory
-which contains:
-  - A `metadata.yaml` file that contains Kubernetes Object Metadata
-  - A list of Kubernetes Categories the resource belongs to
-- A `cluster` directory that contains YAML manifests for all resources provisioned
-using the CRD.
-
-Sample contents for a Cluster with a single `XNetwork` Composite from
-[configuration-aws-network][configuration-aws-network] are shown below:
-
- -View the example cluster content - -```bash -├── compositionrevisions.apiextensions.crossplane.io -│ ├── cluster -│ │ ├── kcl.xnetworks.aws.platform.upbound.io-4ca6a8a.yaml -│ │ └── xnetworks.aws.platform.upbound.io-9859a34.yaml -│ └── metadata.yaml -├── configurations.pkg.crossplane.io -│ ├── cluster -│ │ └── configuration-aws-network.yaml -│ └── metadata.yaml -├── deploymentruntimeconfigs.pkg.crossplane.io -│ ├── cluster -│ │ └── default.yaml -│ └── metadata.yaml -├── export.yaml -├── functions.pkg.crossplane.io -│ ├── cluster -│ │ ├── crossplane-contrib-function-auto-ready.yaml -│ │ ├── crossplane-contrib-function-go-templating.yaml -│ │ └── crossplane-contrib-function-kcl.yaml -│ └── metadata.yaml -├── internetgateways.ec2.aws.upbound.io -│ ├── cluster -│ │ └── borrelli-backup-test-xgl4q.yaml -│ └── metadata.yaml -├── mainroutetableassociations.ec2.aws.upbound.io -│ ├── cluster -│ │ └── borrelli-backup-test-t2qh7.yaml -│ └── metadata.yaml -├── namespaces -│ └── cluster -│ ├── crossplane-system.yaml -│ ├── default.yaml -│ └── upbound-system.yaml -├── providerconfigs.aws.upbound.io -│ ├── cluster -│ │ └── default.yaml -│ └── metadata.yaml -├── providerconfigusages.aws.upbound.io -│ ├── cluster -│ │ ├── 0a2a3ec6-ef13-45f9-9cf0-63af7f4a6b6b.yaml -...redacted -│ │ └── f7092b0f-3a78-4bfe-82c8-57e5085a9b11.yaml -│ └── metadata.yaml -├── providers.pkg.crossplane.io -│ ├── cluster -│ │ ├── upbound-provider-aws-ec2.yaml -│ │ └── upbound-provider-family-aws.yaml -│ └── metadata.yaml -├── routes.ec2.aws.upbound.io -│ ├── cluster -│ │ └── borrelli-backup-test-dt9cj.yaml -│ └── metadata.yaml -├── routetableassociations.ec2.aws.upbound.io -│ ├── cluster -│ │ ├── borrelli-backup-test-mr2sd.yaml -│ │ ├── borrelli-backup-test-ngq5h.yaml -│ │ ├── borrelli-backup-test-nrkgg.yaml -│ │ └── borrelli-backup-test-wq752.yaml -│ └── metadata.yaml -├── routetables.ec2.aws.upbound.io -│ ├── cluster -│ │ └── borrelli-backup-test-dv4mb.yaml -│ └── metadata.yaml -├── secrets -│ └── namespaces -│ ├── crossplane-system -│ │ ├── cert-token-signing-gateway-pub.yaml -│ │ ├── mxp-hostcluster-certs.yaml -│ │ ├── package-pull-secret.yaml -│ │ └── xgql-tls.yaml -│ └── upbound-system -│ └── aws-creds.yaml -├── securitygrouprules.ec2.aws.upbound.io -│ ├── cluster -│ │ ├── borrelli-backup-test-472f4.yaml -│ │ └── borrelli-backup-test-qftmw.yaml -│ └── metadata.yaml -├── securitygroups.ec2.aws.upbound.io -│ ├── cluster -│ │ └── borrelli-backup-test-w5jch.yaml -│ └── metadata.yaml -├── storeconfigs.secrets.crossplane.io -│ ├── cluster -│ │ └── default.yaml -│ └── metadata.yaml -├── subnets.ec2.aws.upbound.io -│ ├── cluster -│ │ ├── borrelli-backup-test-8btj6.yaml -│ │ ├── borrelli-backup-test-gbmrm.yaml -│ │ ├── borrelli-backup-test-m7kh7.yaml -│ │ └── borrelli-backup-test-nttt5.yaml -│ └── metadata.yaml -├── vpcs.ec2.aws.upbound.io -│ ├── cluster -│ │ └── borrelli-backup-test-7hwgh.yaml -│ └── metadata.yaml -└── xnetworks.aws.platform.upbound.io -├── cluster -│ └── borrelli-backup-test.yaml -└── metadata.yaml -43 directories, 87 files -``` - -
-
-
-The `export.yaml` file contains metadata about the export, including the export configuration, information about the Crossplane installation, and what's included in the export bundle.
-
- -View the export - -```yaml -version: v1alpha1 -exportedAt: 2025-01-06T17:39:53.173222Z -options: - excludedNamespaces: - - kube-system - - kube-public - - kube-node-lease - - local-path-storage - includedResources: - - namespaces - - configmaps - - secrets - excludedResources: - - gotemplates.gotemplating.fn.crossplane.io - - kclinputs.template.fn.crossplane.io -crossplane: - distribution: universal-crossplane - namespace: crossplane-system - version: 1.17.3-up.1 - featureFlags: - - --enable-provider-identity - - --enable-environment-configs - - --enable-composition-functions - - --enable-usages -stats: - total: 68 - nativeResources: - configmaps: 0 - namespaces: 3 - secrets: 5 - customResources: - amicopies.ec2.aws.upbound.io: 0 - amilaunchpermissions.ec2.aws.upbound.io: 0 - amis.ec2.aws.upbound.io: 0 - availabilityzonegroups.ec2.aws.upbound.io: 0 - capacityreservations.ec2.aws.upbound.io: 0 - carriergateways.ec2.aws.upbound.io: 0 - compositeresourcedefinitions.apiextensions.crossplane.io: 0 - compositionrevisions.apiextensions.crossplane.io: 2 - compositions.apiextensions.crossplane.io: 0 - configurationrevisions.pkg.crossplane.io: 0 - configurations.pkg.crossplane.io: 1 -...redacted -``` - -
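-
-You can also read these metadata fields from the command line. A sketch, assuming
-`yq` v4 is installed and the archive is already extracted:
-
-```bash
-# Total number of resources captured in the export
-yq '.stats.total' export.yaml
-
-# Crossplane distribution and version recorded at export time
-yq '.crossplane.distribution + " " + .crossplane.version' export.yaml
-```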
-
-### Skipped resources
-
-In addition to the resources excluded via CLI options, the following resources aren't
-included in the backup:
-
-- The `kube-root-ca.crt` ConfigMap, since it's cluster-specific
-- Resources directly managed via Helm (resources templated and then applied by ArgoCD's
-Helm implementation do get included in the backup). The migration creates the exclusion list by looking for the markers below; see the preview commands after this list:
-  - Any resource with the label `"app.kubernetes.io/managed-by" == "Helm"`
-  - Kubernetes Secrets with the label prefix `helm.sh/release`, for example `helm.sh/release.v1`
-- Resources installed via a Crossplane package. These have an `ownerReference` with
-a `pkg.crossplane.io` prefix. The expectation is that during import, the Crossplane Package Manager is responsible for installing these resources.
-- Crossplane Locks: Any `Lock.pkg.crossplane.io` resource isn't included in the
-export.
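-
-To preview which resources the Helm exclusion will skip, you can list resources
-carrying those markers yourself. A sketch, assuming `kubectl` points at the source
-control plane; the export's own matching logic may differ slightly:
-
-```bash
-# Resources labeled as Helm-managed are excluded from the export
-kubectl get all --all-namespaces -l app.kubernetes.io/managed-by=Helm
-
-# Helm release Secrets (type helm.sh/release.v1) are also excluded
-kubectl get secrets --all-namespaces --field-selector type=helm.sh/release.v1
-```
-
-## Restore
-
-The following is an example of a successful import run. At the end of the import, all Managed Resources are in a paused state.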
- -View the migration import - -```bash -$ up controlplane migration import -Importing control plane state... -✓ Reading state from the archive... Done! 👀 -✓ Importing base resources... 18 resources imported! 📥 -✓ Waiting for XRDs... Established! ⏳ -✓ Waiting for Packages... Installed and Healthy! ⏳ -✓ Importing remaining resources... 50 resources imported! 📥 -✓ Finalizing import... Done! 🎉 -``` - -
-
-Your scenario may involve migrating resources that already exist, created through other automation on the platform. When executing an import in these circumstances, the importer applies the new manifests to the cluster. If a resource already exists, the restore overwrites its fields with the values from the backup.
-
-The importer restores all resources in the export archive. Managed Resources get imported with the `crossplane.io/paused: "true"` annotation set. Use the `--unpause-after-import` CLI argument to automatically un-pause resources that were
-paused during backup, or remove the annotation manually.
-
-### Restore order
-
-The importer restores resources based on their Kubernetes types. The restore order doesn't account for parent/child relationships.
-
-Because Crossplane Composites create new Managed Resources if they aren't present on the cluster, all
-Claims, Composites, and Managed Resources get imported in a paused state. You can un-pause them after the restore completes.
-
-The first step of import is installing Base Resources into the cluster. These resources (such as
-packages and XRDs) must be ready before proceeding with the import.
-Base Resources are:
-
-- Kubernetes Resources
-  - ConfigMaps
-  - Namespaces
-  - Secrets
-- Crossplane Resources
-  - ControllerConfigs: `controllerconfigs.pkg.crossplane.io`
-  - DeploymentRuntimeConfigs: `deploymentruntimeconfigs.pkg.crossplane.io`
-  - StoreConfigs: `storeconfigs.secrets.crossplane.io`
-- Crossplane Packages
-  - Providers: `providers.pkg.crossplane.io`
-  - Functions: `functions.pkg.crossplane.io`
-  - Configurations: `configurations.pkg.crossplane.io`
-
-Restore waits for the base resources to be `Ready` before moving on to the next step. Next, restore walks through the archive and restores all the manifests present.
-
-During import, the `crossplane.io/paused` annotation gets added to Managed Resources, Claims,
-and Composites.
-
-To manually un-pause managed resources after an import, remove the annotation by running:
-
-```bash
-kubectl annotate managed --all crossplane.io/paused-
-```
-
-You can also run import again with the `--unpause-after-import` flag to remove the annotations.
-
-```bash
-up controlplane migration import --unpause-after-import
-```
-
-### Restoring resource status
-
-The importer applies the status of all resources during import. It uses the stored CRD version to determine whether that version defines a status field.
-
-
-[cli-command]: /reference/cli-reference
-[up-cli]: /reference/cli-reference
-[up-cli-1]: /manuals/cli/overview
-[create-command]: /reference/cli-reference
-[up-ctx]: /reference/cli-reference
-[configuration-aws-network]: https://marketplace.upbound.io/configurations/upbound/configuration-aws-network
diff --git a/spaces_versioned_docs/version-v1.14/howtos/observability.md b/spaces_versioned_docs/version-v1.14/howtos/observability.md
deleted file mode 100644
index 8fc5c3278..000000000
--- a/spaces_versioned_docs/version-v1.14/howtos/observability.md
+++ /dev/null
@@ -1,395 +0,0 @@
----
-title: Observability
-sidebar_position: 50
-description: A guide for how to use the integrated observability pipeline feature
-  in a Space.
-plan: "enterprise"
---- 
-
-
-
-This guide explains how to configure observability in Upbound Spaces. Upbound
-provides integrated observability features built on
-[OpenTelemetry][opentelemetry] to collect, process, and export logs, metrics,
-and traces.
-
-Upbound Spaces offers two levels of observability:
-
-1. **Space-level observability** - Observes the cluster infrastructure where Spaces software is installed (Self-Hosted only)
-2. **Control plane observability** - Observes workloads running within individual control planes
-
-
-
-
-:::info API Version Information & Version Selector
-This guide applies to all supported versions (v1.9-v1.15+). Observability features have evolved across versions:
-
-- **v1.9-v1.12**: SharedTelemetryConfig available (alpha until v1.11)
-- **v1.11+**: Observability promoted to stable with logs export support
-- **v1.14+**: Both space-level and control-plane observability GA
-
-**View API Reference for Your Version**:
-| Version | Status | Link |
-|---------|--------|------|
-| v1.15 | Latest | [Spaces API v1.15](../../../next/reference/apis/spaces-api/) |
-| v1.14 | Supported | [Spaces API v1.14](../../../1.14/reference/apis/spaces-api/) |
-| v1.13 | Supported | [Spaces API v1.13](../../../1.13/reference/apis/spaces-api/) |
-| v1.12 | Supported | [Spaces API v1.12](../../../1.12/reference/apis/spaces-api/) |
-| v1.11 | Supported | [Spaces API v1.11](../../../1.11/reference/apis/spaces-api/) |
-| v1.10 | Limited | [Spaces API v1.10](../../../1.10/reference/apis/spaces-api/) |
-| v1.9 | EOL | [Spaces API v1.9](../../../1.9/reference/apis/spaces-api/) |
-
-For the version support policy and feature availability, see the version support documentation.
-:::
-
-:::important
-**Space-level observability** (available since v1.6.0, GA in v1.14.0):
-- Disabled by default
-- Requires manual enablement and configuration
-- Self-Hosted Spaces only
-
-**Control plane observability** (available since v1.13.0, GA in v1.14.0):
-- Enabled by default
-- No additional configuration required
-:::
-
-
-
-
-## Prerequisites
-
-
-**Control plane observability** is enabled by default. No additional setup is
-required.
-
-
-
-### Self-hosted Spaces
-
-1. **Enable the observability feature** when installing Spaces:
-   ```bash
-   up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
-     ...
-     --set "observability.enabled=true"
-   ```
-
-Set `features.alpha.observability.enabled=true` instead if using a Spaces version
-earlier than `v1.14.0`.
-
-2. **Install OpenTelemetry Operator** (required for Space-level observability):
-   ```bash
-   kubectl apply -f https://github.com/open-telemetry/opentelemetry-operator/releases/download/v0.116.0/opentelemetry-operator.yaml
-   ```
-
-   :::important
-   If running Spaces `v1.11` or later, use OpenTelemetry Operator `v0.110.0` or later due to breaking changes.
-   :::
-
-
-## Space-level Observability
-
-Space-level observability is only available for self-hosted Spaces and allows
-administrators to observe the cluster infrastructure.
-
-### Configuration
-
-Configure Space-level observability using the `spacesCollector` value in your
-Spaces Helm chart:
-
-```yaml
-observability:
-  spacesCollector:
-    config:
-      exporters:
-        otlphttp:
-          endpoint: "<endpoint-url>"
-          headers:
-            api-key: YOUR_API_KEY
-    exportPipeline:
-      logs:
-        - otlphttp
-      metrics:
-        - otlphttp
-```
-
-This configuration exports metrics and logs from:
-
-- Crossplane installation
-- Spaces infrastructure (controller, API, router, etc.)
-
-### Router metrics
-
-The Spaces router uses Envoy as a reverse proxy and automatically exposes
-metrics when you enable Space-level observability.
These metrics provide -visibility into: - -- Traffic routing to control planes and services -- Request status codes, timeouts, and retries -- Circuit breaker state preventing cascading failures -- Client connection patterns and request volume -- Request latency (P50, P95, P99) - -For more information about available metrics, example queries, and how to enable -this feature, see the [Space-level observability guide][space-level-o11y]. - -## Control plane observability - -Control plane observability collects telemetry data from workloads running -within individual control planes using `SharedTelemetryConfig` resources. - -The pipeline deploys [OpenTelemetry Collectors][opentelemetry-collectors] per -control plane, defined by a `SharedTelemetryConfig` at the group level. -Collectors pass data to external observability backends. - -:::important -From Spaces `v1.13` and beyond, telemetry only includes user-facing control -plane workloads (Crossplane, providers, functions). - -Self-hosted users can include system workloads (`api-server`, `etcd`) by setting -`observability.collectors.includeSystemTelemetry=true` in Helm. -::: - -:::important -Spaces validates `SharedTelemetryConfig` resources before applying them by -sending telemetry to configured exporters. self-hosted Spaces, ensure that -`spaces-controller` can reach the exporter endpoints. -::: - -### `SharedTelemetryConfig` - -`SharedTelemetryConfig` is a group-scoped custom resource that defines telemetry -configuration for control planes. - -#### New Relic example - -```yaml -apiVersion: observability.spaces.upbound.io/v1alpha1 -kind: SharedTelemetryConfig -metadata: - name: newrelic - namespace: default -spec: - controlPlaneSelector: - labelSelectors: - - matchLabels: - org: foo - exporters: - otlphttp: - endpoint: https://otlp.nr-data.net - headers: - api-key: YOUR_API_KEY - exportPipeline: - metrics: [otlphttp] - traces: [otlphttp] - logs: [otlphttp] -``` - -#### Datadog Example - -```yaml -apiVersion: observability.spaces.upbound.io/v1alpha1 -kind: SharedTelemetryConfig -metadata: - name: datadog - namespace: default -spec: - controlPlaneSelector: - labelSelectors: - - matchLabels: - org: foo - exporters: - datadog: - api: - site: ${DATADOG_SITE} - key: ${DATADOG_API_KEY} - exportPipeline: - metrics: [datadog] - traces: [datadog] - logs: [datadog] -``` - -### Control plane selection - -Use `spec.controlPlaneSelector` to specify which control planes should use the -telemetry configuration. - -#### Label-based selection - -```yaml -spec: - controlPlaneSelector: - labelSelectors: - - matchLabels: - environment: production -``` - -#### Expression-based selection - -```yaml -spec: - controlPlaneSelector: - labelSelectors: - - matchExpressions: - - { key: environment, operator: In, values: [production,staging] } -``` - -#### Name-based selection - -```yaml -spec: - controlPlaneSelector: - names: - - controlplane-dev - - controlplane-staging - - controlplane-prod -``` - -### Manage sensitive data - -:::important -Available from Spaces `v1.10` -::: - -Store sensitive data in Kubernetes secrets and reference them in your -`SharedTelemetryConfig`: - -1. **Create the secret:** - ```bash - kubectl create secret generic sensitive -n \ - --from-literal=apiKey='YOUR_API_KEY' - ``` - -2. 
**Reference in SharedTelemetryConfig:**
-   ```yaml
-   apiVersion: observability.spaces.upbound.io/v1alpha1
-   kind: SharedTelemetryConfig
-   metadata:
-     name: newrelic
-   spec:
-     configPatchSecretRefs:
-       - name: sensitive
-         key: apiKey
-         path: exporters.otlphttp.headers.api-key
-     controlPlaneSelector:
-       labelSelectors:
-         - matchLabels:
-             org: foo
-     exporters:
-       otlphttp:
-         endpoint: https://otlp.nr-data.net
-         headers:
-           api-key: dummy # Replaced by secret value
-     exportPipeline:
-       metrics: [otlphttp]
-       traces: [otlphttp]
-       logs: [otlphttp]
-   ```
-
-### Telemetry processing
-
-:::important
-Available from Spaces `v1.11`
-:::
-
-Configure processing pipelines to transform telemetry data using the [transform
-processor][transform-processor].
-
-#### Add labels to metrics
-
-```yaml
-spec:
-  processors:
-    transform:
-      error_mode: ignore
-      metric_statements:
-        - context: datapoint
-          statements:
-            - set(attributes["newLabel"], "someLabel")
-  processorPipeline:
-    metrics: [transform]
-```
-
-#### Remove labels
-
-From metrics:
-```yaml
-processors:
-  transform:
-    metric_statements:
-      - context: datapoint
-        statements:
-          - delete_key(attributes, "kubernetes_namespace")
-```
-
-From logs:
-```yaml
-processors:
-  transform:
-    log_statements:
-      - context: log
-        statements:
-          - delete_key(attributes, "log.file.name")
-```
-
-#### Modify log messages
-
-```yaml
-processors:
-  transform:
-    log_statements:
-      - context: log
-        statements:
-          - set(attributes["original"], body)
-          - set(body, Concat(["log message:", body], " "))
-```
-
-### Monitor status
-
-Check the status of your `SharedTelemetryConfig`:
-
-```bash
-kubectl get stc
-NAME      SELECTED   FAILED   PROVISIONED   AGE
-datadog   1          0        1             63s
-```
-
-- `SELECTED`: Number of control planes selected
-- `FAILED`: Number of control planes that failed provisioning
-- `PROVISIONED`: Number of successfully running collectors
-
-For detailed status information:
-
-```bash
-kubectl describe stc <name>
-```
-
-## Supported exporters
-
-Both Space-level and control plane observability support:
-
-- `datadog` - Datadog integration
-- `otlphttp` - General-purpose exporter (used by New Relic, among others)
-- `debug` - Debug output for troubleshooting
-
-## Considerations
-
-- **Control plane conflicts**: Each control plane can only use one `SharedTelemetryConfig`. Multiple configs selecting the same control plane conflict.
-- **Custom collector image**: Both Space-level and control plane observability use the same custom OpenTelemetry Collector image with supported exporters.
-- **Resource scope**: `SharedTelemetryConfig` resources are group-scoped, allowing different telemetry configurations per group.
-
-For more advanced configuration options, review the [Helm chart
-reference][helm-chart-reference] and [OpenTelemetry Transformation Language
-documentation][opentelemetry-transformation-language].
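-
-When an export pipeline doesn't behave as expected, routing telemetry to the
-`debug` exporter is a quick way to inspect what the collectors emit. A minimal
-sketch; the control plane label is a placeholder for your own selector:
-
-```yaml
-apiVersion: observability.spaces.upbound.io/v1alpha1
-kind: SharedTelemetryConfig
-metadata:
-  name: debug-pipeline
-  namespace: default
-spec:
-  controlPlaneSelector:
-    labelSelectors:
-      - matchLabels:
-          org: foo
-  exporters:
-    # The debug exporter logs telemetry to the collector's own output
-    debug:
-      verbosity: detailed
-  exportPipeline:
-    metrics: [debug]
-    logs: [debug]
-```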
-
-
-[opentelemetry]: https://opentelemetry.io/
-[opentelemetry-collectors]: https://opentelemetry.io/docs/collector/
-[opentelemetry-collector-configuration]: https://opentelemetry.io/docs/collector/configuration/#exporters
-[opentelemetry-operator]: https://opentelemetry.io/docs/kubernetes/operator/
-[transform-processor]: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/transformprocessor/README.md
-[opentelemetry-transformation-language]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl
-[space-level-o11y]: /spaces/howtos/self-hosted/space-observability
-[helm-chart-reference]: /reference/helm-reference
-[opentelemetry-transformation-language-functions]: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/ottl/ottlfuncs/README.md
-[opentelemetry-transformation-language-contexts]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl/contexts
-[guide-on-ottl]: https://betterstack.com/community/guides/observability/ottl/#a-brief-overview-of-the-ottl-grammar
diff --git a/spaces_versioned_docs/version-v1.14/howtos/query-api.md b/spaces_versioned_docs/version-v1.14/howtos/query-api.md
deleted file mode 100644
index 78163de2f..000000000
--- a/spaces_versioned_docs/version-v1.14/howtos/query-api.md
+++ /dev/null
@@ -1,320 +0,0 @@
----
-title: Query API
-sidebar_position: 40
-description: Use the `up` CLI to query objects and resources
----
-
-
-
-
-Upbound's Query API allows users to inspect objects and resources within their control planes. The read-only `up alpha query` and `up alpha get` CLI commands allow you to gather information about your control planes quickly and efficiently. These commands follow the [`kubectl` conventions][kubectl-conventions] for filtering, sorting, and retrieving information from your Space.
-
-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+). Query API is available in Cloud Spaces since v1.6 and Self-Hosted since v1.8.
-
-For detailed deployment procedures, see [Deploy Query API infrastructure - Self-Hosted](./self-hosted/query-api.md). For version compatibility details, see the version support documentation.
-:::
-
-
-
-## Using the Query API
-
-
-The Query API allows you to retrieve control plane information faster than traditional `kubectl` commands. This feature lets you debug your Crossplane resources with the CLI or within the Upbound Console's enhanced management views.
-
-### Query within a single control plane
-
-Use the `up alpha get` command to retrieve information about objects within the current control plane context. This command uses the **Query** endpoint and targets the current control plane.
-
-To switch between control plane groups, use the [`up ctx` command][up-ctx] and change to your desired context with an interactive prompt, or specify your control plane path directly:
-
-```shell
-up ctx <organization>/<space>/<group>/<control-plane>
-```
-
-You can query within a single control plane with the [`up alpha get` command][up-alpha-get-command] to return more information about a given object within the current kubeconfig context.
-
-The `up alpha get` command can query resource types and aliases to return objects in your control plane.
- -```shell -up alpha get managed -NAME READY SYNCED AGE -custom-account1-5bv5j-sa True True 15m -custom-cluster1-bq6dk-net True True 15m -custom-account1-5bv5j-subnet True True 15m -custom-cluster1-bq6dk-nodepool True True 15m -custom-cluster1-bq6dk-cluster True True 15m -custom-account1-5bv5j-net True True 15m -custom-cluster1-bq6dk-subnet True True 15m -custom-cluster1-bq6dk-sa True True 15m -``` - -The [`-A` flag][a-flag] queries for objects across all namespaces. - -```shell -up alpha get configmaps -A -NAMESPACE NAME AGE -crossplane-system uxp-versions-config 18m -crossplane-system universal-crossplane-config 18m -crossplane-system kube-root-ca.crt 18m -upbound-system kube-root-ca.crt 18m -kube-system kube-root-ca.crt 18m -kube-system coredns 18m -default kube-root-ca.crt 18m -kube-node-lease kube-root-ca.crt 18m -kube-public kube-root-ca.crt 18m -kube-system kube-apiserver-legacy-service-account-token-tracking 18m -kube-system extension-apiserver-authentication 18m -``` - -To query for [multiple resource types][multiple-resource-types], you can add the name or alias for the resource as a comma separated string. - -```shell -up alpha get providers,providerrevisions - -NAME HEALTHY REVISION IMAGE STATE DEP-FOUND DEP-INSTALLED AGE -providerrevision.pkg.crossplane.io/crossplane-contrib-provider-nop-ecc25c121431 True 1 xpkg.upbound.io/crossplane-contrib/provider-nop:v0.2.1 Active 18m -NAME INSTALLED HEALTHY PACKAGE AGE -provider.pkg.crossplane.io/crossplane-contrib-provider-nop True True xpkg.upbound.io/crossplane-contrib/provider-nop:v0.2.1 18m -``` - -### Query multiple control planes - -The [`up alpha query` command][up-alpha-query-command] returns a list of objects of any kind within all the control planes in your Space. This command uses either the **SpaceQuery** or **GroupQuery** endpoints depending on your query scope. The `-A` flag switches the query context from the group level to the entire Space - -The `up alpha query` command accepts resources and aliases to return objects across your group or Space. - -```shell -up alpha query crossplane - -NAME ESTABLISHED OFFERED AGE -compositeresourcedefinition.apiextensions.crossplane.io/xnetworks.platform.acme.co True True 20m -compositeresourcedefinition.apiextensions.crossplane.io/xaccountscaffolds.platform.acme.co True True 20m - - -NAME XR-KIND XR-APIVERSION AGE -composition.apiextensions.crossplane.io/xaccountscaffolds.platform.acme.co XAccountScaffold platform.acme.co/v1alpha1 20m -composition.apiextensions.crossplane.io/xnetworks.platform.acme.co XNetwork platform.acme.co/v1alpha1 20m - - -NAME REVISION XR-KIND XR-APIVERSION AGE -compositionrevision.apiextensions.crossplane.io/xaccountscaffolds.platform.acme.co-5ae9da5 1 XAccountScaffold platform.acme.co/v1alpha1 20m -compositionrevision.apiextensions.crossplane.io/xnetworks.platform.acme.co-414ce80 1 XNetwork platform.acme.co/v1alpha1 20m - -NAME READY SYNCED AGE -nopresource.nop.crossplane.io/custom-cluster1-bq6dk-subnet True True 19m -nopresource.nop.crossplane.io/custom-account1-5bv5j-net True True 19m - -## Output truncated... - -``` - - -The [`--sort-by` flag][sort-by-flag] allows you to return information to your specifications. You can construct your sort order in a JSONPath expression string or integer. 
- - -```shell -up alpha query crossplane -A --sort-by="{.metadata.name}" - -CONTROLPLANE NAME AGE -default/test deploymentruntimeconfig.pkg.crossplane.io/default 10m - -CONTROLPLANE NAME AGE TYPE DEFAULT-SCOPE -default/test storeconfig.secrets.crossplane.io/default 10m Kubernetes crossplane-system -``` - -To query for multiple resource types, you can add the name or alias for the resource as a comma separated string. - -```shell -up alpha query namespaces,configmaps -A - -CONTROLPLANE NAME AGE -default/test namespace/upbound-system 15m -default/test namespace/crossplane-system 15m -default/test namespace/kube-system 16m -default/test namespace/default 16m - -CONTROLPLANE NAMESPACE NAME AGE -default/test crossplane-system configmap/uxp-versions-config 15m -default/test crossplane-system configmap/universal-crossplane-config 15m -default/test crossplane-system configmap/kube-root-ca.crt 15m -default/test upbound-system configmap/kube-root-ca.crt 15m -default/test kube-system configmap/coredns 16m -default/test default configmap/kube-root-ca.crt 16m - -## Output truncated... - -``` - -The Query API also allows you to return resource types with specific [label columns][label-columns]. - -```shell -up alpha query composite -A --label-columns=crossplane.io/claim-namespace - -CONTROLPLANE NAME SYNCED READY COMPOSITION AGE CLAIM-NAMESPACE -query-api-test/test xeks.argo.discover.upbound.io/test-k7xbk False xeks.argo.discover.upbound.io 51d default - -CONTROLPLANE NAME EXTERNALDNS SYNCED READY COMPOSITION AGE CLAIM-NAMESPACE -spaces-clusters/controlplane-query-api-test-spaces-playground xexternaldns.externaldns.platform.upbound.io/spaces-cluster-0-xd8v2-lhnl7 6.34.2 True True xexternaldns.externaldns.platform.upbound.io 19d default -default/query-api-test xexternaldns.externaldns.platform.upbound.io/space-awg-kine-f7dxq-nkk2q 6.34.2 True True xexternaldns.externaldns.platform.upbound.io 55d default - -## Output truncated... - -``` - -### Query API request format - -The CLI can also return a version of your query request with the [`--debug` flag][debug-flag]. This flag returns the API spec request for your query. - -```shell -up alpha query composite -A -d - -apiVersion: query.spaces.upbound.io/v1alpha1 -kind: SpaceQuery -metadata: - creationTimestamp: null -spec: - cursor: true - filter: - categories: - - composite - controlPlane: {} - limit: 500 - objects: - controlPlane: true - table: {} - page: {} -``` - -For more complex queries, you can interact with the Query API like a Kubernetes-style API by creating a query and applying it with `kubectl`. - -The example below is a query for `claim` resources in every control plane from oldest to newest and returns specific information about those claims. - - -```yaml -apiVersion: query.spaces.upbound.io/v1alpha1 -kind: SpaceQuery -spec: - filter: - categories: - - claim - order: - - creationTimestamp: Asc - cursor: true - count: true - objects: - id: true - controlPlane: true - object: - kind: true - apiVersion: true - metadata: - name: true - uid: true - spec: - containers: - image: true -``` - - -The Query API is served by the Spaces API endpoint. You can use `up ctx` to -switch the kubectl context to the Spaces API ingress. After that, you can use -`kubectl create` and receive the `response` for your query parameters. 
- - -```shell -kubectl create -f spaces-query.yaml -o yaml -``` - -Your `response` should look similar to this example: - -```yaml {copy-lines="none"} -apiVersion: query.spaces.upbound.io/v1alpha1 -kind: SpaceQuery -metadata: - creationTimestamp: "2024-08-08T14:41:46Z" - name: default -response: - count: 3 - cursor: - next: "" - page: 0 - pageSize: 100 - position: 0 - objects: - - controlPlane: - name: query-api-test - namespace: default - id: default/query-api-test/823b2781-7e70-4d91-a6f0-ee8f455d67dc - object: - apiVersion: spaces.platform.upbound.io/v1alpha1 - kind: Space - metadata: - name: space-awg-kine - resourceVersion: "803868" - uid: 823b2781-7e70-4d91-a6f0-ee8f455d67dc - spec: {} - - controlPlane: - name: test-1 - namespace: test - id: test/test-1/08a573dd-851a-42cc-a600-b6f6ed37ee8d - object: - apiVersion: argo.discover.upbound.io/v1alpha1 - kind: EKS - metadata: - name: test-1 - resourceVersion: "4270320" - uid: 08a573dd-851a-42cc-a600-b6f6ed37ee8d - spec: {} - - controlPlane: - name: controlplane-query-api-test-spaces-playground - namespace: spaces-clusters - id: spaces-clusters/controlplane-query-api-test-spaces-playground/b5a6770f-1f85-4d09-8990-997c84bd4159 - object: - apiVersion: spaces.platform.upbound.io/v1alpha1 - kind: Space - metadata: - name: spaces-cluster-0 - resourceVersion: "1408337" - uid: b5a6770f-1f85-4d09-8990-997c84bd4159 - spec: {} -``` - - -## Query API Explorer - - - -import CrdDocViewer from '@site/src/components/CrdViewer'; - -### Query - -The Query resource allows you to query objects in a single control plane. - - - -### GroupQuery - -The GroupQuery resource allows you to query objects across a group of control planes. - - - -### SpaceQuery - -The SpaceQuery resource allows you to query objects across all control planes in a space. - - - - - - -[documentation]: /spaces/howtos/self-hosted/query-api -[up-ctx]: /reference/cli-reference -[up-alpha-get-command]: /reference/cli-reference -[a-flag]: /reference/cli-reference -[multiple-resource-types]: /reference/cli-reference -[up-alpha-query-command]: /reference/cli-reference -[sort-by-flag]: /reference/cli-reference -[label-columns]: /reference/cli-reference -[debug-flag]: /reference/cli-reference -[kubectl-conventions]: https://kubernetes.io/docs/reference/kubectl/generated/kubectl_get/ diff --git a/spaces_versioned_docs/version-v1.14/howtos/secrets-management.md b/spaces_versioned_docs/version-v1.14/howtos/secrets-management.md deleted file mode 100644 index 88e730ae5..000000000 --- a/spaces_versioned_docs/version-v1.14/howtos/secrets-management.md +++ /dev/null @@ -1,719 +0,0 @@ ---- -title: Secrets Management -sidebar_position: 20 -description: A guide for how to configure synchronizing external secrets into control - planes in a Space. ---- - -Upbound's _Shared Secrets_ is a built in secrets management feature that -provides an integrated way to manage secrets across your platform. It allows you -to store sensitive data like passwords and certificates for your managed control -planes as secrets in an external secret store. - -This feature is a wrapper around the [External Secrets Operator (ESO)][external-secrets-operator-eso] that pulls secrets from external vaults and distributes them across your platform. - -:::info API Version Information -This guide applies to all supported versions (v1.9-v1.14+). The SharedSecretStore and SharedExternalSecret CRDs have been stable since v1.9. - -For API specifications and configuration details, see the [Spaces API Reference](../../../reference/apis/spaces-api/). 
For version compatibility details, see the version support documentation.
-:::
-
-## Benefits
-
-The Shared Secrets feature allows you to:
-
-* Access secrets from a variety of external secret stores without operational overhead
-* Configure synchronization for multiple control planes in a group
-* Store and manage all your secrets centrally
-* Use Shared Secrets across all Upbound environments (Cloud and Disconnected Spaces)
-* Synchronize secrets across groups of control planes while maintaining clear security boundaries
-* Manage secrets at scale programmatically while ensuring proper isolation and access control
-
-## Understanding the Architecture
-
-The Shared Secrets feature uses a hierarchical approach to centrally manage
-secrets and effectively control their distribution.
-
-![Shared Secrets workflow diagram](/img/shared-secrets-workflow.png)
-
-1. The flow begins at the group level, where you define your secret sources and distribution rules
-2. These rules automatically create corresponding resources in your control planes
-3. In each control plane, specific namespaces receive the secrets
-4. Changes at the group level automatically propagate through this chain
-
-## Component configuration
-
-Upbound Shared Secrets consists of two components:
-
-1. **SharedSecretStore**: Defines connections to external secret providers
-2. **SharedExternalSecret**: Specifies which secrets to synchronize and where
-
-
-### Connect to an External Vault
-
-
-The `SharedSecretStore` component is the connection point to your external
-secret vaults. It provisions ClusterSecretStore resources into control planes
-within the group.
-
-
-#### AWS Secrets Manager
-
-
-
-In this example, you'll create a `SharedSecretStore` to connect to AWS
-Secrets Manager in `us-west-2`. You'll then grant access to all control planes labeled with
-`environment: production`, and make these secrets available in the `default` and
-`crossplane-system` namespaces.
-
-
-You can configure access to AWS Secrets Manager using static credentials or
-workload identity.
-
-:::important
-While the underlying ESO API supports more auth methods, static credentials are currently the only supported auth method in Cloud Spaces.
-:::
-
-##### Static credentials
-
-1. Use the AWS CLI to create access credentials.
-
-
-2. Save the access credentials in a credentials file.
-```ini
-# Create a text file with AWS credentials
-cat > aws-credentials.txt << EOF
-[default]
-aws_access_key_id = <access-key-id>
-aws_secret_access_key = <secret-access-key>
-EOF
-```
-
-3. Next, store the access credentials in a secret in the namespace you want to have access to the `SharedSecretStore`.
-```shell
-kubectl create secret \
-  generic aws-credentials \
-  -n default \
-  --from-file=creds=./aws-credentials.txt
-```
-
-4. Create a `SharedSecretStore` custom resource file called `secretstore.yaml`.
Paste the following configuration:
-```yaml
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedSecretStore
-metadata:
-  name: aws-secrets
-spec:
-  # Define which control planes should receive this configuration
-  controlPlaneSelector:
-    labelSelectors:
-      - matchLabels:
-          environment: production
-
-  # Define which namespaces within those control planes can access secrets
-  namespaceSelector:
-    names:
-      - default
-      - crossplane-system
-
-  # Configure the connection to AWS Secrets Manager
-  provider:
-    aws:
-      service: SecretsManager
-      region: us-west-2
-      auth:
-        secretRef:
-          accessKeyIDSecretRef:
-            name: aws-credentials
-            key: access-key-id
-          secretAccessKeySecretRef:
-            name: aws-credentials
-            key: secret-access-key
-```
-
-
-
-##### Workload Identity with IRSA
-
-
-
-You can also use AWS IAM Roles for Service Accounts (IRSA) depending on your
-organization's needs:
-
-1. Ensure you have deployed the Spaces software into an IRSA-enabled EKS cluster.
-2. Follow the AWS instructions to create an IAM OIDC provider with your EKS OIDC
-   provider URL.
-3. Determine the Spaces-generated `controlPlaneID` of your control plane:
-```shell
-kubectl get controlplane <control-plane-name> -o jsonpath='{.status.controlPlaneID}'
-```
-
-4. Create an IAM trust policy in your AWS account to match the control plane.
-```json
-{
-  "Version": "2012-10-17",
-  "Statement": [
-    {
-      "Effect": "Allow",
-      "Principal": {
-        "Federated": "arn:aws:iam::<account-id>:oidc-provider/<oidc-provider>"
-      },
-      "Action": "sts:AssumeRoleWithWebIdentity",
-      "Condition": {
-        "StringEquals": {
-          "<oidc-provider>:aud": "sts.amazonaws.com",
-          "<oidc-provider>:sub": [
-            "system:serviceaccount:mxp-<controlplane-id>-system:external-secrets-controller"
-          ]
-        }
-      }
-    }
-  ]
-}
-```
-
-5. Update your Spaces deployment to annotate the SharedSecrets service account
-   with the role ARN.
-```shell
-up space upgrade ... \
-  --set controlPlanes.sharedSecrets.serviceAccount.customAnnotations."eks\.amazonaws\.com/role-arn"="<role-arn>"
-```
-
-6. Create a SharedSecretStore and reference the SharedSecrets service account:
-```yaml {copy-lines="all"}
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedSecretStore
-metadata:
-  name: aws-sm
-  namespace: default
-spec:
-  provider:
-    aws:
-      service: SecretsManager
-      region: <region>
-      auth:
-        jwt:
-          serviceAccountRef:
-            name: external-secrets-controller
-  controlPlaneSelector:
-    names:
-      - <control-plane-name>
-  namespaceSelector:
-    names:
-      - default
-```
-
-When you create a `SharedSecretStore`, the underlying mechanism:
-
-1. Applies at the group level
-2. Determines which control planes should receive this configuration via the `controlPlaneSelector`
-3. Automatically creates a ClusterSecretStore inside each identified control plane
-4. Maintains a connection in each control plane with the ClusterSecretStore
-   credentials and configuration from the parent SharedSecretStore
-
-Upbound automatically generates a ClusterSecretStore in each matching control
-plane when you create a SharedSecretStore.
-
-```yaml {copy-lines="none"}
-# Automatically created in each matching control plane
-apiVersion: external-secrets.io/v1beta1
-kind: ClusterSecretStore
-metadata:
-  name: aws-secrets # Name matches the parent SharedSecretStore
-spec:
-  provider:
-    upboundspaces:
-      storeRef:
-        name: aws-secrets
-```
-
-When you create the SharedSecretStore, the controller replaces the provider with
-a special provider called `upboundspaces`. This provider references the
-SharedSecretStore object in the Spaces API. This avoids copying the actual cloud
-credentials from Spaces to each control plane.
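-
-To confirm the projection, you can switch your kubeconfig context into one of the
-selected control planes and look for the derived store. A sketch, assuming the
-`aws-secrets` store from the example above:
-
-```bash
-# Inside a selected control plane, the generated ClusterSecretStore
-# should exist and report a Ready status condition
-kubectl get clustersecretstore aws-secrets
-kubectl describe clustersecretstore aws-secrets
-```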
- -This workflow allows you to configure the store connection only once at the -group level and automatically propagates to each control plane. Individual control -planes can use the store without exposure to the group-level configuration and -updates all child ClusterSecretStores when updated. - - -#### Azure Key Vault - - -:::important -While the underlying ESO API supports more auth methods, static credentials are currently the only supported auth method in Cloud Spaces. -::: - -##### Static credentials - -1. Use the Azure CLI to create a service principal and authentication file. -2. Create a service principal and save credentials in a file: -```json -{ - "appId": "myAppId", - "displayName": "myServicePrincipalName", - "password": "myServicePrincipalPassword", - "tenant": "myTentantId" -} -``` - -3. Store the credentials as a Kubernetes secret: -```shell -kubectl create secret \ - generic azure-secret-sp \ - -n default \ - --from-file=creds=./azure-credentials.json -``` - -4. Create a SharedSecretStore referencing these credentials: -```yaml -apiVersion: spaces.upbound.io/v1alpha1 -kind: SharedSecretStore -metadata: - name: azure-kv -spec: - provider: - azurekv: - tenantId: "" - vaultUrl: "" - authSecretRef: - clientId: - name: azure-secret-sp - key: ClientID - clientSecret: - name: azure-secret-sp - key: ClientSecret - controlPlaneSelector: - names: - - - namespaceSelector: - names: - - default -``` - -##### Workload Identity - - -You can also use Entra Workload Identity Federation to access Azure Key Vault -without needing to manage secrets. - -To use Entra Workload ID with AKS: - - -1. Deploy the Spaces software into a [workload identity-enabled AKS cluster][workload-identity-enabled-aks-cluster]. -2. Retrieve the OIDC issuer URL of the AKS cluster: -```ini -az aks show --name "" \ - --resource-group "" \ - --query "oidcIssuerProfile.issuerUrl" \ - --output tsv -``` - -3. Use the Azure CLI to make a managed identity: -```ini -az identity create \ - --name "" \ - --resource-group "" \ - --location "" \ - --subscription "" -``` - -4. Look up the managed identity's client ID: -```ini -az identity show \ - --resource-group "" \ - --name "" \ - --query 'clientId' \ - --output tsv -``` - -5. Update your Spaces deployment to annotate the SharedSecrets service account with the associated Entra application client ID from the previous step: -```ini -up space upgrade ... \ - --set controlPlanes.sharedSecrets.serviceAccount.customAnnotations."azure\.workload\.identity/client-id"="" \ - --set-string controlPlanes.sharedSecrets.pod.customLabels."azure\.workload\.identity/use"="true" -``` - -6. Determine the Spaces-generated `controlPlaneID` of your control plane. When you deploy a `kind: controlplane` in a Space, the Spaces software deploys a set of pods in a new namespace following the format `mxp--system`. -```ini -kubectl get controlplane -o jsonpath='{.status.controlPlaneID}' -``` - -7. Create a federated identity credential. -```ini -FEDERATED_IDENTITY_CREDENTIAL_NAME= -USER_ASSIGNED_IDENTITY_NAME= -RESOURCE_GROUP= -AKS_OIDC_ISSUER= -CONTROLPLANE_ID= -az identity federated-credential create --name ${FEDERATED_IDENTITY_CREDENTIAL_NAME} --identity-name "${USER_ASSIGNED_IDENTITY_NAME}" --resource-group "${RESOURCE_GROUP}" --issuer "${AKS_OIDC_ISSUER}" --subject system:serviceaccount:"mxp-${CONTROLPLANE_ID}-system:external-secrets-controller" --audience api://AzureADTokenExchange -``` - -8. Assign the `Key Vault Secrets User` role to the user-assigned managed identity that you created earlier. 
This step gives the managed identity permission to read secrets from the key vault: -```ini -az role assignment create \ - --assignee-object-id "${IDENTITY_PRINCIPAL_ID}" \ - --role "Key Vault Secrets User" \ - --scope "${KEYVAULT_RESOURCE_ID}" \ - --assignee-principal-type ServicePrincipal -``` - -:::important -You must manually restart a workload's pod when you add the annotation to the running pod's service account. The Entra workload identity mutating admission webhook requires a restart to inject the necessary environment. -::: - -8. Create a `SharedSecretStore`. Replace `vaultURL` with the URL of your Azure Key Vault instance. Replace `identityId` with the client ID of the managed identity created earlier: -```yaml {copy-lines="all"} -apiVersion: spaces.upbound.io/v1alpha1 -kind: SharedSecretStore -metadata: - name: azure-kv -spec: - provider: - azurekv: - authType: WorkloadIdentity - vaultUrl: "" - controlPlaneSelector: - names: - - - namespaceSelector: - names: - - default -``` - - - - -#### Google Cloud Secret Manager - - - -You can configure access to Google Cloud Secret Manager using static credentials or workload identity. Below are instructions for configuring either. See the [ESO provider API][eso-provider-api] for more information. - -:::important -While the underlying ESO API supports more auth methods, static credentials are currently the only supported auth method in Cloud Spaces. -::: - -##### Static credentials - -1. Use the [GCP CLI][gcp-cli] to create access credentials. -2. Save the output in a file called `gcp-credentials.json`. -3. Store the access credentials in a secret in the same namespace as the `SharedSecretStore`. - ```shell {label="kube-create-secret",copy-lines="all"} - kubectl create secret \ - generic gcpsm-secret \ - -n default \ - --from-file=creds=./gcp-credentials.json - ``` - -4. Create a `SharedSecretStore`, referencing the secret created earlier. Replace `projectID` with your GCP Project ID: -```yaml -apiVersion: spaces.upbound.io/v1alpha1 -kind: SharedSecretStore -metadata: - name: gcp-sm -spec: - provider: - gcpsm: - auth: - secretRef: - secretAccessKeySecretRef: - name: gcpsm-secret - key: creds - projectID: - controlPlaneSelector: - names: - - - namespaceSelector: - names: - - default -``` - -:::tip -The example above maps a Shared Secret Store into a single namespace of a single control plane. Read [control plane selection][control-plane-selection] and [namespace selection][namespace-selection] to learn how to map into one or more namespaces of one or more control planes. -::: - - -##### Workload identity with Service Accounts to IAM Roles - - -To configure, grant the `roles/iam.workloadIdentityUser` role to the Kubernetes -service account in the control plane namespace to impersonate the IAM service -account. - -1. Ensure you've deployed Spaces on a [Workload Identity Federation-enabled][workload-identity-federation-enabled] GKE cluster. -2. Determine the Spaces-generated `controlPlaneID` of your control plane. When you deploy a `kind: controlplane` in a Space, the Spaces software deploys a set of pods in a new namespace following the format `mxp--system`. -```ini -kubectl get controlplane -o jsonpath='{.status.controlPlaneID}' -``` - -3. Create a GCP IAM service account with the [GCP CLI][gcp-cli-1]: -```ini -gcloud iam service-accounts create \ - --project= -``` - -4. 
Grant the IAM service account the role to access GCP Secret Manager: -```ini -SA_NAME= -IAM_SA_PROJECT_ID= -gcloud projects add-iam-policy-binding IAM_SA_PROJECT_ID \ - --member "serviceAccount:SA_NAME@IAM_SA_PROJECT_ID.iam.gserviceaccount.com" \ - --role roles/secretmanager.secretAccessor -``` - -5. When you enable the Shared Secrets feature, a service account gets created in each control plane for the External Secrets Operator. Apply a [GCP IAM policy binding][gcp-iam-policy-binding] to associate this service account with the desired GCP IAM role. -```ini -PROJECT_ID= -PROJECT_NUMBER= -CONTROLPLANE_ID= -gcloud projects add-iam-policy-binding projects/${PROJECT_ID} \ - --role "roles/iam.workloadIdentityUser" \ - --member=principal://iam.googleapis.com/projects/${PROJECT_NUMBER}/locations/global/workloadIdentityPools/${PROJECT_ID}.svc.id.goog/subject/ns/mxp-${CONTROLPLANE_ID}-system/sa/external-secrets-controller -``` - -6. Update your Spaces deployment to annotate the SharedSecrets service account with GCP IAM service account's identifier: -```ini -up space upgrade ... \ - --set controlPlanes.sharedSecrets.serviceAccount.customAnnotations."iam\.gke\.io/gcp-service-account"="" -``` - -7. Create a `SharedSecretStore`. Replace `projectID` with your GCP Project ID: -```yaml -apiVersion: spaces.upbound.io/v1alpha1 -kind: SharedSecretStore -metadata: - name: gcp-sm -spec: - provider: - gcpsm: - projectID: - controlPlaneSelector: - names: - - - namespaceSelector: - names: - - default -``` - -:::tip -The example above maps a Shared Secret Store into a single namespace of a single control plane. Read [control plane selection][control-plane-selection-1] and [namespace selection][namespace-selection-2] to learn how to map into one or more namespaces of one or more control planes. -::: - -### Manage your secret distribution - -After you create your SharedSecretStore, you can define which secrets to -distribute using SharedExternalSecret: - -```yaml -apiVersion: spaces.upbound.io/v1alpha1 -kind: SharedExternalSecret -metadata: - name: database-credentials - namespace: default -spec: - # Select the same control planes as your SharedSecretStore - controlPlaneSelector: - labelSelectors: - - matchLabels: - environment: production - - externalSecretSpec: - refreshInterval: 1h - secretStoreRef: - name: aws-secrets # References the SharedSecretStore name - kind: ClusterSecretStore - target: - name: db-credentials - data: - - secretKey: username - remoteRef: - key: prod/database/credentials - property: username - - secretKey: password - remoteRef: - key: prod/database/credentials - property: password -``` - -This configuration: - -* Pulls database credentials from your external secret provider -* Creates secrets in all production control planes -* Refreshes the secrets every hour -* Creates a secret called `db-credentials` in each control plane - -When you create a SharedExternalSecret at the group level, Upbound's system -creates a template for the corresponding ClusterExternalSecrets in each selected -control plane. - -The example below simulates the ClusterExternalSecret that Upbound creates: - -```yaml -# Inside each matching control plane: -apiVersion: external-secrets.io/v1beta1 -kind: ClusterExternalSecret -metadata: - name: database-credentials -spec: - refreshInterval: 1h - secretStoreRef: - name: aws-secrets - kind: ClusterSecretStore - data: - - secretKey: username - remoteRef: - key: prod/database/credentials - property: username -``` - -The hierarchy in this configuration is: - -1. 
SharedExternalSecret (group level) defines what secrets to distribute
-2. ClusterExternalSecret (control plane level) manages the distribution within
-   each control plane
-3. Kubernetes Secrets (namespace level) are created in specified namespaces
-
-
-#### Control plane selection
-
-To configure which control planes in a group you want to project a SecretStore into, use the `spec.controlPlaneSelector` field. You can either use `labelSelectors` or the `names` of a control plane directly. A control plane matches if any of the label selectors match.
-
-This example matches all control planes in the group that have `environment: production` as a label:
-
-```yaml
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedSecretStore
-metadata:
-  name: my-secret-store
-spec:
-  controlPlaneSelector:
-    labelSelectors:
-      - matchLabels:
-          environment: production
-```
-
-You can use the more complex `matchExpressions` to match labels based on an expression. This example matches control planes that have label `environment: production` or `environment: staging`:
-
-```yaml
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedSecretStore
-metadata:
-  name: my-secret-store
-spec:
-  controlPlaneSelector:
-    labelSelectors:
-      - matchExpressions:
-          - { key: environment, operator: In, values: [production,staging] }
-```
-
-You can also specify the names of control planes directly:
-
-```yaml
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedSecretStore
-metadata:
-  name: my-secret-store
-spec:
-  controlPlaneSelector:
-    names:
-      - controlplane-dev
-      - controlplane-staging
-      - controlplane-prod
-```
-
-
-#### Namespace selection
-
-To configure which namespaces **within each matched control plane** to project the secret store into, use the `spec.namespaceSelector` field. The projected secret store only appears in the namespaces matching the provided selector. You can either use `labelSelectors` or the `names` of namespaces directly. A namespace matches if any of the label selectors match.
-
-**For all control planes matched by** `spec.controlPlaneSelector`, this example matches all namespaces in each selected control plane that have `team: team1` as a label:
-
-```yaml
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedSecretStore
-metadata:
-  name: my-secret-store
-spec:
-  namespaceSelector:
-    labelSelectors:
-      - matchLabels:
-          team: team1
-```
-
-You can use the more complex `matchExpressions` to match labels based on an expression. This example matches namespaces that have label `team: team1` or `team: team2`:
-
-```yaml
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedSecretStore
-metadata:
-  name: my-secret-store
-spec:
-  namespaceSelector:
-    labelSelectors:
-      - matchExpressions:
-          - { key: team, operator: In, values: [team1,team2] }
-```
-
-You can also specify the names of namespaces directly:
-
-```yaml
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedSecretStore
-metadata:
-  name: my-secret-store
-spec:
-  namespaceSelector:
-    names:
-      - team1-namespace
-      - team2-namespace
-```
-
-## Configure secrets directly in a control plane
-
-
-The sections above explain how to use group-scoped resources to project secrets into multiple control planes. You can also use ESO API types directly in a control plane, as you would in standalone Crossplane or Kubernetes.
-
-
-See the [ESO documentation][eso-documentation] for a full guide on using the API types.
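-
-For example, a namespaced `ExternalSecret` created directly inside a control plane
-can pull a single value from a store that exists there. A minimal sketch; the store
-name and remote key are hypothetical:
-
-```yaml
-apiVersion: external-secrets.io/v1beta1
-kind: ExternalSecret
-metadata:
-  name: db-password
-  namespace: default
-spec:
-  refreshInterval: 1h
-  secretStoreRef:
-    # A ClusterSecretStore projected by a SharedSecretStore, or one you created
-    name: aws-secrets
-    kind: ClusterSecretStore
-  target:
-    name: db-password # Kubernetes Secret to create in this namespace
-  data:
-    - secretKey: password
-      remoteRef:
-        key: prod/database/credentials
-        property: password
-```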
-
-## Best practices
-
-When you configure secrets management in your Upbound environment, keep the
-following best practices in mind:
-
-**Use consistent labeling schemes** across your control planes for predictable
-and manageable secret distribution.
-
-**Organize your secrets** in your external provider using a hierarchical
-structure that mirrors your control plane organization.
-
-**Set appropriate refresh intervals** based on your security requirements and the
-nature of the secrets.
-
-**Use namespace selection sparingly** to limit secret distribution to only the
-namespaces that need them.
-
-**Use separate tokens for each environment.** Keep them in distinct
-SharedSecretStores. Users could bypass SharedExternalSecret selectors by
-creating ClusterExternalSecrets directly in control planes. This grants access to all
-secrets available to that token.
-
-**Document your secret management architecture**, including which control planes
-should receive which secrets.
-
-[control-plane-selection]: #control-plane-selection
-[namespace-selection]: #namespace-selection
-[control-plane-selection-1]: #control-plane-selection
-[namespace-selection-2]: #namespace-selection
-
-[external-secrets-operator-eso]: https://external-secrets.io
-[workload-identity-enabled-aks-cluster]: https://learn.microsoft.com/en-us/azure/aks/workload-identity-deploy-cluster
-[eso-provider-api]: https://external-secrets.io/latest/provider/google-secrets-manager/
-[gcp-cli]: https://cloud.google.com/iam/docs/creating-managing-service-account-keys
-[workload-identity-federation-enabled]: https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity#enable_on_clusters_and_node_pools
-[gcp-cli-1]: https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity#kubernetes-sa-to-iam
-[gcp-iam-policy-binding]: https://cloud.google.com/sdk/gcloud/reference/iam/service-accounts/add-iam-policy-binding
-[eso-documentation]: https://external-secrets.io/latest/introduction/getting-started/
diff --git a/spaces_versioned_docs/version-v1.14/howtos/self-hosted/_category_.json b/spaces_versioned_docs/version-v1.14/howtos/self-hosted/_category_.json
deleted file mode 100644
index 5bf23bb0a..000000000
--- a/spaces_versioned_docs/version-v1.14/howtos/self-hosted/_category_.json
+++ /dev/null
@@ -1,11 +0,0 @@
-{
-  "label": "Self-Hosted Spaces",
-  "position": 2,
-  "collapsed": true,
-  "customProps": {
-    "plan": "business"
-  }
-
-}
-
-
diff --git a/spaces_versioned_docs/version-v1.14/howtos/self-hosted/administer-features.md b/spaces_versioned_docs/version-v1.14/howtos/self-hosted/administer-features.md
deleted file mode 100644
index ce878014e..000000000
--- a/spaces_versioned_docs/version-v1.14/howtos/self-hosted/administer-features.md
+++ /dev/null
@@ -1,121 +0,0 @@
----
-title: Administer features
-sidebar_position: 12
-description: Enable and disable features in Spaces
----
-
-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Feature flags and their availability may vary by version.
-
-For detailed feature availability across versions, see the version support documentation.
-:::
-
-This guide shows how to enable or disable features in your self-hosted Space.
-
-## Shared secrets
-
-**Status:** Preview
-
-This feature is enabled by default in Cloud Spaces.
-
-To enable this feature in a self-hosted Space, set
-`features.alpha.sharedSecrets.enabled=true` when installing the Space:
-
-```bash
-up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
-    ...
- --set "features.alpha.sharedSecrets.enabled=true" \ -``` - - -## Observability - -**Status:** GA -**Available from:** Spaces v1.13+ - -This feature is enabled by default in Cloud Spaces. - - - -To enable this feature in a self-hosted Space, set -`observability.enabled=true` (`features.alpha.observability.enabled=true` before `v1.14.0`) when installing the Space: - -```bash -up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \ - ... - --set "observability.enabled=true" \ -``` - -The observability feature collects telemetry data from user-facing control -plane workloads like: - -* Crossplane -* Providers -* Functions - -Self-hosted Spaces users can add control plane system workloads such as the -`api-server`, `etcd` by setting the -`observability.collectors.includeSystemTelemetry` Helm flag to true. - -### Sensitive data - -To avoid exposing sensitive data in the `SharedTelemetryConfig` resource, use -Kubernetes secrets to store the sensitive data and reference the secret in the -`SharedTelemetryConfig` resource. - -Create the secret in the same namespace/group as the `SharedTelemetryConfig` -resource. The example below uses `kubectl create secret` to create a new secret: - -```bash -kubectl create secret generic sensitive -n \ - --from-literal=apiKey='YOUR_API_KEY' -``` - -Next, reference the secret in the `SharedTelemetryConfig` resource: - -```yaml -apiVersion: observability.spaces.upbound.io/v1alpha1 -kind: SharedTelemetryConfig -metadata: - name: newrelic -spec: - configPatchSecretRefs: - - name: sensitive - key: apiKey - path: exporters.otlphttp.headers.api-key - controlPlaneSelector: - labelSelectors: - - matchLabels: - org: foo - exporters: - otlphttp: - endpoint: https://otlp.nr-data.net - headers: - api-key: dummy # This value is replaced by the secret value, can be omitted - exportPipeline: - metrics: [otlphttp] - traces: [otlphttp] - logs: [otlphttp] -``` - -The `configPatchSecretRefs` field in the `spec` specifies the secret `name`, -`key`, and `path` values to inject the secret value in the -`SharedTelemetryConfig` resource. - -## Shared backups - -As of Spaces `v.12.0`, this feature is enabled by default. - -To disable in a self-hosted Space, pass the `features.alpha.sharedBackup.enabled=false` as a Helm chart value. -`--set "features.alpha.sharedBackup.enabled=false"` - -## Query API - -**Status:** Preview -The Query API is available in the Cloud Space offering and enabled by default. - -Query API is required for self-hosted deployments with connected Spaces. See the -related [documentation][documentation] -to enable this feature. - -[documentation]: /spaces/howtos/query-api/ diff --git a/spaces_versioned_docs/version-v1.14/howtos/self-hosted/attach-detach.md b/spaces_versioned_docs/version-v1.14/howtos/self-hosted/attach-detach.md deleted file mode 100644 index 1465921cf..000000000 --- a/spaces_versioned_docs/version-v1.14/howtos/self-hosted/attach-detach.md +++ /dev/null @@ -1,198 +0,0 @@ ---- -title: Connect or disconnect a Space -sidebar_position: 12 -description: Enable and connect self-hosted Spaces to the Upbound console ---- -:::info API Version Information -This guide applies to **Spaces v1.8.0 and later** (Self-Hosted only). Connection to Upbound console requires Query API and RBAC to be enabled. - -For version-specific features and requirements, see the . Query API setup details, see [Deploy Query API infrastructure](./query-api.md). -::: - -:::important -This feature is in preview. 
Starting in Spaces `v1.8.0` and later, you must -deploy and [enable the Query API][enable-the-query-api] and [enable Upbound -RBAC][enable-upbound-rbac] to connect a Space to Upbound. -::: - -[Upbound][upbound] allows you to connect self-hosted Spaces and enables a streamlined operations and debugging experience in your Console. - -## Usage - -### Connect - -Before you begin, make sure you have: - -- An existing Upbound [organization][organization] in Upbound SaaS. -- The `up` CLI installed and logged into your organization -- `kubectl` installed with the kubecontext of your self-hosted Space cluster. -- A `token.json` license, provided by your Upbound account representative. -- You enabled the [Query API][query-api] in the self-hosted Space. - -Create a new `UPBOUND_SPACE_NAME`. If you don't create a name, `up` automatically generates one for you: - -```ini -export UPBOUND_SPACE_NAME=your-self-hosted-space -``` - -#### With up CLI - -:::tip -The command tries to connect the Space to the org account context pointed at by your `up` CLI profile. Make sure you've logged into Upbound SaaS with `up login -a ` before trying to connect the Space. -::: - -Connect the Space to the Console: - -```bash -up space connect "${UPBOUND_SPACE_NAME}" -``` - -This command installs a Connect agent, creates a service account, and configures permissions in your Upbound cloud organization in the `upbound-system` namespace of your Space. - -#### With Helm - -Export your Upbound org account name to an environment variable called `UPBOUND_ORG_NAME`. You can see this value by running `up org list` after logging on to Upbound. - -```ini -export UPBOUND_ORG_NAME=your-org-name -``` - -Create a new robot token and export it to an environment variable called `UPBOUND_TOKEN`: - -```bash -up robot create "${UPBOUND_SPACE_NAME}" --description="Robot used for authenticating Space '${UPBOUND_SPACE_NAME}' with Upbound Connect" -export UPBOUND_TOKEN=$(up robot token create "$UPBOUND_SPACE_NAME" "$UPBOUND_SPACE_NAME" --file - | jq -r '.token') -``` - -:::note -Follow the [`jq` installation guide][jq-install] if your machine doesn't include -it by default. -::: - -Create a secret containing the robot token: - -```bash -kubectl create secret -n upbound-system generic connect-token --from-literal=token=${UPBOUND_TOKEN} -``` - -Specify your username and password for the helm OCI registry: - -```bash -jq -r .token $SPACES_TOKEN_PATH | helm registry login xpkg.upbound.io -u $(jq -r .accessId $SPACES_TOKEN_PATH) --password-stdin -``` - -In the same cluster where you installed the Spaces software, install the Upbound connect agent with your token secret. - -```bash -helm -n upbound-system upgrade --install agent \ - oci://xpkg.upbound.io/spaces-artifacts/agent \ - --version "0.0.0-441.g68777b9" \ - --set "image.repository=xpkg.upbound.io/spaces-artifacts/agent" \ - --set "registration.image.repository=xpkg.upbound.io/spaces-artifacts/register-init" \ - --set "imagePullSecrets[0].name=upbound-pull-secret" \ - --set "registration.enabled=true" \ - --set "space=${UPBOUND_SPACE_NAME}" \ - --set "organization=${UPBOUND_ORG_NAME}" \ - --set "tokenSecret=connect-token" \ - --wait -``` - - -#### View your Space in the Console - - -Go to the [Upbound Console][upbound-console], log in, and choose the newly connected Space from the Space selector dropdown. - -![A screenshot of the Upbound Console space selector dropdown](/img/attached-space.png) - -:::note -You can only connect a self-hosted Space to a single organization at a time. 
-::: - -### Disconnect - -#### With up CLI - -To disconnect a self-hosted Space or a deleted self-hosted Space, run the following command: - -```bash -up space disconnect "${UPBOUND_SPACE_NAME}" -``` - -If the Space still exists, this command uninstalls the Connect agent and deletes the associated service account and permissions. - -#### With Helm - -To disconnect a self-hosted Space or a deleted self-hosted Space, run the following command: - -```bash -helm delete -n upbound-system agent -``` - -Clean up the robot token you created for this self-hosted Space: - -```bash -up robot delete "${UPBOUND_SPACE_NAME}" --force -``` - -## Security model - -### Architecture - -![An architectural diagram of a self-hosted Space attached to Upbound](/img/console-attach-architecture.jpg) - -:::note -This diagram illustrates a self-hosted Space running in AWS connected to the global Upbound Console. The same model applies to a Space running in AKS, GKE, or other Kubernetes environments. -::: - -### Data path - -Upbound uses a Pub/Sub model over TLS to communicate between Upbound's global -console and your self-hosted Space. Self-hosted Spaces establishes a secure -connection with `connect.upbound.io`. `api.upbound.io`, and `auth.upbound.io` and subscribes to an -endpoint. - -:::important -Add `connect.upbound.io`, `api.upbound.io`, and `auth.upbound.io` to your organization's list of -allowed endpoints. -::: - -The -Upbound Console communicates to the Space through that endpoint. The data flow -is: - -1. Users sign in to the Upbound Console, redirecting to authenticate with an organization's configured Identity Provider via SSO. -2. Once authenticated, actions in the Console, like listing control planes or specific resource types from a control plane. These requests post as messages to the Upbound Connect service. -3. A user's self-hosted Space polls the Upbound Connect service periodically for new messages, verifies the authenticity of the message, and fulfills the request contained. -4. A user's self-hosted Space returns the results of the request to the Upbound Connect service and the Console renders the results in the user's browser session. - -**Upbound never stores data originated from a self-hosted Space.** The data is transient and only exposed in the user's browser session. The Console needs this data to render your resources and control planes in the UI. - -### Data transmitted - -Users interact with the Upbound Console to generate request queries to the Upbound Connect Service while exploring, managing, or debugging a self-hosted Space. These requests send data back to the user's browser session in the Console, including: - -* Metadata for the Space -* Metadata for control planes in the state -* Configuration manifests for various resource types within your Space: Crossplane managed resources, composite resources, composite resource claims, Upbound shared secrets, Upbound shared backups, Crossplane providers, ProviderConfigs, Configurations, and Crossplane Composite Functions. - -:::important -This data only concerns resource configuration. The data _inside_ the managed -resource in your Space isn't visible at any point. -::: - -**Upbound can't see your data.** Upbound doesn't have access to session-based data rendered for your users in the Upbound Console. Upbound has no information about your self-hosted Space, other than that you've connected a self-hosted Space. 
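To confirm the Connect agent in your Space is healthy and polling Upbound, you can inspect its workload in the Space cluster. This is a minimal sketch: it assumes the Helm release name `agent` in `upbound-system` from the install steps above, and the deployment name and labels may differ in your installation:

```bash
# Check that the Connect agent pods are running (the label selector is an
# assumption based on standard Helm chart conventions)
kubectl get pods -n upbound-system -l app.kubernetes.io/instance=agent

# Tail recent agent logs to verify it polls connect.upbound.io without errors
kubectl logs -n upbound-system deployment/agent --tail=50
```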
### Threat vectors

Only users with editor or administrative permissions can make changes through
the Console, such as creating or deleting control planes or groups.

[enable-the-query-api]: /spaces/howtos/self-hosted/query-api
[enable-upbound-rbac]: /manuals/platform/concepts/authorization/upbound-rbac
[upbound]: /manuals/console/upbound-console
[organization]: /manuals/platform/concepts/identity-management/organizations
[query-api]: /spaces/howtos/self-hosted/query-api
[jq-install]: https://jqlang.org/download/

[upbound-console]: https://console.upbound.io

diff --git a/spaces_versioned_docs/version-v1.14/howtos/self-hosted/billing.md b/spaces_versioned_docs/version-v1.14/howtos/self-hosted/billing.md
deleted file mode 100644
index 145ff9f03..000000000
--- a/spaces_versioned_docs/version-v1.14/howtos/self-hosted/billing.md
+++ /dev/null
@@ -1,307 +0,0 @@
---
title: Self-Hosted Space Billing
sidebar_position: 50
description: A guide for how billing works in an Upbound Space
---

:::info API Version Information
This guide applies to all supported versions (v1.9-v1.14+) for self-hosted
deployments. Billing models may evolve between versions; for an alternative,
capacity-based model, see
[Capacity Licensing](/spaces/howtos/self-hosted/capacity-licensing).
:::

Spaces are a self-hosting feature of Upbound's [flagship product][flagship-product]
that lets platform teams deploy control planes in their self-managed
environments. You can install Spaces into any Kubernetes cluster in your own
cloud account, on-premises data center, or on the edge. Pricing is usage-based
and requires an Upbound account and subscription. The billing unit is a `Loop`.

:::info
This guide describes the traditional usage-based billing model using object
storage. For disconnected or air-gapped environments, consider
[Capacity Licensing](/spaces/howtos/self-hosted/capacity-licensing), which
provides a simpler fixed-capacity model with local usage tracking.
:::

## Billing details

Spaces **aren't connected** to Upbound's global service. To enable proper
billing, the Spaces software ships a controller whose responsibility is to
collect billing data from your Spaces deployment. The collection and storage of
your billing data happens entirely within your environment; no data is
automatically emitted back to Upbound's global service. The data is written to
object storage of your choice; AWS, Azure, and GCP are currently supported. The
Spaces software exports billing usage data every ~15 seconds.

Spaces customers must periodically provide the billing data to Upbound. Contact
your Upbound sales representative to learn more.

## AWS S3

Configure billing to write to an S3 bucket by providing the following values at
install time. Create an S3 bucket if you don't already have one.

### IAM policy

You must create an IAM policy and attach it to the IAM user (for static
credentials) or IAM role (for assumed roles).
The policy example below grants the necessary S3 permissions:

```json
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Sid": "EnableS3Permissions",
      "Effect": "Allow",
      "Action": [
        "s3:PutObject",
        "s3:GetObject",
        "s3:ListBucket",
        "s3:DeleteObject"
      ],
      "Resource": [
        "arn:aws:s3:::your-bucket-name/*",
        "arn:aws:s3:::your-bucket-name"
      ]
    },
    {
      "Sid": "ListBuckets",
      "Effect": "Allow",
      "Action": "s3:ListAllMyBuckets",
      "Resource": "*"
    }
  ]
}
```

### Authentication with static credentials

In your Spaces install cluster, create a secret in the `upbound-system`
namespace. This secret must contain the keys `AWS_ACCESS_KEY_ID` and
`AWS_SECRET_ACCESS_KEY`.

```bash
kubectl create secret generic billing-credentials -n upbound-system \
  --from-literal=AWS_ACCESS_KEY_ID=<your-access-key-id> \
  --from-literal=AWS_SECRET_ACCESS_KEY=<your-secret-access-key>
```

Install the Space software, providing the billing details alongside the other
required values.

```bash {hl_lines="2-6"}
helm -n upbound-system upgrade --install spaces ... \
  --set "billing.enabled=true" \
  --set "billing.storage.provider=aws" \
  --set "billing.storage.aws.region=<region>" \
  --set "billing.storage.aws.bucket=<bucket-name>" \
  --set "billing.storage.secretRef.name=billing-credentials"
  ...
```

```bash {hl_lines="2-6"}
up space init ... \
  --set "billing.enabled=true" \
  --set "billing.storage.provider=aws" \
  --set "billing.storage.aws.region=<region>" \
  --set "billing.storage.aws.bucket=<bucket-name>" \
  --set "billing.storage.secretRef.name=billing-credentials"
  ...
```

### Authentication with an IAM role

To use short-lived credentials with an assumed IAM role, create an IAM role
with an established trust relationship to the `vector` service account in all
`mxp-*-system` namespaces.

```json
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Principal": {
        "Federated": "arn:aws:iam::12345678912:oidc-provider/oidc.eks.eu-west-2.amazonaws.com/id/YOUROIDCPROVIDERID"
      },
      "Action": "sts:AssumeRoleWithWebIdentity",
      "Condition": {
        "StringLike": {
          "oidc.eks.eu-west-2.amazonaws.com/id/YOUROIDCPROVIDERID:sub": "system:serviceaccount:mxp-*-system:vector"
        }
      }
    }
  ]
}
```

For more information about workload identities, review the [Workload-identity
Configuration documentation][workload-identity-configuration-documentation].

```bash {hl_lines="2-7"}
helm -n upbound-system upgrade --install spaces ... \
  --set "billing.enabled=true" \
  --set "billing.storage.provider=aws" \
  --set "billing.storage.aws.region=<region>" \
  --set "billing.storage.aws.bucket=<bucket-name>" \
  --set "billing.storage.secretRef.name=" \
  --set "controlPlanes.vector.serviceAccount.customAnnotations[eks.amazonaws.com/role-arn]=<role-arn>"
  ...
```

```bash {hl_lines="2-7"}
up space init ... \
  --set "billing.enabled=true" \
  --set "billing.storage.provider=aws" \
  --set "billing.storage.aws.region=<region>" \
  --set "billing.storage.aws.bucket=<bucket-name>" \
  --set "billing.storage.secretRef.name=" \
  --set "controlPlanes.vector.serviceAccount.customAnnotations[eks.amazonaws.com/role-arn]=<role-arn>"
  ...
```

*Note*: You must set `billing.storage.secretRef.name` to an empty string when
using an assumed role.

## Azure Blob Storage

Configure billing to write to a blob in Azure by providing the following values
at install time. Create a storage account and container if you don't already
have one.

Then, on the cluster where you installed the Spaces software, create a secret
in `upbound-system`.
This secret must contain keys `AZURE_TENANT_ID`, `AZURE_CLIENT_ID`, and `AZURE_CLIENT_SECRET`. Make sure to replace the values with details generated from your Azure account. - -```bash -kubectl create secret generic billing-credentials -n upbound-system \ - --from-literal=AZURE_TENANT_ID= \ - --from-literal=AZURE_CLIENT_ID= \ - --from-literal=AZURE_CLIENT_SECRET= -``` - -Install the Space software, providing the billing details to the other required values. - - - - - - -```bash {hl_lines="2-6"} -helm -n upbound-system upgrade --install spaces ... \ - --set "billing.enabled=true" \ - --set "billing.storage.provider=azure" \ - --set "billing.storage.azure.storageAccount=" \ - --set "billing.storage.azure.container=" \ - --set "billing.storage.secretRef.name=billing-credentials" - ... -``` - - - - - -```bash {hl_lines="2-6"} -up space init ... \ - --set "billing.enabled=true" \ - --set "billing.storage.provider=azure" \ - --set "billing.storage.azure.storageAccount=" \ - --set "billing.storage.azure.container=" \ - --set "billing.storage.secretRef.name=billing-credentials" - ... -``` - - - - - - - -## GCP Cloud Storage Buckets - - -Configure billing to write to a Cloud Storage bucket in GCP by providing the following values at install-time. Create a bucket if you don't already have one. - -Then, on the cluster where you installed the Spaces software, create a secret in `upbound-system`. This secret must contain the key `google_application_credentials`. Make sure to replace the value with a GCP service account key JSON generated from your GCP account. - -```bash -kubectl create secret generic billing-credentials -n upbound-system \ - --from-literal=google_application_credentials= -``` - -Install the Space software, providing the billing details to the other required values. - - - - - - -```bash {hl_lines="2-5"} -helm -n upbound-system upgrade --install spaces ... \ - --set "billing.enabled=true" \ - --set "billing.storage.provider=gcp" \ - --set "billing.storage.gcp.bucket=" \ - --set "billing.storage.secretRef.name=billing-credentials" - ... -``` - - - - - -```bash {hl_lines="2-5"} -up space init ... \ - --set "billing.enabled=true" \ - --set "billing.storage.provider=gcp" \ - --set "billing.storage.gcp.bucket=" \ - --set "billing.storage.secretRef.name=billing-credentials" - ... -``` - - - - - -## Export billing data to send to Upbound - -To prepare the billing data to send to Upbound, do the following: - -Ensure the current context of your kubeconfig points at the Spaces cluster. Then, run the [export][export] command. - - -:::important -Your current CLI must have read access to the bucket to run this command. -::: - - -The example below exports billing data stored in AWS: - -```bash -up space billing export --provider=aws \ - --bucket=spaces-billing-bucket \ - --account=your-upbound-org \ - --billing-month=2024-07 \ - --force-incomplete -``` - -The command creates a billing report that's zipped up in your current working directory. Send the output to your Upbound sales representative. - - -You can find full instructions and command options in the up [CLI reference][cli-reference] docs. 
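To confirm the billing controller is writing usage data before you send a report, you can spot-check the bucket contents. A quick sketch with the AWS CLI, assuming the example bucket name used above and read access from your workstation:

```bash
# List the most recently written billing objects in the S3 bucket
aws s3 ls s3://spaces-billing-bucket/ --recursive | tail -n 20
```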
- - -[export]: /reference/cli-reference -[cli-reference]: /reference/cli-reference -[flagship-product]: https://www.upbound.io/platform -[workload-identity-configuration-documentation]: https://docs.upbound.io/operate/accounts/authentication/oidc-configuration diff --git a/spaces_versioned_docs/version-v1.14/howtos/self-hosted/capacity-licensing.md b/spaces_versioned_docs/version-v1.14/howtos/self-hosted/capacity-licensing.md deleted file mode 100644 index a1dc6c101..000000000 --- a/spaces_versioned_docs/version-v1.14/howtos/self-hosted/capacity-licensing.md +++ /dev/null @@ -1,591 +0,0 @@ ---- -title: Capacity Licensing -sidebar_position: 60 -description: A guide for capacity-based licensing in self-hosted Spaces -plan: "enterprise" ---- - - - - - -This guide explains how to configure and monitor capacity-based licensing in -self-hosted Upbound Spaces. Capacity licensing provides a simplified billing -model for disconnected or air-gapped environments where automated usage -reporting isn't possible. - -:::info -Spaces `v1.15` and later support Capacity Licensing as an -alternative to the traditional usage-based billing model described in the -[Self-Hosted Space Billing][space-billing] guide. -::: - -## Overview - -Capacity licensing allows organizations to purchase a fixed capacity of -resources upfront. The Spaces software tracks usage locally and provides -visibility into consumption against your purchased capacity, all without -requiring external connectivity to Upbound's services. - -### Key concepts - -- **Resource Hours**: The primary billing unit representing all resources - managed by Crossplane over time. This includes managed resources, - composites (XRs), claims (XRCs), and all composed resources - essentially - everything Crossplane manages. The system aggregates resource counts over each - hour using trapezoidal integration to accurately account for changes in - resource count throughout the hour. -- **Operations**: The number of Operations invoked by Crossplane. -- **License Capacity**: The total amount of resource hours and operations included in your license. -- **Usage Tracking**: Continuous monitoring of consumption with real-time utilization percentages. - -### How it works - -1. Upbound provides you with a license file containing your purchased capacity -2. You configure a `SpaceLicense` in your Spaces cluster -3. The metering system automatically: - - Collects measurements from all control planes every minute - - Aggregates usage data into hourly intervals - - Stores usage data in a local PostgreSQL database - - Updates the `SpaceLicense` status with current consumption - -## Prerequisites - -### PostgreSQL database - -Capacity licensing requires a PostgreSQL database to store usage measurements. You can use: - -- An existing PostgreSQL instance -- A managed PostgreSQL service (AWS RDS, Azure Database, Google Cloud SQL) -- A PostgreSQL instance deployed in your cluster - -The database must be: - -- Accessible from the Spaces cluster -- Configured with a dedicated database and credentials - -#### Example: Deploy PostgreSQL with CloudNativePG - -If you don't have an existing PostgreSQL instance, you can deploy one in your -cluster using [CloudNativePG] (CNPG). CNPG is a Kubernetes operator that -manages PostgreSQL clusters. - -1. Install the CloudNativePG operator: - -```bash -kubectl apply -f https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/release-1.24/releases/cnpg-1.24.1.yaml -``` - -2. 
Create a PostgreSQL cluster for metering: - -```yaml -apiVersion: postgresql.cnpg.io/v1 -kind: Cluster -metadata: - name: metering-postgres - namespace: upbound-system -spec: - instances: 1 - imageName: ghcr.io/cloudnative-pg/postgresql:16 - bootstrap: - initdb: - database: metering - owner: metering - postInitApplicationSQL: - - ALTER ROLE "metering" CREATEROLE; - storage: - size: 5Gi - # Optional: Configure resources for production use - # resources: - # requests: - # memory: "512Mi" - # cpu: "500m" - # limits: - # memory: "1Gi" - # cpu: "1000m" ---- -apiVersion: v1 -kind: Secret -metadata: - name: metering-postgres-app - namespace: upbound-system - labels: - cnpg.io/reload: "true" -stringData: - username: metering - password: "your-secure-password-here" -type: kubernetes.io/basic-auth -``` - -```bash -kubectl apply -f metering-postgres.yaml -``` - -3. Wait for the cluster to be ready: - -```bash -kubectl wait --for=condition=ready cluster/metering-postgres -n upbound-system --timeout=5m -``` - -4. You can access the PostgreSQL cluster at `metering-postgres-rw.upbound-system.svc.cluster.local:5432`. - -:::tip -For production deployments, consider: -- Increasing `instances` to 3 for high availability -- Configuring [backups] to object storage -- Setting appropriate resource requests and limits -- Using a dedicated storage class with good I/O performance -::: - -### License file - -Contact your Upbound sales representative to obtain a license file for your organization. The license file contains: -- Your unique license ID -- Purchased capacity (resource hours and operations) -- License validity period -- Any usage restrictions (such as cluster UUID pinning) - -## Configuration - -### Step 1: Create database credentials secret - -Create a Kubernetes secret containing your PostgreSQL password using the pgpass format: - -```bash -# Create a pgpass file with format: hostname:port:database:username:password -# Note: The database name and username must be 'metering' -# For CNPG clusters, use the read-write service endpoint: -rw..svc.cluster.local -echo "metering-postgres-rw.upbound-system.svc.cluster.local:5432:metering:metering:your-secure-password-here" > pgpass - -# Create the secret -kubectl create secret generic metering-postgres-credentials \ - -n upbound-system \ - --from-file=pgpass=pgpass - -# Clean up the pgpass file -rm pgpass -``` - -The secret must contain a single key: -- **`pgpass`**: PostgreSQL password file in the format `hostname:port:metering:metering:password` - -:::note -The database name and username are fixed as `metering`. Ensure your PostgreSQL instance has a database named `metering` with a user `metering` that has appropriate permissions. - -If you deployed PostgreSQL using CNPG as shown in the example above, the password should match what you set in the `metering-postgres-app` secret. -::: - -:::tip -For production environments, consider using external secret management solutions: -- [External Secrets Operator][eso] -- Cloud-specific secret managers (AWS Secrets Manager, Azure Key Vault, GCP Secret Manager) -::: - -### Step 2: Enable metering in Spaces - -Enable the metering feature when installing or upgrading Spaces: - - - - - -```bash {hl_lines="2-7"} -helm -n upbound-system upgrade --install spaces ... 
\ - --set "metering.enabled=true" \ - --set "metering.storage.postgres.connection.url=metering-postgres-rw.upbound-system.svc.cluster.local:5432" \ - --set "metering.storage.postgres.connection.credentials.secret.name=metering-postgres-credentials" \ - --set "metering.interval=1m" \ - --set "metering.workerCount=10" \ - --set "metering.aggregationInterval=1h" \ - --set "metering.measurementRetentionDays=30" - ... -``` - - - - - -```bash {hl_lines="2-7"} -up space init ... \ - --set "metering.enabled=true" \ - --set "metering.storage.postgres.connection.url=metering-postgres-rw.upbound-system.svc.cluster.local:5432" \ - --set "metering.storage.postgres.connection.credentials.secret.name=metering-postgres-credentials" \ - --set "metering.interval=1m" \ - --set "metering.workerCount=10" \ - --set "metering.aggregationInterval=1h" \ - --set "metering.measurementRetentionDays=30" - ... -``` - - - - - -#### Configuration options - -| Option | Default | Description | -|--------|---------|-------------| -| `metering.enabled` | `false` | Enable the metering feature | -| `metering.storage.postgres.connection.url` | - | PostgreSQL host and port (format: `host:port`, required) | -| `metering.storage.postgres.connection.credentials.secret.name` | - | Name of the secret containing PostgreSQL credentials (required) | -| `metering.storage.postgres.connection.sslmode` | `require` | SSL mode for PostgreSQL connection (`disable`, `allow`, `prefer`, `require`, `verify-ca`, `verify-full`) | -| `metering.storage.postgres.connection.ca.name` | - | Name of the secret containing CA certificate for TLS connections (optional) | -| `metering.interval` | `1m` | How often to collect measurements from control planes | -| `metering.workerCount` | `10` | Number of parallel workers for measurement collection | -| `metering.aggregationInterval` | `1h` | How often to aggregate measurements into hourly usage data | -| `metering.measurementRetentionDays` | `30` | Days to retain raw measurements (0 = indefinite) | - - -#### Database sizing and retention - -The metering system uses two PostgreSQL tables to track usage: - -**Raw measurements table** (`measurements`): -- Stores point-in-time snapshots collected every measurement interval (default: 1 minute) -- One row per control plane per interval -- Affected by the `measurementRetentionDays` setting -- Used for detailed auditing and troubleshooting - -**Aggregated usage table** (`hourly_usage`): -- Stores hourly aggregated resource hours and operations per license -- One row per hour per license -- Never deleted (required for accurate license tracking) -- Grows much slower than raw measurements - -##### Storage sizing guidelines - -Estimate your PostgreSQL storage needs based on these factors: - - -| Deployment Size | Control Planes | Measurement Interval | Retention Days | Raw Measurements | Indexes & Overhead | Total Storage | -|----------------|----------------|---------------------|----------------|------------------|-------------------|---------------| -| Small | 10 | 1m | 30 | ~85 MB | ~40 MB | **~125 MB** | -| Medium | 50 | 1m | 30 | ~430 MB | ~215 MB | **~645 MB** | -| Large | 200 | 1m | 30 | ~1.7 GB | ~850 MB | **~2.5 GB** | -| Large (90-day retention) | 200 | 1m | 90 | ~5.2 GB | ~2.6 GB | **~7.8 GB** | - -The aggregated hourly usage table adds minimal overhead (~50 KB per year per license). 
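As a quick sanity check of the medium-sized row above, plug the table's inputs into the formula that follows; the ~200 bytes per row and 1.5x index overhead are the guide's own estimates:

```bash
# 50 control planes x (24 x 60 minutes / 1-minute interval) x 30 days
echo $(( 50 * 24 * 60 * 30 ))   # 2160000 rows
# 2,160,000 rows x 200 bytes ~= 412 MB raw; x 1.5 for indexes ~= 620 MB,
# roughly in line with the ~645 MB total in the sizing table
```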
- -**Formula for custom calculations**: -``` -Daily measurements per control plane = (24 * 60) / interval_minutes -Total rows = control_planes × daily_measurements × retention_days -Storage (MB) ≈ (total_rows × 200 bytes) / 1,048,576 × 1.5 (with indexes) -``` - -##### Retention behavior - -The `measurementRetentionDays` setting controls retention of raw measurement data: - -- **Default: 30 days** - Balances audit capabilities with storage efficiency -- **Set to 0**: Disables cleanup, retains all raw measurements indefinitely -- **Cleanup runs**: Every aggregation interval (default: hourly) -- **What's kept forever**: Aggregated hourly usage data (needed for license tracking) -- **What's cleaned up**: Raw point-in-time measurements older than retention period - -**Recommendations**: -- **30 days**: For most troubleshooting and short-term auditing -- **60 to 90 days**: For environments requiring extended audit trails -- **Unlimited (0)**: Only for environments with ample storage or specific compliance requirements - -:::note -Increasing retention period linearly increases storage requirements for raw measurements. The aggregated hourly data is always retained regardless of this setting. -::: - -### Step 3: Apply your license - -Use the `up` CLI to apply your license file: - -```bash -up space license apply /path/to/license.json -``` - -This command automatically: -- Creates a secret containing your license file in the `upbound-system` namespace -- Creates the `SpaceLicense` resource configured to use that secret - -:::tip -You can specify a different namespace for the license secret using the `--namespace` flag: -```bash -up space license apply /path/to/license.json --namespace my-namespace -``` -::: - -
-Alternative: Manual kubectl approach - -If you prefer not to use the `up` CLI, you can manually create the resources: - -1. Create the license secret: - -```bash -kubectl create secret generic space-license \ - -n upbound-system \ - --from-file=license.json=/path/to/license.json -``` - -2. Create the SpaceLicense resource: - -```yaml -apiVersion: admin.spaces.upbound.io/v1alpha1 -kind: SpaceLicense -metadata: - name: space -spec: - secretRef: - name: space-license - namespace: upbound-system - key: license.json -``` - -```bash -kubectl apply -f spacelicense.yaml -``` - -:::important -You **must** name the `SpaceLicense` resource `space`. This resource is a singleton and only one can exist in the cluster. -::: - -
- -## Monitoring usage - -### Check license status - -Use the `up` CLI to view your license details and current usage: - -```bash -up space license show -``` - -Example output: - -``` -Spaces License Status: Valid (License is valid) - -Created: 2024-01-01T00:00:00Z -Expires: 2025-01-01T00:00:00Z - -Plan: enterprise - -Resource Hour Limit: 1000000 -Operation Limit: 500000 - -Enabled Features: -- spaces -- query-api -- backup-restore -``` - -The output shows: -- License validity status and any validation messages -- Creation and expiration dates -- Your commercial plan tier -- Capacity limits for resource hours and operations -- Enabled features in your license -- Any restrictions (such as cluster UUID pinning) - -
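For scripting or alerting, you can also read individual fields straight from the `SpaceLicense` status. A small sketch, assuming the status fields shown in the kubectl example below:

```bash
# Print the current utilization percentages from the SpaceLicense status
kubectl get spacelicense space \
  -o jsonpath='{.status.usage.resourceHoursUtilization}{"\n"}{.status.usage.operationsUtilization}{"\n"}'
```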
-Alternative: View detailed status with kubectl - -For detailed information including usage statistics, use kubectl: - -```bash -kubectl get spacelicense space -o yaml -``` - -Example output showing usage data: - -```yaml -apiVersion: admin.spaces.upbound.io/v1alpha1 -kind: SpaceLicense -metadata: - name: space -spec: - secretRef: - name: space-license - namespace: upbound-system -status: - conditions: - - type: LicenseValid - status: "True" - reason: Valid - message: "License is valid" - id: "lic_abc123xyz" - plan: "enterprise" - capacity: - resourceHours: 1000000 - operations: 500000 - usage: - resourceHours: 245680 - operations: 12543 - resourceHoursUtilization: "24.57%" - operationsUtilization: "2.51%" - firstMeasurement: "2024-01-15T10:00:00Z" - lastMeasurement: "2024-02-10T14:30:00Z" - createdAt: "2024-01-01T00:00:00Z" - expiresAt: "2025-01-01T00:00:00Z" - enabledFeatures: - - "spaces" - - "query-api" - - "backup-restore" -``` - -
- -### Understanding the status fields - -| Field | Description | -|-------|-------------| -| `status.id` | Unique license identifier | -| `status.plan` | Your commercial plan (community, standard, enterprise) | -| `status.capacity` | Total capacity included in your license | -| `status.usage.resourceHours` | Total resource hours consumed | -| `status.usage.operations` | Total operations performed | -| `status.usage.resourceHoursUtilization` | Percentage of resource hours capacity used | -| `status.usage.operationsUtilization` | Percentage of operations capacity used | -| `status.usage.firstMeasurement` | When usage tracking began | -| `status.usage.lastMeasurement` | Most recent usage update | -| `status.expiresAt` | License expiration date | - -### Monitor with kubectl - -Watch your license utilization in real-time: - -```bash -kubectl get spacelicense space -w -``` - -Short output format: - -``` -NAME PLAN VALID REASON AGE -space enterprise True Valid 45d -``` - -## Managing licenses - -### Updating your license - -To update your license with a new license file (for example, when renewing or upgrading capacity), apply the new license: - -```bash -up space license apply /path/to/new-license.json -``` - -This command replaces the existing license secret and updates the SpaceLicense resource. - -### Removing a license - -To remove a license: - -```bash -up space license remove -``` - -This command: -- Prompts for confirmation before proceeding -- Removes the license secret - -To skip the confirmation prompt, use the `--force` flag: - -```bash -up space license remove --force -``` - -## Troubleshooting - -### License not updating - -If the license status doesn't update with usage data: - -1. **Check metering controller logs**: - ```bash - kubectl logs -n upbound-system deployment/spaces-controller -c metering - ``` - -2**Check if the system captures your measurements**: - - ```bash - # Connect to PostgreSQL and query the measurements table - kubectl exec -it -- psql -U -d \ - -c "SELECT COUNT(*) FROM measurements WHERE timestamp > NOW() - INTERVAL '1 hour';" - ``` - -### High utilization warnings - -If you're approaching your capacity limits: - -1. **Review resource usage** by control plane to identify high consumers -2. **Contact your Upbound sales representative** to discuss capacity expansion -3. **Optimize managed resources** by cleaning up unused resources - -### License validation failures - -If your license shows as invalid: - -1. **Check expiration date**: `kubectl get spacelicense space -o jsonpath='{.status.expiresAt}'` -2. **Verify license file integrity**: Ensure the secret contains valid JSON -3. **Check for cluster UUID restrictions**: Upbound pins some licenses to - specific clusters -4. **Review controller logs** for detailed error messages - -## Differences from traditional billing - -### Capacity licensing - -- ✅ Works in disconnected environments -- ✅ Provides real-time usage visibility -- ✅ No manual data export required -- ✅ Requires PostgreSQL database -- ✅ Fixed capacity model - -### Traditional billing (object storage) - - -- ❌ Requires periodic manual export -- ❌ Delayed visibility into usage -- ✅ Works with S3/Azure Blob/GCS -- ❌ Requires cloud storage access -- ✅ Pay-as-you-go model - -## Best practices - -### Database management - -1. **Regular backups**: Back up your metering database regularly to preserve usage history -2. **Monitor database size**: Set appropriate retention periods to manage storage growth -3. 
**Use managed databases**: Consider managed PostgreSQL services for production -4. **Connection pooling**: Use connection pooling for better performance at scale - -### License management - -1. **Monitor utilization**: Set up alerts before reaching 80% capacity -2. **Plan renewals early**: Start renewal discussions 60 days before expiration -3. **Track grace periods**: Note the `gracePeriodEndsAt` date for planning -4. **Secure license files**: Treat license files as sensitive credentials - -### Operational monitoring - -1. **Set up dashboards**: Create Grafana dashboards for usage trends -2. **Enable alerting**: Configure alerts for high utilization and expiration -3. **Regular audits**: Periodically review usage patterns across control planes -4. **Capacity planning**: Use historical data to predict future capacity needs - -## Next steps - -- Learn about [Observability] to monitor your Spaces deployment -- Explore [Backup and Restore][backup-restore] to protect your control plane data -- Review [Self-Hosted Space Billing][space-billing] for the traditional billing model -- Contact [Upbound Sales][sales] to discuss capacity licensing options - - -[space-billing]: /spaces/howtos/self-hosted/billing -[CloudNativePG]: https://cloudnative-pg.io/ -[backups]: https://cloudnative-pg.io/documentation/current/backup_recovery/ -[backup-restore]: /spaces/howtos/backup-and-restore -[sales]: https://www.upbound.io/contact -[eso]: https://external-secrets.io/ -[Observability]: /spaces/howtos/observability - - diff --git a/spaces_versioned_docs/version-v1.14/howtos/self-hosted/certs.md b/spaces_versioned_docs/version-v1.14/howtos/self-hosted/certs.md deleted file mode 100644 index e517c250e..000000000 --- a/spaces_versioned_docs/version-v1.14/howtos/self-hosted/certs.md +++ /dev/null @@ -1,274 +0,0 @@ ---- -title: Istio Ingress Gateway With Custom Certificates -sidebar_position: 20 -description: Install self hosted spaces using istio ingress gateway in a Kind cluster ---- - -:::important -Prerequisites - -- Spaces Token available in a file -- `docker login xpkg.upbound.io -u -p ` -- [`istioctl`][istioctl] installation -- `jq` installation -::: - -This document describes the installation of a self hosted space on an example `kind` -cluster along with Istio Ingress Gateway and certificates. The service mesh and certificates -installation is transferable to self hosted spaces in arbitrary clouds. - -## Create a kind cluster - -```shell -cat < -## Install Istio - - - -:::important -This is an example and not recommended for use in production. -::: - - -1. Create the `istio-values.yaml` file - -```shell -cat > istio-values.yaml << 'EOF' -apiVersion: install.istio.io/v1alpha1 -kind: IstioOperator -spec: - hub: gcr.io/istio-release - components: - ingressGateways: - - enabled: true - name: istio-ingressgateway - k8s: - nodeSelector: - ingress-ready: "true" - overlays: - - apiVersion: apps/v1 - kind: Deployment - name: istio-ingressgateway - patches: - - path: spec.template.spec.containers.[name:istio-proxy].ports - value: - - containerPort: 8080 - hostPort: 80 - - containerPort: 8443 - hostPort: 443 -EOF -``` - -2. Install istio via `istioctl` - -```shell -istioctl install -f istio-values.yaml -``` - -## Create a self-signed Certificate via cert-manager - -:::important -This Certificate manifest creates a self-signed certificate for a proof of concept -environment and isn't recommended for production use cases. -::: - -1. 
Create the upbound-system namespace - -```shell -kubectl create namespace upbound-system -``` - -2. Create a self-signed certificate - -```shell -cat < -## Create an Istio Gateway and VirtualService - - - - -Configure an Istio Gateway and VirtualService to use TLS passthrough. - - -```shell -cat < spaces-values.yaml << 'EOF' -# Configure spaces-router to use the TLS secret created by cert-manager. -externalTLS: - tlsSecret: - name: example-tls-secret - caBundleSecret: - name: example-tls-secret - key: ca.crt -ingress: - provision: false - # Allow Istio Ingress Gateway to communicate to the spaces-router - namespaceLabels: - kubernetes.io/metadata.name: istio-system - podLabels: - app: istio-ingressgateway - istio: ingressgateway -EOF -``` - -2. Set the required environment variables - -```shell -# Update these according to your account/token file -export SPACES_TOKEN_PATH= -export UPBOUND_ACCOUNT= -# Replace SPACES_ROUTER_HOST with your Spaces ingress hostname -export SPACES_ROUTER_HOST="proxy.upbound-127.0.0.1.nip.io" -export SPACES_VERSION="1.14.1" -``` - -3. Create an image pull secret for Spaces - -```shell -kubectl -n upbound-system create secret docker-registry upbound-pull-secret \ - --docker-server=https://xpkg.upbound.io \ - --docker-username="$(jq -r .accessId $SPACES_TOKEN_PATH)" \ - --docker-password="$(jq -r .token $SPACES_TOKEN_PATH)" -``` - -4. Install the Spaces helm chart - -```shell -# Login to xpkg.upbound.io -jq -r .token $SPACES_TOKEN_PATH | helm registry login xpkg.upbound.io -u $(jq -r .accessId $SPACES_TOKEN_PATH) --password-stdin - -# Install spaces helm chart -helm -n upbound-system upgrade --install spaces \ - oci://xpkg.upbound.io/spaces-artifacts/spaces \ - --version "${SPACES_VERSION}" \ - --set "ingress.host=${SPACES_ROUTER_HOST}" \ - --set "account=${UPBOUND_ACCOUNT}" \ - --set "authentication.hubIdentities=true" \ - --set "authorization.hubRBAC=true" \ - --wait -f spaces-values.yaml -``` - -## Validate the installation - -Successful access of the `up` command to interact with your self hosted space validates the -certificate installation. - -- `up ctx .` - -You can also issue control plane creation, list and deletion commands. - -- `up ctp create cert-test` -- `up ctp list` -- `up ctx disconnected/kind-kind/default/cert-test && kubectl get namespace` -- `up ctp delete cert-test` - -:::note -If `up` can't connect to your control plane, follow [this guide to create a new profile][up-profile]. -::: - -## Troubleshooting - -Examine your certificate with `openssl`: - -```shell -openssl s_client -connect proxy.upbound-127.0.0.1.nip.io:443 -showcerts -``` - -[istioctl]: https://istio.io/latest/docs/ops/diagnostic-tools/istioctl/ -[up-profile]: /manuals/cli/howtos/profile-config/ diff --git a/spaces_versioned_docs/version-v1.14/howtos/self-hosted/configure-ha.md b/spaces_versioned_docs/version-v1.14/howtos/self-hosted/configure-ha.md deleted file mode 100644 index ddf36c55e..000000000 --- a/spaces_versioned_docs/version-v1.14/howtos/self-hosted/configure-ha.md +++ /dev/null @@ -1,450 +0,0 @@ ---- -title: Production Scaling and High Availability -description: Configure your Self-Hosted Space for production -sidebar_position: 5 ---- - - - -This guide explains how to configure an existing Upbound Space deployment for -production operation at scale. - -Use this guide when you're ready to deploy production scaling, high availability, -and monitoring in your Space. 
- -:::info API Version Information -This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. - -For API specifications on ControlPlane resources and configurations, see the [Spaces API Reference](../../../../../reference/apis/spaces-api/). version compatibility details, see the . -::: - -## Prerequisites - -Before you begin scaling your Spaces deployment, make sure you have: - - -* A working Space deployment -* Cluster administrator access -* An understanding of load patterns and growth in your organization -* A familiarity with node affinity, tainting, and Horizontal Pod Autoscaling - (HPA) - - -## Production scaling strategy - - -In this guide, you will: - - - -* Create dedicated node pools for different component types -* Configure high-availability to ensure there are no single points of failure -* Set dynamic scaling for variable workloads -* Optimize your storage and component operations -* Monitor your deployment health and performance - -## Spaces architecture - -The basic Spaces workflow follows the pattern below: - - -![Spaces workflow][spaces-workflow] - -## Node architecture - -You can mitigate resource contention and improve reliability by separating system -components into dedicated node pools. - -### `etcd` dedicated nodes - -`etcd` performance directly impacts your entire Space, so isolate it for -consistent performance. - -1. Create a dedicated `etcd` node pool - - **Requirements:** - - **Minimum**: 3 nodes for HA - - **Instance type**: General purpose with high network throughput/low latency - - **Storage**: High performance storage (`etcd` is I/O sensitive) - -2. Taint `etcd` nodes to reserve them - - ```bash - kubectl taint nodes target=etcd:NoSchedule - ``` - -3. Configure `etcd` storage - - `etcd` is sensitive to storage I/O performance. Review the [`etcd` scaling - documentation][scaling] - for specific storage guidance. - -### API server dedicated nodes - -API servers handle all control plane requests and should run on dedicated -infrastructure. - -1. Create dedicated API server nodes - - **Requirements:** - - **Minimum**: 2 nodes for HA - - **Instance type**: Compute-optimized, memory-optimized, or general-purpose - - **Scaling**: Scale vertically based on API server load patterns - -2. Taint API server nodes - - ```bash - kubectl taint nodes target=apiserver:NoSchedule - ``` - -### Configure cluster autoscaling - -Enable cluster autoscaling for all node pools. - -For AWS EKS clusters, Upbound recommends using [`Karpenter`][karpenter] for -improved bin-packing and instance type selection. - -For GCP GKE clusters, follow the [GKE autoscaling][gke-autoscaling] guide. - -For Azure AKS clusters, follow the [AKS autoscaling][aks-autoscaling] guide. - - -## Configure high availability - -Ensure control plane components can survive node and zone failures. - -### Enable high availability mode - -1. Configure control planes for high availability - - ```yaml - controlPlanes: - ha: - enabled: true - ``` - - This configures control plane pods to run with multiple replicas and - associated pod disruption budgets. - -### Configure component distribution - -1. 
Set up API server pod distribution - - ```yaml - controlPlanes: - vcluster: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: target - operator: In - values: - - apiserver - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - key: app - operator: In - values: - - vcluster - topologyKey: "kubernetes.io/hostname" - preferredDuringSchedulingIgnoredDuringExecution: - - podAffinityTerm: - labelSelector: - matchExpressions: - - key: app - operator: In - values: - - vcluster - topologyKey: topology.kubernetes.io/zone - weight: 100 - ``` - -2. Configure `etcd` pod distribution - - ```yaml - controlPlanes: - etcd: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: target - operator: In - values: - - etcd - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - key: app - operator: In - values: - - vcluster-etcd - topologyKey: "kubernetes.io/hostname" - preferredDuringSchedulingIgnoredDuringExecution: - - podAffinityTerm: - labelSelector: - matchExpressions: - - key: app - operator: In - values: - - vcluster-etcd - topologyKey: topology.kubernetes.io/zone - weight: 100 - ``` - -### Configure tolerations - -Allow control plane pods to schedule on the tainted dedicated nodes (available -in Spaces v1.14+). - -1. Add tolerations for `etcd` pods - - ```yaml - controlPlanes: - etcd: - tolerations: - - key: "target" - operator: "Equal" - value: "etcd" - effect: "NoSchedule" - ``` - -2. Add tolerations for API server pods - - ```yaml - controlPlanes: - vcluster: - tolerations: - - key: "target" - operator: "Equal" - value: "apiserver" - effect: "NoSchedule" - ``` - - -## Configure autoscaling for Spaces components - - -Set up the Spaces system components to handle variable load automatically. - -### Scale API and `apollo` services - -1. Configure minimum replicas for availability - - ```yaml - api: - replicaCount: 2 - - features: - alpha: - apollo: - enabled: true - replicaCount: 2 - ``` - - Both services support horizontal and vertical scaling based on load patterns. - -### Configure router autoscaling - -The `spaces-router` is the entry point for all traffic and needs intelligent -scaling. - - -1. Enable Horizontal Pod Autoscaler - - ```yaml - router: - hpa: - enabled: true - minReplicas: 2 - maxReplicas: 8 - targetCPUUtilizationPercentage: 80 - targetMemoryUtilizationPercentage: 80 - ``` - -2. Monitor scaling factors - - **Router scaling behavior:** - - **Vertical scaling**: Scales based on number of control planes - - **Horizontal scaling**: Scales based on request volume - - **Resource monitoring**: Monitor CPU and memory usage - - - -### Configure controller scaling - -The `spaces-controller` manages Space-level resources and requires vertical -scaling. - -1. Configure adequate resources with headroom - - ```yaml - controller: - resources: - requests: - cpu: "500m" - memory: "1Gi" - limits: - cpu: "2000m" - memory: "4Gi" - ``` - - **Important**: The controller can spike when reconciling large numbers of - control planes, so provide adequate headroom for resource spikes. - -## Set up production storage - - -### Configure Query API database - - -1. 
Use a managed PostgreSQL database - - **Recommended services:** - - [AWS RDS][rds] - - [Google Cloud SQL][gke-sql] - - [Azure Database for PostgreSQL][aks-sql] - - **Requirements:** - - Minimum 400 IOPS performance - - -## Monitoring - - - -Monitor key metrics to ensure healthy scaling and identify issues quickly. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -### Control plane health - -Track these `spaces-controller` metrics: - -1. **Total control planes** - - ``` - spaces_control_plane_exists - ``` - - Tracks the total number of control planes in the system. - -2. **Degraded control planes** - - ``` - spaces_control_plane_degraded - ``` - - Returns control planes that don't have a `Synced`, `Ready`, and - `Healthy` state. - -3. **Stuck control planes** - - ``` - spaces_control_plane_stuck - ``` - - Control planes stuck in a provisioning state. - -4. **Deletion issues** - - ``` - spaces_control_plane_deletion_stuck - ``` - - Control planes stuck during deletion. - -### Alerting - -Configure alerts for critical scaling and health metrics: - -- **High error rates**: Alert when 4xx/5xx response rates exceed thresholds -- **Control plane health**: Alert when degraded or stuck control planes exceed acceptable counts - -## Architecture overview - -**Spaces System Components:** - -- **`spaces-router`**: Entry point for all endpoints, dynamically builds routes to control plane API servers -- **`spaces-controller`**: Reconciles Space-level resources, serves webhooks, works with `mxp-controller` for provisioning -- **`spaces-api`**: API for managing groups, control planes, shared secrets, and telemetry objects (accessed only through spaces-router) -- **`spaces-apollo`**: Hosts the Query API, connects to PostgreSQL database populated by `apollo-syncer` pods - - -**Control Plane Components (per control plane):** -- **`mxp-controller`**: Handles provisioning tasks, serves webhooks, installs UXP and `XGQL` -- **`XGQL`**: GraphQL API powering console views -- **`kube-state-metrics`**: Collects usage metrics for billing (updated by `mxp-controller` when CRDs change) -- **`vector`**: Works with `kube-state-metrics` to send usage data to external storage for billing -- **`apollo syncer`**: Syncs `etcd` data into PostgreSQL for the Query API - - -### `up ctx` workflow - - - up ctx workflow diagram - - -### Access a control plane API server via kubectl - - - kubectl workflow diagram - - -### Query API/Apollo - - - query API workflow diagram - - -## See also - -* [Upbound Spaces deployment requirements][deployment] -* [Upbound `etcd` scaling resources][scaling] - -[up-ctx-workflow]: /img/up-ctx-workflow.png -[kubectl]: /img/kubectl-workflow.png -[query-api]: /img/query-api-workflow.png -[spaces-workflow]: /img/up-basic-flow.png -[rds]: https://aws.amazon.com/rds/postgresql/ -[gke-sql]: https://cloud.google.com/kubernetes-engine/docs/tutorials/stateful-workloads/postgresql -[aks-sql]: https://learn.microsoft.com/en-us/azure/aks/deploy-postgresql-ha?tabs=azuredisk -[deployment]: https://docs.upbound.io/spaces/howtos/self-hosted/deployment-reqs/ -[karpenter]: https://docs.aws.amazon.com/eks/latest/best-practices/karpenter.html -[gke-autoscaling]: https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-autoscaler -[aks-autoscaling]: https://learn.microsoft.com/en-us/azure/aks/cluster-autoscaler-overview -[scaling]: https://docs.upbound.io/deploy/self-hosted-spaces/scaling-resources#scaling-etcd-storage diff --git a/spaces_versioned_docs/version-v1.14/howtos/self-hosted/controllers.md 
b/spaces_versioned_docs/version-v1.14/howtos/self-hosted/controllers.md deleted file mode 100644 index 692740638..000000000 --- a/spaces_versioned_docs/version-v1.14/howtos/self-hosted/controllers.md +++ /dev/null @@ -1,389 +0,0 @@ ---- -title: Controllers -weight: 250 -description: A guide to how to wrap and deploy an Upbound controller into control planes on Upbound. ---- - -:::important -This feature is in private preview for select customers in Upbound Spaces. If you're interested in this feature, please [contact us](https://www.upbound.io/contact-us). -::: - -Upbound's _Controllers_ feature lets you build and deploy control plane software from the Kubernetes ecosystem. With the _Controllers_ feature, you're not limited to just managing resource types defined by Crossplane. Now you can create resources from _CustomResourceDefinitions_ defined by other Kubernetes ecosystem tooling. - -This guide explains how to bundle and deploy control plane software from the Kubernetes ecosystem on a control plane in Upbound. - -## Benefits - -The Controllers feature provides the following benefits: - -* Deploy control plane software from the Kubernetes ecosystem. -* Use your control plane's package manager to handle the lifecycle of the control plane software and define dependencies between package. -* Build powerful compositions that combine both Crossplane and Kubernetes _CustomResources_. - -## How it works - -A _Controller_ is a package type that bundles control plane software from the Kubernetes ecosystem. Examples of such software includes: - -- Kubernetes policy engines -- CI/CD tooling -- Your own private custom controllers defined by your organization - -You build a _Controller_ package by wrapping a helm chart along with its requisite _CustomResourceDefinitions_. Your _Controller_ package gets pushed to an OCI registry, and from there you can apply it to a control plane like you would any other Crossplane package. Your control plane's package manager is responsible for managing the lifecycle of the software once applied. - -## Prerequisites - -Enable the Controllers feature in the Space you plan to run your control plane in: - -- Cloud Spaces: Not available yet -- Connected Spaces: Space administrator must enable this feature -- Disconnected Spaces: Space administrator must enable this feature - -Packaging a _Controller_ requires [up CLI][cli] `v0.39.0` or later. - - - -## Build a _Controller_ package - - - -_Controllers_ are a package type that get administered by your control plane's package manager. - -### Prepare the package - -To define a _Controller_, you need a Helm chart. This guide assumes the control plane software you want to build into a _Controller_ already has a Helm chart available. - -Start by making a working directory to assemble the necessary parts: - -```ini -mkdir controller-package -cd controller-package -``` - -Inside the working directory, pull the Helm chart: - -```shell -export CHART_REPOSITORY= -export CHART_NAME= -export CHART_VERSION= - -helm pull $CHART_NAME --repo $CHART_REPOSITORY --version $CHART_VERSION -``` - -Be sure to update the Helm chart repository, name, and version with your own. 
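For instance, with hypothetical coordinates for a policy-engine chart (replace these with your own chart's repository, name, and version):

```bash
# Hypothetical chart coordinates; substitute your own
export CHART_REPOSITORY=https://charts.example.com/stable
export CHART_NAME=my-policy-engine
export CHART_VERSION=1.2.3

helm pull $CHART_NAME --repo $CHART_REPOSITORY --version $CHART_VERSION
```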
- -Move the Helm chart into its own folder: - -```ini -mkdir helm -mv $CHART_NAME-$CHART_VERSION.tgz helm/chart.tgz -``` - -Unpack the CRDs from the Helm chart into their own directory: - -```shell -export RELEASE_NAME= -export RELEASE_NAMESPACE= - -mkdir crds -helm template $RELEASE_NAME helm/chart.tgz -n $RELEASE_NAMESPACE --include-crds | \ - yq e 'select(.kind == "CustomResourceDefinition")' - | \ - yq -s '("crds/" + .metadata.name + ".yaml")' - -``` -Be sure to update the Helm release name, and namespace with your own. - -:::info -The instructions above assume your CRDs get deployed as part of your Helm chart. If they're deployed another way, you need to manually copy your CRDs instead. -::: - -Create a `crossplane.yaml` with your controller metadata: - -```yaml -cat < crossplane.yaml -apiVersion: meta.pkg.upbound.io/v1alpha1 -kind: Controller -metadata: - annotations: - friendly-name.meta.crossplane.io: Controller - meta.crossplane.io/description: | - A brief description of what the controller does. - meta.crossplane.io/license: Apache-2.0 - meta.crossplane.io/maintainer: - meta.crossplane.io/readme: | - An explanation of your controller. - meta.crossplane.io/source: - name: -spec: - packagingType: Helm - helm: - releaseName: - releaseNamespace: - # Value overrides for the helm release can be provided below. - # values: - # foo: bar -EOF -``` - -Your controller's file structure should look like this: - -```ini -. -├── crds -│ ├── your-crd.yaml -│ ├── second-crd.yaml -│ └── another-crd.yaml -├── crossplane.yaml -└── helm - └── chart.tgz -``` - -### Package and push the _Controller_ - -At the root of your controller's working directory, build the contents into an xpkg: - -```ini -up xpkg build -``` - -This causes an xpkg to get saved to your current directory with a name like `controller-f7091386b4c0.xpkg`. - -Push the package to your desired OCI registry: - -```shell -export UPBOUND_ACCOUNT= -export CONTROLLER_NAME= -export CONTROLLER_VERSION= -export XPKG_FILENAME= - -up xpkg push xpkg.upbound.io/$UPBOUND_ACCOUNT/$CONTROLLER_NAME:$CONTROLLER_VERSION -f $XPKG_FILENAME -``` - - - -## Deploy a _Controller_ package - - - -:::important -_Controllers_ are only installable on control planes running Crossplane `v1.19.0` or later. -::: - -Set your kubecontext to the desired control plane in Upbound. Change the package path to the OCI registry you pushed it to. Then, deploy the _Controller_ directly: - -```shell -export CONTROLLER_NAME= -export CONTROLLER_VERSION= - -cat < crossplane.yaml -apiVersion: meta.pkg.upbound.io/v1alpha1 -kind: Controller -metadata: - annotations: - friendly-name.meta.crossplane.io: Controller ArgoCD - meta.crossplane.io/description: | - The ArgoCD Controller enables continuous delivery and declarative configuration - management for Kubernetes applications using GitOps principles. - meta.crossplane.io/license: Apache-2.0 - meta.crossplane.io/maintainer: Upbound Maintainers - meta.crossplane.io/readme: | - ArgoCD is a declarative GitOps continuous delivery tool for Kubernetes that - follows the GitOps methodology to manage infrastructure and application - configurations. - meta.crossplane.io/source: https://github.com/argoproj/argo-cd - name: argocd -spec: - packagingType: Helm - helm: - releaseName: argo-cd - releaseNamespace: argo-system - # values: - # foo: bar -EOF -``` - -Your controller's file structure should look like this: - -```ini -. 
-├── crds -│ ├── applications.argoproj.io.yaml -│ ├── applicationsets.argoproj.io.yaml -│ └── appprojects.argoproj.io.yaml -├── crossplane.yaml -└── helm - └── chart.tgz -``` - -### Package and push controller-argocd - -At the root of your controller's working directory, build the contents into an xpkg: - -```ini -up xpkg build -``` - -This causes an xpkg to get saved to your current directory with a name like `argocd-f7091386b4c0.xpkg`. - -Push the package to your desired OCI registry: - -```shell -export UPBOUND_ACCOUNT= -export CONTROLLER_NAME=controller-argocd -export CONTROLLER_VERSION=v7.8.8 -export XPKG_FILENAME= - -up xpkg push --create xpkg.upbound.io/$UPBOUND_ACCOUNT/$CONTROLLER_NAME:$CONTROLLER_VERSION -f $XPKG_FILENAME -``` - -### Deploy controller-argocd to a control plane - -Set your kubecontext to the desired control plane in Upbound. Change the package path to the OCI registry you pushed it to. Then, deploy the _Controller_ directly: - -```ini -cat < - -## Frequently asked questions - -
**Can I package any software, or are there prerequisites to be a Controller?**

We define a *Controller* as software that has at least one CustomResourceDefinition (CRD) and a Kubernetes controller for that CRD. This is the minimum requirement to be a *Controller*, and checks at packaging time enforce it.
**How can I package my software as a Controller?**

Currently, Helm charts are the only supported underlying package format for *Controllers*: as long as you have a Helm chart, you can package your software as a *Controller*. If you don't have a Helm chart, you can't package the software. Support for other packaging formats, like Kustomize, may come in the future.
**Can I package Crossplane XRDs/Compositions as a Helm chart to deploy as a Controller?**

This isn't recommended. For packaging Crossplane XRDs and Compositions, use the `Configuration` package format. A Helm chart containing only Crossplane XRDs and Compositions doesn't qualify as a *Controller*.
**How can I override the Helm values when deploying a Controller?**

You can override Helm values at two levels:

- At packaging time, in the package manifest file (see the sketch below).
- At runtime, using a `ControllerRuntimeConfig` resource (similar to Crossplane's `DeploymentRuntimeConfig`).
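For the packaging-time option, overrides live under `spec.helm.values` in the package's `crossplane.yaml`, shown commented out in the packaging example earlier in this guide. A short sketch, where the override values themselves are hypothetical:

```yaml
# crossplane.yaml excerpt; the values below are hypothetical overrides
spec:
  packagingType: Helm
  helm:
    releaseName: argo-cd
    releaseNamespace: argo-system
    values:
      server:
        replicas: 2
```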
- -
How can I configure the Helm release name and namespace for the controller?

It's not currently possible to configure these at runtime. The package author sets the release name and namespace during packaging, so they're hardcoded inside the package. Unlike a regular application deployed from a Helm chart, a *Controller* can only be deployed once in a given control plane, so relying on predefined release names and namespaces should be safe. We may consider exposing these in `ControllerRuntimeConfig` later, but we'd like to keep them opinionated unless there are strong reasons to do otherwise.
- -
Can I deploy more than one instance of a Controller package?

No, this is not possible. Remember, a *Controller* package introduces CRDs, which are cluster-scoped objects. Just as you can't deploy more than one instance of the same Crossplane Provider package today, you can't deploy more than one instance of a *Controller*.
- -
Do I need a specific Crossplane version to run Controllers?

Yes, *Controllers* require Crossplane v1.19.0 or later, because of changes in the Crossplane codebase that support third-party package formats in dependencies.

Spaces `v1.12.0` supports Crossplane `v1.19` in the *Rapid* release channel.
- -
-Can I deploy Controllers outside of an Upbound control plane? With UXP? - -No, *Controllers* are a proprietary package format and are only available for control planes running in Spaces hosting environments in Upbound. - -
- - -[cli]: /manuals/uxp/overview - diff --git a/spaces_versioned_docs/version-v1.14/howtos/self-hosted/ctp-audit-logs.md b/spaces_versioned_docs/version-v1.14/howtos/self-hosted/ctp-audit-logs.md deleted file mode 100644 index 52f52c776..000000000 --- a/spaces_versioned_docs/version-v1.14/howtos/self-hosted/ctp-audit-logs.md +++ /dev/null @@ -1,549 +0,0 @@ ---- -title: Control plane audit logging ---- - -This guide explains how to enable and configure audit logging for control planes -in Self-Hosted Upbound Spaces. - -Starting in Spaces `v1.14.0`, each control plane contains an API server that -supports audit log collection. You can use audit logging to track creation, -updates, and deletions of Crossplane resources. Control plane audit logs -use observability features to collect audit logs with `SharedTelemetryConfig` and -send logs to an OpenTelemetry (`OTEL`) collector. - -:::info API Version Information -This guide applies to **Spaces v1.14.0 and later** (Self-Hosted only). Audit logging is not available in earlier versions. - -For API specifications on observability resources, see the [Spaces API Reference - v1.14](../../../../../1.14/reference/apis/spaces-api/). details on observability evolution across versions, see the . -::: - -## Prerequisites - -Before you begin, make sure you have: - -* Spaces `v1.14.0` or greater -* Admin access to your Spaces host cluster -* `kubectl` configured to access the host cluster -* `helm` installed -* `yq` installed -* `up` CLI installed and logged in to your organization - -## Enable observability - - -Observability graduated to General Available in `v1.14.0` but is disabled by -default. - - - - - -### Before `v1.14` -To enable the GA Observability feature, upgrade your Spaces installation to `v1.14.0` -or later and update your installation setting to the new flag: - -```diff -helm upgrade spaces upbound/spaces -n upbound-system \ -- --set "features.alpha.observability.enabled=true" -+ --set "observability.enabled=true" -``` - - - -### After `v1.14` - -To enable the GA Observability feature for `v1.14.0` and later, pass the feature -flag: - -```sh -helm upgrade spaces upbound/spaces -n upbound-system \ - --set "observability.enabled=true" - -``` - - - - -To confirm Observability is enabled, run the `helm get values` command: - - -```shell -helm get values --namespace upbound-system spaces | yq .observability -``` - -Your output should return: - -```shell-noCopy - enabled: true -``` - -## Install an observability backend - -:::note -If you already have an observability backend in your environment, skip to the -next section. -::: - - -For this guide, you'll use Grafana's `docker-otel-lgtm` bundle to validate audit log -generation. production environments, configure a dedicated observability -backend like Datadog, Splunk, or an enterprise-grade Grafana stack. - - - -First, make sure your `kubectl` context points to your Spaces host cluster: - -```shell -kubectl config current-context -``` - -The output should return your cluster name. - -Next, install `docker-otel-lgtm` as a deployment using port-forwarding to -connect to Grafana. 
Create a manifest file and paste the -following configuration: - -```yaml title="otel-lgtm.yaml" -apiVersion: v1 -kind: Namespace -metadata: - name: observability ---- -apiVersion: v1 -kind: Service -metadata: - labels: - app: otel-lgtm - name: otel-lgtm - namespace: observability -spec: - ports: - - name: grpc - port: 4317 - protocol: TCP - targetPort: 4317 - - name: http - port: 4318 - protocol: TCP - targetPort: 4318 - - name: grafana - port: 3000 - protocol: TCP - targetPort: 3000 - selector: - app: otel-lgtm ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: otel-lgtm - labels: - app: otel-lgtm - namespace: observability -spec: - replicas: 1 - selector: - matchLabels: - app: otel-lgtm - template: - metadata: - labels: - app: otel-lgtm - spec: - containers: - - name: otel-lgtm - image: grafana/otel-lgtm - ports: - - containerPort: 4317 - - containerPort: 4318 - - containerPort: 3000 -``` - -Next, apply the manifest: - -```shell -kubectl apply --filename otel-lgtm.yaml -``` - -Your output should return the resources: - -```shell -namespace/observability created - service/otel-lgtm created - deployment.apps/otel-lgtm created -``` - -To verify your resources deployed, use `kubectl get` to display resources with -an `ACTIVE` or `READY` status. - -Next, forward the Grafana port: - -```shell -kubectl port-forward svc/otel-lgtm --namespace observability 3000:3000 -``` - -Now you can access the Grafana UI at http://localhost:3000. - - -## Create an audit-enabled control plane - -To enable audit logging for a control plane, you need to label it so the -`SharedTelemetryConfig` can identify and apply audit settings. This section -creates a new control plane with the `audit-enabled: "true"` label. The -`audit-enabled: "true"` label marks this control plane for audit logging. The -`SharedTelemetryConfig` (created in the next section) finds control planes with -this label and enables audit logging on them. - -Create a new manifest file and paste the configuration below: - -
-```yaml title="ctp-audit.yaml" -apiVersion: v1 -kind: Namespace -metadata: - name: audit-test ---- -apiVersion: spaces.upbound.io/v1beta1 -kind: ControlPlane -metadata: - labels: - audit-enabled: "true" - name: ctp1 - namespace: audit-test -spec: - writeConnectionSecretToRef: - name: kubeconfig-ctp1 - namespace: audit-test -``` -
The `metadata.labels` section contains the `audit-enabled` setting.

Apply the manifest:

```shell
kubectl apply --filename ctp-audit.yaml
```

Confirm your control plane reaches the `READY` status:

```shell
kubectl get --filename ctp-audit.yaml
```

## Create a `SharedTelemetryConfig`

The `SharedTelemetryConfig` applies to all control plane objects in a namespace; it enables audit logging and routes logs to your `OTEL` endpoint.

Create a `SharedTelemetryConfig` manifest file and paste the configuration below:
-```yaml title="sharedtelemetryconfig.yaml" -apiVersion: observability.spaces.upbound.io/v1alpha1 -kind: SharedTelemetryConfig -metadata: - name: apiserver-audit - namespace: audit-test -spec: - apiServer: - audit: - enabled: true - exporters: - otlphttp: - endpoint: http://otel-lgtm.observability:4318 - exportPipeline: - logs: [otlphttp] - controlPlaneSelector: - labelSelectors: - - matchLabels: - audit-enabled: "true" -``` -
This configuration:

* Sets `apiServer.audit.enabled` to `true`
* Configures the `otlphttp` exporter to point to the `docker-otel-lgtm` service
* Uses `controlPlaneSelector` to match any control plane in the namespace with the `audit-enabled` label set to `true`

:::note
You can configure the `SharedTelemetryConfig` to select control planes in several ways. For more information on control plane selection, see the [control plane selection][ctp-selection] documentation.
:::

Apply the `SharedTelemetryConfig`:

```shell
kubectl apply --filename sharedtelemetryconfig.yaml
```

Confirm the configuration selected the control plane:

```shell
kubectl get --filename sharedtelemetryconfig.yaml
```

The output should return `SELECTED` as `1` and `VALIDATED` as `TRUE`.

For more detailed status information, use `kubectl get`:

```shell
kubectl get --filename sharedtelemetryconfig.yaml --output yaml | yq .status
```

## Generate and monitor audit events

You enabled telemetry on your new control plane and can now generate events to test the audit logging. This guide uses the `nop-provider` to simulate resource operations.

Switch your `up` context to the new control plane:

```shell
up ctx ///
```

Create a new Provider manifest:

```yaml title="provider-nop.yaml"
apiVersion: pkg.crossplane.io/v1
kind: Provider
metadata:
  name: crossplane-contrib-provider-nop
spec:
  package: xpkg.upbound.io/crossplane-contrib/provider-nop:v0.4.0
```

Apply the provider manifest:

```shell
kubectl apply --filename provider-nop.yaml
```

Verify the provider installed and returns a `HEALTHY` status of `TRUE`.

Apply an example resource to kick off event generation:

```shell
kubectl apply --filename https://raw.githubusercontent.com/crossplane-contrib/provider-nop/refs/heads/main/examples/nopresource.yaml
```

In your Grafana dashboard, navigate to **Drilldown** > **Logs** under the Grafana menu.

Filter for `controlplane-audit` log messages.

Create a query to find `create` events on `nopresources` by filtering:

* The `verb` field for `create` events
* The `objectRef_resource` field to match the Kind `nopresources`

Review the audit log results. The log stream displays:

* The client applying the create operation
* The resource kind
* Client details
* The response code

Expand the example below for an audit log entry:
- Audit log entry - -```json -{ - "level": "Metadata", - "auditID": "51bbe609-14ad-4874-be78-1289c10d506a", - "stage": "ResponseComplete", - "requestURI": "/apis/nop.crossplane.io/v1alpha1/nopresources?fieldManager=kubectl-client-side-apply&fieldValidation=Strict", - "verb": "create", - "user": { - "username": "kubernetes-admin", - "groups": ["system:masters", "system:authenticated"] - }, - "impersonatedUser": { - "username": "upbound:spaces:host:masterclient", - "groups": [ - "system:authenticated", - "upbound:controlplane:admin", - "upbound:spaces:host:system:masters" - ] - }, - "sourceIPs": ["10.244.0.135", "127.0.0.1"], - "userAgent": "kubectl/v1.32.2 (darwin/arm64) kubernetes/67a30c0", - "objectRef": { - "resource": "nopresources", - "name": "example", - "apiGroup": "nop.crossplane.io", - "apiVersion": "v1alpha1" - }, - "responseStatus": { "metadata": {}, "code": 201 }, - "requestReceivedTimestamp": "2025-09-19T23:03:24.540067Z", - "stageTimestamp": "2025-09-19T23:03:24.557583Z", - "annotations": { - "authorization.k8s.io/decision": "allow", - "authorization.k8s.io/reason": "RBAC: allowed by ClusterRoleBinding \"controlplane-admin\" of ClusterRole \"controlplane-admin\" to Group \"upbound:controlplane:admin\"" - } - } -``` -
- -## Customize the audit policy - -Spaces `v1.14.0` includes a default audit policy. You can customize this policy -by creating a configuration file and passing the values to -`observability.collectors.apiServer.auditPolicy` in the helm values file. - -An example custom audit policy: - -```yaml -observability: - controlPlanes: - apiServer: - auditPolicy: | - apiVersion: audit.k8s.io/v1 - kind: Policy - rules: - # ============================================================================ - # RULE 1: Exclude health check and version endpoints - # ============================================================================ - - level: None - nonResourceURLs: - - '/healthz*' - - '/readyz*' - - /version - # ============================================================================ - # RULE 2: ConfigMaps - Write operations only - # ============================================================================ - - level: Metadata - resources: - - group: "" - resources: - - configmaps - verbs: - - create - - update - - patch - - delete - omitStages: - - RequestReceived - - ResponseStarted - # ============================================================================ - # RULE 3: Secrets - ALL operations - # ============================================================================ - - level: Metadata - resources: - - group: "" - resources: - - secrets - verbs: - - get - - list - - watch - - create - - update - - patch - - delete - omitStages: - - RequestReceived - - ResponseStarted - # ============================================================================ - # RULE 4: Global exclusion of read-only operations - # ============================================================================ - - level: None - verbs: - - get - - list - - watch - # ========================================================================== - # RULE 5: Exclude standard Kubernetes resources from write operation logging - # ========================================================================== - - level: None - resources: - - group: "" - - group: "apps" - - group: "networking.k8s.io" - - group: "policy" - - group: "rbac.authorization.k8s.io" - - group: "storage.k8s.io" - - group: "batch" - - group: "autoscaling" - - group: "metrics.k8s.io" - - group: "node.k8s.io" - - group: "scheduling.k8s.io" - - group: "coordination.k8s.io" - - group: "discovery.k8s.io" - - group: "events.k8s.io" - - group: "flowcontrol.apiserver.k8s.io" - - group: "internal.apiserver.k8s.io" - - group: "authentication.k8s.io" - - group: "authorization.k8s.io" - - group: "admissionregistration.k8s.io" - verbs: - - create - - update - - patch - - delete - # ============================================================================ - # RULE 6: Catch-all for ALL custom resources and any missed resources - # ============================================================================ - - level: Metadata - verbs: - - create - - update - - patch - - delete - omitStages: - - RequestReceived - - ResponseStarted - # ============================================================================ - # RULE 7: Final catch-all - exclude everything else - # ============================================================================ - - level: None - omitStages: - - RequestReceived - - ResponseStarted -``` -You can apply this policy during Spaces installation or upgrade using the helm values file. - -Audit policies use rules evaluated in order from top to bottom where the first -matching rule applies. 
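For example, in the two-rule fragment below the first rule matches requests for Secrets and wins, so those requests are never logged, even though the catch-all second rule would otherwise capture them:

```yaml
rules:
  # Matches first: drop all audit events for Secrets
  - level: None
    resources:
      - group: ""
        resources:
          - secrets
  # Never reached for Secrets; logs everything else at Metadata level
  - level: Metadata
```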
Control plane audit policies follow Kubernetes conventions and use the -following logging levels: - -* **None** - Don't log events matching this rule -* **Metadata** - Log request metadata (user, timestamp, resource, verb) but not request or response bodies -* **Request** - Log metadata and request body but not response body -* **RequestResponse** - Log metadata, request body, and response body - -For more information, review the Kubernetes [Auditing] documentation. - -## Disable audit logging - -You can disable audit logging on a control plane by removing it from the -`SharedTelemetryConfig` selector or by deleting the `SharedTelemetryConfig`. - -### Disable for specific control planes - -Remove the `audit-enabled` label from control planes that should stop sending audit logs: - -```bash -kubectl label controlplane --namespace audit-enabled- -``` - -The `SharedTelemetryConfig` no longer selects this control plane, and audit log collection stops. - -### Disable for all control planes - -Delete the `SharedTelemetryConfig` to stop audit logging for all control planes it manages: - -```bash -kubectl delete sharedtelemetryconfig --namespace -``` - -[ctp-selection]: /spaces/howtos/observability/#control-plane-selection -[Auditing]: https://kubernetes.io/docs/tasks/debug/debug-cluster/audit/ diff --git a/spaces_versioned_docs/version-v1.14/howtos/self-hosted/declarative-ctps.md b/spaces_versioned_docs/version-v1.14/howtos/self-hosted/declarative-ctps.md deleted file mode 100644 index 2c3e5331b..000000000 --- a/spaces_versioned_docs/version-v1.14/howtos/self-hosted/declarative-ctps.md +++ /dev/null @@ -1,110 +0,0 @@ ---- -title: Declaratively create control planes -sidebar_position: 99 -description: A tutorial to configure a Space with Argo to declaratively create and - manage control planes ---- - -In this tutorial, you learn how to configure [Argo CD][argo-cd] to communicate with a self-hosted Space. This flow allows you to declaratively create and manage control planes from Git. Argo CD is a continuous delivery tool for Kubernetes that you can use to drive GitOps flows for your control plane infrastructure. - -:::info API Version Information -This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. - -For API specifications on ControlPlane resources and their declarative creation, see the [Spaces API Reference](../../../../../reference/apis/spaces-api/). version compatibility details, see the . -::: - -## Prerequisites - -To complete this tutorial, you need the following: - -- Have already deployed an Upbound Space. -- Have already deployed an instance of Argo CD on a Kubernetes cluster. - -## Connect your Space to Argo CD - -Fetch the kubeconfig for the Space cluster, the Kubernetes cluster where you installed the Upbound Spaces software. You must add the Space cluster as a context to Argo. - -```ini -export SPACES_CLUSTER_SERVER="https://url" -export SPACES_CLUSTER_NAME="cluster" -``` - -Switch contexts to the Kubernetes cluster where you've installed Argo. Create a secret on the Argo cluster whose data contains the connection details of the Space cluster. - -:::important -Make sure the following commands are executed against your **Argo** cluster, not your Space cluster. -::: - -Run the following command in a terminal: - -```yaml -cat < -When you install a Crossplane provider on a control plane, memory gets consumed -according to the number of custom resources it defines. 
Upbound [Official Provider families][official-provider-families] provide higher fidelity control -to platform teams to install providers for only the resources they need, -reducing the bloat of needlessly installing unused custom resources. Still, you -must factor provider memory usage into your calculations to ensure you've -rightsized the memory available in your Spaces cluster. - - -:::important -Be careful not to conflate `managed resource` with `custom resource definition`. -The former is an "instance" of an external resource in Crossplane, while the -latter defines the API schema of that resource. -::: - -It's estimated that each custom resource definition consumes ~3 MB of memory. -The calculation is: - -```bash -number_of_managed_resources_defined_in_provider x 3 MB = memory_required -``` - -For example, if you plan to use [provider-aws-ec2][provider-aws-ec2], [provider-aws-s3][provider-aws-s3], and [provider-aws-iam][provider-aws-iam], the resulting calculation is: - -```bash -provider-aws-ec2: 98 x 3 MB = 294 MB -provider-aws-s3: 23 x 3 MB = 69 MB -provider-aws-iam 22 x 3 MB = 66 MB ---- -total memory: 429 MB -``` - -In this scenario, you should budget ~430 MB of memory for provider usage on this control plane. - -:::tip -Do this calculation for each provider you plan to install on your control plane. -Then do this calculation for each control plane you plan to run in your Space. -::: - - -#### Total memory usage - -Add the memory usage from the previous sections. Given the preceding examples, -they result in a recommendation to budget ~1 GB memory for each control plane -you plan to run in the Space. - -:::important - -The 1 GB recommendation is an example. -You should input your own provider requirements to arrive at a final number for -your own deployment. - -::: - -### CPU considerations - -#### Managed resource CPU usage - -The number of managed resources under management by a control plane is the largest contributing factor for CPU usage in a Space. CPU usage scales linearly according to the number of managed resources under management by your control plane. In Upbound's testing, CPU usage requirements _does_ vary from provider to provider. Using the Upbound Official Provider families as a baseline: - - -| Provider | MR create operation (CPU core seconds) | MR update or reconciliation operation (CPU core seconds) | -| ---- | ---- | ---- | -| provider-family-aws | 10 | 2 to 3 | -| provider-family-gcp | 7 | 1.5 | -| provider-family-azure | 7 to 10 | 1.5 to 3 | - - -When resources are in a non-ready state, Crossplane providers reconcile often (as fast as every 15 seconds). Once a resource reaches `READY`, each Crossplane provider defaults to a 10 minute poll interval. Given this, a 16-core machine has `16x10x60 = 9600` CPU core seconds available. Interpreting this table: - -- A single control plane that needs to create 100 AWS MRs concurrently would consume 1000 CPU core seconds, or about 1.5 cores. -- A single control plane that continuously reconciles 100 AWS MRs once they've reached a `READY` state would consume 300 CPU core seconds, or a little under half a core. - -Since `provider-family-aws` has the highest recorded numbers for CPU time required, you can use that as an upper limit in your calculations. - -Using these calculations and extrapolating values, given a 16 core machine, it's recommended you don't exceed a single control plane managing 1000 MRs. Suppose you plan to run 10 control planes, each managing 1000 MRs. 
You want to make sure your node pool has capacity for 160 cores. If you are using a machine type that has 16 cores per machine, that would mean having a node pool of size 10. If you are using a machine type that has 32 cores per machine, that would mean having a node pool of size 5. - -#### Cloud API latency - -Oftentimes, you are using Crossplane providers to talk to external cloud APIs. Those external cloud APIs often have global API rate limits (examples: [Azure limits][azure-limits], [AWS EC2 limits][aws-ec2-limits]). - -For Crossplane providers built on [Upjet][upjet] (such as Upbound Official Provider families), these providers use Terraform under the covers. They expose some knobs (such as `--max-reconcile-rate`) you can use to tweak reconciliation rates. - -### Resource buffers - -The guidance in the preceding sections explains how to calculate CPU and memory usage requirements for: - -- a set of control planes in a Space -- tuned to the number of providers you plan to use -- according to the number of managed resource instances you plan to have managed by your control planes - -Upbound recommends budgeting an extra buffer of 20% to your resource capacity calculations. The numbers shared in the preceding sections don't account for peaks or surges since they're based off average measurements. Upbound recommends budgeting this buffer to account for these things. - -## Deploying more than one Space - -You are welcome to deploy more than one Space. You just need to make sure you have a 1:1 mapping of Space to Kubernetes clusters. Spaces are by their nature constrained to a single Kubernetes Cluster, which are regional entities. If you want to offer control planes in multiple cloud environments or multiple public clouds entirely, these are justifications for deploying >1 Spaces. - -## Cert-manager - -A Spaces deployment uses the [Certificate Custom Resource] from cert-manager to -provision certificates within the Space. This establishes a nice API boundary -between what your platform may need and the Certificate requirements of a -Space. - - -In the event you would like more control over the issuing Certificate Authority -for your deployment or the deployment of cert-manager itself, this guide is for -you. - - -### Deploying - -An Upbound Space deployment doesn't have any special requirements for the -cert-manager deployment itself. The only expectation is that cert-manager and -the corresponding Custom Resources exist in the cluster. - -You should be free to install cert-manager in the cluster in any way that makes -sense for your organization. You can find some [installation ideas] in the -cert-manager docs. - -### Issuers - -A default Upbound Space install includes a [ClusterIssuer]. This `ClusterIssuer` -is a `selfSigned` issuer that other certificates are minted from. You have a -couple of options available to you for changing the default deployment of the -Issuer: -1. Changing the issuer name. -2. Providing your own ClusterIssuer. - - -#### Changing the issuer name - -The `ClusterIssuer` name is controlled by the `certificates.space.clusterIssuer` -Helm property. You can adjust this during installation by providing the -following parameter (assuming your new name is 'SpaceClusterIssuer'): -```shell ---set "certificates.space.clusterIssuer=SpaceClusterIssuer" -``` - - - -#### Providing your own ClusterIssuer - -To provide your own `ClusterIssuer`, you need to first setup your own -`ClusterIssuer` in the cluster. 
The cert-manager docs have a variety of options -for providing your own. See the [Issuer Configuration] docs for more details. - -Once you have your own `ClusterIssuer` set up in the cluster, you need to turn -off the deployment of the `ClusterIssuer` included in the Spaces deployment. -To do that, provide the following parameter during installation: -```shell ---set "certificates.provision=false" -``` - -###### Considerations -If your `ClusterIssuer` has a name that's different from the default name that -the Spaces installation expects ('spaces-selfsigned'), you need to also specify -your `ClusterIssuer` name during install using: -```shell ---set "certificates.space.clusterIssuer=" -``` - -## Ingress - -To route requests from an external client (kubectl, ArgoCD, etc) to a -control plane, a Spaces deployment includes a default [Ingress] manifest. In -order to ease getting started scenarios, the current `Ingress` includes -configurations (properties and annotations) that assume that you installed the -commonly used [ingress-nginx ingress controller] in the cluster. This section -walks you through using a different `Ingress`, if that's something that your -organization needs. - -### Default manifest - -An example of what the current `Ingress` manifest included in a Spaces install -is below: - -```yaml -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: mxe-router-ingress - namespace: upbound-system - annotations: - nginx.ingress.kubernetes.io/use-regex: "true" - nginx.ingress.kubernetes.io/ssl-redirect: "false" - nginx.ingress.kubernetes.io/proxy-send-timeout: "3600" - nginx.ingress.kubernetes.io/proxy-read-timeout: "3600" - nginx.ingress.kubernetes.io/proxy-request-buffering: "off" - nginx.ingress.kubernetes.io/proxy-body-size: "0" - nginx.ingress.kubernetes.io/proxy-http-version: "1.1" - nginx.ingress.kubernetes.io/backend-protocol: "HTTPS" - nginx.ingress.kubernetes.io/proxy-ssl-verify: "on" - nginx.ingress.kubernetes.io/proxy-ssl-secret: "upbound-system/mxp-hostcluster-certs" - nginx.ingress.kubernetes.io/proxy-ssl-name: spaces-router - nginx.ingress.kubernetes.io/configuration-snippet: | - more_set_headers "X-Request-Id: $req_id"; - more_set_headers "Request-Id: $req_id"; - more_set_headers "Audit-Id: $req_id"; -spec: - ingressClassName: nginx - tls: - - hosts: - - {{ .Values.ingress.host }} - secretName: mxe-router-tls - rules: - - host: {{ .Values.ingress.host }} - http: - paths: - - path: "/v1/controlPlanes" - pathType: Prefix - backend: - service: - name: spaces-router - port: - name: http -``` - -The notable pieces are: -1. Namespace - - - -This property represents the namespace that the spaces-router is deployed to. -In most cases this is `upbound-system`. - - - -2. proxy-ssl-* annotations - -The spaces-router pod terminates TLS using certificates located in the -mxp-hostcluster-certs `Secret` located in the `upbound-system` `Namespace`. - -3. proxy-* annotations - -Requests coming into the ingress-controller can be variable depending on what -the client is requesting. For example, `kubectl get crds` has different -requirements for the connection compared to a 'watch', for example -`kubectl get pods -w`. The ingress-controller is configured to be able to -account for either scenario. - - -4. configuration-snippets - -These commands add headers to the incoming requests that help with telemetry -and diagnosing problems within the system. - -5. 
Rules

Requests coming into the control planes use a `/v1/controlPlanes` prefix and need to be routed to the spaces-router.

### Using a different ingress manifest

Operators can choose to use an `Ingress` manifest and ingress controller that makes the most sense for their organization. To turn off deploying the default `Ingress` manifest, provide the following parameter during installation:

```shell
--set "ingress.provision=false"
```

#### Considerations

Operators need to take the following considerations into account when disabling the default `Ingress` deployment:

1. Ensure the custom `Ingress` manifest is placed in the same namespace as the `spaces-router` pod.
2. Ensure that the ingress is configured to use `spaces-router` as a secure backend and that the secret used is the mxp-hostcluster-certs secret.
3. Ensure that the ingress is configured to handle long-lived connections.
4. Ensure that the routing rule sends requests prefixed with `/v1/controlPlanes` to the `spaces-router` using the `http` port.

[cert-manager]: https://cert-manager.io/
[Certificate Custom Resource]: https://cert-manager.io/docs/usage/certificate/
[ClusterIssuer]: https://cert-manager.io/docs/concepts/issuer/
[ingress-nginx ingress controller]: https://kubernetes.github.io/ingress-nginx/deploy/
[installation ideas]: https://cert-manager.io/docs/installation/
[Ingress]: https://kubernetes.io/docs/concepts/services-networking/ingress/
[Issuer Configuration]: https://cert-manager.io/docs/configuration/
[official-provider-families]: /manuals/packages/providers/provider-families
[aws-eks]: https://aws.amazon.com/eks/
[google-cloud-gke]: https://cloud.google.com/kubernetes-engine
[microsoft-aks]: https://azure.microsoft.com/en-us/products/kubernetes-service
[upbound-account]: https://www.upbound.io/register/?utm_source=docs&utm_medium=cta&utm_campaign=docs_spaces
[provider-aws-ec2]: https://marketplace.upbound.io/providers/upbound/provider-aws-ec2
[provider-aws-s3]: https://marketplace.upbound.io/providers/upbound/provider-aws-s3
[provider-aws-iam]: https://marketplace.upbound.io/providers/upbound/provider-aws-iam
[azure-limits]: https://learn.microsoft.com/en-us/azure/azure-resource-manager/management/request-limits-and-throttling
[aws-ec2-limits]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/throttling.html#throttling-limits-rate-based
[upjet]: https://github.com/upbound/upjet

diff --git a/spaces_versioned_docs/version-v1.14/howtos/self-hosted/dr.md b/spaces_versioned_docs/version-v1.14/howtos/self-hosted/dr.md
deleted file mode 100644
index 67ecbfecf..000000000
--- a/spaces_versioned_docs/version-v1.14/howtos/self-hosted/dr.md
+++ /dev/null
@@ -1,412 +0,0 @@
---
title: Disaster Recovery
sidebar_position: 13
description: Configure Space-wide backups for disaster recovery.
---

:::info API Version Information
This guide applies to **Spaces v1.9.0 and later** (Self-Hosted only). Space Backups is an alpha feature in earlier versions and is enabled by default starting in v1.14.0.

- **v1.9.0-v1.13.x**: Alpha feature (requires manual enablement)
- **v1.14.0+**: GA (enabled by default)

For version-specific features and backup resources, see the . For control-plane backups, see [Backup and Restore](../backup-and-restore.md).
:::

:::important
For Connected and Disconnected Spaces, this feature requires Spaces `v1.9.0` and, starting with `v1.14.0`, Spaces enables it by default.
- -To enable it on versions earlier than `v1.14.0`, set features.alpha.spaceBackup.enabled=true when you install Spaces. - -```bash -up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \ - ... - --set "features.alpha.spaceBackup.enabled=true" -``` -::: - -Upbound's _Space Backups_ is a built-in Space-wide backup and restore feature. This guide explains how to configure Space Backups and how to restore from one of them in case of disaster recovery. - -This feature is meant for Space administrators. Group or Control Plane users can leverage [Shared Backups][shared-backups] to backup and restore their ControlPlanes. - -## Benefits -The Space Backups feature provides the following benefits: - -* Automatic backups for all resources in a Space and all resources in control planes, without any operational overhead. -* Backup schedules. -* Selectors to specify resources to backup. - -## Prerequisites - -Enabled the Space Backups feature in the Space: - -- Cloud Spaces: Not accessible to users. -- Connected Spaces: Space administrator must enable this feature. -- Disconnected Spaces: Space administrator must enable this feature. - -## Configure a Space Backup Config - -[SpaceBackupConfig][spacebackupconfig] is a cluster-scoped resource. This resource configures the storage details and provider. Whenever a backup executes (either by schedule or manually initiated), it references a SpaceBackupConfig to tell it where store the snapshot. - - -### Backup config provider - - -The `spec.objectStorage.provider` and `spec.objectStorage.config` fields configures: - -* The object storage provider -* The path to the provider -* The credentials needed to communicate with the provider - -You can only set one provider. Upbound currently supports AWS, Azure, and GCP as providers. - - -`spec.objectStorage.config` is a freeform map of configuration options for the object storage provider. See [Thanos object storage][thanos-object-storage] for more information on the formats for each supported cloud provider. `spec.bucket` and `spec.provider` overrides the required values in the config. - - -#### AWS as a storage provider - -This example demonstrates how to use AWS as a storage provider for your backups: - -```yaml -apiVersion: admin.spaces.upbound.io/v1alpha1 -kind: SpaceBackupConfig -metadata: - name: default -spec: - objectStorage: - provider: AWS - bucket: spaces-backup-bucket - config: - endpoint: s3.eu-west-2.amazonaws.com - region: eu-west-2 - credentials: - source: Secret - secretRef: - name: bucket-creds - namespace: upbound-system - key: creds -``` - -This example assumes you've already created an S3 bucket called -`spaces-backup-bucket` in the `eu-west-2` AWS region. To access the bucket, -define the account credentials as a Secret in the specified Namespace -(`upbound-system` in this example). - -#### Azure as a storage provider - -This example demonstrates how to use Azure as a storage provider for your backups: - -```yaml -apiVersion: admin.spaces.upbound.io/v1alpha1 -kind: SpaceBackupConfig -metadata: - name: default - namespace: default -spec: - objectStorage: - provider: Azure - bucket: upbound-backups - config: - storage_account: upbackupstore - container: upbound-backups - endpoint: blob.core.windows.net - credentials: - source: Secret - secretRef: - name: bucket-creds - namespace: upbound-system - key: creds -``` - - -This example assumes you've already created an Azure storage account called -`upbackupstore` and blob `upbound-backups`. 
To access the blob, -define the account credentials as a Secret in the specified Namespace -(`upbound-system` in this example). - - -#### GCP as a storage provider - -This example demonstrates how to use Google Cloud Storage as a storage provider for your backups: - -```yaml -apiVersion: admin.spaces.upbound.io/v1alpha1 -kind: SpaceBackupConfig -metadata: - name: default - namespace: default -spec: - objectStorage: - provider: GCP - bucket: spaces-backup-bucket - credentials: - source: Secret - secretRef: - name: bucket-creds - namespace: upbound-system - key: creds -``` - - -This example assumes you've already created a Cloud bucket called -"spaces-backup-bucket" and a service account with access to this bucket. Define the key file as a Secret in the specified Namespace -(`upbound-system` in this example). - - -## Configure a Space Backup Schedule - - -[SpaceBackupSchedule][spacebackupschedule] is a cluster-scoped resource. This resource defines a backup schedule for the whole Space. - -Below is an example of a Space Backup Schedule running every day. It backs up all groups having `environment: production` labels and all control planes in those groups having `backup: please` labels. - -```yaml -apiVersion: admin.spaces.upbound.io/v1alpha1 -kind: SpaceBackupSchedule -metadata: - name: daily-schedule -spec: - schedule: "@daily" - configRef: - kind: SpaceBackupConfig - name: default - match: - groups: - labelSelectors: - - matchLabels: - environment: production - controlPlanes: - labelSelectors: - - matchLabels: - backup: please -``` - -### Define a schedule - -The `spec.schedule` field is a [Cron-formatted][cron-formatted] string. Some common examples are below: - -| Entry | Description | -| ----------------- | ------------------------------------------------------------------------------------------------- | -| `@hourly` | Run once an hour. | -| `@daily` | Run once a day. | -| `@weekly` | Run once a week. | -| `0 0/4 * * *` | Run every 4 hours. | -| `0/15 * * * 1-5` | Run every fifteenth minute on Monday through Friday. | -| `@every 1h30m10s` | Run every 1 hour, 30 minutes, and 10 seconds. Hour is the largest measurement of time for @every. | - -### Suspend a schedule - -Use `spec.suspend` field to suspend the schedule. It creates no new backups, but allows running backups to complete. - -```yaml -apiVersion: admin.spaces.upbound.io/v1alpha1 -kind: SpaceBackupSchedule -metadata: - name: daily-schedule -spec: - suspend: true -... -``` - -### Garbage collect backups when the schedule gets deleted - -Set the `spec.useOwnerReferencesInBackup` to garbage collect associated `SpaceBackup` when a `SpaceBackupSchedule` gets deleted. If set to true, backups are garbage collected when the schedule gets deleted. - -### Set the time to live - -Set the `spec.ttl` field to define the time to live for the backup. After this time, the backup is eligible for garbage collection. If this field isn't set, the backup isn't garbage collected. - -The time to live is a duration, for example, `168h` for 7 days. - -```yaml -apiVersion: admin.spaces.upbound.io/v1alpha1 -kind: SpaceBackupSchedule -metadata: - name: daily-schedule -spec: - ttl: 168h # Backup is garbage collected after 7 days -... -``` - -## Selecting space resources to backup - -By default, a SpaceBackup selects all groups and, for each of them, all control planes, secrets, and any other group-scoped resources. - -By setting `spec.match`, you can include only specific groups, control planes, secrets, or other Space resources in the backup. 
- -By setting `spec.exclude`, you can filter out some matched Space API resources from the backup. - -### Including space resources in a backup - -Different fields are available to include resources based on labels or names: -- `spec.match.groups` to include only some groups in the backup. -- `spec.match.controlPlanes` to include only some control planes in the backup. -- `spec.match.secrets` to include only some secrets in the backup. -- `spec.match.extras` to include only some extra resources in the backup. - -```yaml -apiVersion: admin.spaces.upbound.io/v1alpha1 -kind: SpaceBackup -metadata: - name: my-backup -spec: - configRef: - kind: SpaceBackupConfig - name: default - match: - groups: - labelSelectors: - - matchLabels: - environment: production - controlPlanes: - labelSelectors: - - matchLabels: - backup: please - secrets: - names: - - my-secret - extras: - - apiGroup: "spaces.upbound.io" - kind: "SharedBackupConfig" - names: - - my-shared-backup -``` - -### Excluding Space resources from the backup - -Use the `spec.exclude` field to exclude matched Space API resources from the backup. - -Different fields are available to exclude resources based on labels or names: -- `spec.exclude.groups` to exclude some groups from the backup. -- `spec.exclude.controlPlanes` to exclude some control planes from the backup. -- `spec.exclude.secrets` to exclude some secrets from the backup. -- `spec.exclude.extras` to exclude some extra resources from the backup. - -```yaml -apiVersion: admin.spaces.upbound.io/v1alpha1 -kind: SpaceBackup -metadata: - name: my-backup -spec: - ttl: 168h # Backup is garbage collected after 7 days - configRef: - kind: SpaceBackupConfig - name: default - match: - groups: - labelSelectors: - - matchLabels: - environment: production - exclude: - groups: - names: - - not-this-one-please -``` - -### Exclude resources in control planes' backups - -By default, it backs up all resources in a selected control plane. - -Use the `spec.controlPlaneBackups.excludedResources` field to exclude resources from control planes' backups. - -```yaml -apiVersion: admin.spaces.upbound.io/v1alpha1 -kind: SpaceBackup -metadata: - name: my-backup -spec: - ttl: 168h # Backup is garbage collected after 7 days - configRef: - kind: SpaceBackupConfig - name: default - controlPlaneBackups: - excludedResources: - - secrets - - buckets.s3.aws.upbound.io -``` - -## Create a manual backup - -[SpaceBackup][spacebackup] is a cluster-scoped resource that causes a single backup to occur for the whole Space. - -Below is an example of a manual SpaceBackup: - -```yaml -apiVersion: admin.spaces.upbound.io/v1alpha1 -kind: SpaceBackup -metadata: - name: my-backup -spec: - configRef: - kind: SpaceBackupConfig - name: default - deletionPolicy: Delete -``` - - -The backup specification `DeletionPolicy` defines backup deletion actions, -including the deletion of the backup file from the bucket. The `Deletion Policy` -value defaults to `Orphan`. Set it to `Delete` to remove uploaded files -in the bucket. -For more information on the backup and restore process, review the [Spaces API documentation][spaces-api-documentation]. - -### Set the time to live - -Set the `spec.ttl` field to define the time to live for the backup. After this time, the backup is eligible for garbage collection. If this field isn't set, the backup isn't garbage collected. The time to live is a duration, for example, `168h` for 7 days. 
- -```yaml -apiVersion: admin.spaces.upbound.io/v1alpha1 -kind: SpaceBackup -metadata: - name: my-backup -spec: - ttl: 168h # Backup is garbage collected after 7 days -... -``` - -## Restore from a space backup - -Space Backup and Restore focuses only on disaster recovery. The restore procedure assumes a new Space installation with no existing resources. The restore procedure is idempotent, so you can run it multiple times without any side effects in case of failures. - -To restore a Space from an existing Space Backup, follow these steps: - -1. Install Spaces from scratch as needed. -2. Create a `SpaceBackupConfig` as needed to access the SpaceBackup from the object storage, for example named `my-backup-config`. -3. Select the backup you want to restore from, for example `my-backup`. -4. Run the following command to restore the Space: - -```shell -export SPACE_BACKUP_CONFIG=my-backup-config -export SPACE_BACKUP=my-backup -kubectl exec -ti -n upbound-system deployments/spaces-controller -c spaces -- hyperspace restore $SPACE_BACKUP $SPACE_BACKUP_CONFIG -``` - -### Restore specific control planes - -:::important -This feature is available from Spaces v1.11. -::: - -Instead of restoring the whole Space, you can choose to restore specific control planes -from a backup using the `--controlplanes` flag. You can also use -the `--skip-space-restore` flag to skip restoring Space objects. -This allows Spaces admins to restore individual control planes without -needing to restore the entire Space. - -```shell -export SPACE_BACKUP_CONFIG=my-backup-config -export SPACE_BACKUP=my-backup -kubectl exec -ti -n upbound-system deployments/spaces-controller -c spaces --- hyperspace restore $SPACE_BACKUP $SPACE_BACKUP_CONFIG --controlplanes default/ctp1,default/ctp2 --skip-space-restore -``` - - -[shared-backups]: /spaces/howtos/self-hosted/workload-id/backup-restore-config/ -[spacebackupconfig]: /reference/apis/spaces-api/v1_9 -[thanos-object-storage]: https://thanos.io/tip/thanos/storage.md/ -[spacebackupschedule]: /reference/apis/spaces-api/v1_9 -[cron-formatted]: https://en.wikipedia.org/wiki/Cron -[spacebackup]: /reference/apis/spaces-api/v1_9 -[spaces-api-documentation]: /reference/apis/spaces-api/v1_9 - diff --git a/spaces_versioned_docs/version-v1.14/howtos/self-hosted/gitops-with-argocd.md b/spaces_versioned_docs/version-v1.14/howtos/self-hosted/gitops-with-argocd.md deleted file mode 100644 index 004247a10..000000000 --- a/spaces_versioned_docs/version-v1.14/howtos/self-hosted/gitops-with-argocd.md +++ /dev/null @@ -1,142 +0,0 @@ ---- -title: GitOps with ArgoCD in Self-Hosted Spaces -sidebar_position: 80 -description: Set up GitOps workflows with Argo CD in self-hosted Spaces -plan: "business" ---- - -:::info Deployment Model -This guide applies to **self-hosted Spaces** deployments. For Upbound Cloud Spaces, see [GitOps with Upbound Control Planes](/spaces/howtos/cloud-spaces/gitops-on-upbound/). -::: - -GitOps is an approach for managing a system by declaratively describing desired resources' configurations in Git and using controllers to realize the desired state. Upbound's control planes are compatible with this pattern and it's strongly recommended you integrate GitOps in the platforms you build on Upbound. - - -## Integrate with Argo CD - - -[Argo CD][argo-cd] is a project in the Kubernetes ecosystem commonly used for -GitOps. You can use it in tandem with Upbound control planes to achieve GitOps -flows. The sections below explain how to integrate these tools with Upbound. 
- -### Configure connection secrets for control planes - -You can configure control planes to write their connection details to a secret. -Do this by setting the -[`spec.writeConnectionSecretToRef`][spec-writeconnectionsecrettoref] field in a -control plane manifest. For example: - -```yaml -apiVersion: spaces.upbound.io/v1beta1 -kind: ControlPlane -metadata: - name: ctp1 - namespace: default -spec: - writeConnectionSecretToRef: - name: kubeconfig-ctp1 - namespace: default -``` - - -### Configure Argo CD - - -To configure Argo CD for Annotation resource tracking, edit the Argo CD -ConfigMap in the Argo CD namespace. Add `application.resourceTrackingMethod: -annotation` to the data section as below. - -Next, configure the [auto respect RBAC for the Argo CD -controller][auto-respect-rbac-for-the-argo-cd-controller-1]. By default, Argo CD -attempts to discover some Kubernetes resource types that don't exist in a -control plane. You must configure Argo CD to respect the cluster's RBAC rules so -that Argo CD can sync. Add `resource.respectRBAC: normal` to the data section as -below. - -```bash -apiVersion: v1 -kind: ConfigMap -metadata: - name: argocd-cm -data: - ... - application.resourceTrackingMethod: annotation - resource.respectRBAC: normal -``` - -:::tip -The `resource.respectRBAC` configuration above tells Argo to respect RBAC for -_all_ cluster contexts. If you're using an Argo CD instance to manage more than -only control planes, you should consider changing the `clusters` string match -for the configuration to apply only to control planes. For example, if every -control plane context name followed the convention of being named -`controlplane-`, you could set the string match to be `controlplane-*` -::: - - -### Create a cluster context definition - - -Once the control plane is ready, extract the following values from the secret -containing the kubeconfig: - -```bash -kubeconfig_content=$(kubectl get secrets kubeconfig-ctp1 -n default -o jsonpath='{.data.kubeconfig}' | base64 -d) -server=$(echo "$kubeconfig_content" | grep 'server:' | awk '{print $2}') -bearer_token=$(echo "$kubeconfig_content" | grep 'token:' | awk '{print $2}') -ca_data=$(echo "$kubeconfig_content" | grep 'certificate-authority-data:' | awk '{print $2}') -``` - -Generate a new secret in the cluster where you installed Argo, using the prior -values extracted: - -```yaml -cat < - -import GlobalLanguageSelector, { CodeBlock } from '@site/src/components/GlobalLanguageSelector'; - - - - -:::important -This feature is only available for select Business Critical customers. You can't -set up your own Managed Space without the assistance of Upbound. If you're -interested in this deployment mode, please [contact us][contact]. -::: - - - -A Managed Space deployed on AWS is a single-tenant deployment of a control plane -space in your AWS organization in an isolated sub-account. With Managed Spaces, -you can use the same API, CLI, and Console that Upbound offers, with the benefit -of running entirely in a cloud account that you own and Upbound manages for you. - -The following guide walks you through setting up a Managed Space in your AWS -organization. If you have any questions while working through this guide, -contact your Upbound Account Representative for help. - - - - - -A Managed Space deployed on GCP is a single-tenant deployment of a control plane -space in your GCP organization in an isolated project. 
With Managed Spaces, you -can use the same API, CLI, and Console that Upbound offers, with the benefit of -running entirely in a cloud account that you own and Upbound manages for you. - -The following guide walks you through setting up a Managed Space in your GCP -organization. If you have any questions while working through this guide, -contact your Upbound Account Representative for help. - - - - -## Managed Space on your cloud architecture - - - -A Managed Space is a deployment of the Upbound Spaces software inside an -Upbound-controlled sub-account in your AWS cloud environment. The Spaces -software runs in this sub-account, orchestrated by Kubernetes. Backups and -billing data get stored inside bucket or blob storage in the same sub-account. -The control planes deployed and controlled by the Spaces software runs on the -Kubernetes cluster which gets deployed into the sub-account. - -The diagram below illustrates the high-level architecture of Upbound Managed Spaces: - -![Upbound Managed Spaces arch](/img/managed-arch-aws.png) - -The Spaces software gets deployed on an EKS Cluster in the region of your -choice. This EKS cluster is where your control planes are ultimately run. -Upbound also deploys buckets, 1 for the collection of the billing data and 1 for -control plane backups. - -Upbound doesn't have access to other sub-accounts nor your organization-level -settings in your cloud environment. Outside of your cloud organization, Upbound -runs the Upbound Console, which includes the Upbound API and web application, -including the dashboard you see at `console.upbound.io`. By default, all -connections are encrypted, but public. Optionally, you also have the option to -use private network connectivity through [AWS PrivateLink][aws-privatelink]. - - - - - - -A Managed Space is a deployment of the Upbound Spaces software inside an -Upbound-controlled project in your GCP cloud environment. The Spaces software -runs in this project, orchestrated by Kubernetes. Backups and billing data get -stored inside bucket or blob storage in the same project. The control planes -deployed and controlled by the Spaces software runs on the Kubernetes cluster -which gets deployed into the project. - -The diagram below illustrates the high-level architecture of Upbound Managed Spaces: - -![Upbound Managed Spaces arch](/img/managed-arch-gcp.png) - -The Spaces software gets deployed on a GKE Cluster in the region of your choice. -This GKE cluster is where your control planes are ultimately run. Upbound also -deploys cloud buckets, 1 for the collection of the billing data and 1 for -control plane backups. - -Upbound doesn't have access to other projects nor your organization-level -settings in your cloud environment. Outside of your cloud organization, Upbound -runs the Upbound Console, which includes the Upbound API and web application, -including the dashboard you see at `console.upbound.io`. By default, all -connections are encrypted, but public. Optionally, you also have the option to -use private network connectivity through [GCP Private Service -Connect][gcp-private-service-connect]. - - - -## Prerequisites - -- An organization created on Upbound - - - -- You should have a preexisting AWS organization to complete this guide. -- You must create a new AWS sub-account. Read the [AWS documentation][aws-documentation] to learn how to create a new sub-account in an existing organization on AWS. 
- -After the sub-account information gets provided to Upbound, **don't change it -any further.** Any changes made to the sub-account or the resources created by -Upbound for the purposes of the Managed Space deployments voids the SLA you have -with Upbound. If you want to make configuration changes, contact your Upbound -Solutions Architect. - - - - - -- You should have a preexisting GCP organization with an active Cloud Billing account to complete this guide. -- You must create a new GCP project. Read the [GCP documentation][gcp-documentation] to learn how to create a new project in an existing organization on GCP. - -After the project information gets provided to Upbound, **don't change it any -further.** Any changes made to the project or the resources created by Upbound -for the purposes of the Managed Space deployments voids the SLA you have with -Upbound. If you want to make configuration changes, contact your Upbound -Solutions Architect. - - - - - -## Set up cross-account management - -Upbound supports using AWS Key Management Service with cross-account IAM -permissions. This enables the isolation of keys so the infrastructure operated -by Upbound has limited access to symmetric keys. - -In the KMS key's account, apply the baseline key policy: - -```json -{ - "Sid": "Allow Upbound to use this key", - "Effect": "Allow", - "Principal": { - "AWS": ["[Managed Space sub-account ID]"] - }, - "Action": ["kms:Encrypt", "kms:Decrypt", "kms:ReEncrypt*", "kms:GenerateDataKey*", "kms:DescribeKey"], - "Resource": "*" -} -``` - -You need another key policy to let the sub-account create persistent resources -with the KMS key: - -```json -{ - "Sid": "Allow attachment of persistent resources for an Upbound Managed Space", - "Effect": "Allow", - "Principal": { - "AWS": "[Managed Space sub-account ID]" - }, - "Action": ["kms:CreateGrant", "kms:ListGrants", "kms:RevokeGrant"], - "Resource": "*", - "Condition": { - "Bool": { - "kms:GrantIsForAWSResource": "true" - } - } -} -``` - -### Configure PrivateLink - -By default, all connections to the Upbound Console are encrypted, but public. -AWS PrivateLink is a feature that allows VPC peering whereby your traffic -doesn't traverse the public internet. To have this configured, contact your -Upbound Account Representative. - - - - - -## Enable APIs - -Enable the following APIs in the new project: - -- Kubernetes Engine API -- Cloud Resource Manager API -- Compute Engine API -- Cloud DNS API - -:::tip -Read how to enable APIs in a GCP project [here][here]. -::: - -## Create a service account - -Create a service account in the new project. Name the service account, -upbound-sa. Give the service account the following roles: - -- Compute Admin -- Project IAM Admin -- Service Account Admin -- DNS Administrator -- Editor - -Select the service account you just created. Select keys. Add a new key and -select JSON. The key gets downloaded to your machine. Save this for later. - -## Create a DNS Zone - -Create a DNS Zone, set the **Zone type** to `Public`. - -### Configure Private Service Connect - -By default, all connections to the Upbound Console are encrypted, but public. -GCP Private Service Connect is a feature that allows VPC peering whereby your -traffic doesn't traverse the public internet. To have this configured, contact -your Upbound Account Representative. - - - -## Provide information to Upbound - -Once these policies get attached to the key, tell your Upbound Account -Representative, providing them the following: - - - -- the full ARN of the KMS key. 
-
-- The name of the organization that you created in Upbound. Use the `up` CLI command, `up org list`, to see this information.
-- Confirmation of which region in AWS you want the deployment to target.
-
-
-
-
-
-- The service account JSON key
-- The NS records associated with the DNS name created in the last step.
-- The name of the organization that you created in Upbound. Use the `up` CLI command, `up org list`, to see this information.
-- Confirmation of which region in GCP you want the deployment to target.
-
-
-
-Once Upbound has this information, the request gets processed within one business day.
-
-## Use your Managed Space
-
-Once the Managed Space gets deployed, you can see it in the Space selector when browsing your environment on [`console.upbound.io`][console-upbound-io].
-
-
-
-
-[contact]: https://www.upbound.io/contact-us
-[aws-privatelink]: #configure-privatelink
-[aws-documentation]: https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_create.html#orgs_manage_accounts_create-new
-[gcp-private-service-connect]: #configure-private-service-connect
-[gcp-documentation]: https://cloud.google.com/resource-manager/docs/creating-managing-organization
-[here]: https://cloud.google.com/apis/docs/getting-started#enabling_apis
-[console-upbound-io]: https://console.upbound.io/
diff --git a/spaces_versioned_docs/version-v1.14/howtos/self-hosted/oidc-configuration.md b/spaces_versioned_docs/version-v1.14/howtos/self-hosted/oidc-configuration.md
deleted file mode 100644
index cbef4dc42..000000000
--- a/spaces_versioned_docs/version-v1.14/howtos/self-hosted/oidc-configuration.md
+++ /dev/null
@@ -1,289 +0,0 @@
----
-title: Configure OIDC
-sidebar_position: 20
-description: Configure OIDC in your Space
----
-:::important
-This guide is only applicable for administrators who've deployed self-hosted Spaces. For general RBAC in Upbound, read [Upbound RBAC][upbound-rbac].
-:::
-
-Upbound uses the Kubernetes [Structured Authentication Configuration][structured-auth-config] to validate OIDC tokens sent to the API. Upbound stores this
-configuration as a `ConfigMap` and provides it to the Upbound router
-component during installation with Helm.
-
-This guide walks you through how to create and apply an authentication
-configuration to validate Upbound with an external identity provider. Each
-section focuses on a specific part of the configuration file.
-
-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments.
-
-For related platform authentication features, see the [Platform manual](../../../../platform/).
-:::
-
-## Creating the `AuthenticationConfiguration` file
-
-First, create a file called `config.yaml` with an `AuthenticationConfiguration`
-kind. The `AuthenticationConfiguration` is the initial authentication structure
-necessary for Upbound to communicate with your chosen identity provider.
-
-```yaml
-apiVersion: apiserver.config.k8s.io/v1beta1
-kind: AuthenticationConfiguration
-jwt:
-- issuer:
-    url: oidc-issuer-url
-    audiences:
-    - oidc-client-id
-  claimMappings: # optional
-    username:
-      claim: oidc-username-claim
-      prefix: oidc-username-prefix
-    groups:
-      claim: oidc-groups-claim
-      prefix: oidc-groups-prefix
-```
-
-
-For detailed configuration options, including the CEL-based token validation,
-review the feature [documentation][structured-auth-config].
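-
-As a concrete illustration of the CEL-based validation mentioned above, the following snippet is a minimal sketch using expressions from the upstream Kubernetes feature documentation; the claim names and limits are assumptions, not Upbound requirements:
-
-```yaml
-jwt:
-- issuer:
-    url: https://example.com
-    audiences:
-    - my-client-id
-  claimValidationRules:
-  # Reject tokens with a total lifetime longer than 24 hours.
-  - expression: 'claims.exp - claims.nbf <= 86400'
-    message: 'total token lifetime must not exceed 24 hours'
-  userValidationRules:
-  # Refuse usernames that collide with Kubernetes reserved identities.
-  - expression: "!user.username.startsWith('system:')"
-    message: "username cannot use reserved 'system:' prefix"
-```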
- - -The `AuthenticationConfiguration` allows you to configure multiple JWT -authenticators as separate issuers. - -### Configure an issuer - -The `jwt` array requires an `issuer` specification and typically contains: - -- A `username` claim mapping -- A `groups` claim mapping -Optionally, the configuration may also include: -- A set of claim validation rules -- A set of user validation rules - -The `issuer` URL must be unique across all configured authenticators. - -```yaml -issuer: - url: https://example.com - discoveryUrl: https://discovery.example.com/.well-known/openid-configuration - certificateAuthority: |- - - audiences: - - client-id-a - - client-id-b - audienceMatchPolicy: MatchAny -``` - -By default, the authenticator assumes the OIDC Discovery URL is -`{issuer.url}/.well-known/openid-configuration`. Most identity providers follow -this structure, and you can omit the `discoveryUrl` field. To use a separate -discovery service, specify the full path to the discovery endpoint in this -field. - -If the CA for the Issuer isn't public, provide the PEM encoded CA for the Discovery URL. - -At least one of the `audiences` entries must match the `aud` claim in the JWT. -For OIDC tokens, this is the Client ID of the application attempting to access -the Upbound API. Having multiple values set allows the same configuration to -apply to multiple client applications, for example the `kubectl` CLI and an -Internal Developer Portal. - -If you specify multiple `audiences` , `audienceMatchPolicy` must equal `MatchAny`. - -### Configure `claimMappings` - -#### Username claim mapping - -By default, the authenticator uses the `sub` claim as the user name. To override this, either: - -- specify *both* `claim` and `prefix`. `prefix` may be explicitly set to the empty string. -or - -- specify a CEL `expression` to calculate the user name. - -```yaml -claimMappings: - username: - claim: "sub" - prefix: "keycloak" - # - expression: 'claims.username + ":external-user"' -``` - - -#### Groups claim mapping - -By default, this configuration doesn't map groups, unless you either: - -- specify both `claim` and `prefix`. `prefix` may be explicitly set to the empty string. -or - -- specify a CEL `expression` that returns a string or list of strings. - - -```yaml -claimMappings: - groups: - claim: "groups" - prefix: "" - # - expression: 'claims.roles.split(",")' -``` - - -### Validation rules - - -Validation rules are outside the scope of this document. Review the -[documentation][structured-auth-config] for more information. Examples include -using CEL expressions to validate authentication such as: - - -- Validating that a token claim has a specific value -- Validating that a token has a limited lifetime -- Ensuring usernames and groups don't contain reserved prefixes - -## Required claims - -To interact with Space and ControlPlane APIs, users must have the `upbound.io/aud` claim set to one of the following: - -| Upbound.io Audience | Notes | -| -------------------------------------------------------- | -------------------------------------------------------------------- | -| `[]` | No Access to Space-level or ControlPlane APIs | -| `['upbound:spaces:api']` | This Identity is only for Space-level APIs | -| `['upbound:spaces:controlplanes']` | This Identity is only for ControlPlane APIs | -| `['upbound:spaces:api', 'upbound:spaces:controlplanes']` | This Identity is for both Space-level and ControlPlane APIs | - - -You can set this claim in two ways: - -- In the identity provider mapped in the ID token. 
-
-- Inject it in the authenticator with the `jwt.claimMappings.extra` array.
-
-For example:
-
-```yaml
-apiVersion: apiserver.config.k8s.io/v1beta1
-kind: AuthenticationConfiguration
-jwt:
-- issuer:
-    url: https://keycloak:8443/realms/master
-    certificateAuthority: |-
-
-    audiences:
-    - master-realm
-    audienceMatchPolicy: MatchAny
-  claimMappings:
-    username:
-      claim: "preferred_username"
-      prefix: "keycloak:"
-    groups:
-      claim: "groups"
-      prefix: ""
-    extra:
-    - key: 'upbound.io/aud'
-      valueExpression: "['upbound:spaces:controlplanes', 'upbound:spaces:api']"
-```
-
-## Install the `AuthenticationConfiguration`
-
-Once you create an `AuthenticationConfiguration` file, specify this file as a
-`ConfigMap` in the host cluster for the Upbound Space.
-
-```sh
-# The ConfigMap needs a name; oidc-auth-config here is an example.
-kubectl create configmap oidc-auth-config -n upbound-system --from-file=config.yaml=./path/to/config.yaml
-```
-
-
-To enable OIDC authentication and disable Upbound IAM when installing the Space,
-reference the configuration and pass an empty value to the Upbound IAM issuer
-parameter:
-
-
-```sh
-up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
-  ...
-  --set "authentication.structuredConfig=" \
-  --set "router.controlPlane.extraArgs[0]=--upbound-iam-issuer-url="
-```
-
-## Configure RBAC
-
-
-In this scenario, the external identity provider handles authentication, but
-permissions for Spaces and ControlPlane APIs use standard RBAC objects.
-
-### Spaces APIs
-
-The Spaces APIs include:
-
-```yaml
-- apiGroups:
-  - spaces.upbound.io
-  resources:
-  - controlplanes
-  - sharedexternalsecrets
-  - sharedsecretstores
-  - backups
-  - backupschedules
-  - sharedbackups
-  - sharedbackupconfigs
-  - sharedbackupschedules
-- apiGroups:
-  - observability.spaces.upbound.io
-  resources:
-  - sharedtelemetryconfigs
-```
-
-### ControlPlane APIs
-
-
-
-Crossplane specifies three [roles][crossplane-managed-clusterroles] for a
-ControlPlane: admin, editor, and viewer. These map to the verbs `admin`, `edit`,
-and `view` on the `controlplanes/k8s` resource in the `spaces.upbound.io` API
-group.
-
-
-### Control access
-
-The `groups` claim in the `AuthenticationConfiguration` allows you to control
-resource access when you create a `ClusterRoleBinding`. A `ClusterRole` defines
-the permissions, and a `ClusterRoleBinding` grants them to a subject.
-
-The example below allows `admin` permissions for all ControlPlanes to members of
-the `ctp-admins` group:
-
-```yaml
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
-  name: allow-ctp-admin
-rules:
-- apiGroups:
-  - spaces.upbound.io
-  resources:
-  - controlplanes/k8s
-  verbs:
-  - admin
-```
-
-The following `ClusterRoleBinding` grants that role to the `ctp-admins` group:
-
-```yaml
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
-  name: allow-ctp-admin
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: allow-ctp-admin
-subjects:
-- apiGroup: rbac.authorization.k8s.io
-  kind: Group
-  name: ctp-admins
-```
-
-[structured-auth-config]: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#using-authentication-configuration
-[crossplane-managed-clusterroles]: https://github.com/crossplane/crossplane/blob/master/design/design-doc-rbac-manager.md#managed-rbac-clusterroles
-[upbound-rbac]: /manuals/platform/concepts/authorization/upbound-rbac
diff --git a/spaces_versioned_docs/version-v1.14/howtos/self-hosted/proxies-config.md b/spaces_versioned_docs/version-v1.14/howtos/self-hosted/proxies-config.md
deleted file mode 100644
index 3802e4cb0..000000000
--- a/spaces_versioned_docs/version-v1.14/howtos/self-hosted/proxies-config.md
+++ /dev/null
@@ -1,31 +0,0 @@
----
-title: Proxied configuration
-sidebar_position: 20
-description: Configure Upbound within a proxied environment
----
-
-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments in proxied environments. Proxy configuration procedures are consistent across versions.
-
-For version-specific deployment considerations, see the Spaces release notes.
-:::
-
-
-
-When you install Upbound with Helm in a proxied environment, update the registry settings to point to your internal registry, as in the following example.
-
-
-
-```bash
-helm -n upbound-system upgrade --install spaces \
-  oci://xpkg.upbound.io/spaces-artifacts/spaces \
-  --version "${SPACES_VERSION}" \
-  --set "ingress.host=${SPACES_ROUTER_HOST}" \
-  --set "account=${UPBOUND_ACCOUNT}" \
-  --set "authentication.hubIdentities=true" \
-  --set "authorization.hubRBAC=true" \
-  --set "registry=registry.company.corp/spaces" \
-  --set "controlPlanes.uxp.registryOverride=registry.company.corp/xpkg.upbound.io" \
-  --set "controlPlanes.uxp.repository=registry.company.corp/spaces" \
-  --wait
-```
diff --git a/spaces_versioned_docs/version-v1.14/howtos/self-hosted/query-api.md b/spaces_versioned_docs/version-v1.14/howtos/self-hosted/query-api.md
deleted file mode 100644
index c112e9001..000000000
--- a/spaces_versioned_docs/version-v1.14/howtos/self-hosted/query-api.md
+++ /dev/null
@@ -1,396 +0,0 @@
----
-title: Deploy Query API infrastructure
-weight: 130
-description: Query API
-aliases:
-  - /all-spaces/self-hosted-spaces/query-api
-  - /self-hosted-spaces/query-api
-  - all-spaces/self-hosted-spaces/query-api
----
-
-
-
-
-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+). Query API has evolved across versions:
-
-- **Cloud Spaces**: Available since v1.6 (enabled by default)
-- **Self-Hosted**: Available since v1.8 (requires manual enablement)
-
-For details on Query API availability across versions, see the Spaces release notes.
-:::
-
-:::important
-
-This feature is in preview. The Query API is available in the Cloud Space offering since `v1.6` and is enabled by default there.
-
-Since `v1.8.0`, the Query API is a requirement for connecting a Space. It's off by default in self-hosted Spaces; see below to enable it.
-
-:::
-
-Upbound's Query API allows users to inspect objects and resources within their control planes. The read-only `up alpha query` and `up alpha get` CLI commands allow you to gather information on your control planes in a fast and efficient package. These commands follow the [`kubectl` conventions][kubectl-conventions] for filtering, sorting, and retrieving information from your Space.
-
-Query API requires a PostgreSQL database to store the data. You can use the default PostgreSQL instance provided by Upbound or bring your own PostgreSQL instance.
-
-## Managed setup
-
-:::tip
-If you don't have specific requirements for your setup, Upbound recommends following this approach.
-:::
-
-To enable this feature, set `features.alpha.apollo.enabled=true` and `apollo.apollo.storage.postgres.create=true` when installing Spaces.
-
-You also need to install CloudNativePG (`CNPG`) to provide the PostgreSQL instance. You can let the `up` CLI do this for you, or install it manually.
-
-For more customization, see the [Helm chart reference][helm-chart-reference]. You can modify the number
-of PostgreSQL instances, pooling instances, storage size, and more.
-
-If you have specific requirements not addressed in the Helm chart, see below for more information on how to bring your own [PostgreSQL setup][postgresql-setup].
-
-### Using the up CLI
-
-Before you begin, make sure you have the most recent version of the [`up` CLI installed][up-cli-installed].
-
-To enable this feature, set `features.alpha.apollo.enabled=true` and `apollo.apollo.storage.postgres.create=true` when installing Spaces:
-
-```bash
-up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
-  ...
-  --set "features.alpha.apollo.enabled=true" \
-  --set "apollo.apollo.storage.postgres.create=true"
-```
-
-`up space init` and `up space upgrade` install CloudNativePG automatically, if needed.
-
-### Helm chart
-
-If you install the Helm chart in some other way, manually install CloudNativePG in one of the [supported ways][supported-ways], for example:
-
-```shell
-kubectl apply --server-side -f \
-  https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.24.1/cnpg-1.24.1.yaml
-kubectl rollout status -n cnpg-system deployment cnpg-controller-manager -w --timeout 120s
-```
-
-Next, install the Spaces Helm chart with the necessary values, for example:
-
-```shell
-helm -n upbound-system upgrade --install spaces \
-  oci://xpkg.upbound.io/spaces-artifacts/spaces \
-  --version "${SPACES_VERSION}" \
-  ...
-  --set "features.alpha.apollo.enabled=true" \
-  --set "apollo.apollo.storage.postgres.create=true" \
-  --wait
-```
-
-## Self-hosted PostgreSQL configuration
-
-
-If your workflow requires more customization, you can provide your own
-PostgreSQL instance and configure credentials manually.
-
-Using your own PostgreSQL instance requires careful architecture consideration.
-Review the architecture and requirements guidelines.
-
-### Architecture
-
-The Query API architecture uses the following components, in addition to a PostgreSQL database:
-* **Apollo Syncers**: Watch `etcd` for changes and sync them to PostgreSQL. One or more per control plane.
-* **Apollo Server**: Serves the Query API from the data in PostgreSQL. One or more per Space.
-
-The default setup also uses the `PgBouncer` connection pooler to manage connections from the syncers.
-
-```mermaid
-graph LR
-    User[User]
-
-    subgraph Cluster["Cluster (Spaces)"]
-        direction TB
-        Apollo[apollo]
-
-        subgraph ControlPlanes["Control Planes"]
-            APIServer[API Server]
-            Syncer[apollo-syncer]
-        end
-    end
-
-    PostgreSQL[(PostgreSQL)]
-
-    User -->|requests| Apollo
-
-    Apollo -->|connects| PostgreSQL
-    Apollo -->|creates schemas & users| PostgreSQL
-
-    Syncer -->|watches| APIServer
-    Syncer -->|writes| PostgreSQL
-
-    PostgreSQL -->|data| Apollo
-
-    style PostgreSQL fill:#e1f5ff,stroke:#333,stroke-width:2px,color:#000
-    style Apollo fill:#ffe1e1,stroke:#333,stroke-width:2px,color:#000
-    style Cluster fill:#f0f0f0,stroke:#333,stroke-width:2px,color:#000
-    style ControlPlanes fill:#fff,stroke:#666,stroke-width:1px,stroke-dasharray: 5 5,color:#000
-```
-
-
-Each component needs to connect to the PostgreSQL database.
-
-In the event of database issues, you can provide a new database and the syncers
-automatically repopulate the data.
-
-### Requirements
-
-* A PostgreSQL 16 instance or cluster.
-* A database, for example named `upbound`.
-* **Optional**: A dedicated user for the Apollo Syncers (for example named `syncer`); otherwise the Spaces Controller generates a dedicated set of credentials per syncer with the necessary permissions.
-* A dedicated **superuser or admin account** for the Apollo Server.
-* **Optional**: A connection pooler, like PgBouncer, to manage connections from the Apollo Syncers. If you didn't provide the optional users, you might have to configure the pooler to allow users to connect using the same credentials as PostgreSQL.
-* **Optional**: A read replica for the Apollo Syncers to connect to, which reduces load on the primary database. This might cause a slight delay in the data being available through the Query API.
-
-Below you can find examples of setups to get you started. You can mix and match the examples to suit your needs.
-
-### In-cluster setup
-
-:::tip
-
-If you don't have strong opinions on your setup, but still want full control over
-the resources created (for example, for customizations the managed setup doesn't
-support), Upbound recommends the in-cluster setup.
-
-:::
-
-For more customization than the managed setup, you can use CloudNativePG for
-PostgreSQL in the same cluster.
-
-For in-cluster setup, manually deploy the operator in one of the [supported ways][supported-ways-1], for example:
-
-```shell
-kubectl apply --server-side -f \
-  https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.24.1/cnpg-1.24.1.yaml
-kubectl rollout status -n cnpg-system deployment cnpg-controller-manager -w --timeout 120s
-```
-
-Then create a `Cluster` and `Pooler` in the `upbound-system` namespace, for example (a minimal sketch; the names, instance counts, and storage size are illustrative):
-
-```shell
-kubectl create ns upbound-system
-
-kubectl apply -f - <<EOF
-apiVersion: postgresql.cnpg.io/v1
-kind: Cluster
-metadata:
-  name: apollo-pg
-  namespace: upbound-system
-spec:
-  instances: 3
-  storage:
-    size: 20Gi
----
-apiVersion: postgresql.cnpg.io/v1
-kind: Pooler
-metadata:
-  name: apollo-pg-pooler
-  namespace: upbound-system
-spec:
-  cluster:
-    name: apollo-pg
-  instances: 2
-  type: rw
-  pgbouncer:
-    poolMode: session
-EOF
-```
-
-
-
-### External setup
-
-
-:::tip
-
-If you want to run your PostgreSQL instance outside the cluster, but are fine with credentials being managed by the `apollo` user, this is the suggested way to proceed.
-
-:::
-
-When using this setup, you must manually create the required Secrets in the
-`upbound-system` namespace. The `apollo` user must have permissions to create
-schemas and users.
-
-```shell
-
-kubectl create ns upbound-system
-
-# A Secret containing the necessary credentials to connect to the PostgreSQL instance
-kubectl create secret generic spaces-apollo-pg-app -n upbound-system \
-  --from-literal=password=supersecret
-
-# A Secret containing the necessary CA certificate to verify the connection to the PostgreSQL instance
-kubectl create secret generic spaces-apollo-pg-ca -n upbound-system \
-  --from-file=ca.crt=/path/to/ca.crt
-```
-
-Next, install Spaces with the necessary settings:
-
-```shell
-export PG_URL=your-postgres-host:5432
-export PG_POOLED_URL=your-pgbouncer-host:5432 # this could be the same as above
-
-helm upgrade --install ... \
-  --set "features.alpha.apollo.enabled=true" \
-  --set "apollo.apollo.storage.postgres.create=false" \
-  --set "apollo.apollo.storage.postgres.connection.url=$PG_URL" \
-  --set "apollo.apollo.storage.postgres.connection.credentials.secret.name=spaces-apollo-pg-app" \
-  --set "apollo.apollo.storage.postgres.connection.credentials.format=basicauth" \
-  --set "apollo.apollo.storage.postgres.connection.ca.name=spaces-apollo-pg-ca" \
-  --set "apollo.apollo.storage.postgres.connection.syncer.url=$PG_POOLED_URL"
-```
-
-### External setup with all custom credentials
-
-For custom credentials with Apollo Syncers or Server, create a new secret in the
-`upbound-system` namespace:
-
-```shell
-export APOLLO_SYNCER_USER=syncer
-export APOLLO_SERVER_USER=apollo
-
-kubectl create ns upbound-system
-
-# A Secret containing the necessary credentials to connect to the PostgreSQL instance
-kubectl create secret generic spaces-apollo-pg-app -n upbound-system \
-  --from-literal=password=supersecret
-
-# A Secret containing the necessary CA certificate to verify the connection to the PostgreSQL instance
-kubectl create secret generic spaces-apollo-pg-ca -n upbound-system \
-  --from-file=ca.crt=/path/to/ca.crt
-
-# A Secret containing the necessary credentials for the Apollo Syncers to connect to the PostgreSQL instance.
-# These will be used by all Syncers in the Space.
-kubectl create secret generic spaces-apollo-pg-syncer -n upbound-system \
-  --from-literal=username=$APOLLO_SYNCER_USER \
-  --from-literal=password=supersecret
-
-# A Secret containing the necessary credentials for the Apollo Server to connect to the PostgreSQL instance.
-kubectl create secret generic spaces-apollo-pg-apollo -n upbound-system \
-  --from-literal=username=$APOLLO_SERVER_USER \
-  --from-literal=password=supersecret
-```
-
-Next, install Spaces with the necessary settings:
-
-```shell
-export PG_URL=your-postgres-host:5432
-export PG_POOLED_URL=your-pgbouncer-host:5432 # this could be the same as above
-
-helm ... \
-  --set "features.alpha.apollo.enabled=true" \
-  --set "apollo.apollo.storage.postgres.create=false" \
-  --set "apollo.apollo.storage.postgres.connection.url=$PG_URL" \
-  --set "apollo.apollo.storage.postgres.connection.credentials.secret.name=spaces-apollo-pg-app" \
-  --set "apollo.apollo.storage.postgres.connection.credentials.format=basicauth" \
-  --set "apollo.apollo.storage.postgres.connection.ca.name=spaces-apollo-pg-ca" \
-  --set "apollo.apollo.storage.postgres.connection.syncer.url=$PG_POOLED_URL" \
-
-  # Credentials for the syncers
-  --set "apollo.apollo.storage.postgres.connection.syncer.credentials.format=basicauth" \
-  --set "apollo.apollo.storage.postgres.connection.syncer.credentials.user=$APOLLO_SYNCER_USER" \
-  --set "apollo.apollo.storage.postgres.connection.syncer.credentials.secret.name=spaces-apollo-pg-syncer" \
-
-  # Credentials for the server
-  --set "apollo.apollo.storage.postgres.connection.apollo.credentials.format=basicauth" \
-  --set "apollo.apollo.storage.postgres.connection.apollo.credentials.user=$APOLLO_SERVER_USER" \
-  --set "apollo.apollo.storage.postgres.connection.apollo.credentials.secret.name=spaces-apollo-pg-apollo" \
-  --set "apollo.apollo.storage.postgres.connection.apollo.url=$PG_POOLED_URL"
-```
-
-
-## Using the Query API
-
-
-See the [Query API documentation][query-api-documentation] for more information on how to use the Query API.
-
-
-
-
-[postgresql-setup]: #self-hosted-postgresql-configuration
-[up-cli-installed]: /manuals/cli/overview
-[query-api-documentation]: /spaces/howtos/query-api
-
-[helm-chart-reference]: /reference/helm-reference
-[kubectl-conventions]: https://kubernetes.io/docs/reference/kubectl/generated/kubectl_get/
-[supported-ways]: https://cloudnative-pg.io/documentation/current/installation_upgrade/
-[supported-ways-1]: https://cloudnative-pg.io/documentation/current/installation_upgrade/
-[cloudnativepg-documentation]: https://cloudnative-pg.io/documentation/1.24/storage/#configuration-via-a-pvc-template
-[postgresql-cluster]: https://cloudnative-pg.io/documentation/1.24/resource_management/
-[pooler]: https://cloudnative-pg.io/documentation/1.24/connection_pooling/#pod-templates
-[postgresql-cluster-2]: https://cloudnative-pg.io/documentation/1.24/replication/
-[pooler-3]: https://cloudnative-pg.io/documentation/1.24/connection_pooling/#high-availability-ha
-[postgresql-cluster-4]: https://cloudnative-pg.io/documentation/1.24/operator_capability_levels/#override-of-operand-images-through-the-crd
-[pooler-5]: https://cloudnative-pg.io/documentation/1.24/connection_pooling/#pod-templates
-[cloudnativepg-documentation-6]: https://cloudnative-pg.io/documentation/1.24/postgresql_conf/
diff --git a/spaces_versioned_docs/version-v1.14/howtos/self-hosted/scaling-resources.md b/spaces_versioned_docs/version-v1.14/howtos/self-hosted/scaling-resources.md
deleted file mode 100644
index 7bb04d2c2..000000000
--- a/spaces_versioned_docs/version-v1.14/howtos/self-hosted/scaling-resources.md
+++ /dev/null
@@ -1,184 +0,0 @@
----
-title: Scaling vCluster and etcd Resources
-weight: 950
-description: A guide for scaling vCluster and etcd resources in self-hosted Spaces
-aliases:
-  - /all-spaces/self-hosted-spaces/scaling-resources
-  - /spaces/scaling-resources
----
-
-In large workloads or control plane migrations, you may encounter
-performance-impacting resource constraints. This guide explains how to scale
-vCluster and `etcd` resources for optimal performance in your self-hosted Space.
-
-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Scaling procedures are consistent across versions.
-
-For version-specific resource requirements and capacity planning, see the Spaces release notes.
-:::
-
-## Signs of resource constraints
-
-You may need to scale your vCluster or `etcd` resources if you observe:
-
-- API server timeout errors such as `http: Handler timeout`
-- Error messages about `too many requests` and requests to `try again later`
-- Operations like provider installation failing with errors like `cannot apply provider package secret`
-- vCluster pods experiencing continuous restarts
-- Degraded API performance under high resource volume
-
-
-## Scaling vCluster resources
-
-
-The vCluster component handles Kubernetes API requests for your control planes.
-Deployments with multiple control planes or providers may exceed default resource allocations.
-
-```yaml
-# Default settings
-controlPlanes.vcluster.resources.limits.cpu: "3000m"
-controlPlanes.vcluster.resources.limits.memory: "3960Mi"
-controlPlanes.vcluster.resources.requests.cpu: "170m"
-controlPlanes.vcluster.resources.requests.memory: "1320Mi"
-```
-
-For larger workloads, like migrating from an existing control plane with several
-providers, increase these resource limits in your Spaces `values.yaml` file.
-
-```yaml
-controlPlanes:
-  vcluster:
-    resources:
-      limits:
-        cpu: "4000m"   # Increase to 4 cores
-        memory: "6Gi"  # Increase to 6GB memory
-      requests:
-        cpu: "500m"    # Increase baseline CPU request
-        memory: "2Gi"  # Increase baseline memory request
-```
-
-## Scaling `etcd` storage
-
-Kubernetes relies on `etcd` performance, which can lead to IOPS (input/output
-operations per second) bottlenecks. Upbound allocates `50Gi` volumes for `etcd`
-in cloud environments to ensure adequate IOPS performance.
-
-```yaml
-# Default setting
-controlPlanes.etcd.persistence.size: "5Gi"
-```
-
-For production environments or when migrating large control planes, increase
-`etcd` volume size and specify an appropriate storage class:
-
-```yaml
-controlPlanes:
-  etcd:
-    persistence:
-      size: "50Gi"                  # Recommended for production
-      storageClassName: "fast-ssd"  # Use a high-performance storage class
-```
-
-### Storage class considerations
-
-For AWS:
-- Use GP3 volumes with adequate IOPS
-- For GP3 volumes, IOPS scale with volume size (3,000 IOPS baseline)
-- For optimal performance, provision at least 32Gi to support up to 16,000 IOPS
-
-For GCP and Azure:
-- Use SSD-based persistent disk types for optimal performance
-- Consider premium storage options for high-throughput workloads
-
-## Scaling Crossplane resources
-
-Crossplane manages provider resources in your control planes. You may need to increase provider resources for larger deployments:
-
-```yaml
-# Default settings
-controlPlanes.uxp.resourcesCrossplane.requests.cpu: "370m"
-controlPlanes.uxp.resourcesCrossplane.requests.memory: "400Mi"
-```
-
-
-For environments with many providers or managed resources:
-
-
-```yaml
-controlPlanes:
-  uxp:
-    resourcesCrossplane:
-      limits:
-        cpu: "1000m"     # Add CPU limit
-        memory: "1Gi"    # Add memory limit
-      requests:
-        cpu: "500m"      # Increase CPU request
-        memory: "512Mi"  # Increase memory request
-```
-
-## High availability configuration
-
-For production environments, enable High Availability mode to ensure resilience:
-
-```yaml
-controlPlanes:
-  ha:
-    enabled: true
-```
-
-## Best practices for migration scenarios
-
-When migrating from existing control planes into a self-hosted Space:
-
-1. **Pre-scale resources**: Scale up resources before performing the migration
-2. **Monitor resource usage**: Watch resource consumption during and after migration with `kubectl top pods` (see the sketch after this list)
-3. **Scale incrementally**: If issues persist, increase resources incrementally until performance stabilizes
-4. **Consider storage performance**: `etcd` is sensitive to storage I/O performance
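-
-As a quick way to apply the monitoring advice above, the following commands are a minimal sketch; they assume metrics-server is installed and use the `mxp-<control-plane-id>-system` namespace pattern described in the troubleshooting guide:
-
-```bash
-# Per-pod CPU and memory for the Spaces management plane.
-kubectl top pods -n upbound-system
-
-# Per-pod usage across all control plane namespaces.
-kubectl top pods --all-namespaces | grep mxp-
-```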
-
-## Helm values configuration
-
-Apply these settings through your Spaces Helm values file:
-
-```yaml
-controlPlanes:
-  vcluster:
-    resources:
-      limits:
-        cpu: "4000m"
-        memory: "6Gi"
-      requests:
-        cpu: "500m"
-        memory: "2Gi"
-  etcd:
-    persistence:
-      size: "50Gi"
-      storageClassName: "gp3" # Use your cloud provider's fast storage class
-  uxp:
-    resourcesCrossplane:
-      limits:
-        cpu: "1000m"
-        memory: "1Gi"
-      requests:
-        cpu: "500m"
-        memory: "512Mi"
-  ha:
-    enabled: true # for production environments
-```
-
-Apply the configuration using Helm:
-
-```bash
-helm upgrade --install spaces oci://xpkg.upbound.io/spaces-artifacts/spaces \
-  -f values.yaml \
-  -n upbound-system
-```
-
-## Considerations
-
-- **Provider count**: Each provider adds resource overhead; consider using provider families to optimize resource usage
-- **Managed resources**: The number of managed resources impacts CPU usage more than memory
-- **Vertical pod autoscaling**: Consider using vertical pod autoscaling in Kubernetes to automatically adjust resources based on usage
-- **Storage performance**: Storage performance is as important as capacity for etcd
-- **Network latency**: Low-latency connections between components improve performance
-
-
diff --git a/spaces_versioned_docs/version-v1.14/howtos/self-hosted/self-hosted-spaces-deployment.md b/spaces_versioned_docs/version-v1.14/howtos/self-hosted/self-hosted-spaces-deployment.md
deleted file mode 100644
index e549e3939..000000000
--- a/spaces_versioned_docs/version-v1.14/howtos/self-hosted/self-hosted-spaces-deployment.md
+++ /dev/null
@@ -1,461 +0,0 @@
----
-title: Deployment Workflow
-sidebar_position: 3
-description: A quickstart guide for Upbound Spaces
-tier: "business"
----
-import GlobalLanguageSelector, { CodeBlock } from '@site/src/components/GlobalLanguageSelector';
-
-
-
-
-
-This guide deploys a self-hosted Upbound cluster in AWS.
-
-
-
-
-
-This guide deploys a self-hosted Upbound cluster in Azure.
-
-
-
-
-
-This guide deploys a self-hosted Upbound cluster in GCP.
-
-
-
-Disconnected Spaces allow you to host control planes in your preferred environment.
-
-## Prerequisites
-
-To get started deploying your own Disconnected Space, you need:
-
-- An Upbound organization account string, provided by your Upbound account representative
-- A `token.json` license, provided by your Upbound account representative
-
-
-
-- An AWS account and the AWS CLI
-
-
-
-
-
-- An Azure account and the Azure CLI
-
-
-
-
-
-- A GCP account and the GCP CLI
-
-
-
-:::important
-Disconnected Spaces are a business critical feature of Upbound and require a license token to successfully complete the installation. [Contact Upbound][contact-upbound] if you want to try out Upbound with Disconnected Spaces.
-:::
-
-## Provision the hosting environment
-
-### Create a cluster
-
-
-
-Configure the name and target region you want the EKS cluster deployed to.
-
-```ini
-export SPACES_CLUSTER_NAME=upbound-space-quickstart
-export SPACES_REGION=us-east-1
-```
-
-Provision a 3-node cluster using eksctl. The following is a minimal example
-configuration; adjust the instance type and node count to your needs.
-
-```bash
-cat <<EOF | eksctl create cluster -f -
-apiVersion: eksctl.io/v1alpha5
-kind: ClusterConfig
-metadata:
-  name: ${SPACES_CLUSTER_NAME}
-  region: ${SPACES_REGION}
-managedNodeGroups:
-  - name: spaces-nodes
-    instanceType: m5.xlarge
-    desiredCapacity: 3
-EOF
-```
-
-
-
-Configure the name and target region you want the AKS cluster deployed to.
-
-```ini
-export SPACES_RESOURCE_GROUP_NAME=upbound-space-quickstart
-export SPACES_CLUSTER_NAME=upbound-space-quickstart
-export SPACES_LOCATION=westus
-```
-
-Provision a new Azure resource group.
- -```bash -az group create --name ${SPACES_RESOURCE_GROUP_NAME} --location ${SPACES_LOCATION} -``` - -Provision a 3-node cluster. - -```bash -az aks create -g ${SPACES_RESOURCE_GROUP_NAME} -n ${SPACES_CLUSTER_NAME} \ - --enable-managed-identity \ - --node-count 3 \ - --node-vm-size Standard_D4s_v4 \ - --enable-addons monitoring \ - --enable-msi-auth-for-monitoring \ - --generate-ssh-keys \ - --network-plugin kubenet \ - --network-policy calico -``` - -Get the kubeconfig of your AKS cluster. - -```bash -az aks get-credentials --resource-group ${SPACES_RESOURCE_GROUP_NAME} --name ${SPACES_CLUSTER_NAME} -``` - - - - - -Configure the name and target region you want the GKE cluster deployed to. - -```ini -export SPACES_PROJECT_NAME=upbound-spaces-project -export SPACES_CLUSTER_NAME=upbound-spaces-quickstart -export SPACES_LOCATION=us-west1-a -``` - -Create a new project and set it as the current project. - -```bash -gcloud projects create ${SPACES_PROJECT_NAME} -gcloud config set project ${SPACES_PROJECT_NAME} -``` - -Provision a 3-node cluster. - -```bash -gcloud container clusters create ${SPACES_CLUSTER_NAME} \ - --enable-network-policy \ - --num-nodes=3 \ - --zone=${SPACES_LOCATION} \ - --machine-type=e2-standard-4 -``` - -Get the kubeconfig of your GKE cluster. - -```bash -gcloud container clusters get-credentials ${SPACES_CLUSTER_NAME} --zone=${SPACES_LOCATION} -``` - - - -## Configure the pre-install - -### Set your Upbound organization account details - -Set your Upbound organization account string as an environment variable for use in future steps - -```ini -export UPBOUND_ACCOUNT= -``` - -### Set up pre-install configurations - -Export the path of the license token JSON file provided by your Upbound account representative. - -```ini {copy-lines="2"} -# Change the path to where you saved the token. -export SPACES_TOKEN_PATH="/path/to/token.json" -``` - -Set the version of Spaces software you want to install. - -```ini -export SPACES_VERSION= -``` - -Set the router host and cluster type. The `SPACES_ROUTER_HOST` is the domain name that's used to access the control plane instances. It's used by the ingress controller to route requests. - -```ini -export SPACES_ROUTER_HOST="proxy.upbound-127.0.0.1.nip.io" -``` - -:::important -Make sure to replace the placeholder text in `SPACES_ROUTER_HOST` and provide a real domain that you own. -::: - - -## Install the Spaces software - - -### Install cert-manager - -Install cert-manager. - -```bash -kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.11.0/cert-manager.yaml -kubectl wait deployment -n cert-manager cert-manager-webhook --for condition=Available=True --timeout=360s -``` - - - -### Install ALB Load Balancer - -```bash -helm install aws-load-balancer-controller aws-load-balancer-controller --namespace kube-system \ - --repo https://aws.github.io/eks-charts \ - --set clusterName=${SPACES_CLUSTER_NAME} \ - --set serviceAccount.create=false \ - --set serviceAccount.name=aws-load-balancer-controller \ - --wait -``` - - - -### Install ingress-nginx - -Starting with Spaces v1.10.0, you need to configure the ingress-nginx -controller to allow SSL-passthrough mode. You can do so by passing the -`--enable-ssl-passthrough=true` command-line option to the controller. 
-The following Helm install command enables this with the `controller.extraArgs` -parameter: - - - -```bash -helm upgrade --install ingress-nginx ingress-nginx \ - --create-namespace --namespace ingress-nginx \ - --repo https://kubernetes.github.io/ingress-nginx \ - --version 4.12.1 \ - --set 'controller.service.type=LoadBalancer' \ - --set 'controller.extraArgs.enable-ssl-passthrough=true' \ - --set 'controller.service.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-type=external' \ - --set 'controller.service.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-scheme=internet-facing' \ - --set 'controller.service.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-nlb-target-type=ip' \ - --set 'controller.service.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-healthcheck-protocol=http' \ - --set 'controller.service.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-healthcheck-path=/healthz' \ - --set 'controller.service.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-healthcheck-port=10254' \ - --wait -``` - - - - - -```bash -helm upgrade --install ingress-nginx ingress-nginx \ - --create-namespace --namespace ingress-nginx \ - --repo https://kubernetes.github.io/ingress-nginx \ - --version 4.12.1 \ - --set 'controller.service.type=LoadBalancer' \ - --set 'controller.extraArgs.enable-ssl-passthrough=true' \ - --set 'controller.service.annotations.service\.beta\.kubernetes\.io/azure-load-balancer-health-probe-request-path=/healthz' \ - --wait -``` - - - - - -```bash -helm upgrade --install ingress-nginx ingress-nginx \ - --create-namespace --namespace ingress-nginx \ - --repo https://kubernetes.github.io/ingress-nginx \ - --version 4.12.1 \ - --set 'controller.service.type=LoadBalancer' \ - --set 'controller.extraArgs.enable-ssl-passthrough=true' \ - --wait -``` - - - -### Install Upbound Spaces software - -Create an image pull secret so that the cluster can pull Upbound Spaces images. - -```bash -kubectl create ns upbound-system -kubectl -n upbound-system create secret docker-registry upbound-pull-secret \ - --docker-server=https://xpkg.upbound.io \ - --docker-username="$(jq -r .accessId $SPACES_TOKEN_PATH)" \ - --docker-password="$(jq -r .token $SPACES_TOKEN_PATH)" -``` - -Log in with Helm to be able to pull chart images for the installation commands. - -```bash -jq -r .token $SPACES_TOKEN_PATH | helm registry login xpkg.upbound.io -u $(jq -r .accessId $SPACES_TOKEN_PATH) --password-stdin -``` - -Install the Spaces software. - -```bash -helm -n upbound-system upgrade --install spaces \ - oci://xpkg.upbound.io/spaces-artifacts/spaces \ - --version "${SPACES_VERSION}" \ - --set "ingress.host=${SPACES_ROUTER_HOST}" \ - --set "account=${UPBOUND_ACCOUNT}" \ - --set "authentication.hubIdentities=true" \ - --set "authorization.hubRBAC=true" \ - --wait -``` - -### Create a DNS record - -:::important -If you chose to create a public ingress, you also need to create a DNS record for the load balancer of the public facing ingress. Do this before you create your first control plane. -::: - -Create a DNS record for the load balancer of the public facing ingress. 
To get the address for the Ingress, run the following:
-
-
-
-```bash
-kubectl get ingress \
-  -n upbound-system mxe-router-ingress \
-  -o jsonpath='{.status.loadBalancer.ingress[0].hostname}'
-```
-
-
-
-
-
-```bash
-kubectl get ingress \
-  -n upbound-system mxe-router-ingress \
-  -o jsonpath='{.status.loadBalancer.ingress[0].ip}'
-```
-
-
-
-
-
-```bash
-kubectl get ingress \
-  -n upbound-system mxe-router-ingress \
-  -o jsonpath='{.status.loadBalancer.ingress[0].ip}'
-```
-
-
-
-If the preceding command doesn't return a load balancer address, your provider may not have allocated it yet. Once it's available, add a DNS record for the `ROUTER_HOST` to point to the given load balancer address. If it's an IPv4 address, add an A record. If it's a domain name, add a CNAME record.
-
-## Configure the up CLI
-
-With your kubeconfig pointed at the Kubernetes cluster where you installed
-Upbound Spaces, create a new profile in the `up` CLI. This profile interacts
-with your Space:
-
-```bash
-up profile create --use ${SPACES_CLUSTER_NAME} --type=disconnected --organization ${UPBOUND_ACCOUNT}
-```
-
-Optionally, log in to your Upbound account using the new profile so you can use the Upbound Marketplace with this profile as well:
-
-```bash
-up login
-```
-
-
-## Connect to your Space
-
-
-Use `up ctx` to create a kubeconfig context pointed at your new Space:
-
-```bash
-up ctx disconnected/$(kubectl config current-context)
-```
-
-## Create your first control plane
-
-You can now create a control plane with the `up` CLI:
-
-```bash
-up ctp create ctp1
-```
-
-You can also create a control plane with kubectl, applying the same kind of
-`ControlPlane` manifest shown in the Spaces management guide:
-
-```yaml
-cat <<EOF | kubectl apply -f -
-apiVersion: spaces.upbound.io/v1beta1
-kind: ControlPlane
-metadata:
-  name: ctp1
-  namespace: default
-EOF
-```
-
-```yaml
-observability:
-  spacesCollector:
-    env:
-      - name: API_KEY
-        valueFrom:
-          secretKeyRef:
-            name: my-secret
-            key: api-key
-    config:
-      exporters:
-        otlphttp:
-          endpoint: ""
-          headers:
-            api-key: ${env:API_KEY}
-      exportPipeline:
-        logs:
-          - otlphttp
-        metrics:
-          - otlphttp
-        traces:
-          - otlphttp
-```
-
-
-You can export metrics, logs, and traces from your Crossplane installation, Spaces
-infrastructure (controller, API, router, etc.), provider-helm, and
-provider-kubernetes.
-
-### Router metrics
-
-The Spaces router component uses Envoy as a reverse proxy and exposes detailed
-metrics about request handling, circuit breakers, and connection pooling.
-Upbound collects these metrics in your Space after you enable Space-level
-observability.
-
-Envoy metrics in Upbound include:
-
-- **Upstream cluster metrics** - Request status codes, timeouts, retries, and latency for traffic to control planes and services
-- **Circuit breaker metrics** - Connection and request circuit breaker state for both `DEFAULT` and `HIGH` priority levels
-- **Downstream listener metrics** - Client connections and requests received
-- **HTTP connection manager metrics** - End-to-end HTTP request processing and latency
-
-For a complete list of available router metrics and example PromQL queries, see the [Router metrics reference][router-ref].
-
-### Router tracing
-
-The Spaces router generates distributed traces through OpenTelemetry integration,
-providing end-to-end visibility into request flow across the system. Use these
-traces to debug latency issues, understand request paths, and correlate errors
-across services.
-
-The router uses:
-
-- **Protocol**: OTLP (OpenTelemetry Protocol) over gRPC
-- **Service name**: `spaces-router`
-- **Transport**: TLS-encrypted connection to telemetry collector
-
-#### Trace configuration
-
-Enable tracing and configure the sampling rate with the following Helm values:
-
-```yaml
-observability:
-  enabled: true
-  tracing:
-    enabled: true
-    sampling:
-      rate: 0.1 # Sample 10% of new traces (0.0-1.0)
-```
-
-The sampling behavior depends on whether a parent trace context exists:
-
-- **With parent context**: If a `traceparent` header is present, the parent's
-  sampling decision is respected, enabling proper distributed tracing across services.
-- **Root spans**: For new traces without a parent, Envoy samples based on
-  `x-request-id` hashing. The default sampling rate is 10%.
-
-#### TLS configuration for external collectors
-
-To send traces to an external OTLP collector, configure the endpoint and TLS settings:
-
-```yaml
-observability:
-  enabled: true
-  tracing:
-    enabled: true
-    endpoint: "otlp-gateway.example.com"
-    port: 443
-    tls:
-      caBundleSecretRef: "custom-ca-secret"
-```
-
-If `caBundleSecretRef` is set, the router uses the CA bundle from the referenced
-Kubernetes secret. The secret must contain a key named `ca.crt` with the
-PEM-encoded CA bundle. If not set, the router uses the Spaces CA for the
-in-cluster collector.
-
-#### Custom trace tags
-
-The router adds custom tags to every span to enable filtering and grouping by
-control plane:
-
-| Tag | Source | Description |
-|-----|--------|-------------|
-| `controlplane.id` | `x-upbound-mxp-id` header | Control plane UUID |
-| `controlplane.name` | `x-upbound-mxp-host` header | Internal vcluster hostname |
-| `hostcluster.id` | `x-upbound-hostcluster-id` header | Host cluster identifier |
-
-These tags enable queries like "show all slow requests to control plane X" or
-"find errors for control planes in host cluster Y."
-
-#### Example trace
-
-The following example shows the attributes from a successful GET request:
-
-```text
-Span: ingress
-├─ Service: spaces-router
-├─ Duration: 8.025ms
-├─ Attributes:
-│  ├─ http.method: GET
-│  ├─ http.status_code: 200
-│  ├─ upstream_cluster: ctp-b2b37aaa-ee55-492c-ba0c-4d561a6325fa-api-cluster
-│  ├─ controlplane.id: b2b37aaa-ee55-492c-ba0c-4d561a6325fa
-│  ├─ controlplane.name: vcluster.mxp-b2b37aaa-ee55-492c-ba0c-4d561a6325fa-system
-│  └─ response_size: 1827
-```
-
-## Available metrics
-
-Space-level observability collects metrics from multiple infrastructure components:
-
-### Infrastructure component metrics
-
-- Crossplane controller metrics
-- Spaces controller, API, and router metrics
-- Provider metrics (provider-helm, provider-kubernetes)
-
-### Router metrics
-
-The router component exposes Envoy proxy metrics for monitoring traffic flow and
-service health.
Key metric categories include:
-
-- `envoy_cluster_upstream_rq_*` - Upstream request metrics (status codes, timeouts, retries, latency)
-- `envoy_cluster_circuit_breakers_*` - Circuit breaker state and capacity
-- `envoy_listener_downstream_*` - Client connection and request metrics
-- `envoy_http_downstream_*` - HTTP request processing metrics
-
-Example query to monitor total request rate:
-
-```promql
-sum(rate(envoy_cluster_upstream_rq_total{job="spaces-router-envoy"}[5m]))
-```
-
-Example query for P95 latency:
-
-```promql
-histogram_quantile(
-  0.95,
-  sum by (le) (
-    rate(envoy_cluster_upstream_rq_time_bucket{job="spaces-router-envoy"}[5m])
-  )
-)
-```
-
-For detailed router metrics documentation and more query examples, see the [Router metrics reference][router-ref].
-
-
-## OpenTelemetryCollector image
-
-
-Control plane (`SharedTelemetry`) and Space observability deploy the same custom
-OpenTelemetry Collector image. The OpenTelemetry Collector image supports
-`otlphttp`, `datadog`, and `debug` exporters.
-
-For more information on observability configuration, review the [Helm chart reference][helm-chart-reference].
-
-## Observability in control planes
-
-Read the [observability documentation][observability-documentation] to learn
-about the features Upbound offers for collecting telemetry from control planes.
-
-
-## Router metrics reference {#router-ref}
-
-To avoid overwhelming observability tools with hundreds of Envoy metrics, an
-allow-list filters metrics to only the following metric families.
-
-### Upstream cluster metrics
-
-Metrics tracking requests sent from Envoy to configured upstream clusters.
-Individual control planes, spaces-api, and other services are each considered
-an upstream cluster. Use these metrics to monitor service health, identify
-upstream errors, and measure backend latency.
-
-| Metric | Description |
-|--------|-------------|
-| `envoy_cluster_upstream_rq_xx_total` | HTTP status codes (2xx, 3xx, 4xx, 5xx) with label `envoy_response_code_class` |
-| `envoy_cluster_upstream_rq_timeout_total` | Requests that timed out waiting for upstream |
-| `envoy_cluster_upstream_rq_retry_limit_exceeded_total` | Requests that exhausted retry attempts |
-| `envoy_cluster_upstream_rq_total` | Total upstream requests |
-| `envoy_cluster_upstream_rq_time_bucket` | Latency histogram (for P50/P95/P99 calculations) |
-| `envoy_cluster_upstream_rq_time_sum` | Sum of request durations |
-| `envoy_cluster_upstream_rq_time_count` | Count of requests |
-
-### Circuit breaker metrics
-
-
-
-Metrics tracking circuit breaker state and remaining capacity. Circuit breakers
-prevent cascading failures by limiting connections and concurrent requests to
-unhealthy upstreams. Two priority levels exist: `DEFAULT` for watch requests and
-`HIGH` for API requests.
- - -| Name | Description | -|--------|-------------| -| `envoy_cluster_circuit_breakers_default_cx_open` | `DEFAULT` priority connection circuit breaker open (gauge) | -| `envoy_cluster_circuit_breakers_default_rq_open` | `DEFAULT` priority request circuit breaker open (gauge) | -| `envoy_cluster_circuit_breakers_default_remaining_cx` | Available `DEFAULT` priority connections (gauge) | -| `envoy_cluster_circuit_breakers_default_remaining_rq` | Available `DEFAULT` priority request slots (gauge) | -| `envoy_cluster_circuit_breakers_high_cx_open` | `HIGH` priority connection circuit breaker open (gauge) | -| `envoy_cluster_circuit_breakers_high_rq_open` | `HIGH` priority request circuit breaker open (gauge) | -| `envoy_cluster_circuit_breakers_high_remaining_cx` | Available `HIGH` priority connections (gauge) | -| `envoy_cluster_circuit_breakers_high_remaining_rq` | Available `HIGH` priority request slots (gauge) | - -### Downstream listener metrics - -Metrics tracking requests received from clients such as kubectl and API consumers. -Use these metrics to monitor client connection patterns, overall request volume, -and responses sent to external users. - -| Name | Description | -|--------|-------------| -| `envoy_listener_downstream_rq_xx_total` | HTTP status codes for responses sent to clients | -| `envoy_listener_downstream_rq_total` | Total requests received from clients | -| `envoy_listener_downstream_cx_total` | Total connections from clients | -| `envoy_listener_downstream_cx_active` | Currently active client connections (gauge) | - - - -### HTTP connection manager metrics - - -Metrics from Envoy's HTTP connection manager tracking end-to-end request -processing. These metrics provide a comprehensive view of the HTTP request -lifecycle including status codes and client-perceived latency. - -| Name | Description | -|--------|-------------| -| `envoy_http_downstream_rq_xx` | HTTP status codes (note: no `_total` suffix for this metric family) | -| `envoy_http_downstream_rq_total` | Total HTTP requests received | -| `envoy_http_downstream_rq_time_bucket` | Downstream request latency histogram | -| `envoy_http_downstream_rq_time_sum` | Sum of downstream request durations | -| `envoy_http_downstream_rq_time_count` | Count of downstream requests | - -[router-ref]: #router-ref -[observability-documentation]: /spaces/howtos/observability -[opentelemetry-collector]: https://opentelemetry.io/docs/collector/ -[opentelemetry-operator]: https://opentelemetry.io/docs/kubernetes/operator/ -[helm-chart-reference]: /reference/helm-reference diff --git a/spaces_versioned_docs/version-v1.14/howtos/self-hosted/spaces-management.md b/spaces_versioned_docs/version-v1.14/howtos/self-hosted/spaces-management.md deleted file mode 100644 index 3df61c306..000000000 --- a/spaces_versioned_docs/version-v1.14/howtos/self-hosted/spaces-management.md +++ /dev/null @@ -1,219 +0,0 @@ ---- -title: Interacting with Disconnected Spaces -sidebar_position: 10 -description: Common operations in Spaces ---- - -:::info API Version Information -This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Spaces management procedures are consistent across versions. - -For version compatibility details, see the . -::: - -## Spaces management - -### Create a Space - -To install an Upbound Space into a cluster, it's recommended you dedicate an entire Kubernetes cluster for the Space. You can use [up space init][up-space-init] to install an Upbound Space. 
Below is an example:
-
-```bash
-up space init "v1.9.0"
-```
-:::tip
-For a full guide to get started with Spaces, read the [quickstart][quickstart] guide.
-:::
-
-You can also install the helm chart for Spaces directly. In order for a Spaces install to succeed, you must install some prerequisites first and configure them. This includes:
-
-- UXP
-- provider-helm and provider-kubernetes
-- cert-manager
-
-Furthermore, the Spaces chart requires a pull secret, which Upbound must provide to you.
-
-```bash
-helm -n upbound-system upgrade --install spaces \
-  oci://xpkg.upbound.io/spaces-artifacts/spaces \
-  --version "v1.9.0" \
-  --set "ingress.host=your-host.com" \
-  --set "clusterType=eks" \
-  --set "account=your-upbound-account" \
-  --wait
-```
-For a complete tutorial of the helm install, read one of the deployment guides for [AWS][aws], [Azure][azure], or [GCP][gcp], which cover the step-by-step process.
-
-### Upgrade a Space
-
-To upgrade a Space from one version to the next, use [up space upgrade][up-space-upgrade]. Spaces supports upgrading from version `ver x.N.*` to version `ver x.N+1.*`.
-
-```bash
-up space upgrade "v1.9.0"
-```
-
-You can also upgrade a Space by manually bumping the Helm chart version. Before
-upgrading, review the release notes for any breaking changes or
-special requirements:
-
-1. Review the release notes for the target version in the [Spaces Release Notes][spaces-release-notes]
-2. Upgrade the Space by updating the helm chart version:
-
-```bash
-helm -n upbound-system upgrade spaces \
-  oci://xpkg.upbound.io/spaces-artifacts/spaces \
-  --version "v1.9.0" \
-  --reuse-values \
-  --wait
-```
-
-For major version upgrades or configuration changes, extract your current values
-and adjust:
-
-```bash
-# Extract current values to a file
-helm -n upbound-system get values spaces > spaces-values.yaml
-
-# Upgrade with modified values
-helm -n upbound-system upgrade spaces \
-  oci://xpkg.upbound.io/spaces-artifacts/spaces \
-  --version "v1.9.0" \
-  -f spaces-values.yaml \
-  --wait
-```
-
-### Downgrade a Space
-
-To roll back a Space from one version to the previous, use [up space upgrade][up-space-upgrade-1]. Spaces supports downgrading from version `ver x.N.*` to version `ver x.N-1.*`.
-
-```bash
-up space upgrade --rollback
-```
-
-You can also downgrade a Space manually using Helm by specifying an earlier version:
-
-```bash
-helm -n upbound-system upgrade spaces \
-  oci://xpkg.upbound.io/spaces-artifacts/spaces \
-  --version "v1.8.0" \
-  --reuse-values \
-  --wait
-```
-
-When downgrading, make sure to:
-
-1. Check the [release notes][release-notes] for specific downgrade instructions
-2. Verify compatibility between the downgraded Space and any control planes
-3. Back up any critical data before proceeding
-
-### Uninstall a Space
-
-To uninstall a Space from a Kubernetes cluster, use [up space destroy][up-space-destroy]. A destroy operation uninstalls core components and orphans control planes and their associated resources.
-
-```bash
-up space destroy
-```
-
-## Control plane management
-
-You can manage control planes in a Space via the [up CLI][up-cli] or the Spaces-local Kubernetes API. When you install a Space, it defines a new API type, `kind: ControlPlane`, that you can use to create and manage control planes in the Space.
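-
-Because control planes are exposed as Kubernetes resources, you can discover the Space-level APIs from your Spaces cluster context. A quick, non-exhaustive check:
-
-```bash
-# List the kinds served by the spaces.upbound.io API group, including ControlPlane.
-kubectl api-resources --api-group=spaces.upbound.io
-```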
-
-### Create a control plane
-
-To create a control plane in a Space using `up`, run the following:
-
-```bash
-up ctp create ctp1
-```
-
-You can also declare a new control plane like the example below and apply it to your Spaces cluster:
-
-```yaml
-apiVersion: spaces.upbound.io/v1beta1
-kind: ControlPlane
-metadata:
-  name: ctp1
-  namespace: default
-spec:
-  writeConnectionSecretToRef:
-    name: kubeconfig-ctp1
-    namespace: default
-```
-
-This manifest:
-
-- Creates a new control plane in the space called `ctp1`.
-- Publishes the kubeconfig to connect to the control plane to a secret in the Spaces cluster, called `kubeconfig-ctp1`.
-
-### Connect to a control plane
-
-To connect to a control plane in a Space using `up`, run the following:
-
-```bash
-up ctp connect new-control-plane
-```
-
-The command changes your kubeconfig's current context to the control plane you specify. If you want to change your kubeconfig back to a previous context, run:
-
-```bash
-up ctp disconnect
-```
-
-If you configured your control plane to publish connection details, you can also access it this way. Once the control plane is ready, use the secret (containing connection details) to connect to the API server of your control plane. For the `ctp1` example above:
-
-```bash
-kubectl get secret kubeconfig-ctp1 -n default -o jsonpath='{.data.kubeconfig}' | base64 -d > /tmp/ctp1.yaml
-```
-
-Reference the kubeconfig whenever you want to interact directly with the API server of the control plane (vs the Space's API server):
-
-```bash
-kubectl get providers --kubeconfig=/tmp/ctp1.yaml
-```
-
-### Configure a control plane
-
-Spaces offers a built-in feature that allows you to connect a control plane to a Git source. This experience is like when a control plane runs in [Upbound's SaaS environment][upbound-s-saas-environment]. Upbound recommends using the built-in Git integration to drive configuration of your control planes in a Space.
-
-Learn more in the [Spaces Git integration][spaces-git-integration] documentation.
- -### List control planes - -To list all control planes in a Space using `up`, run the following: - -```bash -up ctp list -``` - -Or you can use Kubernetes-style semantics to list the control plane: - -```bash -kubectl get controlplanes -``` - - -### Delete a control plane - -To delete a control plane in a Space using `up`, run the following: - -```bash -up ctp delete ctp1 -``` - -Or you can use Kubernetes-style semantics to delete the control plane: - -```bash -kubectl delete controlplane ctp1 -``` - - -[up-space-init]: /reference/cli-reference -[quickstart]: / -[aws]: /spaces/howtos/self-hosted/self-hosted-spaces-deployment -[azure]:/spaces/howtos/self-hosted/self-hosted-spaces-deployment -[gcp]:/spaces/howtos/self-hosted/self-hosted-spaces-deployment -[up-space-upgrade]: /reference/cli-reference -[spaces-release-notes]: /reference/release-notes/spaces -[up-space-upgrade-1]: /reference/cli-reference -[release-notes]: /reference/release-notes/spaces -[up-space-destroy]: /reference/cli-reference -[up-cli]: /reference/cli-reference -[upbound-s-saas-environment]: /spaces/howtos/self-hosted/spaces-management -[spaces-git-integration]: /spaces/howtos/self-hosted/gitops diff --git a/spaces_versioned_docs/version-v1.14/howtos/self-hosted/troubleshooting.md b/spaces_versioned_docs/version-v1.14/howtos/self-hosted/troubleshooting.md deleted file mode 100644 index 8d1ca6517..000000000 --- a/spaces_versioned_docs/version-v1.14/howtos/self-hosted/troubleshooting.md +++ /dev/null @@ -1,132 +0,0 @@ ---- -title: Troubleshooting -sidebar_position: 100 -description: A guide for troubleshooting an issue that occurs in a Space ---- - -Find guidance below on how to find solutions for issues you encounter when deploying and using an Upbound Space. Use the tips below as a supplement to the observability metrics discussed in the [Observability][observability] page. - -## General tips - -Most issues fall into two general categories: - -1. issues with the Spaces management plane -2. issues on a control plane - -If your control plane doesn't reach a `Ready` state, it's indicative of the former. If your control plane is in a created and running state, but resources aren't reconciling, it's indicative of the latter. - -### Spaces component layout - -Run `kubectl get pods -A` against the cluster hosting a Space. You should see a variety of pods across several namespaces. 
It should look something like this: - -```bash -NAMESPACE NAME READY STATUS RESTARTS AGE -cert-manager cert-manager-6d6769565c-mc5df 1/1 Running 0 25m -cert-manager cert-manager-cainjector-744bb89575-nw4fg 1/1 Running 0 25m -cert-manager cert-manager-webhook-759d6dcbf7-ps4mq 1/1 Running 0 25m -ingress-nginx ingress-nginx-controller-7f8ccfccc6-6szlp 1/1 Running 0 25m -kube-system coredns-5d78c9869d-4p477 1/1 Running 0 26m -kube-system coredns-5d78c9869d-pdxt6 1/1 Running 0 26m -kube-system etcd-kind-control-plane 1/1 Running 0 26m -kube-system kindnet-8s7pq 1/1 Running 0 26m -kube-system kube-apiserver-kind-control-plane 1/1 Running 0 26m -kube-system kube-controller-manager-kind-control-plane 1/1 Running 0 26m -kube-system kube-proxy-l68r8 1/1 Running 0 26m -kube-system kube-scheduler-kind-control-plane 1/1 Running 0 26m -local-path-storage local-path-provisioner-6bc4bddd6b-qsdjt 1/1 Running 0 26m -mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system coredns-5dc69d6447-f56rh-x-kube-system-x-vcluster 1/1 Running 0 21m -mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system crossplane-6b6d67bc66-6b8nx-x-upbound-system-x-vcluster 1/1 Running 0 20m -mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system crossplane-rbac-manager-78f6fc7cb4-pjkhc-x-upbound-s-12253c3c4e 1/1 Running 0 20m -mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system kube-state-metrics-7f8f4dcc5b-8p8c4 1/1 Running 0 22m -mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system mxp-gateway-68f546b9c8-xnz5j-x-upbound-system-x-vcluster 1/1 Running 0 20m -mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system mxp-ksm-config-54655667bb-hv9br 1/1 Running 0 22m -mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system mxp-readyz-5f7f97d967-b98bw 1/1 Running 0 22m -mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system otlp-collector-56d7d46c8d-g5sh5-x-upbound-system-x-vcluster 1/1 Running 0 20m -mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system vcluster-67c9fb8959-ppb2m 1/1 Running 0 22m -mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system vcluster-api-6bfbccc49d-ffgpj 1/1 Running 0 22m -mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system vcluster-controller-7cc6855656-8c46b 1/1 Running 0 22m -mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system vcluster-etcd-0 1/1 Running 0 22m -mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system vector-754b494b84-wljw4 1/1 Running 0 22m -mxp-system mxp-charts-chartmuseum-7587f77558-8tltb 1/1 Running 0 23m -upbound-system crossplane-b4dc7b4c9-6hjh5 1/1 Running 0 25m -upbound-system crossplane-contrib-provider-helm-ce18dd03e6e4-7945d8985-4gcwr 1/1 Running 0 24m -upbound-system crossplane-contrib-provider-kubernetes-1f1e32c1957d-577756gs2x4 1/1 Running 0 24m -upbound-system crossplane-rbac-manager-d8cb49cbc-gbvvf 1/1 Running 0 25m -upbound-system spaces-controller-6647677cf9-5zl5q 1/1 Running 0 24m -upbound-system spaces-router-bc78c96d7-kzts2 2/2 Running 0 24m -``` - -What you are seeing is: - -- Pods in the `upbound-system` namespace are components required to run the management plane of the Space. This includes the `spaces-controller`, `spaces-router`, and install of UXP. -- Pods in the `mxp-{GUID}-system` namespace are components that collectively power a control plane. Notable call outs include pod names that look like `vcluster-api-{GUID}` and `vcluster-controller-{GUID}`, which are integral components of a control plane. -- Pods in other notable namespaces, including `cert-manager` and `ingress-nginx`, are prerequisite components that support a Space's successful operation. 
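Because every control plane gets its own `mxp-{GUID}-system` namespace, a quick way to scope the view to control plane workloads is to filter on that prefix:

```bash
# Show only pods that belong to control plane namespaces
kubectl get pods -A | grep '^mxp-'
```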
### Troubleshooting tips for the Spaces management plane

Start by getting the status of all the pods in a Space:

1. Make sure the current context of your kubeconfig points at the Kubernetes cluster hosting your Space
2. Get the status of all the pods in the Space:
```bash
kubectl get pods -A
```
3. Scan the `Status` column to see if any of the pods report a status besides `Running`.
4. Scan the `Restarts` column to see if any of the pods have restarted.
5. If you notice a status other than `Running` or see pods that restarted, investigate their events by running:
```bash
kubectl describe pod <pod-name> -n <namespace>
```

Next, inspect the status of objects and releases:

1. Make sure the current context of your kubeconfig points at the Kubernetes cluster hosting your Space
2. Inspect the objects in your Space. If any are unhealthy, describe those objects to get the events:
```bash
kubectl get objects
```
3. Inspect the releases in your Space. If any are unhealthy, describe those releases to get the events:
```bash
kubectl get releases
```

### Troubleshooting tips for control planes in a Space

General troubleshooting in a control plane starts by fetching the events of the control plane:

1. Make sure the current context of your kubeconfig points at the Kubernetes cluster hosting your Space
2. Run the following to fetch your control planes:
```bash
kubectl get ctp
```
3. Describe the control plane by providing its name, found in the preceding instruction:
```bash
kubectl describe controlplanes.spaces.upbound.io <control-plane-name>
```

## Issues

### Your control plane is stuck in a 'creating' state

#### Error: unknown field "ports" in io.k8s.api.networking.v1.NetworkPolicySpec

The Spaces software emits this error when it attempts to install a Helm release named `control-plane-host-policies`. The full error is:

_CannotCreateExternalResource failed to install release: unable to build kubernetes objects from release manifest: error validating "": error validating data: ValidationError(NetworkPolicy.spec): unknown field "ports" in io.k8s.api.networking.v1.NetworkPolicySpec_

This error may be caused by running a Space on an earlier version of Kubernetes than Spaces supports (`v1.26` or later). To resolve this issue, upgrade the host Kubernetes cluster to `v1.26` or later.

### Your Spaces install fails

#### Error: You tried to install a Space on a previous Crossplane installation

If you try to install a Space on an existing cluster that previously had Crossplane or UXP on it, you may encounter errors. Due to how the Spaces installer tests for the presence of UXP, it may detect orphaned CRDs that weren't cleaned up by the previous uninstall of Crossplane. You may need to manually [remove old Crossplane CRDs][remove-old-crossplane-crds] for the installer to properly detect the UXP prerequisite.

[observability]: /spaces/howtos/observability
[remove-old-crossplane-crds]: https://docs.crossplane.io/latest/guides/uninstall-crossplane/

diff --git a/spaces_versioned_docs/version-v1.14/howtos/self-hosted/use-argo.md b/spaces_versioned_docs/version-v1.14/howtos/self-hosted/use-argo.md
deleted file mode 100644
index d58f7db44..000000000
--- a/spaces_versioned_docs/version-v1.14/howtos/self-hosted/use-argo.md
+++ /dev/null
@@ -1,228 +0,0 @@
---
title: Use ArgoCD Plugin
sidebar_position: 15
description: A guide for integrating Argo with control planes in a Space.
aliases:
  - /all-spaces/self-hosted-spaces/use-argo
  - /deploy/disconnected-spaces/use-argo-flux
  - /all-spaces/self-hosted-spaces/use-argo-flux
  - /connect/use-argo
---

:::info API Version Information
This guide covers the ArgoCD Plugin, a preview feature available in all supported versions (v1.9-v1.14+) for self-hosted deployments.

For details on GitOps patterns and related features across versions, see [Automation and GitOps Overview](/spaces/howtos/automation-and-gitops/overview/).
:::

:::important
This feature is in preview and is off by default. To enable it, set `features.alpha.argocdPlugin.enabled=true` when installing Spaces:

```bash
up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
  ...
  --set "features.alpha.argocdPlugin.enabled=true"
```
:::

Spaces provides an optional plugin to assist with integrating a control plane in a Space with Argo CD. You must enable the plugin for the entire Space at Spaces install or upgrade time. The plugin's job is to propagate the connection details of each control plane in a Space to Argo CD. By default, Upbound stores these connection details in a Kubernetes secret named after the control plane. To run Argo CD across multiple namespaces, Upbound recommends enabling the `features.alpha.argocdPlugin.useUIDFormatForCTPSecrets` flag to use a UID-based format for secret names to avoid conflicts.

:::tip
For general guidance on integrating Upbound with GitOps flows, see [GitOps with Control Planes][gitops-with-control-planes].
:::

## On cluster Argo CD

If you are running Argo CD on the same cluster as the Space, run the following to enable the plugin:

```bash {hl_lines="3-4"}
up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
  --set "account=${UPBOUND_ACCOUNT}" \
  --set "features.alpha.argocdPlugin.enabled=true" \
  --set "features.alpha.argocdPlugin.useUIDFormatForCTPSecrets=true" \
  --set "features.alpha.argocdPlugin.target.secretNamespace=argocd"
```

```bash {hl_lines="7-8"}
helm -n upbound-system upgrade --install spaces \
  oci://xpkg.upbound.io/spaces-artifacts/spaces \
  --version "${SPACES_VERSION}" \
  --set "ingress.host=${SPACES_ROUTER_HOST}" \
  --set "account=${UPBOUND_ACCOUNT}" \
  --set "features.alpha.argocdPlugin.enabled=true" \
  --set "features.alpha.argocdPlugin.useUIDFormatForCTPSecrets=true" \
  --set "features.alpha.argocdPlugin.target.secretNamespace=argocd" \
  --wait
```

The important flags are:

- `features.alpha.argocdPlugin.enabled=true`
- `features.alpha.argocdPlugin.useUIDFormatForCTPSecrets=true`
- `features.alpha.argocdPlugin.target.secretNamespace=argocd`

The first flag enables the feature, the second uses a UID-based format for control plane secret names to avoid conflicts, and the third indicates the namespace on the cluster where you installed Argo CD.

Be sure to [configure Argo][configure-argo] after it's installed.
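Once the plugin is running, you can confirm it propagated control plane connection details by listing cluster secrets in the Argo CD namespace. This sketch assumes the plugin registers control planes as standard Argo CD cluster secrets, which carry the `argocd.argoproj.io/secret-type=cluster` label:

```bash
# List the clusters Argo CD knows about, including plugin-propagated control planes
kubectl get secrets -n argocd -l argocd.argoproj.io/secret-type=cluster
```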
## External cluster Argo CD

If you are running Argo CD on an external cluster from where you installed your Space, you need to provide some extra flags:

```bash {hl_lines="3-7"}
up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
  --set "account=${UPBOUND_ACCOUNT}" \
  --set "features.alpha.argocdPlugin.enabled=true" \
  --set "features.alpha.argocdPlugin.useUIDFormatForCTPSecrets=true" \
  --set "features.alpha.argocdPlugin.target.secretNamespace=argocd" \
  --set "features.alpha.argocdPlugin.target.externalCluster.enabled=true" \
  --set "features.alpha.argocdPlugin.target.externalCluster.secret.name=my-argo-cluster" \
  --set "features.alpha.argocdPlugin.target.externalCluster.secret.key=kubeconfig"
```

```bash {hl_lines="7-11"}
helm -n upbound-system upgrade --install spaces \
  oci://xpkg.upbound.io/spaces-artifacts/spaces \
  --version "${SPACES_VERSION}" \
  --set "ingress.host=${SPACES_ROUTER_HOST}" \
  --set "account=${UPBOUND_ACCOUNT}" \
  --set "features.alpha.argocdPlugin.enabled=true" \
  --set "features.alpha.argocdPlugin.useUIDFormatForCTPSecrets=true" \
  --set "features.alpha.argocdPlugin.target.secretNamespace=argocd" \
  --set "features.alpha.argocdPlugin.target.externalCluster.enabled=true" \
  --set "features.alpha.argocdPlugin.target.externalCluster.secret.name=my-argo-cluster" \
  --set "features.alpha.argocdPlugin.target.externalCluster.secret.key=kubeconfig" \
  --wait
```

The extra flags are:

- `features.alpha.argocdPlugin.target.externalCluster.enabled=true`
- `features.alpha.argocdPlugin.target.externalCluster.secret.name=my-argo-cluster`
- `features.alpha.argocdPlugin.target.externalCluster.secret.key=kubeconfig`

These flags tell the plugin (running in Spaces) where your Argo CD instance is. After you've done this at install time, you also need to create a `Secret` on the Spaces cluster. This secret must contain a kubeconfig pointing to your Argo CD instance. The secret needs to be in the same namespace as the `spaces-controller`, which is `upbound-system`.

Once you enable and configure the plugin, it automatically propagates connection details for your control planes to your Argo CD instance. You can then target the control plane and use Argo to sync Crossplane-related objects to it.

Be sure to [configure Argo][configure-argo-1] after it's installed.

## Configure Argo

Argo's default configuration causes it to try to query for resource kinds that don't exist in control planes. You should configure Argo's [general configmap][general-configmap] to include the resource group/kinds which make sense in the context of control planes. For example, the concept of `nodes` isn't exposed in control planes.
- -To configure Argo CD, connect to the cluster where you've installed it and edit the configmap: - -```bash -kubectl edit configmap argocd-cm -n argocd -``` - -Adjust the resource inclusions and exclusions under the `data` field of the configmap: - -```yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: argocd-cm - namespace: argocd -data: - resource.exclusions: | - - apiGroups: - - "*" - kinds: - - "*" - clusters: - - "*" - resource.inclusions: | - - apiGroups: - - "*" - kinds: - - Provider - - Configuration - clusters: - - "*" -``` - -The preceding configuration causes Argo to exclude syncing **all** resource group/kinds--except Crossplane `providers` and `configurations`--for **all** control planes. You're encouraged to adjust the `resource.inclusions` to include the types that make sense for your control plane, such as an `XRD` you've built with Crossplane. You're also encouraged to customize the `clusters` pattern to selectively apply these exclusions/inclusions to control planes (for example, `control-plane-prod-*`). - -## Control plane connection secrets - -To deploy control planes through Argo CD, you need to configure the `writeConnectionSecretToRef` field in your control plane spec. This field specifies where to store the control plane's `kubeconfig` and makes connection details available to Argo CD. - -### Basic Configuration - -In your control plane manifest, include the `writeConnectionSecretToRef` field: - -```yaml -apiVersion: spaces.upbound.io/v1beta1 -kind: ControlPlane -metadata: - name: my-control-plane - namespace: my-control-plane-group -spec: - writeConnectionSecretToRef: - name: kubeconfig-my-control-plane - namespace: my-control-plane-group - # ... other control plane configuration -``` - -### Parameters - -The `writeConnectionSecretToRef` field requires two parameters: - -- `name`: A unique name for the secret containing the kubeconfig (`kubeconfig-my-control-plane`) -- `namespace`: The Kubernetes namespace where you store the secret, which must match the metadata namespace. The system copies it into the `argocd` namespace when you set the `features.alpha.argocdPlugin.target.secretNamespace=argocd` configuration parameter. - -Control plane labels automatically propagate to the connection secret, which allows you to use label selectors in Argo CD for automated discovery and management. - -This configuration enables Argo CD to automatically discover and manage resources on your control planes. 
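To tie this together, below is a sketch of an Argo CD `Application` that syncs Crossplane configuration from Git to a control plane the plugin registered. The repository URL, path, and destination name are assumptions; match the destination `name` to however the plugin registered the cluster (inspect the propagated cluster secret if unsure):

```yaml
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: ctp1-config
  namespace: argocd
spec:
  project: default
  source:
    # Hypothetical repository holding Crossplane packages and composite resources
    repoURL: https://github.com/example-org/control-plane-config.git
    targetRevision: main
    path: ctp1
  destination:
    # Cluster name derived from the control plane's propagated connection secret
    name: ctp1
  syncPolicy:
    automated:
      prune: true
      selfHeal: true
```

Remember that the `resource.inclusions` you configured above must cover every kind this Application syncs.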
- - -[gitops-with-control-planes]: /spaces/howtos/cloud-spaces/gitops -[configure-argo]: #configure-argo -[configure-argo-1]: #configure-argo -[general-configmap]: https://argo-cd.readthedocs.io/en/stable/operator-manual/argocd-cm-yaml/ diff --git a/spaces_versioned_docs/version-v1.14/howtos/self-hosted/workload-id/_category_.json b/spaces_versioned_docs/version-v1.14/howtos/self-hosted/workload-id/_category_.json deleted file mode 100644 index c5ecc93f6..000000000 --- a/spaces_versioned_docs/version-v1.14/howtos/self-hosted/workload-id/_category_.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "label": "Workload Identity Configuration", - "position": 2, - "collapsed": true, - "customProps": { - "plan": "business" - } - -} - - diff --git a/spaces_versioned_docs/version-v1.14/howtos/self-hosted/workload-id/backup-restore-config.md b/spaces_versioned_docs/version-v1.14/howtos/self-hosted/workload-id/backup-restore-config.md deleted file mode 100644 index 935ca69ec..000000000 --- a/spaces_versioned_docs/version-v1.14/howtos/self-hosted/workload-id/backup-restore-config.md +++ /dev/null @@ -1,384 +0,0 @@ ---- -title: Backup and Restore Workload ID -weight: 1 -description: Configure workload identity for Spaces Backup and Restore ---- -import GlobalLanguageSelector, { CodeBlock } from '@site/src/components/GlobalLanguageSelector'; - - - - - - - -Workload-identity authentication lets you use access policies to grant temporary -AWS credentials to your Kubernetes pod with a service account. Assigning IAM roles and service accounts allows the pod to assume the IAM role dynamically and much more securely than static credentials. - -This guide walks you through creating an IAM trust role policy and applying it -to your EKS cluster to handle backup and restore storage. - - - - - -Workload-identity authentication lets you use access policies to grant your -self-hosted Space cluster access to your cloud providers. Workload identity -authentication grants temporary Azure credentials to your Kubernetes pod based on -a service account. Assigning managed identities and service accounts allows the pod to -authenticate with Azure resources dynamically and much more securely than static credentials. - -This guide walks you through creating a managed identity and federated credential for your AKS -cluster to handle backup and restore storage. - - - - - -Workload-identity authentication lets you use access policies to grant your -self-hosted Space cluster access to your cloud providers. Workload identity -authentication grants temporary GCP credentials to your Kubernetes pod based on -a service account. Assigning IAM roles and service accounts allows the pod to -access cloud resources dynamically and much more securely than static credentials. - -This guide walks you through configuring workload identity for your GKE -cluster to handle backup and restore storage. - - - -## Prerequisites - - -To set up a workload-identity, you'll need: - - -- A self-hosted Space cluster -- Administrator access in your cloud provider -- Helm and `kubectl` - -## About the backup and restore component - -The `mxp-controller` component handles backup and restore workloads. It needs to -access your cloud storage to store and retrieve backups. By default, this -component runs in each control plane's host namespace. - -## Configuration - - - -Upbound supports workload-identity configurations in AWS with IAM Roles for -Service Accounts and EKS pod identity association. 
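The trust policies in this section scope access to a service account in each control plane's host namespace. A hedged way to discover those namespaces, assuming the `mxp-{GUID}-system` naming convention used elsewhere in these docs:

```bash
# List control plane host namespaces on the Spaces cluster
kubectl get namespaces -o name | grep 'mxp-'
```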
#### IAM Roles for Service Accounts (IRSA)

With IRSA, you can associate a Kubernetes service account in an EKS cluster with an AWS IAM role. Upbound authenticates workloads with that service account as the IAM role using temporary credentials instead of static role credentials. IRSA relies on AWS `AssumeRoleWithWebIdentity` `STS` to exchange OIDC ID tokens with the IAM role's temporary credentials. IRSA uses the `eks.amazonaws.com/role-arn` annotation to link the service account and the IAM role.

First, create an IAM role with appropriate permissions to access your S3 bucket:

```json
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": [
        "s3:GetObject",
        "s3:PutObject",
        "s3:ListBucket",
        "s3:DeleteObject"
      ],
      "Resource": [
        "arn:aws:s3:::${YOUR_BACKUP_BUCKET}",
        "arn:aws:s3:::${YOUR_BACKUP_BUCKET}/*"
      ]
    }
  ]
}
```

Next, ensure your EKS cluster has an OIDC identity provider:

```shell
eksctl utils associate-iam-oidc-provider --cluster ${YOUR_CLUSTER_NAME} --approve
```

Configure the IAM role trust policy with the namespace for each provisioned control plane.

```json
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Principal": {
        "Federated": "arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:oidc-provider/${YOUR_OIDC_PROVIDER}"
      },
      "Action": "sts:AssumeRoleWithWebIdentity",
      "Condition": {
        "StringEquals": {
          "${YOUR_OIDC_PROVIDER}:aud": "sts.amazonaws.com",
          "${YOUR_OIDC_PROVIDER}:sub": "system:serviceaccount:${YOUR_NAMESPACE}:mxp-controller"
        }
      }
    }
  ]
}
```

In your control plane, pass the `--set` flag with the Spaces Helm chart parameters for the Backup and Restore component:

```shell
--set controlPlanes.mxpController.serviceAccount.annotations."eks\.amazonaws\.com/role-arn"="${SPACES_BR_IAM_ROLE_ARN}"
```

This command allows the backup and restore component to authenticate with your dedicated IAM role in your EKS cluster environment.

#### EKS pod identities

Upbound also supports EKS Pod Identity configuration. EKS Pod Identities allow you to create a pod identity association with your Kubernetes namespace, a service account, and an IAM role, which allows the EKS control plane to automatically handle the credential exchange.

First, create an IAM role with appropriate permissions to access your S3 bucket:

```json
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": [
        "s3:GetObject",
        "s3:PutObject",
        "s3:ListBucket",
        "s3:DeleteObject"
      ],
      "Resource": [
        "arn:aws:s3:::${YOUR_BACKUP_BUCKET}",
        "arn:aws:s3:::${YOUR_BACKUP_BUCKET}/*"
      ]
    }
  ]
}
```

When you install or upgrade your Space with Helm, add the backup and restore values:

```shell
helm upgrade spaces spaces-helm-chart \
  --set "backup.enabled=true" \
  --set "backup.storage.provider=aws" \
  --set "backup.storage.aws.region=${YOUR_AWS_REGION}" \
  --set "backup.storage.aws.bucket=${YOUR_BACKUP_BUCKET}"
```

After Upbound provisions your control plane, create a Pod Identity Association with the `aws` CLI:

```shell
aws eks create-pod-identity-association \
  --cluster-name ${YOUR_CLUSTER_NAME} \
  --namespace ${YOUR_CONTROL_PLANE_NAMESPACE} \
  --service-account mxp-controller \
  --role-arn arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:role/backup-restore-role
```

Upbound supports workload-identity configurations in Azure with Azure's built-in workload identity feature.
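Before updating cluster flags in the next step, you can check whether the OIDC issuer and workload identity add-on are already enabled on the cluster. A sketch, assuming the current AKS API field paths:

```bash
az aks show --name ${YOUR_AKS_CLUSTER_NAME} --resource-group ${YOUR_RESOURCE_GROUP} \
  --query "{oidcIssuer: oidcIssuerProfile.enabled, workloadIdentity: securityProfile.workloadIdentity.enabled}"
```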
#### Prepare your cluster

First, enable the OIDC issuer and workload identity in your AKS cluster:

```shell
az aks update --resource-group ${YOUR_RESOURCE_GROUP} --name ${YOUR_AKS_CLUSTER_NAME} --enable-oidc-issuer --enable-workload-identity
```

Next, find and store the OIDC issuer URL as an environment variable:

```shell
export AKS_OIDC_ISSUER="$(az aks show --name ${YOUR_AKS_CLUSTER_NAME} --resource-group ${YOUR_RESOURCE_GROUP} --query "oidcIssuerProfile.issuerUrl" --output tsv)"
```

#### Create a User-Assigned Managed Identity

Create a new managed identity to associate with the backup and restore component:

```shell
az identity create --name backup-restore-identity --resource-group ${YOUR_RESOURCE_GROUP} --location ${YOUR_LOCATION}
```

Retrieve the client ID and store it as an environment variable:

```shell
export USER_ASSIGNED_CLIENT_ID="$(az identity show --name backup-restore-identity --resource-group ${YOUR_RESOURCE_GROUP} --query clientId -otsv)"
```

Grant the managed identity you created access to your Azure Storage account:

```shell
az role assignment create \
  --role "Storage Blob Data Contributor" \
  --assignee ${USER_ASSIGNED_CLIENT_ID} \
  --scope /subscriptions/${YOUR_SUBSCRIPTION_ID}/resourceGroups/${YOUR_RESOURCE_GROUP}/providers/Microsoft.Storage/storageAccounts/${YOUR_STORAGE_ACCOUNT}
```

#### Apply the managed identity role

In your control plane, pass the `--set` flag with the Spaces Helm chart parameters for the backup and restore component:

```shell
--set controlPlanes.mxpController.serviceAccount.annotations."azure\.workload\.identity/client-id"="${YOUR_USER_ASSIGNED_CLIENT_ID}"
--set controlPlanes.mxpController.pod.customLabels."azure\.workload\.identity/use"="true"
```

#### Create a Federated Identity credential

```shell
az identity federated-credential create \
  --name backup-restore-federated-identity \
  --identity-name backup-restore-identity \
  --resource-group ${YOUR_RESOURCE_GROUP} \
  --issuer ${AKS_OIDC_ISSUER} \
  --subject system:serviceaccount:${YOUR_CONTROL_PLANE_NAMESPACE}:mxp-controller
```

Upbound supports workload-identity configurations in GCP with IAM principal identifiers and service account impersonation.
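Before changing cluster settings in the next step, you can check whether a workload identity pool is already configured on the GKE cluster. A sketch, assuming the current `gcloud` field name:

```bash
gcloud container clusters describe ${YOUR_CLUSTER_NAME} --region=${YOUR_REGION} \
  --format="value(workloadIdentityConfig.workloadPool)"
```

Empty output means Workload Identity Federation still needs to be enabled.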
- -#### Prepare your cluster - -First, enable Workload Identity Federation on your GKE cluster: - -```shell -gcloud container clusters update ${YOUR_CLUSTER_NAME} \ - --workload-pool=${YOUR_PROJECT_ID}.svc.id.goog \ - --region=${YOUR_REGION} -``` - -#### Create a Google Service Account - -Create a service account for the backup and restore component: - -```shell -gcloud iam service-accounts create backup-restore-sa \ - --display-name "Backup Restore Service Account" \ - --project ${YOUR_PROJECT_ID} -``` - -Grant the service account access to your Google Cloud Storage bucket: - -```shell -gcloud projects add-iam-policy-binding ${YOUR_PROJECT_ID} \ - --member "serviceAccount:backup-restore-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com" \ - --role "roles/storage.objectAdmin" -``` - -#### Configure Workload Identity - -Create an IAM binding to grant the Kubernetes service account access to the Google service account: - -```shell -gcloud iam service-accounts add-iam-policy-binding \ - backup-restore-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com \ - --role roles/iam.workloadIdentityUser \ - --member "serviceAccount:${YOUR_PROJECT_ID}.svc.id.goog[${YOUR_CONTROL_PLANE_NAMESPACE}/mxp-controller]" -``` - -#### Apply the service account configuration - -In your control plane, pass the `--set` flag with the Spaces Helm chart -parameters for the backup and restore component: - -```shell ---set controlPlanes.mxpController.serviceAccount.annotations."iam\.gke\.io/gcp-service-account"="backup-restore-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com" -``` - - - -## Verify your configuration - -After you apply the configuration use `kubectl` to verify the service account -has the correct annotation: - -```shell -kubectl get serviceaccount mxp-controller -n ${YOUR_CONTROL_PLANE_NAMESPACE} -o yaml -``` - -Verify the `mxp-controller` pod is running: - -```shell -kubectl get pods -n ${YOUR_CONTROL_PLANE_NAMESPACE} | grep mxp-controller -``` - -## Restart workload - -You must manually restart a workload's pod when you add the workload identity annotations to the running pod's service account. - - - -This restart enables the EKS pod identity webhook to inject the necessary -environment for using IRSA. - - - - - -This restart enables the workload identity webhook to inject the necessary -environment for using Azure workload identity. - - - - - -This restart enables the workload identity webhook to inject the necessary -environment for using GCP workload identity. - - - -```shell -kubectl rollout restart deployment mxp-controller -n ${YOUR_CONTROL_PLANE_NAMESPACE} -``` - -## Use cases - - -Configuring backup and restore with workload identity eliminates the need for -static credentials in your cluster and the overhead of credential rotation. -These benefits are helpful in: - -* Disaster recovery scenarios -* Control plane migration -* Compliance requirements -* Rollbacks after unsuccessful upgrades - -## Next steps - -Now that you have a workload identity configured for the backup and restore -component, visit the [Backup Configuration][backup-restore-guide] documentation. 
Other workload identity guides are:

* [Billing][billing]
* [Shared Secrets][secrets]

[backup-restore-guide]: /spaces/howtos/backup-and-restore
[billing]: /spaces/howtos/self-hosted/workload-id/billing-config
[secrets]: /spaces/howtos/self-hosted/workload-id/eso-config

diff --git a/spaces_versioned_docs/version-v1.14/howtos/self-hosted/workload-id/billing-config.md b/spaces_versioned_docs/version-v1.14/howtos/self-hosted/workload-id/billing-config.md
deleted file mode 100644
index 323a6122f..000000000
--- a/spaces_versioned_docs/version-v1.14/howtos/self-hosted/workload-id/billing-config.md
+++ /dev/null
@@ -1,454 +0,0 @@
---
title: Billing Workload ID
weight: 1
description: Configure workload identity for Spaces Billing
---
import GlobalLanguageSelector, { CodeBlock } from '@site/src/components/GlobalLanguageSelector';

Workload-identity authentication lets you use access policies to grant your self-hosted Space cluster access to your cloud providers. Workload identity authentication grants temporary AWS credentials to your Kubernetes pod based on a service account. Assigning IAM roles and service accounts allows the pod to assume the IAM role dynamically and much more securely than static credentials.

This guide walks you through creating an IAM trust role policy and applying it to your EKS cluster for billing in your Space cluster.

Workload-identity authentication lets you use access policies to grant your self-hosted Space cluster access to your cloud providers. Workload identity authentication grants temporary Azure credentials to your Kubernetes pod based on a service account. Assigning managed identities and service accounts allows the pod to authenticate with Azure resources dynamically and much more securely than static credentials.

This guide walks you through creating a managed identity and federated credential for your AKS cluster for billing in your Space cluster.

Workload-identity authentication lets you use access policies to grant your self-hosted Space cluster access to your cloud providers. Workload identity authentication grants temporary GCP credentials to your Kubernetes pod based on a service account. Assigning IAM roles and service accounts allows the pod to access cloud resources dynamically and much more securely than static credentials.

This guide walks you through configuring workload identity for your GKE cluster's billing component.

## Prerequisites

To set up workload identity, you'll need:

- A self-hosted Space cluster
- Administrator access in your cloud provider
- Helm and `kubectl`

## About the billing component

The `vector.dev` component handles billing metrics collection in Spaces. It stores account data in your cloud storage. By default, this component runs in each control plane's host namespace.

## Configuration

Upbound supports workload-identity configurations in AWS with IAM Roles for Service Accounts and EKS pod identity association.

#### IAM Roles for Service Accounts (IRSA)

With IRSA, you can associate a Kubernetes service account in an EKS cluster with an AWS IAM role. Upbound authenticates workloads with that service account as the IAM role using temporary credentials instead of static role credentials. IRSA relies on AWS `AssumeRoleWithWebIdentity` `STS` to exchange OIDC ID tokens with the IAM role's temporary credentials. IRSA uses the `eks.amazonaws.com/role-arn` annotation to link the service account and the IAM role.
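The Helm parameters below ultimately add this annotation to the `vector` service account in each control plane's host namespace. As an end-state sketch (the namespace GUID, account ID, and role name are hypothetical):

```yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: vector
  # Hypothetical control plane host namespace
  namespace: mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system
  annotations:
    # Links this service account to the IAM role via IRSA
    eks.amazonaws.com/role-arn: arn:aws:iam::111122223333:role/billing-role
```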
**Create an IAM role and trust policy**

First, create an IAM role with appropriate permissions to access your S3 bucket:

```json
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": [
        "s3:GetObject",
        "s3:PutObject",
        "s3:ListBucket",
        "s3:DeleteObject"
      ],
      "Resource": [
        "arn:aws:s3:::${YOUR_BILLING_BUCKET}",
        "arn:aws:s3:::${YOUR_BILLING_BUCKET}/*"
      ]
    }
  ]
}
```

You must configure the IAM role trust policy with the exact match for each provisioned control plane. An example of a trust policy for a single control plane is below:

```json
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Principal": {
        "Federated": "arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:oidc-provider/${YOUR_OIDC_PROVIDER}"
      },
      "Action": "sts:AssumeRoleWithWebIdentity",
      "Condition": {
        "StringEquals": {
          "${YOUR_OIDC_PROVIDER}:aud": "sts.amazonaws.com",
          "${YOUR_OIDC_PROVIDER}:sub": "system:serviceaccount:${YOUR_NAMESPACE}:vector"
        }
      }
    }
  ]
}
```

**Configure the EKS OIDC provider**

Next, ensure your EKS cluster has an OIDC identity provider:

```shell
eksctl utils associate-iam-oidc-provider --cluster ${YOUR_CLUSTER_NAME} --approve
```

**Apply the IAM role**

In your control plane, pass the `--set` flag with the Spaces Helm chart parameters for the Billing component:

```shell
--set "billing.enabled=true"
--set "billing.storage.provider=aws"
--set "billing.storage.aws.region=${YOUR_AWS_REGION}"
--set "billing.storage.aws.bucket=${YOUR_BILLING_BUCKET}"
--set "billing.storage.secretRef.name="
--set controlPlanes.vector.serviceAccount.customAnnotations."eks\.amazonaws\.com/role-arn"="arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:role/${YOUR_BILLING_ROLE_NAME}"
```

:::important
You **must** set the `billing.storage.secretRef.name` to an empty string to enable workload identity for the billing component.
:::

#### EKS pod identities

Upbound also supports EKS Pod Identity configuration. EKS Pod Identities allow you to create a pod identity association with your Kubernetes namespace, a service account, and an IAM role, which allows the EKS control plane to automatically handle the credential exchange.

**Create an IAM role**

First, create an IAM role with appropriate permissions to access your S3 bucket:

```json
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": [
        "s3:GetObject",
        "s3:PutObject",
        "s3:ListBucket"
      ],
      "Resource": [
        "arn:aws:s3:::${YOUR_BILLING_BUCKET}",
        "arn:aws:s3:::${YOUR_BILLING_BUCKET}/*"
      ]
    }
  ]
}
```

**Configure your Space with Helm**

When you install or upgrade your Space with Helm, add the billing values:

```shell
helm upgrade spaces spaces-helm-chart \
  --set "billing.enabled=true" \
  --set "billing.storage.provider=aws" \
  --set "billing.storage.aws.region=${YOUR_AWS_REGION}" \
  --set "billing.storage.aws.bucket=${YOUR_BILLING_BUCKET}" \
  --set "billing.storage.secretRef.name="
```

**Create a Pod Identity Association**

After Upbound provisions your control plane, create a Pod Identity Association with the `aws` CLI:

```shell
aws eks create-pod-identity-association \
  --cluster-name ${YOUR_CLUSTER_NAME} \
  --namespace ${YOUR_CONTROL_PLANE_NAMESPACE} \
  --service-account vector \
  --role-arn arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:role/${YOUR_BILLING_ROLE_NAME}
```

Upbound supports workload-identity configurations in Azure with Azure's built-in workload identity feature.
- -First, enable the OIDC issuer and workload identity in your AKS cluster: - -```shell -az aks update --resource-group ${YOUR_RESOURCE_GROUP} --name ${YOUR_AKS_CLUSTER_NAME} --enable-oidc-issuer --enable-workload-identity -``` - -Next, find and store the OIDC issuer URL as an environment variable: - -```shell -export AKS_OIDC_ISSUER="$(az aks show --name ${YOUR_AKS_CLUSTER_NAME} --resource-group ${YOUR_RESOURCE_GROUP} --query "oidcIssuerProfile.issuerUrl" --output tsv)" -``` - -Create a new managed identity to associate with the billing component: - -```shell -az identity create --name billing-identity --resource-group ${YOUR_RESOURCE_GROUP} --location ${YOUR_LOCATION} -``` - -Retrieve the client ID and store it as an environment variable: - -```shell -export USER_ASSIGNED_CLIENT_ID="$(az identity show --name billing-identity --resource-group ${YOUR_RESOURCE_GROUP} --query clientId -otsv)" -``` - -Grant the managed identity you created to access your Azure Storage account: - -```shell -az role assignment create --role "Storage Blob Data Contributor" --assignee $USER_ASSIGNED_CLIENT_ID --scope /subscriptions/${YOUR_SUBSCRIPTION_ID}/resourceGroups/${YOUR_RESOURCE_GROUP}/providers/Microsoft.Storage/storageAccounts/${YOUR_STORAGE_ACCOUNT} -``` - -In your control plane, pass the `--set` flag with the Spaces Helm chart -parameters for the billing component: - -```shell ---set "billing.enabled=true" ---set "billing.storage.provider=azure" ---set "billing.storage.azure.storageAccount=${SPACES_BILLING_STORAGE_ACCOUNT}" ---set "billing.storage.azure.container=${SPACES_BILLING_STORAGE_CONTAINER}" ---set "billing.storage.secretRef.name=" ---set controlPlanes.vector.serviceAccount.customAnnotations."azure\.workload\.identity/client-id"="${SPACES_BILLING_APP_ID}" ---set controlPlanes.vector.pod.customLabels."azure\.workload\.identity/use"="true" -``` - -Create a federated credential to establish trust between the managed identity -and your AKS OIDC provider: - -```shell -az identity federated-credential create \ - --name billing-federated-identity \ - --identity-name billing-identity \ - --resource-group ${YOUR_RESOURCE_GROUP} \ - --issuer ${AKS_OIDC_ISSUER} \ - --subject system:serviceaccount:${YOUR_CONTROL_PLANE_NAMESPACE}:vector -``` - - - - - -Upbound supports workload-identity configurations in GCP with IAM principal -identifiers or service account impersonation. - -#### IAM principal identifiers - -IAM principal identifiers allow you to grant permissions directly to -Kubernetes service accounts without additional annotation. Upbound recommends -this approach for ease-of-use and flexibility. - -First, enable Workload Identity Federation on your GKE cluster: - -```shell -gcloud container clusters update ${YOUR_CLUSTER_NAME} \ - --workload-pool=${YOUR_PROJECT_ID}.svc.id.goog \ - --region=${YOUR_REGION} -``` - -Next, configure your Spaces installation with the Spaces Helm chart parameters: - -```shell ---set "billing.enabled=true" ---set "billing.storage.provider=gcp" ---set "billing.storage.gcp.bucket=${YOUR_BILLING_BUCKET}" ---set "billing.storage.secretRef.name=" -``` - -:::important -You **must** set the `billing.storage.secretRef.name` to an empty string to -enable workload identity for the billing component. 
-::: - -Grant the necessary permissions to your Kubernetes service account: - -```shell -gcloud projects add-iam-policy-binding ${YOUR_PROJECT_ID} \ - --member="principalSet://iam.googleapis.com/projects/${YOUR_PROJECT_NUMBER}/locations/global/workloadIdentityPools/${YOUR_PROJECT_ID}.svc.id.goog/attribute.kubernetes_namespace/${YOUR_CONTROL_PLANE_NAMESPACE}/attribute.kubernetes_service_account/vector" \ - --role="roles/storage.objectAdmin" -``` - -Enable uniform bucket-level access on your storage bucket: - -```shell -gcloud storage buckets update gs://${YOUR_BILLING_BUCKET} --uniform-bucket-level-access -``` - -#### Service account impersonation - -Service account impersonation allows you to link a Kubernetes service account to -a GCP service account. The Kubernetes service account assumes the permissions of -the GCP service account you specify. - -Enable workload id federation on your GKE cluster: - -```shell -gcloud container clusters update ${YOUR_CLUSTER_NAME} \ - --workload-pool=${YOUR_PROJECT_ID}.svc.id.goog \ - --region=${YOUR_REGION} -``` - -Next, create a dedicated service account for your billing operations: - -```shell -gcloud iam service-accounts create billing-sa \ - --project=${YOUR_PROJECT_ID} -``` - -Grant storage permissions to the service account you created: - -```shell -gcloud projects add-iam-policy-binding ${YOUR_PROJECT_ID} \ - --member="serviceAccount:billing-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com" \ - --role="roles/storage.objectAdmin" -``` - -Link the Kubernetes service account to the GCP service account: - -```shell -gcloud iam service-accounts add-iam-policy-binding \ - billing-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com \ - --role="roles/iam.workloadIdentityUser" \ - --member="serviceAccount:${YOUR_PROJECT_ID}.svc.id.goog[${YOUR_CONTROL_PLANE_NAMESPACE}/vector]" -``` - -In your control plane, pass the `--set` flag with the Spaces Helm chart -parameters for the billing component: - -```shell ---set "billing.enabled=true" ---set "billing.storage.provider=gcp" ---set "billing.storage.gcp.bucket=${YOUR_BILLING_BUCKET}" ---set "billing.storage.secretRef.name=" ---set controlPlanes.vector.serviceAccount.customAnnotations."iam\.gke\.io/gcp-service-account"="billing-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com" -``` - - - -## Verify your configuration - -After you apply the configuration use `kubectl` to verify the service account -has the correct annotation: - -```shell -kubectl get serviceaccount vector -n ${YOUR_CONTROL_PLANE_NAMESPACE} -o yaml -``` - -Verify the `vector` pod is running: - -```shell -kubectl get pods -n ${YOUR_CONTROL_PLANE_NAMESPACE} | grep vector -``` - -## Restart workload - - - -You must manually restart a workload's pod when you add the -`eks.amazonaws.com/role-arn key` annotation to the running pod's service -account. - -This restart enables the EKS pod identity webhook to inject the necessary -environment for using IRSA. - - - - - -You must manually restart a workload's pod when you add the workload identity annotations to the running pod's service account. - -This restart enables the workload identity webhook to inject the necessary -environment for using Azure workload identity. - - - - - -GCP workload identity doesn't require pod restarts after configuration changes. 
If you do need to restart the workload, use the `kubectl` command to force the component restart:

```shell
kubectl rollout restart deployment vector -n ${YOUR_CONTROL_PLANE_NAMESPACE}
```

## Use cases

Using workload identity authentication for billing eliminates the need for static credentials in your cluster as well as the overhead of credential rotation. These benefits are helpful in:

* Resource usage tracking across teams/projects
* Cost allocation for multi-tenant environments
* Financial auditing requirements
* Capacity billing and resource optimization
* Automated billing workflows

## Next steps

Now that you have workload identity configured for the billing component, visit the [Billing guide][billing-guide] for more information.

Other workload identity guides are:

* [Backup and restore][backuprestore]
* [Shared Secrets][secrets]

[billing-guide]: /spaces/howtos/self-hosted/billing
[backuprestore]: /spaces/howtos/self-hosted/workload-id/backup-restore-config
[secrets]: /spaces/howtos/self-hosted/workload-id/eso-config

diff --git a/spaces_versioned_docs/version-v1.14/howtos/self-hosted/workload-id/eso-config.md b/spaces_versioned_docs/version-v1.14/howtos/self-hosted/workload-id/eso-config.md
deleted file mode 100644
index c1418c171..000000000
--- a/spaces_versioned_docs/version-v1.14/howtos/self-hosted/workload-id/eso-config.md
+++ /dev/null
@@ -1,503 +0,0 @@
---
title: Shared Secrets Workload ID
weight: 1
description: Configure workload identity for Spaces Shared Secrets
---
import GlobalLanguageSelector, { CodeBlock } from '@site/src/components/GlobalLanguageSelector';

Workload-identity authentication lets you use access policies to grant your self-hosted Space cluster access to your cloud providers. Workload identity authentication grants temporary AWS credentials to your Kubernetes pod based on a service account. Assigning IAM roles and service accounts allows the pod to assume the IAM role dynamically and much more securely than static credentials.

This guide walks you through creating an IAM trust role policy and applying it to your EKS cluster for secret sharing with Kubernetes.

Workload-identity authentication lets you use access policies to grant your self-hosted Space cluster access to your cloud providers. Workload identity authentication grants temporary Azure credentials to your Kubernetes pod based on a service account. Assigning managed identities and service accounts allows the pod to authenticate with Azure resources dynamically and much more securely than static credentials.

This guide walks you through creating a managed identity and federated credential for your AKS cluster for shared secrets in your Space cluster.

Workload-identity authentication lets you use access policies to grant your self-hosted Space cluster access to your cloud providers. Workload identity authentication grants temporary GCP credentials to your Kubernetes pod based on a service account. Assigning IAM roles and service accounts allows the pod to access cloud resources dynamically and much more securely than static credentials.

This guide walks you through configuring workload identity for your GKE cluster's Shared Secrets component.
## Prerequisites

To set up workload identity, you'll need:

- A self-hosted Space cluster
- Administrator access in your cloud provider
- Helm and `kubectl`

## About the Shared Secrets component

The External Secrets Operator (ESO) runs in each control plane's host namespace as `external-secrets-controller`. It needs to access your external secrets management service, like AWS Secrets Manager.

To configure your shared secrets workflow controller, you must:

* Annotate the Kubernetes service account to associate it with a cloud-side principal (such as an IAM role, service account, or enterprise application). The workload must then use this service account.
* Label the workload (pod) to allow the injection of a temporary credential set, enabling authentication.

The External Secrets Operator (ESO) component runs in each control plane's host namespace as `external-secrets-controller`. It synchronizes secrets from external APIs into Kubernetes secrets. Shared secrets allow you to manage credentials outside your Kubernetes cluster while making them available to your application.

The External Secrets Operator (ESO) component runs in each control plane's host namespace as `external-secrets-controller`. It synchronizes secrets from external APIs into Kubernetes secrets. Shared secrets allow you to manage credentials outside your Kubernetes cluster while making them available to your application.

## Configuration

Upbound supports workload-identity configurations in AWS with IAM Roles for Service Accounts or EKS pod identity association.

#### IAM Roles for Service Accounts (IRSA)

With IRSA, you can associate a Kubernetes service account in an EKS cluster with an AWS IAM role. Upbound authenticates workloads with that service account as the IAM role using temporary credentials instead of static role credentials. IRSA relies on AWS `AssumeRoleWithWebIdentity` `STS` to exchange OIDC ID tokens with the IAM role's temporary credentials. IRSA uses the `eks.amazonaws.com/role-arn` annotation to link the service account and the IAM role.

**Create an IAM role and trust policy**

First, create an IAM role with appropriate permissions to access AWS Secrets Manager:

```json
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": [
        "secretsmanager:GetSecretValue",
        "secretsmanager:DescribeSecret",
        "ssm:GetParameter"
      ],
      "Resource": [
        "arn:aws:secretsmanager:${YOUR_REGION}:${YOUR_AWS_ACCOUNT_ID}:secret:${YOUR_SECRET_PREFIX}*",
        "arn:aws:ssm:${YOUR_REGION}:${YOUR_AWS_ACCOUNT_ID}:parameter/${YOUR_PARAMETER_PREFIX}*"
      ]
    }
  ]
}
```

You must configure the IAM role trust policy with the exact match for each provisioned control plane.
An example of a trust policy for a single control plane is below:

```json
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Principal": {
        "Federated": "arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:oidc-provider/${YOUR_OIDC_PROVIDER}"
      },
      "Action": "sts:AssumeRoleWithWebIdentity",
      "Condition": {
        "StringEquals": {
          "${YOUR_OIDC_PROVIDER}:aud": "sts.amazonaws.com"
        },
        "StringLike": {
          "${YOUR_OIDC_PROVIDER}:sub": "system:serviceaccount:*:external-secrets-controller"
        }
      }
    }
  ]
}
```

**Configure the EKS OIDC provider**

Next, ensure your EKS cluster has an OIDC identity provider:

```shell
eksctl utils associate-iam-oidc-provider --cluster ${YOUR_CLUSTER_NAME} --approve
```

**Apply the IAM role**

In your control plane, pass the `--set` flag with the Spaces Helm chart parameters for the shared secrets component:

```shell
--set controlPlanes.sharedSecrets.serviceAccount.customAnnotations."eks\.amazonaws\.com/role-arn"="arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:role/${YOUR_ESO_ROLE_NAME}"
```

This command allows the shared secrets component to authenticate with your dedicated IAM role in your EKS cluster environment.

#### EKS pod identities

Upbound also supports EKS Pod Identity configuration. EKS Pod Identities allow you to create a pod identity association with your Kubernetes namespace, a service account, and an IAM role, which allows the EKS control plane to automatically handle the credential exchange.

**Create an IAM role**

First, create an IAM role with appropriate permissions to access AWS Secrets Manager:

```json
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": [
        "secretsmanager:GetSecretValue",
        "secretsmanager:DescribeSecret",
        "ssm:GetParameter"
      ],
      "Resource": [
        "arn:aws:secretsmanager:${YOUR_AWS_REGION}:${YOUR_AWS_ACCOUNT_ID}:secret:${YOUR_SECRET_PREFIX}*",
        "arn:aws:ssm:${YOUR_AWS_REGION}:${YOUR_AWS_ACCOUNT_ID}:parameter/${YOUR_PARAMETER_PREFIX}*"
      ]
    }
  ]
}
```

**Configure your Space with Helm**

When you install or upgrade your Space with Helm, add the shared secrets value:

```shell
helm upgrade spaces spaces-helm-chart \
  --set "sharedSecrets.enabled=true"
```

**Create a Pod Identity Association**

After Upbound provisions your control plane, create a Pod Identity Association with the `aws` CLI:

```shell
aws eks create-pod-identity-association \
  --cluster-name ${YOUR_CLUSTER_NAME} \
  --namespace ${YOUR_CONTROL_PLANE_NAMESPACE} \
  --service-account external-secrets-controller \
  --role-arn arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:role/${YOUR_ROLE_NAME}
```

Upbound supports workload-identity configurations in Azure with Azure's built-in workload identity feature.
First, enable the OIDC issuer and workload identity in your AKS cluster:

```shell
az aks update --resource-group ${YOUR_RESOURCE_GROUP} --name ${YOUR_AKS_CLUSTER_NAME} --enable-oidc-issuer --enable-workload-identity
```

Next, find and store the OIDC issuer URL as an environment variable:

```shell
export AKS_OIDC_ISSUER="$(az aks show --name ${YOUR_AKS_CLUSTER_NAME} --resource-group ${YOUR_RESOURCE_GROUP} --query "oidcIssuerProfile.issuerUrl" --output tsv)"
```

Create a new managed identity to associate with the shared secrets component:

```shell
az identity create --name secrets-identity --resource-group ${YOUR_RESOURCE_GROUP} --location ${YOUR_LOCATION}
```

Retrieve the client ID and store it as an environment variable:

```shell
export USER_ASSIGNED_CLIENT_ID="$(az identity show --name secrets-identity --resource-group ${YOUR_RESOURCE_GROUP} --query clientId -otsv)"
```

Grant the managed identity you created access to your Azure Key Vault:

```shell
az keyvault set-policy --name ${YOUR_KEY_VAULT_NAME} \
  --resource-group ${YOUR_RESOURCE_GROUP} \
  --object-id $(az identity show --name secrets-identity --resource-group ${YOUR_RESOURCE_GROUP} --query principalId -otsv) \
  --secret-permissions get list
```

In your control plane, pass the `--set` flag with the Spaces Helm chart parameters for the shared secrets component:

```shell
--set controlPlanes.sharedSecrets.serviceAccount.customAnnotations."azure\.workload\.identity/client-id"="${USER_ASSIGNED_CLIENT_ID}"
--set controlPlanes.sharedSecrets.pod.customLabels."azure\.workload\.identity/use"="true"
```

Next, create a federated credential to establish trust between the managed identity and your AKS OIDC provider:

```shell
az identity federated-credential create \
  --name secrets-federated-identity \
  --identity-name secrets-identity \
  --resource-group ${YOUR_RESOURCE_GROUP} \
  --issuer ${AKS_OIDC_ISSUER} \
  --subject system:serviceaccount:${YOUR_CONTROL_PLANE_NAMESPACE}:external-secrets-controller
```

Upbound supports workload-identity configurations in GCP with IAM principal identifiers or service account impersonation.

#### IAM principal identifiers

IAM principal identifiers allow you to grant permissions directly to Kubernetes service accounts without additional annotation. Upbound recommends this approach for ease of use and flexibility.

First, enable Workload Identity Federation on your GKE cluster:

```shell
gcloud container clusters update ${YOUR_CLUSTER_NAME} \
  --workload-pool=${YOUR_PROJECT_ID}.svc.id.goog \
  --region=${YOUR_REGION}
```

Next, grant the necessary permissions to your Kubernetes service account:

```shell
gcloud projects add-iam-policy-binding ${YOUR_PROJECT_ID} \
  --member="principalSet://iam.googleapis.com/projects/${YOUR_PROJECT_NUMBER}/locations/global/workloadIdentityPools/${YOUR_PROJECT_ID}.svc.id.goog/attribute.kubernetes_namespace/${YOUR_CONTROL_PLANE_NAMESPACE}/attribute.kubernetes_service_account/external-secrets-controller" \
  --role="roles/secretmanager.secretAccessor"
```

#### Service account impersonation

Service account impersonation allows you to link a Kubernetes service account to a GCP service account. The Kubernetes service account assumes the permissions of the GCP service account you specify.
Enable Workload Identity Federation on your GKE cluster:

```shell
gcloud container clusters update ${YOUR_CLUSTER_NAME} \
  --workload-pool=${YOUR_PROJECT_ID}.svc.id.goog \
  --region=${YOUR_REGION}
```

Next, create a dedicated service account for your secrets operations:

```shell
gcloud iam service-accounts create secrets-sa \
  --project=${YOUR_PROJECT_ID}
```

Grant secret access permissions to the service account you created:

```shell
gcloud projects add-iam-policy-binding ${YOUR_PROJECT_ID} \
  --member="serviceAccount:secrets-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com" \
  --role="roles/secretmanager.secretAccessor"
```

Link the Kubernetes service account to the GCP service account:

```shell
gcloud iam service-accounts add-iam-policy-binding \
  secrets-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com \
  --role="roles/iam.workloadIdentityUser" \
  --member="serviceAccount:${YOUR_PROJECT_ID}.svc.id.goog[${YOUR_CONTROL_PLANE_NAMESPACE}/external-secrets-controller]"
```

In your control plane, pass the `--set` flag with the Spaces Helm chart parameters for the shared secrets component:

```shell
--set controlPlanes.sharedSecrets.serviceAccount.customAnnotations."iam\.gke\.io/gcp-service-account"="secrets-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com"
```

## Verify your configuration

After you apply the configuration, use `kubectl` to verify the service account has the correct annotation:

```shell
kubectl get serviceaccount external-secrets-controller -n ${YOUR_CONTROL_PLANE_NAMESPACE} -o yaml
```

Verify the `external-secrets` pod is running correctly:

```shell
kubectl get pods -n ${YOUR_CONTROL_PLANE_NAMESPACE} | grep external-secrets
```

Verify the External Secrets Operator pod is running correctly:

```shell
kubectl get pods -n ${YOUR_CONTROL_PLANE_NAMESPACE} | grep external-secrets
```

Verify the `external-secrets` pod is running correctly:

```shell
kubectl get pods -n ${YOUR_CONTROL_PLANE_NAMESPACE} | grep external-secrets
```

## Restart workload

You must manually restart a workload's pod when you add the `eks.amazonaws.com/role-arn` annotation to the running pod's service account.

This restart enables the EKS pod identity webhook to inject the necessary environment for using IRSA.

You must manually restart a workload's pod when you add the workload identity annotations to the running pod's service account.

This restart enables the workload identity webhook to inject the necessary environment for using Azure workload identity.

GCP workload identity doesn't require pod restarts after configuration changes. If you do need to restart the workload, use the `kubectl` command to force the component restart:

```shell
kubectl rollout restart deployment external-secrets -n ${YOUR_CONTROL_PLANE_NAMESPACE}
```

## Use cases

Shared secrets with workload identity eliminates the need for static credentials in your cluster. These benefits are particularly helpful in:

* Secure application credentials management
* Database connection string storage
* API token management
* Compliance with secret rotation security standards
* Multi-environment configuration with centralized secret management

Using workload identity authentication for shared secrets eliminates the need for static credentials in your cluster as well as the overhead of credential rotation.
-These benefits are particularly helpful in:
-
-* Secure application credentials management
-* Database connection string storage
-* API token management
-* Compliance with secret rotation security standards
-
-Configuring the External Secrets Operator with workload identity eliminates the need for
-static credentials in your cluster and the overhead of credential rotation.
-These benefits are particularly helpful in:
-
-* Secure application credentials management
-* Database connection string storage
-* API token management
-* Compliance with secret rotation security standards
-
-## Next steps
-
-Now that you have workload identity configured for the shared secrets component, visit
-the [Shared Secrets][eso-guide] guide for more information.
-
-Other workload identity guides are:
-* [Backup and restore][backuprestore]
-* [Billing][billing]
-
-[eso-guide]: /spaces/howtos/secrets-management
-[backuprestore]: /spaces/howtos/self-hosted/workload-id/backup-restore-config
-[billing]: /spaces/howtos/self-hosted/workload-id/billing-config
diff --git a/spaces_versioned_docs/version-v1.14/howtos/simulations.md b/spaces_versioned_docs/version-v1.14/howtos/simulations.md
deleted file mode 100644
index 26cb0e657..000000000
--- a/spaces_versioned_docs/version-v1.14/howtos/simulations.md
+++ /dev/null
@@ -1,110 +0,0 @@
----
-title: Simulate changes to your Control Plane Projects
-sidebar_position: 100
-description: Use the Up CLI to mock operations before deploying to your environments.
----
-
-:::info API Version Information
-This guide covers Simulations, available in v1.10+ (GA since v1.13).
-
-For API specifications on the Simulations CRD, see the [Spaces API Reference](../../../reference/apis/spaces-api/).
-:::
-
-:::important
-The Simulations feature is in private preview. For more information, [reach out to Upbound][reach-out-to-upbound].
-:::
-
-Control plane simulations allow you to preview changes to your resources before
-applying them to your control planes. Like a plan or dry-run operation,
-simulations expose the impact of updates to compositions or claims without
-changing your actual resources.
-
-A control plane simulation creates a temporary copy of your control plane and
-returns a preview of the desired changes. The simulation change plan helps you
-reduce the risk of unexpected behavior based on your changes.
-
-## Simulation benefits
-
-Control planes are dynamic systems that automatically reconcile resources to
-match your desired state. Simulations provide visibility into this
-reconciliation process by showing:
-
-* New resources to create
-* Existing resources to change
-* Existing resources to delete
-* How configuration changes propagate through the system
-
-These insights are crucial when planning complex changes or upgrading Crossplane
-packages.
-
-## Requirements
-
-Simulations are available to select customers on Upbound Cloud with Team
-Tier or higher. For more information, [reach out to Upbound][reach-out-to-upbound-1].
-
-## How to simulate your control planes
-
-Before you start a simulation, build your project and use the `up
-project run` command to run your control plane.
-
-Use the `up project simulate` command with your control plane name to start the
-simulation:
-
-```shell {copy-lines="all"}
-up project simulate <control-plane-name> --complete-after=60s --terminate-on-finish
-```
-
-The `complete-after` flag determines how long to run the simulation before it completes and calculates the results. Depending on the change, a simulation may not complete within your defined interval, leaving unaffected resources marked as `unchanged`.
-
-The `terminate-on-finish` flag terminates the simulation after the time
-you set, deleting the control plane that ran the simulation.
-
-At the end of your simulation, your CLI returns:
-* A summary of the resources created, modified, or deleted
-* Diffs for each resource affected
-
-## View your simulation in the Upbound Console
-You can also view your simulation results in the Upbound Console:
-
-1. Navigate to your base control plane in the Upbound Console
-2. Select the "Simulations" tab in the menu
-3. Select a simulation object to see a change list of all
-   affected resources.
-
-The Console provides visual indications of changes:
-
-- Created Resources: Marked with green
-- Modified Resources: Marked with yellow
-- Deleted Resources: Marked with red
-- Unchanged Resources: Displayed in gray
-
-![Upbound Console Simulation](/img/simulations.png)
-
-## Considerations
-
-Simulations are a **private preview** feature.
-
-Be aware of the following limitations:
-
-- Simulations can't predict the exact behavior of external systems due to the
-  complexity and non-deterministic reconciliation pattern in Crossplane.
-
-- The only completion criterion for a simulation is time. Your simulation may not
-  receive a conclusive result within that interval. Upbound recommends the
-  default `60s` value.
-
-- Providers don't run in simulations. Simulations can't compose resources that
-  rely on the status of Managed Resources.
-
-The Upbound team is working to address these limitations. Your feedback is always appreciated.
-
-## Next steps
-
-For more information, follow the [tutorial][tutorial] on Simulations.
-
-[tutorial]: /manuals/cli/howtos/simulations
-[reach-out-to-upbound]: https://www.upbound.io/contact-us
-[reach-out-to-upbound-1]: https://www.upbound.io/contact-us
diff --git a/spaces_versioned_docs/version-v1.14/overview/_category_.json b/spaces_versioned_docs/version-v1.14/overview/_category_.json
deleted file mode 100644
index 54bb16430..000000000
--- a/spaces_versioned_docs/version-v1.14/overview/_category_.json
+++ /dev/null
@@ -1,4 +0,0 @@
-{
-  "label": "Overview",
-  "position": 0
-}
diff --git a/spaces_versioned_docs/version-v1.14/overview/index.md b/spaces_versioned_docs/version-v1.14/overview/index.md
deleted file mode 100644
index 7b79f6e44..000000000
--- a/spaces_versioned_docs/version-v1.14/overview/index.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-title: Spaces Overview
-sidebar_position: 0
----
-
-# Upbound Spaces
-
-Welcome to the Upbound Spaces documentation. This section contains comprehensive documentation for the Spaces API and Spaces operations across all supported versions (v1.9 through v1.15).
-
-## Get Started
-
-- **[Concepts](/spaces/concepts/control-planes)** - Core concepts for Spaces
-- **[How-To Guides](/spaces/howtos/auto-upgrade)** - Step-by-step guides for operating Spaces
-- **[API Reference](/spaces/reference/)** - API specifications and resources
diff --git a/spaces_versioned_docs/version-v1.14/reference/_category_.json b/spaces_versioned_docs/version-v1.14/reference/_category_.json
deleted file mode 100644
index 4a6a139c4..000000000
--- a/spaces_versioned_docs/version-v1.14/reference/_category_.json
+++ /dev/null
@@ -1,5 +0,0 @@
-{
-  "label": "Spaces API",
-  "position": 1,
-  "collapsed": true
-}
diff --git a/spaces_versioned_docs/version-v1.14/reference/index.md b/spaces_versioned_docs/version-v1.14/reference/index.md
deleted file mode 100644
index 5e68b0768..000000000
--- a/spaces_versioned_docs/version-v1.14/reference/index.md
+++ /dev/null
@@ -1,72 +0,0 @@
----
-title: Spaces API Reference
-description: Documentation for the Spaces API resources (v1.14)
-sidebar_position: 1
----
-import CrdDocViewer from '@site/src/components/CrdViewer';
-
-This page documents the Custom Resource Definitions (CRDs) for the Spaces API.
-
-## Control Planes
-### Control Planes
-
-## Observability
-### Shared Telemetry Configs
-
-## `pkg`
-### Controller Revisions
-
-### Controller Runtime Configs
-
-### Controllers
-
-### Remote Configuration Revisions
-
-### Remote Configurations
-
-## Policy
-### Shared Upbound Policies
-
-## References
-### Referenced Objects
-
-## Scheduling
-### Environments
-
-## Secrets
-### Shared External Secrets
-
-### Shared Secret Stores
-
-## Simulations
-
-## Spaces Backups
-### Backups
-
-### Backup Schedules
-
-### Shared Backup Configs
-
-### Shared Backups
-
-### Shared Backup Schedules
-
diff --git a/spaces_versioned_docs/version-v1.15/concepts/_category_.json b/spaces_versioned_docs/version-v1.15/concepts/_category_.json
deleted file mode 100644
index 4b8667e29..000000000
--- a/spaces_versioned_docs/version-v1.15/concepts/_category_.json
+++ /dev/null
@@ -1,7 +0,0 @@
-{
-  "label": "Concepts",
-  "position": 2,
-  "collapsed": true
-}
-
-
diff --git a/spaces_versioned_docs/version-v1.15/concepts/control-planes.md b/spaces_versioned_docs/version-v1.15/concepts/control-planes.md
deleted file mode 100644
index 7066343de..000000000
--- a/spaces_versioned_docs/version-v1.15/concepts/control-planes.md
+++ /dev/null
@@ -1,227 +0,0 @@
----
-title: Control Planes
-weight: 1
-description: An overview of control planes in Upbound
----
-
-Control planes in Upbound are fully isolated Crossplane control plane instances that Upbound manages for you. This means Upbound manages:
-
-- the underlying lifecycle of infrastructure (compute, memory, and storage) required to power your instance.
-- scaling of the infrastructure.
-- the maintenance of the core Crossplane components that make up a control plane.
-
-This lets users focus on building their APIs and operating their control planes, while Upbound handles the rest. Each control plane has its own dedicated API server connecting users to their control plane.
-
-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+).
-
-For detailed information about Control Plane API specifications and CRD fields, see the [Spaces API Reference](../../../reference/apis/spaces-api/).
-:::
-
-## Control plane architecture
-
-![Managed Control Plane Architecture](/img/mcp.png)
-
-Along with underlying infrastructure, Upbound manages the Crossplane system components. You don't need to manage the Crossplane API server or core resource controllers because Upbound manages your control plane lifecycle from creation to deletion.
-
-### Crossplane API
-
-Each control plane offers a unified endpoint. You interact with your control plane through Kubernetes and Crossplane API calls. Each control plane runs a Kubernetes API server to handle API requests. You can make API calls in the following ways:
-
-- Direct calls: HTTP/gRPC
-- Indirect calls: the up CLI, Kubernetes clients such as kubectl, or the Upbound Console.
-
-Like in Kubernetes, the API server is the hub for all communication for the control plane. All internal components such as system processes and provider controllers act as clients of the API server.
-
-Your API requests tell Crossplane your desired state for the resources your control plane manages. Crossplane attempts to constantly maintain that state. Crossplane lets you configure objects in the API either imperatively or declaratively.
-
-### Crossplane versions and features
-
-Upbound automatically upgrades Crossplane system components on control planes to new Crossplane versions for updated features and improvements in the open source project. With [automatic upgrades][automatic-upgrades], you choose the cadence at which Upbound automatically upgrades the system components in your control plane. You can also choose to manually upgrade your control plane to a different Crossplane version.
-
-For detailed information on versions and upgrades, refer to the [release notes][release-notes] and the automatic upgrade documentation. If you don't enroll a control plane in a release channel, Upbound doesn't apply automatic upgrades.
-
-Features considered "alpha" in Crossplane aren't supported in a control plane by default unless otherwise specified.
-
-### Hosting environments
-
-Every control plane in Upbound belongs to a [control plane group][control-plane-group]. Control plane groups are a logical grouping of one or more control planes with shared objects (such as secrets or backup configuration). Every group resides in a [Space][space] in Upbound; Spaces are hosting environments for control planes.
-
-Think of a Space as being conceptually the same as an AWS, Azure, or GCP region. Regardless of the Space type you run a control plane in, the core experience is identical.
-
-## Management
-
-### Create a control plane
-
-You can create a new control plane from the Upbound Console, [up CLI][up-cli], or with Kubernetes clients such as `kubectl`.
-
-To use the CLI, run the following:
-
-```shell
-up ctp create
-```
-
-To learn more about control plane-related commands in `up`, go to the [CLI reference][cli-reference] documentation.
-
-You can create and manage control planes declaratively in Upbound. Before you
-begin, ensure you're logged into Upbound and set the correct context:
-
-```bash
-up login
-# Example: acmeco/upbound-gcp-us-west-1/default
-up ctx ${yourOrganization}/${yourSpace}/${yourGroup}
-```
-
-```yaml
-#controlplane-a.yaml
-apiVersion: spaces.upbound.io/v1beta1
-kind: ControlPlane
-metadata:
-  name: controlplane-a
-spec:
-  crossplane:
-    autoUpgrade:
-      channel: Rapid
-```
-
-```bash
-kubectl apply -f controlplane-a.yaml
-```
-
-### Connect directly to your control plane
-
-Each control plane offers a unified endpoint. You interact with your control plane through Kubernetes and Crossplane API calls. Each control plane runs a Kubernetes API server to handle API requests.
-
-You can connect to a control plane's API server directly via the up CLI. Use the [`up ctx`][up-ctx] command to set your kubeconfig's current context to a control plane:
-
-```shell
-# Example: acmeco/upbound-gcp-us-west-1/default/ctp1
-up ctx ${yourOrganization}/${yourSpace}/${yourGroup}/${yourControlPlane}
-```
-
-To disconnect from your control plane and revert your kubeconfig's current context to the previous entry, run the following:
-
-```shell
-up ctx ..
-```
-
-You can also generate a `kubeconfig` file for a control plane with [`up ctx -f`][up-ctx-f].
-
-```shell
-up ctx ${yourOrganization}/${yourSpace}/${yourGroup}/${yourControlPlane} -f - > ctp-kubeconfig.yaml
-```
-
-:::tip
-To learn more about how to use `up ctx` to navigate different contexts in Upbound, read the [CLI documentation][cli-documentation].
-:::
-
-## Configuration
-
-When you create a new control plane, Upbound provides you with a fully isolated instance of Crossplane. Configure your control plane by installing packages that extend its capabilities, such as packages that create and manage the lifecycle of new types of infrastructure resources.
-
-You're encouraged to install any Crossplane package type (Providers, Configurations, Functions) available in the [Upbound Marketplace][upbound-marketplace] on your control planes.
-
-### Install packages
-
-Below are a couple of ways to install Crossplane packages on your control plane.
-
-Use the `up` CLI to install Crossplane packages from the [Upbound Marketplace][upbound-marketplace-1] on your control planes. Connect directly to your control plane via `up ctx`. Then, to install a provider:
-
-```shell
-up ctp provider install xpkg.upbound.io/upbound/provider-family-aws
-```
-
-To install a Configuration:
-
-```shell
-up ctp configuration install xpkg.upbound.io/upbound/platform-ref-aws
-```
-
-To install a Function:
-
-```shell
-up ctp function install xpkg.upbound.io/crossplane-contrib/function-kcl
-```
-
-You can use kubectl to directly apply any Crossplane manifest. Below is an example for installing a Crossplane provider:
-
-```yaml
-cat <<EOF | kubectl apply -f -
-apiVersion: pkg.crossplane.io/v1
-kind: Provider
-metadata:
-  name: provider-family-aws
-spec:
-  package: xpkg.upbound.io/upbound/provider-family-aws
-EOF
-```
-
-For production-grade scenarios, it's recommended you configure your control plane declaratively via Git plus a Continuous Delivery (CD) engine such as Argo. For guidance on this topic, read [GitOps with control planes][gitops-with-control-planes].
-
-### Configure Crossplane ProviderConfigs
-
-#### ProviderConfigs with OpenID Connect
-
-Use OpenID Connect (`OIDC`) to authenticate to Upbound control planes without credentials. OIDC lets your control plane exchange short-lived tokens directly with your cloud provider. Read how to [connect control planes to external services][connect-control-planes-to-external-services] to learn more.
-
-#### Generic ProviderConfigs
-
-The Upbound Console doesn't allow direct editing of ProviderConfigs that don't support `Upbound` authentication. To edit these ProviderConfigs on your control plane, connect to the control plane directly by following the instructions in the previous section and using `kubectl`.
-
-### Configure secrets
-
-Upbound gives users the ability to configure the synchronization of secrets from external stores into control planes. Configure this capability at the group level, explained in the [Spaces documentation][spaces-documentation].
-
-### Configure backups
-
-Upbound gives users the ability to configure backup schedules, take impromptu backups, and conduct self-service restore operations. Configure this capability at the group level, explained in the [Spaces documentation][spaces-documentation-1].
-
-### Configure telemetry
-
-Upbound gives users the ability to configure the collection of telemetry (logs, metrics, and traces) in their control planes. Using Upbound's built-in [OTEL][otel] support, you can stream this data out to your preferred observability solution. Configure this capability at the group level, explained in the [Spaces documentation][spaces-documentation-2].
-
-[automatic-upgrades]: /spaces/howtos/auto-upgrade
-[release-notes]: https://github.com/upbound/universal-crossplane/releases
-[control-plane-group]: /spaces/concepts/groups
-[space]: /spaces/overview
-[up-cli]: /reference/cli-reference
-[cli-reference]: /reference/cli-reference
-[up-ctx]: /reference/cli-reference
-[up-ctx-f]: /reference/cli-reference
-[cli-documentation]: /manuals/cli/concepts/contexts
-[upbound-marketplace]: https://marketplace.upbound.io
-[upbound-marketplace-1]: https://marketplace.upbound.io
-[gitops-with-control-planes]: /spaces/howtos/cloud-spaces/gitops
-[connect-control-planes-to-external-services]: /manuals/platform/howtos/oidc
-[spaces-documentation]: /spaces/howtos/secrets-management
-[spaces-documentation-1]: /spaces/howtos/backup-and-restore
-[otel]: https://opentelemetry.io
-[spaces-documentation-2]: /spaces/howtos/observability
diff --git a/spaces_versioned_docs/version-v1.15/concepts/deployment-modes.md b/spaces_versioned_docs/version-v1.15/concepts/deployment-modes.md
deleted file mode 100644
index f5e718f88..000000000
--- a/spaces_versioned_docs/version-v1.15/concepts/deployment-modes.md
+++ /dev/null
@@ -1,53 +0,0 @@
----
-title: Deployment Modes
-sidebar_position: 10
-description: An overview of deployment modes for Spaces
----
-
-Upbound Spaces can be deployed and used in a variety of modes:
-
-- **Cloud Spaces:** Multi-tenant Upbound-hosted, Upbound-managed Space environment. Cloud Spaces provide a typical SaaS experience.
-- **[Dedicated Spaces][dedicated-spaces]:** Single-tenant Upbound-hosted, Upbound-managed Space environment. Dedicated Spaces provide a SaaS experience, with additional isolation guarantees that your workloads run in a fully isolated context.
-- **[Managed Spaces][managed-spaces]:** Single-tenant customer-hosted, Upbound-managed Space environment. Managed Spaces provide a SaaS-like experience, with additional guarantees of all hosting infrastructure being served from your own cloud account.
-- **[Self-Hosted Spaces][self-hosted-spaces]:** Single-tenant customer-hosted, customer-managed Space environment. This is a fully self-hosted, self-managed software experience for using Spaces. Upbound delivers the Spaces software and you run it yourself.
-
-The Upbound platform uses a federated model to connect each Space back to a
-central service called the [Upbound Console][console], which is deployed and
-managed by Upbound.
-
-By default, customers have access to a set of Cloud Spaces.
-
-## Supported clouds
-
-You can host Upbound Spaces on Amazon Web Services (AWS), Microsoft Azure,
-and Google Cloud Platform (GCP). Regardless of the hosting platform, you can use
-Spaces to deploy control planes that manage the lifecycle of your resources.
-
-## Supported regions
-
-This table lists the cloud service provider regions supported by Upbound.
-
-### GCP
-
-| Region | Location |
-| --- | --- |
-| `us-west-1` | Western US (Oregon) |
-| `us-central-1` | Central US (Iowa) |
-| `eu-west-3` | Western Europe (Frankfurt) |
-
-### AWS
-
-| Region | Location |
-| --- | --- |
-| `us-east-1` | Eastern US (Northern Virginia) |
-
-### Azure
-
-| Region | Location |
-| --- | --- |
-| `us-east-1` | Eastern US (Virginia) |
-
-[dedicated-spaces]: /spaces/howtos/cloud-spaces/dedicated-spaces-deployment
-[managed-spaces]: /spaces/howtos/self-hosted/managed-spaces-deployment
-[self-hosted-spaces]: /spaces/howtos/self-hosted/self-hosted-spaces-deployment
-[console]: /manuals/console/upbound-console/
diff --git a/spaces_versioned_docs/version-v1.15/concepts/groups.md b/spaces_versioned_docs/version-v1.15/concepts/groups.md
deleted file mode 100644
index d2ccacdb3..000000000
--- a/spaces_versioned_docs/version-v1.15/concepts/groups.md
+++ /dev/null
@@ -1,115 +0,0 @@
----
-title: Control Plane Groups
-sidebar_position: 2
-description: An introduction to the Control Plane Groups in Upbound
-plan: "enterprise"
----
-
-In Upbound, Control Plane Groups (or just 'groups') are a logical grouping of one or more control planes with shared resources like [secrets][secrets] or [backups][backups]. Groups are a mechanism for isolating these sets of resources within a single [Space][space]. All role-based access control in Upbound happens at the control plane group level.
-
-## When to use multiple groups
-
-You should use groups in environments where there's a need to have Crossplane manage infrastructure across multiple cloud accounts or projects. If you only need to deploy and manage resources in a couple of cloud accounts, you shouldn't need to think about groups at all.
-
-Groups are a way to divide access in Upbound between multiple teams. Think of a group as being analogous to a Kubernetes _namespace_.
-
-## The 'default' group
-
-Every Cloud Space in Upbound has a group named _default_ available.
-
-## Working with groups
-
-### View groups
-
-You can list groups in a Space using:
-
-```shell
-up group list
-```
-
-If you're operating in a single-tenant Space and have access to the underlying cluster, you can list namespaces that have the group label:
-
-```shell
-kubectl get namespaces -l spaces.upbound.io/group=true
-```
-
-### Set the group for a request
-
-Several commands in _up_ have a group context. To set the group for a request, use the `--group` flag:
-
-```shell
-up ctp list --group=team1
-```
-```shell
-up ctp create new-ctp --group=team2
-```
-
-### Set the group preference
-
-The _up_ CLI operates upon a single [Upbound context][upbound-context]. Whatever context gets set is then used as the preference for other commands. An Upbound context can point at a variety of altitudes:
-
-1. A Space in Upbound
-2. A group within a Space
-3. A control plane within a group
-
-To set the group preference, use `up ctx` to choose a group as your preferred Upbound context. For example:
-
-```shell
-# This sets the context for the up CLI to the default group in an Upbound-managed Cloud Space (gcp-us-west-1) for an organization called 'acmeco'
-up ctx acmeco/upbound-gcp-us-west-1/default/
-```
-
-### Create a group
-
-To create a group, log in to Upbound and set your context to your desired Space:
-
-```shell
-up login
-up ctx '<organization>/<space>'
-# Example: up ctx acmeco/upbound-gcp-us-west-1
-```
-
-Create a group:
-
-```shell
-up group create my-new-group
-```
-
-### Delete a group
-
-To delete a group, log in to Upbound and set your context to your desired Space:
-
-```shell
-up login
-up ctx '<organization>/<space>'
-# Example: up ctx acmeco/upbound-gcp-us-west-1
-```
-
-Delete a group:
-
-```shell
-up group delete my-new-group
-```
-
-### Protected groups
-
-Once a control plane gets created in a group, Upbound enforces a protection policy on the group. Upbound prevents accidental deletion of the group. To delete a group that has control planes in it, you should first delete all control planes in the group.
-
-## Groups in the context of single-tenant Spaces
-
-Upbound offers a variety of deployment models to use the product. If you deploy your own single-tenant Upbound Space (whether connected or disconnected), you're self-hosting Upbound software in a Kubernetes cluster. In these environments, a control plane group maps to a corresponding namespace in the cluster which hosts the Space.
-
-Most Kubernetes clusters come with some set of predefined namespaces. Because a group maps to a corresponding Kubernetes namespace, whenever a group gets created, a matching Kubernetes namespace must exist. When the Spaces software is newly installed, no groups exist. You _can_ elevate a Kubernetes namespace to become a group by doing any of the following:
-
-1. Creating a group with the same name as a preexisting Kubernetes namespace
-2. Creating a control plane in a preexisting Kubernetes namespace
-3. Labeling a Kubernetes namespace with the label `spaces.upbound.io/group=true`
-
-[secrets]: /spaces/howtos/secrets-management
-[backups]: /spaces/howtos/self-hosted/workload-id/backup-restore-config/
-[space]: /spaces/overview
-[upbound-context]: /manuals/cli/concepts/contexts
diff --git a/spaces_versioned_docs/version-v1.15/howtos/_category_.json b/spaces_versioned_docs/version-v1.15/howtos/_category_.json
deleted file mode 100644
index d3a8547aa..000000000
--- a/spaces_versioned_docs/version-v1.15/howtos/_category_.json
+++ /dev/null
@@ -1,7 +0,0 @@
-{
-  "label": "How-tos",
-  "position": 3,
-  "collapsed": true
-}
-
-
diff --git a/spaces_versioned_docs/version-v1.15/howtos/api-connector.md b/spaces_versioned_docs/version-v1.15/howtos/api-connector.md
deleted file mode 100644
index a14468f52..000000000
--- a/spaces_versioned_docs/version-v1.15/howtos/api-connector.md
+++ /dev/null
@@ -1,413 +0,0 @@
----
-title: API Connector
-weight: 90
-description: Connect Kubernetes clusters to remote Crossplane control planes for resource synchronization
-aliases:
-  - /api-connector
-  - /concepts/api-connector
----
-:::info API Version Information
-This guide covers API Connector, a preview feature available in all supported versions (v1.9-v1.14+).
-
-For related API specifications and available resources, see the [Spaces API Reference](../../../reference/apis/spaces-api/).
-:::
-
-:::warning
-API Connector is currently in **Preview**. The feature is under active
-development and subject to breaking changes. Use for testing and evaluation
-purposes only.
-:::
-
-API Connector enables seamless integration between Kubernetes application
-clusters consuming APIs and remote Crossplane control planes providing and
-reconciling APIs.
-
-You can use the API Connector to decouple where Crossplane is running (for
-example in an Upbound control plane), and where APIs are consumed
-(for example in an existing Kubernetes cluster). This gives you flexibility and
-consistency in your control plane operations.
-
-Unlike the [Control Plane Connector](ctp-connector.md), which offers only
-coarse-grained connectivity between app clusters and a control plane, API
-Connector offers fine-grained configuration of which APIs get offered along with
-multi-cluster connectivity.
-
-## Architecture overview
-
-![API Connector Architecture](/img/api-connector.png)
-
-API Connector uses a **provider-consumer** model:
-
-- **Provider control plane**: The Upbound control plane that provides APIs and manages infrastructure.
-- **Consumer cluster**: Any Kubernetes cluster whose users want to use APIs provided by the provider control plane, without having to run Crossplane. API Connector gets installed in the consumer cluster, and bidirectionally syncs API objects to the provider.
-
-### Key components
-
-**Custom Resource Definitions (CRDs)**:
-
-- `ClusterConnection`: Establishes a connection from the consumer to the provider cluster. Pulls bindable CRD APIs from the provider into the consumer cluster for use.
-
-- `ClusterAPIBinding`: Instructs API Connector to sync all API objects cluster-wide with a given API group to a given provider cluster.
-- `APIBinding`: Namespaced version of `ClusterAPIBinding`. Instructs API Connector to sync API objects within a given namespace and with a given API group to a given provider cluster.
-
-## Prerequisites
-
-Before using API Connector, ensure:
-
-1. **Consumer cluster** has network access to the provider control plane
-1. You have a license to use API Connector. If you are unsure, [contact Upbound][contact] or your sales representative.
-
-This guide walks through how to automate connecting your cluster to an Upbound
-control plane. You can also manually configure the API Connector.
-
-## Publishing APIs in the provider cluster
-
-First, log in to your provider control plane, and choose which CRD APIs you want
-to make accessible to the consumer cluster's users. API Connector only syncs
-these "bindable" CRDs.
-
-Use the `up` CLI to log in:
-
-```bash
-up login
-```
-
-Connect to your control plane:
-
-```bash
-up ctx
-```
-
-Check what CRDs are available:
-
-```bash
-kubectl get crds
-```
-
-Label all CRDs you want to publish with the bindable label:
-
-```bash
-kubectl label crd <crd-name> 'connect.upbound.io/bindable'='true' --overwrite
-```
-
-Change context to the provider cluster:
-```bash
-kubectl config use-context <provider-context>
-```
-
-Check what CRDs are available:
-```bash
-kubectl get crds
-```
-
-Label all CRDs you want to publish with the bindable label:
-
-```bash
-kubectl label crd <crd-name> 'connect.upbound.io/bindable'='true' --overwrite
-```
-
-## Installation
-
-The up CLI provides the simplest installation method with automatic
-configuration:
-
-Make sure the current kubeconfig context is set to the **provider control plane**:
-```bash
-up ctx
-
-up controlplane api-connector install --consumer-kubeconfig <kubeconfig-path> [OPTIONS]
-```
-
-The command:
-1. Creates a Robot account in your Upbound Cloud organization.
-1. Gives the created robot account `admin` permissions to the provider control plane.
-1. Generates a JWT token for the robot account, and stores it in a Kubernetes Secret in the consumer cluster.
-1. Installs the API Connector Helm chart in the consumer cluster.
-1. Creates a `ClusterConnection` object in the consumer cluster, referring to the newly generated Secret, so that API Connector can authenticate successfully to the provider control plane.
-1. Pulls all CRDs published in the previous step into the consumer cluster.
-
-**Example**:
-```bash
-up controlplane api-connector install \
-  --consumer-kubeconfig ~/.kube/config \
-  --consumer-context my-cluster \
-  --upbound-token <token>
-```
-
-This command uses the provided token to authenticate with the **provider control plane**
-and creates a `ClusterConnection` resource in the **consumer cluster** to connect to the
-**provider control plane**.
-
-**Key Options**:
-- `--consumer-kubeconfig`: Path to consumer cluster kubeconfig (required)
-- `--consumer-context`: Context name for consumer cluster (required)
-- `--name`: Custom name for connection resources (optional)
-- `--upbound-token`: API token for authentication (optional)
-- `--upgrade`: Upgrade existing installation (optional)
-- `--version`: Specific version to install (optional)
-
-For manual installation or custom configurations:
-
-```bash
-helm upgrade --install api-connector oci://xpkg.upbound.io/spaces-artifacts/api-connector \
-  --namespace upbound-system \
-  --create-namespace \
-  --version <version> \
-  --set consumerClusterDisplayName=<display-name>
-```
-
-### Authentication methods
-
-API Connector supports two authentication methods:
-
-For Upbound Spaces integration:
-
-```yaml
-apiVersion: v1
-kind: Secret
-metadata:
-  name: spaces-secret
-  namespace: upbound-system
-type: Opaque
-stringData:
-  token: <token>
-  organization: <organization>
-  spacesBaseURL: <spaces-base-url>
-  controlPlaneGroupName: <group-name>
-  controlPlaneName: <control-plane-name>
-```
-
-For direct cluster access:
-
-```yaml
-apiVersion: v1
-kind: Secret
-metadata:
-  name: provider-kubeconfig
-  namespace: upbound-system
-type: Opaque
-data:
-  kubeconfig: <base64-encoded-kubeconfig>
-```
-
-### Connection setup
-
-Create a `ClusterConnection` to establish connectivity:
-
-```yaml
-apiVersion: connect.upbound.io/v1alpha1
-kind: ClusterConnection
-metadata:
-  name: spaces-connection
-  namespace: upbound-system
-spec:
-  secretRef:
-    kind: UpboundRobotToken
-    name: spaces-secret
-    namespace: upbound-system
-  crdManagement:
-    pullBehavior: Pull
-```
-
-```yaml
-apiVersion: connect.upbound.io/v1alpha1
-kind: ClusterConnection
-metadata:
-  name: provider-connection
-  namespace: upbound-system
-spec:
-  secretRef:
-    kind: KubeConfig
-    name: provider-kubeconfig
-    namespace: upbound-system
-  crdManagement:
-    pullBehavior: Pull
-```
-
-### Configuration
-
-Bind APIs to make them available in your consumer cluster:
-
-```yaml
-apiVersion: connect.upbound.io/v1alpha1
-kind: ClusterAPIBinding
-metadata:
-  name: <resource.group>
-spec:
-  connectionRef:
-    kind: ClusterConnection
-    name: <connection-name> # Or --name value
-```
-
-The `ClusterAPIBinding` name must match the **Resource.Group** (the name of the CustomResourceDefinition) of the CRD you want to bind.
-
-## Usage example
-
-After configuration, you can create API objects (in the consumer cluster) that
-will be synchronized to the provider cluster:
-
-```yaml
-apiVersion: nop.example.org/v1alpha1
-kind: NopResource
-metadata:
-  name: my-resource
-  namespace: default
-spec:
-  coolField: "Synchronized resource"
-  compositeDeletePolicy: Foreground
-```
-
-Verify the resource status:
-
-```bash
-kubectl get nopresource my-resource -o yaml
-```
-
-When the `APIBound=True` condition is present, it means that the API object has
-been synced to the provider cluster, and is being reconciled there. Whenever the
-API object in the provider cluster gets status updates (for example
-`Ready=True`), that status is synced back to the consumer cluster.
-
-Switch contexts to the provider cluster to see the API object being created:
-
-```bash
-up ctx
-# or kubectl config use-context <provider-context>
-```
-
-```bash
-kubectl get nopresource my-resource -o yaml
-```
-
-Note that in the provider cluster, the API object is labeled with information on
-where the API object originates from, and `connect.upbound.io/managed=true`.
-
-## Monitoring and troubleshooting
-
-### Check connection status
-
-```bash
-kubectl get clusterconnection
-```
-
-Expected output:
-```
-NAME                STATUS   MESSAGE
-spaces-connection   Ready    Provider controlplane is available
-```
-
-### View available APIs
-
-```bash
-kubectl get clusterconnection spaces-connection -o jsonpath='{.status.offeredAPIs[*].name}'
-```
-
-### Check API binding status
-
-```bash
-kubectl get clusterapibinding
-```
-
-### Debug resource synchronization
-
-```bash
-kubectl describe <resource-kind> <resource-name>
-```
-
-## Removal
-
-### Using the up CLI
-
-```bash
-up controlplane api-connector uninstall \
-  --consumer-kubeconfig ~/.kube/config \
-  --all
-```
-
-The `--all` flag removes all resources, including connections and secrets.
-Without the flag, only runtime-related resources are removed.
-
-:::note
-Uninstall doesn't remove any API objects in the provider control plane. If you
-want to clean up all API objects there, delete all API objects from the consumer
-cluster before API Connector uninstallation, and wait for the objects to get
-deleted.
-:::
-
-### Using Helm
-
-```bash
-helm uninstall api-connector -n upbound-system
-```
-
-## Limitations
-
-- **Preview feature**: Subject to breaking changes. Not yet production grade.
-- **CRD updates**: CRDs are pulled once but not automatically updated. If multiple Crossplane clusters offer the same CRD API, API changes must be synchronized out of band, for example using a [Crossplane Configuration](https://docs.crossplane.io/latest/packages/).
-- **Network requirements**: Consumer cluster must have direct network access to provider cluster.
-- **Wide permissions needed in consumer cluster**: Because the API Connector doesn't know up front the names of the APIs it needs to reconcile, it currently runs with full "root" privileges in the consumer cluster.
-- **Connector polling**: API Connector checks for drift between the consumer and provider cluster
-  periodically through polling. The poll interval can be changed with the `pollInterval` Helm value.
-
-## Advanced configuration
-
-### Multiple connections
-
-You can connect to multiple provider clusters simultaneously by creating multiple `ClusterConnection` resources with different names and configurations.
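-
-As a minimal sketch of this pattern, reusing only the `ClusterConnection` fields shown earlier on this page, a consumer cluster might hold one connection to a Spaces control plane and another to a directly reachable cluster. The connection and Secret names here are illustrative assumptions:
-
-```yaml
-# Two independent connections in the same consumer cluster,
-# each authenticating with its own credentials Secret.
-apiVersion: connect.upbound.io/v1alpha1
-kind: ClusterConnection
-metadata:
-  name: prod-connection
-  namespace: upbound-system
-spec:
-  secretRef:
-    kind: UpboundRobotToken   # authenticates against an Upbound Space
-    name: prod-spaces-secret
-    namespace: upbound-system
-  crdManagement:
-    pullBehavior: Pull
----
-apiVersion: connect.upbound.io/v1alpha1
-kind: ClusterConnection
-metadata:
-  name: staging-connection
-  namespace: upbound-system
-spec:
-  secretRef:
-    kind: KubeConfig          # direct cluster access via kubeconfig
-    name: staging-kubeconfig
-    namespace: upbound-system
-  crdManagement:
-    pullBehavior: Pull
-```
-
-Each `ClusterAPIBinding` then selects which connection serves a given API group through its `connectionRef`.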
-
-[contact]: https://www.upbound.io/contact-us
diff --git a/spaces_versioned_docs/version-v1.15/howtos/auto-upgrade.md b/spaces_versioned_docs/version-v1.15/howtos/auto-upgrade.md
deleted file mode 100644
index 249056fb4..000000000
--- a/spaces_versioned_docs/version-v1.15/howtos/auto-upgrade.md
+++ /dev/null
@@ -1,131 +0,0 @@
----
-title: Automatically upgrade control planes
-sidebar_position: 50
-description: How to configure automatic upgrades of Crossplane in a control plane
-plan: "standard"
----
-
-Upbound Spaces can automatically upgrade the version of Upbound Crossplane in managed control plane instances. You can edit the `spec.crossplane.autoUpgrade` field in your `ControlPlane` specification with the available release channels below.
-
-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+). The `spec.crossplane.autoUpgrade` field has been available since v1.9.
-
-For ControlPlane API specifications, see the [Spaces API Reference](../../../reference/apis/spaces-api/).
-:::
-
-| Channel | Description | Example |
-|------------|-----------------------------------|------------------------------------------------|
-| **None** | Disables auto upgrades. | _Uses version specified in `spec.crossplane.version`._ |
-| **Patch** | Upgrades to the latest supported patch release. | _Control plane version 1.12.2-up.2 auto upgrades to 1.12.3-up.1 upon release._ |
-| **Stable** | Default setting. Upgrades to the latest supported patch release on minor version _N-1_, where N is the latest supported minor version. | _If the latest supported minor version is 1.14, auto upgrades to the latest patch of 1.13, for example 1.13.2-up.3._ |
-| **Rapid** | Upgrades to the latest supported patch release on the latest supported minor version. | _If the latest supported minor version is 1.14, auto upgrades to the latest patch of that minor version, for example 1.14.5-up.1._ |
-
-:::warning
-The `Rapid` channel is only recommended for users willing to accept the risk of new features and potentially breaking changes.
-:::
-
-## Examples
-
-The specs below are examples of how to edit the `autoUpgrade` channel in your `ControlPlane` specification.
-
-To run a control plane with the `Rapid` auto upgrade channel, your spec should look like this:
-
-```yaml
-apiVersion: spaces.upbound.io/v1beta1
-kind: ControlPlane
-metadata:
-  name: example-ctp
-spec:
-  crossplane:
-    autoUpgrade:
-      channel: Rapid
-  writeConnectionSecretToRef:
-    name: kubeconfig-example-ctp
-```
-
-To run a control plane with a pinned version of Crossplane, specify it in the `version` field:
-
-```yaml
-apiVersion: spaces.upbound.io/v1beta1
-kind: ControlPlane
-metadata:
-  name: example-ctp
-spec:
-  crossplane:
-    version: 1.14.3-up.1
-    autoUpgrade:
-      channel: None
-  writeConnectionSecretToRef:
-    name: kubeconfig-example-ctp
-```
-
-## Supported Crossplane versions
-
-Spaces supports the three [preceding minor versions][preceding-minor-versions] from the last supported minor version. For example, if the last supported minor version is `1.14`, minor versions `1.13` and `1.12` are also supported. Versions older than the three most recent minor versions aren't supported. Only supported Crossplane versions are valid specifications for new control planes.
-
-Current Crossplane version support by Spaces version:
-
-| Spaces Version | Crossplane Version Min | Crossplane Version Max |
-|:--------------:|:----------------------:|:----------------------:|
-| 1.2 | 1.13 | 1.15 |
-| 1.3 | 1.13 | 1.15 |
-| 1.4 | 1.14 | 1.16 |
-| 1.5 | 1.14 | 1.16 |
-| 1.6 | 1.14 | 1.16 |
-| 1.7 | 1.14 | 1.16 |
-| 1.8 | 1.15 | 1.17 |
-| 1.9 | 1.16 | 1.18 |
-| 1.10 | 1.16 | 1.18 |
-| 1.11 | 1.16 | 1.18 |
-| 1.12 | 1.17 | 1.19 |
-
-Upbound offers extended support for all installed Crossplane versions released within a 12-month window since the last Spaces release. Contact your Upbound sales representative for more information on version support.
-
-:::warning
-If the auto upgrade channel is `Stable` or `Rapid`, the Crossplane version will always stay within the support window after an auto upgrade. If set to `Patch` or `None`, the minor version may fall outside the support window. You are responsible for upgrading to a supported version.
-:::
-
-To view the support status of a control plane instance, use `kubectl get ctp`.
-
-```bash
-kubectl get ctp
-NAME          CROSSPLANE VERSION   SUPPORTED   READY   MESSAGE   AGE
-example-ctp   1.13.2-up.3          True        True              31m
-```
-
-Unsupported versions return `SUPPORTED: False`.
-
-```bash
-kubectl get ctp
-NAME          CROSSPLANE VERSION   SUPPORTED   READY   MESSAGE   AGE
-example-ctp   1.11.5-up.1          False       True              31m
-```
-
-For more detail, use the `-o yaml` flag:
-
-```bash
-kubectl get controlplanes.spaces.upbound.io example-ctp -o yaml
-status:
-  conditions:
-  ...
-  - lastTransitionTime: "2024-01-23T06:36:10Z"
-    message: Crossplane version 1.11.5-up.1 is outside of the support window.
-      Oldest supported minor version is 1.12.
-    reason: UnsupportedCrossplaneVersion
-    status: "False"
-    type: Supported
-```
-
-[preceding-minor-versions]: /reference/usage/lifecycle/#maintenance-and-updates
diff --git a/spaces_versioned_docs/version-v1.15/howtos/automation-and-gitops/_category_.json b/spaces_versioned_docs/version-v1.15/howtos/automation-and-gitops/_category_.json
deleted file mode 100644
index b65481af6..000000000
--- a/spaces_versioned_docs/version-v1.15/howtos/automation-and-gitops/_category_.json
+++ /dev/null
@@ -1,8 +0,0 @@
-{
-  "label": "Automation & GitOps",
-  "position": 11,
-  "collapsed": true,
-  "customProps": {
-    "plan": "business"
-  }
-}
diff --git a/spaces_versioned_docs/version-v1.15/howtos/automation-and-gitops/overview.md b/spaces_versioned_docs/version-v1.15/howtos/automation-and-gitops/overview.md
deleted file mode 100644
index 57eeb15fc..000000000
--- a/spaces_versioned_docs/version-v1.15/howtos/automation-and-gitops/overview.md
+++ /dev/null
@@ -1,138 +0,0 @@
----
-title: Automation and GitOps Overview
-sidebar_label: Overview
-sidebar_position: 1
-description: Guide to automating control plane deployments with GitOps and Argo CD
-plan: "business"
----
-
-Automating control plane deployments with GitOps enables declarative, version-controlled infrastructure management. This section covers integrating GitOps workflows with Upbound control planes using Argo CD and related tools.
-
-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+). GitOps patterns are compatible across all versions of Upbound Spaces.
-
-For details on deployment models and their differences, see the [Deployment Modes](../../concepts/deployment-modes.md) concept guide.
-:::
-
-## What is GitOps?
-
-GitOps is an approach for managing infrastructure by:
-- **Declaratively describing** desired system state in Git
-- **Using controllers** to continuously reconcile actual state with desired state
-- **Treating Git as the source of truth** for all configuration and deployments
-
-Upbound control planes are fully compatible with GitOps patterns, and we strongly recommend integrating GitOps in the platforms you build on Upbound.
-
-## Key Concepts
-
-### Argo CD
-[Argo CD](https://argo-cd.readthedocs.io/) is a popular Kubernetes-native GitOps controller. It continuously monitors Git repositories and automatically applies changes to your infrastructure when commits are detected.
-
-### Deployment Models
-
-The way you configure GitOps depends on your deployment model:
-
-| Aspect | Cloud Spaces | Self-Hosted Spaces |
-|--------|--------------|-------------------|
-| **Access Method** | Upbound API with tokens | Kubernetes native (secrets/kubeconfig) |
-| **Configuration** | Kubeconfig via `up` CLI | Control plane connection secrets |
-| **Setup Complexity** | More involved (API integration) | Simpler (native Kubernetes) |
-| **Typical Use Case** | Managing Upbound resources | Managing workloads on control planes |
-
-## Getting Started
-
-**Choose your path based on your deployment model:**
-
-### Cloud Spaces
-If you're using Upbound Cloud Spaces (Dedicated or Managed):
-1. Start with [GitOps with Upbound Control Planes](../cloud-spaces/gitops-on-upbound.md)
-2. Learn how to integrate Argo CD with Cloud Spaces
-3. Manage both control plane infrastructure and Upbound resources declaratively
-
-### Self-Hosted Spaces
-If you're running self-hosted Spaces:
-1. Start with [GitOps with ArgoCD in Self-Hosted Spaces](../self-hosted/gitops-with-argocd.md)
-2. Learn how to configure control plane connection secrets
-3. Manage workloads deployed to your control planes
-
-## Common Workflows
-
-### Workflow 1: Managing Control Planes with GitOps
-Create and manage control planes themselves declaratively using provider-kubernetes (a fuller sketch appears at the end of the Advanced Topics section below):
-
-```yaml
-apiVersion: kubernetes.crossplane.io/v1alpha2
-kind: Object
-metadata:
-  name: my-controlplane
-spec:
-  forProvider:
-    manifest:
-      apiVersion: spaces.upbound.io/v1beta1
-      kind: ControlPlane
-      # ... control plane configuration
-```
-
-### Workflow 2: Managing Workloads on Control Planes
-Deploy applications and resources to control planes using standard Kubernetes GitOps patterns:
-
-```yaml
-apiVersion: v1
-kind: Namespace
-metadata:
-  name: my-app
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: my-app
-  namespace: my-app
-# ... deployment configuration
-```
-
-### Workflow 3: Managing Upbound Resources
-Use provider-upbound to manage Upbound IAM and repository resources:
-
-- Teams
-- Robots and their team memberships
-- Repositories and permissions
-
-## Advanced Topics
-
-### Argo CD Plugin for Upbound
-Learn more in the [ArgoCD Plugin guide](../self-hosted/use-argo.md) for enhanced integration with self-hosted Spaces.
-
-### Declarative Control Plane Creation
-See [Declaratively create control planes](../self-hosted/declarative-ctps.md) for advanced automation patterns.
-
-### Consuming Control Plane APIs
-Understand how to [consume control plane APIs in your app cluster](../mcp-connector-guide.md) with Argo CD.
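-
-Pulling these threads together, here's a fuller sketch of the Workflow 1 pattern above. It wraps the `ControlPlane` schema shown elsewhere in this document in a provider-kubernetes `Object`; the resource names and the `providerConfigRef` value are illustrative assumptions, not required values:
-
-```yaml
-# Sketch: a provider-kubernetes Object, synced from Git by Argo CD,
-# that declaratively creates a ControlPlane on the Stable channel.
-apiVersion: kubernetes.crossplane.io/v1alpha2
-kind: Object
-metadata:
-  name: team-a-controlplane
-spec:
-  forProvider:
-    manifest:
-      apiVersion: spaces.upbound.io/v1beta1
-      kind: ControlPlane
-      metadata:
-        name: team-a
-        namespace: default          # the control plane group
-      spec:
-        crossplane:
-          autoUpgrade:
-            channel: Stable
-  providerConfigRef:
-    name: spaces-provider-config    # assumed ProviderConfig targeting your Space
-```
-
-Committing this manifest to the repository Argo CD watches is all it takes to create, update, or delete the control plane through the normal Git workflow.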
-
-## Prerequisites
-
-Before implementing GitOps with control planes, ensure you have:
-
-**For Cloud Spaces:**
-- Access to Upbound Cloud Spaces
-- `up` CLI installed and configured
-- API token with appropriate permissions
-- Argo CD or similar GitOps controller running
-- Familiarity with Kubernetes RBAC
-
-**For Self-Hosted Spaces:**
-- Self-hosted Spaces deployed and running
-- Argo CD deployed in your infrastructure
-- Kubectl access to the cluster hosting Spaces
-- Understanding of control plane architecture
-
-## Next Steps
-
-1. **Choose your deployment model** above
-2. **Review the relevant getting started guide**
-3. **Set up your GitOps controller** (Argo CD)
-4. **Deploy your first automated control plane**
-5. **Explore advanced topics** as needed
-
-:::tip
-Start with simple deployments to test your GitOps workflow before moving to production. Use [simulations](../simulations.md) to preview changes before applying them.
-:::
diff --git a/spaces_versioned_docs/version-v1.15/howtos/backup-and-restore.md b/spaces_versioned_docs/version-v1.15/howtos/backup-and-restore.md
deleted file mode 100644
index 3b8d026cb..000000000
--- a/spaces_versioned_docs/version-v1.15/howtos/backup-and-restore.md
+++ /dev/null
@@ -1,530 +0,0 @@
----
-title: Backup and restore
-sidebar_position: 13
-description: Configure and manage backups in your Upbound Space.
-plan: "enterprise"
----
-
-Upbound's _Shared Backups_ is a built-in backup and restore feature. Shared Backups lets you configure automatic schedules for taking snapshots of your control planes. You can restore data from these backups by making new control planes. This guide explains how to use Shared Backups for disaster recovery or upgrade scenarios.
-
-:::info API Version Information & Available Versions
-This guide applies to **all supported versions** (v1.9-v1.15+).
-
-**Select your API version**:
-- [Spaces API v1.15 (Latest)](../../../next/reference/apis/spaces-api/)
-- [Spaces API v1.14](../../../1.14/reference/apis/spaces-api/)
-- [Spaces API v1.13](../../../1.13/reference/apis/spaces-api/)
-- [Spaces API v1.12](../../../1.12/reference/apis/spaces-api/)
-- [Spaces API v1.11](../../../1.11/reference/apis/spaces-api/)
-- [Spaces API v1.10](../../../1.10/reference/apis/spaces-api/)
-- [Spaces API v1.9](../../../1.9/reference/apis/spaces-api/)
-:::
-
-## Benefits
-
-The Shared Backups feature provides the following benefits:
-
-* Automatic backups for control planes without any operational overhead
-* Backup schedules for multiple control planes in a group
-* Shared Backups are available across all hosting environments of Upbound (Disconnected, Connected, or Cloud Spaces)
-
-## Configure a Shared Backup Config
-
-[SharedBackupConfig][sharedbackupconfig] is a [group-scoped][group-scoped] resource. You should create it in a group containing one or more control planes. This resource configures the storage details and provider. Whenever a backup executes (either by schedule or manually initiated), it references a SharedBackupConfig to tell it where to store the snapshot.
-
-### Backup config provider
-
-The `spec.objectStorage.provider` and `spec.objectStorage.config` fields configure:
-
-* The object storage provider
-* The path to the provider
-* The credentials needed to communicate with the provider
-
-You can only set one provider. Upbound currently supports AWS, Azure, and GCP as providers.
-
-`spec.objectStorage.config` is a freeform map of configuration options for the object storage provider. See [Thanos object storage][thanos-object-storage] for more information on the formats for each supported cloud provider. `spec.objectStorage.bucket` and `spec.objectStorage.provider` override the required values in the config.
-
-#### AWS as a storage provider
-
-:::important
-For Cloud Spaces, static credentials are currently the only supported auth method.
-:::
-
-This example demonstrates how to use AWS as a storage provider for your backups:
-
-```yaml
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedBackupConfig
-metadata:
-  name: default
-  namespace: default
-spec:
-  objectStorage:
-    provider: AWS
-    bucket: spaces-backup-bucket
-    config:
-      endpoint: s3.eu-west-2.amazonaws.com
-      region: eu-west-2
-    credentials:
-      source: Secret
-      secretRef:
-        name: bucket-creds
-        key: creds
-```
-
-This example assumes you've already created an S3 bucket called "spaces-backup-bucket" in the AWS `eu-west-2` region. The account credentials to access the bucket should exist in a secret in the same namespace as the Shared Backup Config.
-
-#### Azure as a storage provider
-
-:::important
-For Cloud Spaces, static credentials are currently the only supported auth method.
-:::
-
-This example demonstrates how to use Azure as a storage provider for your backups:
-
-```yaml
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedBackupConfig
-metadata:
-  name: default
-  namespace: default
-spec:
-  objectStorage:
-    provider: Azure
-    bucket: upbound-backups
-    config:
-      storage_account: upbackupstore
-      container: upbound-backups
-      endpoint: blob.core.windows.net
-    credentials:
-      source: Secret
-      secretRef:
-        name: bucket-creds
-        key: creds
-```
-
-This example assumes you've already created an Azure storage account called `upbackupstore` and blob `upbound-backups`. The storage account key to access the blob should exist in a secret in the same namespace as the Shared Backup Config.
-
-#### GCP as a storage provider
-
-:::important
-For Cloud Spaces, static credentials are currently the only supported auth method.
-:::
-
-This example demonstrates how to use Google Cloud Storage as a storage provider for your backups:
-
-```yaml
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedBackupConfig
-metadata:
-  name: default
-  namespace: default
-spec:
-  objectStorage:
-    provider: GCP
-    bucket: spaces-backup-bucket
-    credentials:
-      source: Secret
-      secretRef:
-        name: bucket-creds
-        key: creds
-```
-
-This example assumes you've already created a Cloud Storage bucket called "spaces-backup-bucket" and a service account with access to this bucket. The key file should exist in a secret in the same namespace as the Shared Backup Config.
-
-## Configure a Shared Backup Schedule
-
-[SharedBackupSchedule][sharedbackupschedule] is a [group-scoped][group-scoped-1] resource. You should create it in a group containing one or more control planes. This resource defines a backup schedule for control planes within its corresponding group.
-
-Below is an example of a Shared Backup Schedule that takes backups every day of all control planes having `environment: production` labels:
-
-```yaml
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedBackupSchedule
-metadata:
-  name: daily-schedule
-  namespace: default
-spec:
-  schedule: "@daily"
-  configRef:
-    kind: SharedBackupConfig
-    name: default
-  controlPlaneSelector:
-    labelSelectors:
-      - matchLabels:
-          environment: production
-```
-
-### Define a schedule
-
-The `spec.schedule` field is a [Cron-formatted][cron-formatted] string. Some common examples are below:
-
-| Entry | Description |
-| ----------------- | ------------------------------------------------------------------------------------------------- |
-| `@hourly` | Run once an hour. |
-| `@daily` | Run once a day. |
-| `@weekly` | Run once a week. |
-| `0 0/4 * * *` | Run every 4 hours. |
-| `0/15 * * * 1-5` | Run every 15 minutes, Monday through Friday. |
-| `@every 1h30m10s` | Run every 1 hour, 30 minutes, and 10 seconds. Hour is the largest measurement of time for @every. |
-
-### Exclude resources from the backup
-
-The `spec.excludedResources` field is an array of resource names to exclude from each backup.
-
-```yaml
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedBackupSchedule
-metadata:
-  name: daily-schedule
-spec:
-  excludedResources:
-    - "xclusters.aws.platformref.upbound.io"
-    - "xdatabase.aws.platformref.upbound.io"
-    - "xrolepolicyattachment.iam.aws.crossplane.io"
-```
-
-:::warning
-You must specify resource names in lowercase "resource.group" format (for example, `xclusters.aws.platformref.upbound.io`). Using only the resource kind (for example, `XCluster`) isn't supported.
-:::
-
-### Suspend a schedule
-
-Use the `spec.suspend` field to suspend the schedule. A suspended schedule creates no new backups, but allows running backups to complete.
-
-```yaml
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedBackupSchedule
-metadata:
-  name: daily-schedule
-spec:
-  suspend: true
-```
-
-### Set the time to live
-
-Set the `spec.ttl` field to define the time to live for the backup. After this time, the backup is eligible for garbage collection. If this field isn't set, the backup isn't garbage collected. The time to live is a duration, for example, `168h` for 7 days.
-
-```yaml
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedBackupSchedule
-metadata:
-  name: daily-schedule
-spec:
-  ttl: 168h # Backup is garbage collected after 7 days
-```
-:::tip
-By default, this setting doesn't delete uploaded files. Review the next section to define
-the deletion policy.
-:::
-
-### Define the deletion policy
-
-Set the `spec.deletionPolicy` field to define backup deletion actions, including the
-deletion of the backup file from the bucket. The deletion policy value defaults
-to `Orphan`. Set it to `Delete` to remove uploaded files from the bucket. For more
-information on the backup and restore process, review the [Spaces API
-documentation][spaces-api-documentation].
-
-```yaml
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedBackupSchedule
-metadata:
-  name: daily-schedule
-spec:
-  ttl: 168h # Backup is garbage collected after 7 days
-  deletionPolicy: Delete # Defaults to Orphan
-```
-
-### Garbage collect backups when the schedule gets deleted
-
-Set the `spec.useOwnerReferencesInBackup` field to garbage collect associated backups when a shared schedule gets deleted. If set to true, backups are garbage collected when the schedule gets deleted, as in the sketch below.
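-
-A minimal sketch following the field path named above and the example pattern used throughout this section (the schedule name is illustrative):
-
-```yaml
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedBackupSchedule
-metadata:
-  name: daily-schedule
-spec:
-  schedule: "@daily"
-  # Backups created by this schedule are garbage collected
-  # when the schedule itself gets deleted.
-  useOwnerReferencesInBackup: true
-```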
- -### Control plane selection - -To configure which control planes in a group you want to create a backup schedule for, use the `spec.controlPlaneSelector` field. You can either use `labelSelectors` or the `names` of a control plane directly. A control plane matches if any of the label selectors match. - -This example matches all control planes in the group that have `environment: production` as a label: - -```yaml -apiVersion: spaces.upbound.io/v1alpha1 -kind: SharedBackupSchedule -metadata: - name: my-backup-schedule -spec: - controlPlaneSelector: - labelSelectors: - - matchLabels: - environment: production -``` - -You can use the more complex `matchExpressions` to match labels based on an expression. This example matches control planes that have label `environment: production` or `environment: staging`: - -```yaml -apiVersion: spaces.upbound.io/v1alpha1 -kind: SharedBackupSchedule -metadata: - name: my-backup-schedule -spec: - controlPlaneSelector: - labelSelectors: - - matchExpressions: - - { key: environment, operator: In, values: [production,staging] } -``` - -You can also specify the names of control planes directly: - -```yaml -apiVersion: spaces.upbound.io/v1alpha1 -kind: SharedBackupSchedule -metadata: - name: my-backup-schedule -spec: - controlPlaneSelector: - names: - - controlplane-dev - - controlplane-staging - - controlplane-prod -``` - - -## Configure a Shared Backup - - - -[SharedBackup][sharedbackup] is a [group-scoped][group-scoped-2] resource. You should create them in a group containing one or more control planes. This resource causes a backups to occur for control planes within its corresponding group. - -Below is an example of a Shared Backup that takes a backup of all control planes having `environment: production` labels: - -```yaml -apiVersion: spaces.upbound.io/v1alpha1 -kind: SharedBackup -metadata: - name: my-backup - namespace: default -spec: - configRef: - kind: SharedBackupConfig - name: default - controlPlaneSelector: - labelSelectors: - - matchLabels: - environment: production -``` - -### Exclude resources from the backup - -The `spec.excludedResources` field is an array of resource names to exclude from each backup. - -```yaml -apiVersion: spaces.upbound.io/v1alpha1 -kind: SharedBackup -metadata: - name: my-backup -spec: - excludedResources: - - "xclusters.aws.platformref.upbound.io" - - "xdatabase.aws.platformref.upbound.io" - - "xrolepolicyattachment.iam.aws.crossplane.io" -``` - -:::warning -You must specify resource names in lowercase "resource.group" format (for example, `xclusters.aws.platformref.upbound.io`). Using only the resource kind (for example, `XCluster`) isn't supported. -::: - -### Set the time to live - -Set the `spec.ttl` field to define the time to live for the backup. After this time, the backup is eligible for garbage collection. If this field isn't set, the backup isn't garbage collected. The time to live is a duration, for example, `168h` for 7 days. - -```yaml -apiVersion: spaces.upbound.io/v1alpha1 -kind: SharedBackup -metadata: - name: my-backup -spec: - ttl: 168h # Backup is garbage collected after 7 days -``` - - -### Garbage collect backups on Shared Backup deletion - - - -Set the `spec.useOwnerReferencesInBackup` to define whether to garbage collect associated backups when a shared backup gets deleted. If set to true, backups are garbage collected when the shared backup gets deleted. 
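-
-After the shared backup runs, you can list the results with standard tooling. A hedged sketch, assuming your kubeconfig points at the group and that the Space surfaces a per-control-plane `Backup` object for each matched control plane:
-
-```bash
-# The SharedBackup and the backups it produced live in the group's namespace
-kubectl get sharedbackups -n default
-kubectl get backups -n default
-```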
- -### Control plane selection - -To configure which control planes in a group you want to create a backup for, use the `spec.controlPlaneSelector` field. You can either use `labelSelectors` or the `names` of a control plane directly. A control plane matches if any of the label selectors match. - -This example matches all control planes in the group that have `environment: production` as a label: - -```yaml -apiVersion: spaces.upbound.io/v1alpha1 -kind: SharedBackup -metadata: - name: my-backup -spec: - controlPlaneSelector: - labelSelectors: - - matchLabels: - environment: production -``` - -You can use the more complex `matchExpressions` to match labels based on an expression. This example matches control planes that have label `environment: production` or `environment: staging`: - -```yaml -apiVersion: spaces.upbound.io/v1alpha1 -kind: SharedBackup -metadata: - name: my-backup -spec: - controlPlaneSelector: - labelSelectors: - - matchExpressions: - - { key: environment, operator: In, values: [production,staging] } -``` - -You can also specify the names of control planes directly: - -```yaml -apiVersion: spaces.upbound.io/v1alpha1 -kind: SharedBackup -metadata: - name: my-backup -spec: - controlPlaneSelector: - names: - - controlplane-dev - - controlplane-staging - - controlplane-prod -``` - -## Create a manual backup - -[Backup][backup] is a [group-scoped][group-scoped-3] resource that causes a single backup to occur for a control planes in its corresponding group. - -Below is an example of a manual Backup of a control plane: - -```yaml -apiVersion: spaces.upbound.io/v1alpha1 -kind: Backup -metadata: - name: my-backup - namespace: default -spec: - configRef: - kind: SharedBackupConfig - name: default - controlPlane: my-awesome-ctp - deletionPolicy: Delete -``` - -The backup specification `DeletionPolicy` defines backup deletion actions, -including the deletion of the backup file from the bucket. The `Deletion Policy` -value defaults to `Orphan`. Set it to `Delete` to remove uploaded files -in the bucket. -For more information on the backup and restore process, review the [Spaces API documentation][spaces-api-documentation-1]. - - -### Choose a control plane to backup - -The `spec.controlPlane` field defines which control plane to execute a backup against. - -```yaml -apiVersion: spaces.upbound.io/v1alpha1 -kind: Backup -metadata: - name: my-backup - namespace: default -spec: - controlPlane: my-awesome-ctp -``` - -If the control plane doesn't exist, the backup fails after multiple failed retry attempts. - -### Exclude resources from the backup - -The `spec.excludedResources` field is an array of resource names to exclude from the manual backup. - -```yaml -apiVersion: spaces.upbound.io/v1alpha1 -kind: Backup -metadata: - name: my-backup -spec: - excludedResources: - - "xclusters.aws.platformref.upbound.io" - - "xdatabase.aws.platformref.upbound.io" - - "xrolepolicyattachment.iam.aws.crossplane.io" -``` - -:::warning -You must specify resource names in lowercase "resource.group" format (for example, `xclusters.aws.platformref.upbound.io`). Using only the resource kind (for example, `XCluster`) isn't supported. -::: - -### Set the time to live - -Set the `spec.ttl` field to define the time to live for the backup. After this time, the backup is eligible for garbage collection. If this field isn't set, the backup isn't garbage collected. The time to live is a duration, for example, `168h` for 7 days. 
-
-```yaml
-apiVersion: spaces.upbound.io/v1alpha1
-kind: Backup
-metadata:
-  name: my-backup
-spec:
-  ttl: 168h # Backup is garbage collected after 7 days
-```
-
-## Restore a control plane from a backup
-
-You can restore a control plane's state from a backup. Below is an example of creating a new control plane from a previous backup called `restore-me`:
-
-
-```yaml
-apiVersion: spaces.upbound.io/v1beta1
-kind: ControlPlane
-metadata:
-  name: my-awesome-restored-ctp
-  namespace: default
-spec:
-  restore:
-    source:
-      kind: Backup
-      name: restore-me
-```
-
-
-[group-scoped]: /spaces/concepts/groups
-[group-scoped-1]: /spaces/concepts/groups
-[group-scoped-2]: /spaces/concepts/groups
-[group-scoped-3]: /spaces/concepts/groups
-[sharedbackupconfig]: /reference/apis/spaces-api/latest
-[thanos-object-storage]: https://thanos.io/tip/thanos/storage.md/
-[sharedbackupschedule]: /reference/apis/spaces-api/latest
-[cron-formatted]: https://en.wikipedia.org/wiki/Cron
-[spaces-api-documentation]: /reference/apis/spaces-api/v1_9
-[sharedbackup]: /reference/apis/spaces-api/latest
-[backup]: /reference/apis/spaces-api/latest
-[spaces-api-documentation-1]: /reference/apis/spaces-api/v1_9
-
-
-
diff --git a/spaces_versioned_docs/version-v1.15/howtos/cloud-spaces/_category_.json b/spaces_versioned_docs/version-v1.15/howtos/cloud-spaces/_category_.json
deleted file mode 100644
index 1e1869a38..000000000
--- a/spaces_versioned_docs/version-v1.15/howtos/cloud-spaces/_category_.json
+++ /dev/null
@@ -1,10 +0,0 @@
-{
-    "label": "Cloud Spaces",
-    "position": 1,
-    "collapsed": true,
-    "customProps": {
-      "plan": "standard"
-    }
-}
-
-
diff --git a/spaces_versioned_docs/version-v1.15/howtos/cloud-spaces/dedicated-spaces-deployment.md b/spaces_versioned_docs/version-v1.15/howtos/cloud-spaces/dedicated-spaces-deployment.md
deleted file mode 100644
index ebad9493e..000000000
--- a/spaces_versioned_docs/version-v1.15/howtos/cloud-spaces/dedicated-spaces-deployment.md
+++ /dev/null
@@ -1,33 +0,0 @@
----
-title: Dedicated Spaces
-sidebar_position: 4
-description: A guide to Upbound Dedicated Spaces
-plan: business
----
-
-
-## Benefits
-
-Dedicated Spaces offer the following benefits:
-
-- **Single-tenancy.** A control plane space where Upbound guarantees you're the only tenant operating in the environment.
-- **Connectivity to your private network.** Establish secure network connections between your Dedicated Cloud Space running in Upbound and your own resources behind your private network.
-- **Reduced overhead.** Offload day-to-day operational burdens to Upbound while focusing on your job of building your platform.
-
-## Architecture
-
-A Dedicated Space is a deployment of the Upbound Spaces software inside an
-Upbound-controlled cloud account and network. The control planes you run in a
-Dedicated Space are dedicated solely to your organization.
-
-The diagram below illustrates the high-level architecture of Upbound Dedicated Spaces:
-
-![Upbound Managed Spaces arch](/img/managed-arch-gcp.png)
-
-## How to get access to Dedicated Spaces
-
-If you're interested in Upbound Dedicated Spaces, contact
-[Upbound][contact-us]. We can chat more about your
-requirements and see if Dedicated Spaces are a good fit for you.
- -[contact-us]: https://www.upbound.io/contact-us -[managed-space]: /spaces/howtos/self-hosted/managed-spaces-deployment diff --git a/spaces_versioned_docs/version-v1.15/howtos/cloud-spaces/gitops-on-upbound.md b/spaces_versioned_docs/version-v1.15/howtos/cloud-spaces/gitops-on-upbound.md deleted file mode 100644 index fa59a8dce..000000000 --- a/spaces_versioned_docs/version-v1.15/howtos/cloud-spaces/gitops-on-upbound.md +++ /dev/null @@ -1,318 +0,0 @@ ---- -title: GitOps with Upbound Control Planes -sidebar_position: 80 -description: An introduction to doing GitOps with control planes on Upbound Cloud Spaces -tier: "business" ---- - -:::info Deployment Model -This guide applies to **Upbound Cloud Spaces** (Dedicated and Managed Spaces). For self-hosted Spaces deployments, see [GitOps with ArgoCD in Self-Hosted Spaces](/spaces/howtos/self-hosted/gitops-with-argocd/). -::: - -GitOps is an approach for managing a system by declaratively describing desired resources' configurations in Git and using controllers to realize the desired state. Upbound's control planes are compatible with this pattern and it's strongly recommended you integrate GitOps in the platforms you build on Upbound. - - -## Integrate with Argo CD - - -[Argo CD][argo-cd] is a project in the Kubernetes ecosystem commonly used for GitOps. You can use it in tandem with Upbound control planes to achieve GitOps flows. The sections below explain how to integrate these tools with Upbound. - -### Generate a kubeconfig for your control plane - -Use the up CLI to [generate a kubeconfig][generate-a-kubeconfig] for your control plane. - -```bash -up ctx /// -f - > context.yaml -``` - -### Create an API token - - -You need a personal access token (PAT). You create PATs on a per-user basis in the Upbound Console. Go to [My Account - API tokens][my-account-api-tokens] and select Create New Token. Give the token a name and save the secret value to somewhere safe. - - -### Add the up CLI init container to Argo - -Create a new file called `up-plugin-values.yaml` and paste the following YAML: - -```yaml -controller: - volumes: - - name: up-plugin - emptyDir: {} - - name: up-home - emptyDir: {} - - volumeMounts: - - name: up-plugin - mountPath: /usr/local/bin/up - subPath: up - - name: up-home - mountPath: /home/argocd/.up - - initContainers: - - name: up-plugin - image: xpkg.upbound.io/upbound/up-cli:v0.39.0 - command: ["cp"] - args: - - /usr/local/bin/up - - /plugin/up - volumeMounts: - - name: up-plugin - mountPath: /plugin - -server: - volumes: - - name: up-plugin - emptyDir: {} - - name: up-home - emptyDir: {} - - volumeMounts: - - name: up-plugin - mountPath: /usr/local/bin/up - subPath: up - - name: up-home - mountPath: /home/argocd/.up - - initContainers: - - name: up-plugin - image: xpkg.upbound.io/upbound/up-cli:v0.39.0 - command: ["cp"] - args: - - /usr/local/bin/up - - /plugin/up - volumeMounts: - - name: up-plugin - mountPath: /plugin -``` - -### Install or upgrade Argo using the values file - -Install or upgrade Argo via Helm, including the values from the `up-plugin-values.yaml` file: - -```bash -helm upgrade --install -n argocd -f up-plugin-values.yaml --reuse-values argocd argo/argo-cd -``` - - -### Configure Argo CD - - -To configure Argo CD for Annotation resource tracking, edit the Argo CD ConfigMap in the Argo CD namespace. -Add `application.resourceTrackingMethod: annotation` to the data section as below. -This configuration turns off Argo CD auto pruning, preventing the deletion of Crossplane resources. 
-
-Next, configure the [auto respect RBAC for the Argo CD controller][auto-respect-rbac-for-the-argo-cd-controller].
-By default, Argo CD attempts to discover some Kubernetes resource types that don't exist in a control plane.
-You must configure Argo CD to respect the cluster's RBAC rules so that Argo CD can sync.
-Add `resource.respectRBAC: normal` to the data section as below.
-
-```yaml
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: argocd-cm
-data:
-  ...
-  application.resourceTrackingMethod: annotation
-  resource.respectRBAC: normal
-```
-
-:::tip
-The `resource.respectRBAC` configuration above tells Argo to respect RBAC for _all_ cluster contexts. If you're using an Argo CD instance to manage more than only control planes, you should consider changing the `clusters` string match for the configuration to apply only to control planes. For example, if every control plane context name followed the convention of being named `controlplane-<name>`, you could set the string match to be `controlplane-*`.
-:::
-
-
-### Create a cluster context definition
-
-
-Replace the variables and apply the following manifest to configure a new Argo cluster context definition.
-
-To configure Argo for a control plane in a Connected Space, replace `stringData.server` with the ingress URL of the control plane. This URL is the output of `up ctx`.
-
-```yaml
-apiVersion: v1
-kind: Secret
-metadata:
-  name: my-control-plane
-  namespace: argocd
-  labels:
-    argocd.argoproj.io/secret-type: cluster
-type: Opaque
-stringData:
-  name: my-control-plane-context
-  server: https://<space-host>.spaces.upbound.io/apis/spaces.upbound.io/v1beta1/namespaces/<group>/controlplanes/<control-plane>/k8s
-  config: |
-    {
-      "execProviderConfig": {
-        "apiVersion": "client.authentication.k8s.io/v1",
-        "command": "up",
-        "args": [ "org", "token" ],
-        "env": {
-          "ORGANIZATION": "<organization>",
-          "UP_TOKEN": "<api-token>"
-        }
-      },
-      "tlsClientConfig": {
-        "insecure": false,
-        "caData": "<ca-data>"
-      }
-    }
-```
-
-
-## GitOps for Upbound resources
-
-
-Like any other cloud service, you can drive the lifecycle of Upbound Cloud resources with Crossplane. This lets you establish GitOps flows to declaratively create and manage:
-
-- [control plane groups][control-plane-groups]
-- [control planes][control-planes]
-- [Upbound IAM resources][upbound-iam-resources]
-
-Use a control plane installed with [provider-upbound][provider-upbound] and [provider-kubernetes][provider-kubernetes] to achieve this.
-
-### Provider-upbound
-
-[Provider-upbound][provider-upbound-2] is a Crossplane provider built by Upbound to interact with Upbound resources. Use _provider-upbound_ to declaratively create and manage the lifecycle of IAM resources and repositories:
-
-- [Robots][robots] and their membership in teams
-- [Teams][teams]
-- [Repositories][repositories] and [permissions][permissions] on those repositories.
-
-:::tip
-This provider defines managed resources for control planes, their auth, and permissions. These resources are only applicable for customers who run in Upbound's **Legacy Spaces** control plane hosting environments. Customers should use provider-kubernetes, explained below, to manage the lifecycle of control planes with Crossplane.
-:::
-
-### Provider-kubernetes
-
-[Provider-kubernetes][provider-kubernetes-3] is a Crossplane provider that defines an [Object][object] resource. Use _Objects_ as general-purpose resources to wrap _any_ Kubernetes resource for Crossplane to manage.
-
-Upbound [Space APIs][space-apis] are Kube-like APIs and have implemented support for most Kubernetes-style API concepts.
You can use kubectl or any other Kubernetes-compatible tooling to interact with the API. This means you can use _provider-kubernetes_ to drive interactions with Space APIs. - -:::warning -When interacting with a Cloud Space's API, the Kubernetes [watch][watch] feature **isn't implemented.** Argo CD requires _watch_ support to function as expected, meaning you can't point Argo directly at a Cloud Space until it's implemented. -::: - -Use _provider-kubernetes_ to declaratively drive interactions with all [Space APIs][space-apis-1]. Wrap the desired API resource in an _Object_. See the example below for a control plane: - -```yaml -apiVersion: kubernetes.crossplane.io/v1alpha2 -kind: Object -metadata: - name: my-controlplane -spec: - forProvider: - manifest: - apiVersion: spaces.upbound.io/v1beta1 - kind: ControlPlane - metadata: - name: my-controlplane - namespace: default - spec: - crossplane: - autoUpgrade: - channel: Rapid -``` - -[Control plane groups][control-plane-groups-2] are a special case because they technically map to an underlying Kubernetes namespace. You should create a `kind: namespace` with the `spaces.upbound.io/group` label to create a control plane group in a Space. See the example below: - -```yaml -apiVersion: kubernetes.crossplane.io/v1alpha2 -kind: Object -metadata: - name: group1 -spec: - forProvider: - manifest: - apiVersion: v1 - kind: Namespace - metadata: - name: group1 - labels: - spaces.upbound.io/group: "true" - spec: {} -``` - -### Configure auth for provider-kubernetes - -Like any other Crossplane provider, _provider-kubernetes_ requires a valid [ProviderConfig][providerconfig] to authenticate with Upbound before interacting with its APIs. Follow the steps below to configure auth for a ProviderConfig on a control plane that you want to use to interact with Upbound resources. - -1. Define an environment variable for the name of your Upbound org account. Use `up org list` to retrieve this value. -```ini -export UPBOUND_ACCOUNT="" -``` - -2. Create a [personal access token][personal-access-token] and store it as an environment variable. -```shell -export UPBOUND_TOKEN="" -``` - -3. Log on to Upbound. -```shell -up login -``` - -4. Create a kubeconfig for the desired Cloud Space instance you want to interact with. -```shell -export CONTROLPLANE_CONFIG=/tmp/controlplane-kubeconfig -KUBECONFIG=$CONTROLPLANE_CONFIG up ctx $UPBOUND_ACCOUNT/upbound-gcp-us-west-1 # Replace this path with whichever Cloud Space you want to communicate with. -``` - -5. On the control plane you want to use to interact with Upbound resources, create a secret containing the credentials: -```shell -kubectl -n crossplane-system create secret generic cluster-config --from-file=kubeconfig=$CONTROLPLANE_CONFIG -kubectl -n crossplane-system create secret generic upbound-credentials --from-literal=token=$UPBOUND_TOKEN -``` - -6. Create a ProviderConfig that references the credentials created in the prior step. Create this resource in your control plane: -```yaml -apiVersion: kubernetes.crossplane.io/v1alpha1 -kind: ProviderConfig -metadata: - name: default -spec: - credentials: - source: Secret - secretRef: - namespace: crossplane-system - name: cluster-config - key: kubeconfig - identity: - type: UpboundTokens - source: Secret - secretRef: - name: upbound-credentials - namespace: crossplane-system - key: token -``` - -You can now create _Objects_ in the control plane which wrap Space APIs. 
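-
-After applying an _Object_, you can verify that the provider reached Upbound. A quick sketch (resource names are illustrative):
-
-```bash
-# READY and SYNCED turn True once provider-kubernetes applies
-# the wrapped manifest through the Space API.
-kubectl get objects.kubernetes.crossplane.io
-kubectl describe objects.kubernetes.crossplane.io my-controlplane
-```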
-
-[generate-a-kubeconfig]: /manuals/cli/concepts/contexts
-[control-plane-groups]: /spaces/concepts/groups
-[control-planes]: /spaces/concepts/control-planes
-[upbound-iam-resources]: /manuals/platform/concepts/identity-management
-[space-apis]: /reference/apis/spaces-api/v1_9
-[space-apis-1]: /reference/apis/spaces-api/v1_9
-[control-plane-groups-2]: /spaces/concepts/groups
-
-
-[argo-cd]: https://argo-cd.readthedocs.io/en/stable/
-[my-account-api-tokens]: https://accounts.upbound.io/settings/tokens
-[auto-respect-rbac-for-the-argo-cd-controller]: https://argo-cd.readthedocs.io/en/stable/operator-manual/declarative-setup/#auto-respect-rbac-for-controller
-[spec-writeconnectionsecrettoref]: /reference/apis/spaces-api/latest
-[auto-respect-rbac-for-the-argo-cd-controller-1]: https://argo-cd.readthedocs.io/en/stable/operator-manual/declarative-setup/#auto-respect-rbac-for-controller
-[provider-upbound]: https://marketplace.upbound.io/providers/upbound/provider-upbound
-[provider-kubernetes]: https://marketplace.upbound.io/providers/upbound/provider-kubernetes
-[provider-upbound-2]: https://marketplace.upbound.io/providers/upbound/provider-upbound
-[robots]: https://marketplace.upbound.io/providers/upbound/provider-upbound/v0.8.0/resources/iam.upbound.io/Robot/v1alpha1
-[teams]: https://marketplace.upbound.io/providers/upbound/provider-upbound/v0.8.0/resources/iam.upbound.io/Team/v1alpha1
-[repositories]: https://marketplace.upbound.io/providers/upbound/provider-upbound/v0.8.0/resources/repository.upbound.io/Repository/v1alpha1
-[permissions]: https://marketplace.upbound.io/providers/upbound/provider-upbound/v0.8.0/resources/repository.upbound.io/Permission/v1alpha1
-[provider-kubernetes-3]: https://marketplace.upbound.io/providers/upbound/provider-kubernetes
-[object]: https://marketplace.upbound.io/providers/upbound/provider-kubernetes/v0.17.0/resources/kubernetes.crossplane.io/Object/v1alpha2
-[watch]: https://kubernetes.io/docs/reference/using-api/api-concepts/#watch-bookmarks
-[providerconfig]: https://marketplace.upbound.io/providers/upbound/provider-kubernetes/v0.17.0/resources/kubernetes.crossplane.io/ProviderConfig/v1alpha1
-[personal-access-token]: https://accounts.upbound.io/settings/tokens
diff --git a/spaces_versioned_docs/version-v1.15/howtos/control-plane-topologies.md b/spaces_versioned_docs/version-v1.15/howtos/control-plane-topologies.md
deleted file mode 100644
index 9020e5a41..000000000
--- a/spaces_versioned_docs/version-v1.15/howtos/control-plane-topologies.md
+++ /dev/null
@@ -1,566 +0,0 @@
----
-title: Control Plane Topologies
-sidebar_position: 15
-description: Configure scheduling of composites to remote control planes
----
-
-:::info API Version Information
-This guide is for the Control Plane Topology feature, which is in **private preview**. For interested customers with access to this feature, it applies to v1.12+.
-
-For related API specifications, see the [Spaces API Reference](../../../reference/apis/spaces-api/).
-:::
-
-:::important
-This feature is in private preview for select customers in Upbound Spaces. If you're interested in this feature, please [contact us](https://www.upbound.io/support/contact).
-:::
-
-Upbound's _Control Plane Topology_ feature lets you build and deploy a platform
-of multiple control planes. These control planes work together for a unified platform
-experience.
- - -With the _Topology_ feature, you can install resource APIs that are -reconciled by other control planes and configure the routing that occurs between -control planes. You can also build compositions that reference other resources -running on your control plane or elsewhere in Upbound. - -This guide explains how to use Control Plane Topology APIs to install, configure -remote APIs, and build powerful compositions that reference other resources. - -## Benefits - -The Control Plane Topology feature provides the following benefits: - -* Decouple your platform architecture into independent offerings to improve your platform's software development lifecycle. -* Install composite APIs from Configurations as CRDs which are fulfilled and reconciled by other control planes. -* Route APIs to other control planes by configuring an _Environment_ resource, which define a set of routable dimensions. - -## How it works - - -Imagine the scenario where you want to let a user reference a subnet when creating a database instance. To your control plane, the `kind: database` and `kind: subnet` are independent resources. To you as the composition author, these resources have an important relationship. It may be that: - -- you don't want your user to ever be able to create a database without specifying a subnet. -- you want to let them create a subnet when they create the database, if it doesn't exist. -- you want to allow them to reuse a subnet that got created elsewhere or gets shared by another user. - -In each of these scenarios, you must resort to writing complex composition logic -to handle each case. The problem is compounded when the resource exists in a -context separate from the current control plane's context. Imagine a scenario -where one control plane manages Database resources and a second control plane -manages networking resources. With the _Topology_ feature, you can offload these -concerns to Upbound machinery. - - -![Control Plane Topology feature arch](/img/topology-arch.png) - -## Prerequisites - -Enable the Control Plane Topology feature in the Space you plan to run your control plane in: - -- Cloud Spaces: Not available yet -- Connected Spaces: Space administrator must enable this feature -- Disconnected Spaces: Space administrator must enable this feature - - - -## Compose resources with _ReferencedObjects_ - - - -_ReferencedObject_ is a resource type available in an Upbound control plane that lets you reference other Kubernetes resources in Upbound. - -:::tip -This feature is useful for composing resources that exist in a -remote context, like another control plane. You can also use -_ReferencedObjects_ to resolve references to any other Kubernetes object -in the current control plane context. This could be a secret, another Crossplane -resource, or more. -::: - -### Declare the resource reference in your XRD - -To compose a _ReferencedObject_, you should start by adding a resource reference -in your Composite Resource Definition (XRD). The convention for the resource -reference follows the shape shown below: - -```yaml -Ref: - type: object - properties: - apiVersion: - type: string - default: "" - enum: [ "" ] - kind: - type: string - default: "" - enum: [ "" ] - grants: - type: array - default: [ "Observe" ] - items: - type: string - enum: [ "Observe", "Create", "Update", "Delete", "*" ] - name: - type: string - namespace: - type: string - required: - - name -``` - -The `Ref` should be the kind of resource you want to reference. 
The `apiVersion` and `kind` should be the associated API version and kind of the resource you want to reference. - -The `name` and `namespace` strings are inputs that let your users specify the resource instance. - -#### Grants - -The `grants` field is a special array that lets you give users the power to influence the behavior of the referenced resource. You can configure which of the available grants you let your user select and which it defaults to. Similar in behavior as [Crossplane management policies][crossplane-management-policies], each grant value does the following: - -- **Observe:** The composite may observe the state of the referenced resource. -- **Create:** The composite may create the referenced resource if it doesn't exist. -- **Update:** The composite may update the referenced resource. -- **Delete:** The composite may delete the referenced resource. -- **\*:** The composite has full control over the referenced resource. - -Here are some examples that show how it looks in practice: - -
- -Show example for defining the reference to another composite resource - -```yaml -apiVersion: apiextensions.crossplane.io/v1 -kind: CompositeResourceDefinition -metadata: - name: xsqlinstances.database.platform.upbound.io -spec: - type: object - properties: - parameters: - type: object - properties: - networkRef: - type: object - properties: - apiVersion: - type: string - default: "networking.platform.upbound.io" - enum: [ "networking.platform.upbound.io" ] - grants: - type: array - default: [ "Observe" ] - items: - type: string - enum: [ "Observe" ] - kind: - type: string - default: "Network" - enum: [ "Network" ] - name: - type: string - namespace: - type: string - required: - - name -``` - -
- - -
-Show example for defining the reference to a secret -```yaml -apiVersion: apiextensions.crossplane.io/v1 -kind: CompositeResourceDefinition -metadata: - name: xsqlinstances.database.platform.upbound.io -spec: - type: object - properties: - parameters: - type: object - properties: - secretRef: - type: object - properties: - apiVersion: - type: string - default: "v1" - enum: [ "v1" ] - grants: - type: array - default: [ "Observe" ] - items: - type: string - enum: [ "Observe", "Create", "Update", "Delete", "*" ] - kind: - type: string - default: "Secret" - enum: [ "Secret" ] - name: - type: string - namespace: - type: string - required: - - name -``` -
- -### Manually add the jsonPath - -:::important -This step is a known limitation of the preview. We're working on tooling that -removes the need for authors to do this step. -::: - -During the preview timeframe of this feature, you must add an annotation by hand -to the XRD. In your XRD's `metadata.annotations`, set the -`references.upbound.io/schema` annotation. It should be a JSON string in the -following format: - -```json -{ - "apiVersion": "references.upbound.io/v1alpha1", - "kind": "ReferenceSchema", - "references": [ - { - "jsonPath": ".spec.parameters.secretRef", - "kinds": [ - { - "apiVersion": "v1", - "kind": "Secret" - } - ] - } - ] -} -``` - -Flatten this JSON into a string and set the annotation on your XRD. View the -example below for an illustration: - -
-Show example setting the references.upbound.io/schema annotation -```yaml -apiVersion: apiextensions.crossplane.io/v1 -kind: CompositeResourceDefinition -metadata: - name: xthings.networking.acme.com - annotations: - references.upbound.io/schema: '{"apiVersion":"references.upbound.io/v1alpha1","kind":"ReferenceSchema","references":[{"jsonPath":".spec.secretRef","kinds":[{"apiVersion":"v1","kind":"Secret"}]},{"jsonPath":".spec.configMapRef","kinds":[{"apiVersion":"v1","kind":"ConfigMap"}]}]}' -``` -
- -
-<details>
-<summary>Show example for setting multiple references in the references.upbound.io/schema annotation</summary>
-
-```yaml
-apiVersion: apiextensions.crossplane.io/v1
-kind: CompositeResourceDefinition
-metadata:
-  name: xthings.networking.acme.com
-  annotations:
-    references.upbound.io/schema: '{"apiVersion":"references.upbound.io/v1alpha1","kind":"ReferenceSchema","references":[{"jsonPath":".spec.parameters.secretRef","kinds":[{"apiVersion":"v1","kind":"Secret"}]},{"jsonPath":".spec.parameters.configMapRef","kinds":[{"apiVersion":"v1","kind":"ConfigMap"}]}]}'
-```
-</details>
-
-
-You can use a VSCode extension like [vscode-pretty-json][vscode-pretty-json] to make this task easier.
-
-
-### Compose a _ReferencedObject_
-
-To pair with the resource reference declared in your XRD, you must compose the referenced resource. Use the _ReferencedObject_ resource type to bring the resource into your composition. _ReferencedObject_ has the following schema:
-
-```yaml
-apiVersion: references.upbound.io/v1alpha1
-kind: ReferencedObject
-spec:
-  managementPolicies:
-    - Observe
-  deletionPolicy: Orphan
-  composite:
-    apiVersion: <composite-api-version>
-    kind: <composite-kind>
-    name: <composite-name>
-    jsonPath: .spec.parameters.secretRef
-```
-
-The `spec.composite.apiVersion` and `spec.composite.kind` should match the API version and kind of the `compositeTypeRef` declared in your composition. The `spec.composite.name` should be the name of the composite resource instance.
-
-The `spec.composite.jsonPath` should be the path to the root of the resource ref you declared in your XRD.
-
-<details>
-Show example for composing a resource reference to a secret - -```yaml -apiVersion: apiextensions.crossplane.io/v1 -kind: Composition -metadata: - name: demo-composition -spec: - compositeTypeRef: - apiVersion: networking.acme.com/v1alpha1 - kind: XThing - mode: Pipeline - pipeline: - - step: patch-and-transform - functionRef: - name: crossplane-contrib-function-patch-and-transform - input: - apiVersion: pt.fn.crossplane.io/v1beta1 - kind: Resources - resources: - - name: secret-ref-object - base: - apiVersion: references.upbound.io/v1alpha1 - kind: ReferencedObject - spec: - managementPolicies: - - Observe - deletionPolicy: Orphan - composite: - apiVersion: networking.acme.com/v1alpha1 - kind: XThing - name: TO_BE_PATCHED - jsonPath: .spec.parameters.secretRef - patches: - - type: FromCompositeFieldPath - fromFieldPath: metadata.name - toFieldPath: spec.composite.name -``` -
-</details>
-
-By declaring a resource reference in your XRD, Upbound handles resolution of the desired resource.
-
-## Deploy APIs
-
-To configure routing resource requests between control planes, you need to deploy APIs in at least two control planes.
-
-### Deploy into a service-level control plane
-
-Package the APIs you build into a Configuration package and deploy it on a
-control plane in an Upbound Space. In Upbound, it's common to refer to the
-control plane where the Configuration package is deployed as a **service-level
-control plane**. This control plane runs the controllers that process the API
-requests and provision underlying resources. In a later section, you learn how
-you can use _Topology_ features to [configure routing][configure-routing].
-
-### Deploy as Remote APIs on a platform control plane
-
-You should use the same package source as deployed in the **service-level
-control planes**, but this time deploy the Configuration in a separate control
-plane as a _RemoteConfiguration_. The _RemoteConfiguration_ installs Kubernetes
-CustomResourceDefinitions for the APIs defined in the Configuration package, but
-no controllers get deployed.
-
-### Install a _RemoteConfiguration_
-
-_RemoteConfiguration_ is a resource type, available in Upbound-managed control
-planes, that acts like a Crossplane [Configuration][configuration]
-package. Unlike standard Crossplane Configurations, which install XRDs,
-compositions, and functions into a desired control plane, _RemoteConfigurations_
-install only the CRDs for claimable composite resource types.
-
-#### Install directly
-
-Install a _RemoteConfiguration_ by defining the following and applying it to
-your control plane:
-
-```yaml
-apiVersion: pkg.upbound.io/v1alpha1
-kind: RemoteConfiguration
-metadata:
-  name: <name>
-spec:
-  package: <package-source>
-```
-
-#### Declare as a project dependency
-
-You can declare _RemoteConfigurations_ as dependencies in your control plane's
-[project file][project-file]. Use the up CLI to add the dependency, providing
-the `--remote` flag:
-
-```bash
-up dep add <package> --remote
-```
-
-This command adds a declaration in the `spec.apiDependencies` stanza of your
-project's `upbound.yaml` as demonstrated below:
-
-```yaml
-apiVersion: meta.dev.upbound.io/v1alpha1
-kind: Project
-metadata:
-  name: service-controlplane
-spec:
-  apiDependencies:
-    - configuration: xpkg.upbound.io/upbound/remote-configuration
-      version: '>=v0.0.0'
-  dependsOn:
-    - provider: xpkg.upbound.io/upbound/provider-kubernetes
-      version: '>=v0.0.0'
-```
-
-Like a Configuration, a _RemoteConfigurationRevision_ gets created when the
-package gets installed on a control plane. Unlike Configurations, XRDs and
-compositions **don't** get installed by a _RemoteConfiguration_. Only the CRDs
-for claimable composite types get installed and Crossplane thereafter manages
-their lifecycle. You can tell when a CRD gets installed by a
-_RemoteConfiguration_ because it has the `internal.scheduling.upbound.io/remote:
-true` label:
-
-```yaml
-apiVersion: apiextensions.k8s.io/v1
-kind: CustomResourceDefinition
-metadata:
-  name: things.networking.acme.com
-  labels:
-    internal.scheduling.upbound.io/remote: "true"
-```
-
-## Use an _Environment_ to route resources
-
-_Environment_ is a resource type available in Upbound control planes that works
-in tandem with resources installed by _RemoteConfigurations_. _Environment_ is a
-namespace-scoped resource that lets you configure how to route remote resources
-to other control planes by a set of user-defined dimensions.
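-
-Before defining routes, you can confirm the remote package installed cleanly. A sketch, assuming the `remoteconfigurations` plural for the kind shown above:
-
-```bash
-# The RemoteConfiguration and its revision should report healthy
-kubectl get remoteconfigurations.pkg.upbound.io
-# CRDs installed by a RemoteConfiguration carry the remote label
-kubectl get crds -l internal.scheduling.upbound.io/remote=true
-```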
- -### Define a routing dimension - -To establish a routing dimensions between two control planes, you must do two -things: - -1. Annotate the service control plane with the name and value of a dimension. -2. Configure an environment on another control plane with a dimension matching the field and value of the service control plane. - -The example below demonstrates the creation of a service control plane with a -`region` dimension: - -```yaml -apiVersion: spaces.upbound.io/v1beta1 -kind: ControlPlane -metadata: - labels: - dimension.scheduling.upbound.io/region: "us-east-1" - name: prod-1 - namespace: default -spec: -``` - -Upbound's Spaces controller keeps an inventory of all declared dimensions and -listens for control planes to route to them. - -### Create an _Environment_ - -Next, create an _Environment_ on a separate control plane, referencing the -dimension from before. The example below demonstrates routing all remote -resource requests in the `default` namespace of the control plane based on a -single `region` dimension: - -```yaml -apiVersion: scheduling.upbound.io/v1alpha1 -kind: Environment -metadata: - name: default - namespace: default -spec: - dimensions: - region: us-east-1 -``` - -You can specify whichever dimensions as you want. The example below demonstrates -multiple dimensions: - -```yaml -apiVersion: scheduling.upbound.io/v1alpha1 -kind: Environment -metadata: - name: default - namespace: default -spec: - dimensions: - region: us-east-1 - env: prod - offering: databases -``` - -In order for the routing controller to match, _all_ dimensions must match for a -given service control plane. - -You can specify dimension overrides on a per-resource group basis. This lets you -configure default routing rules for a given _Environment_ and override routing -on a per-offering basis. - -```yaml -apiVersion: scheduling.upbound.io/v1alpha1 -kind: Environment -metadata: - name: default - namespace: default -spec: - dimensions: - region: us-east-1 - resourceGroups: - - name: database.platform.upbound.io # database - dimensions: - region: "us-east-1" - env: "prod" - offering: "databases" - - name: networking.platform.upbound.io # networks - dimensions: - region: "us-east-1" - env: "prod" - offering: "networks" -``` - -### Confirm the configured route - -After you create an _Environment_ on a control plane, the routes selected get -reported in the _Environment's_ `.status.resourceGroups`. This is illustrated -below: - -```yaml -apiVersion: scheduling.upbound.io/v1alpha1 -kind: Environment -metadata: - name: default -... -status: - resourceGroups: - - name: database.platform.upbound.io # database - proposed: - controlPlane: ctp-1 - group: default - space: upbound-gcp-us-central1 - dimensions: - region: "us-east-1" - env: "prod" - offering: "databases" -``` - -If you don't see a response in the `.status.resourceGroups`, this indicates a -match wasn't found or an error establishing routing occurred. - -:::tip -There's no limit to the number of control planes you can route to. You can also -stack routing and form your own topology of control planes, with multiple layers -of routing. -::: - -### Limitations - - -Routing from one control plane to another is currently scoped to control planes -that exist in a single Space. You can't route resource requests to control -planes that exist on a cross-Space boundary. 
-
-[project-file]: /manuals/cli/howtos/project
-[contact-us]: https://www.upbound.io/usage/support/contact
-[crossplane-management-policies]: https://docs.crossplane.io/latest/managed-resources/managed-resources/#managementpolicies
-[vscode-pretty-json]: https://marketplace.visualstudio.com/items?itemName=chrismeyers.vscode-pretty-json
-[configure-routing]: #use-an-environment-to-route-resources
-[configuration]: https://docs.crossplane.io/latest/packages/providers
diff --git a/spaces_versioned_docs/version-v1.15/howtos/ctp-connector.md b/spaces_versioned_docs/version-v1.15/howtos/ctp-connector.md
deleted file mode 100644
index b2cc48c49..000000000
--- a/spaces_versioned_docs/version-v1.15/howtos/ctp-connector.md
+++ /dev/null
@@ -1,508 +0,0 @@
----
-title: Control Plane Connector
-weight: 80
-description: A guide for how to connect a Kubernetes app cluster to a control plane in Upbound using the Control Plane connector feature
-plan: "standard"
----
-
-
-
-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+). Control Plane Connector functionality is consistent across versions.
-
-For related API specifications and resources, see the [Spaces API Reference](../../../reference/apis/spaces-api/).
-:::
-
-Control Plane Connector connects arbitrary Kubernetes application clusters outside of
-Upbound Spaces to your control planes running in Upbound Spaces.
-This lets you interact with your control plane's API from the app cluster. The claim APIs and the namespaced XR APIs
-you define via CompositeResourceDefinitions (XRDs) in the control plane are available in
-your app cluster alongside Kubernetes workload APIs like Pod. Control Plane Connector
-enables the same experience as a locally installed Crossplane.
-
-![control plane connector operations flow](/img/ConnectorFlow.png)
-
-### Control Plane Connector operations
-
-Control Plane Connector leverages the [Kubernetes API Aggregation Layer][kubernetes-api-aggregationlayer]
-to create an extension API server and serve the claim APIs and the namespaced XR APIs in the control plane. It
-discovers the claim APIs and the namespaced XR APIs available in the control plane and registers corresponding
-APIService resources on the app cluster. Those APIService resources refer to the
-extension API server of Control Plane Connector.
-
-The claim APIs and the namespaced XR APIs are available in your Kubernetes cluster, just like all native
-Kubernetes APIs.
-
-The Control Plane Connector processes every request targeting the claim APIs and the namespaced XR APIs and makes the
-relevant requests to the connected control plane.
-
-Only the connected control plane stores and processes all claims and namespaced XRs created in the app
-cluster, eliminating any storage use at the application cluster. The control plane
-connector provisions a target namespace at the control plane for the app cluster and stores
-all claims and namespaced XRs in this target namespace.
-
-For managing the claims and namespaced XRs, the Control Plane Connector creates a unique identifier for a
-resource by combining input parameters from claims, including:
-- `metadata.name`
-- `metadata.namespace`
-- your cluster name
-
-
-It employs SHA-256 hashing to generate a hash value and then extracts the first
-16 characters of that hash. This ensures the resulting identifier remains within
-the 64-character limit in Kubernetes.
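-
-A rough sketch of that derivation, assuming the `-x-` joining format shown in the example that follows:
-
-```bash
-# Join name, namespace, and cluster ID, hash the result,
-# and keep the first 16 hex characters of the digest.
-INPUT="my-bucket-x-test-x-00000000-0000-0000-0000-000000000000"
-echo "claim-$(printf '%s' "$INPUT" | sha256sum | cut -c1-16)"
-```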
- - - -For instance, if a claim named `my-bucket` exists in the test namespace in -`cluster-dev`, the system calculates the SHA-256 hash from -`my-bucket-x-test-x-00000000-0000-0000-0000-000000000000` and takes the first 16 -characters. The control plane side then names the claim `claim-c603e518969b413e`. - -For namespaced XRs, the process is similar, only the prefix is different. -The name becomes `nxr-c603e518969b413e`. - - -### Installation - - - - - -Log in with the up CLI: - -```bash -up login -``` - -Connect your app cluster to a namespace in an Upbound control plane with `up controlplane connector install `. This command creates a user token and installs the Control Plane Connector to your cluster. It's recommended you create a values file called `connector-values.yaml` and provide the following below. Select the tab according to which environment your control plane is running in. - - - - - - -```yaml -upbound: - # This is your org account in Upbound e.g. the name displayed after executing `up org list` - account: - # This is a personal access token generated in the Upbound Console - token: - -spaces: - # If your control plane is running in Upbound's GCP Cloud Space, else use upbound-aws-us-east-1.spaces.upbound.io - host: "upbound-gcp-us-west-1.spaces.upbound.io" - insecureSkipTLSVerify: true - controlPlane: - # The name of the control plane you want the Connector to attach to - name: - # The control plane group the control plane resides in - group: - # The namespace within the control plane to sync claims from the app cluster to. NOTE: This must be created before you install the connector. - claimNamespace: -``` - - - - - -1. Create a [kubeconfig][kubeconfig] for the control plane. Update your Upbound context to the path for your desired control plane. -```ini -up login -up ctx /upbound-gcp-us-central-1/default/your-control-plane -up ctx . -f - > context.yaml -``` - -2. Write it to a secret in the cluster where you plan to -install the Control Plane Connector to. -```ini -kubectl create secret generic my-controlplane-kubeconfig --from-file=context.yaml -``` - -3. Reference this secret in the -`spaces.controlPlane.kubeconfigSecret` field below. - -```yaml -spaces: - controlPlane: - # The namespace within the control plane to sync claims from the app cluster to. NOTE: This must be created before you install the connector. - claimNamespace: - kubeconfigSecret: - name: my-controlplane-kubeconfig - key: kubeconfig -``` - - - - - - -Provide the values file above when you run the CLI command: - - -```bash {copy-lines="3"} -up controlplane connector install my-control-plane my-app-ns-1 --file=connector-values.yaml -``` - -The Claim APIs and the namespaced XR APIs from your control plane are now visible in the cluster. -You can verify this with `kubectl api-resources`. - -```bash -kubectl api-resources -``` - -### Uninstall - -Disconnect an app cluster that you prior installed the Control Plane Connector on by -running the following: - -```bash -up ctp connector uninstall -``` - -This command uninstalls the helm chart for the Control Plane Connector from an app -cluster. It moves any claims in the app cluster into the control plane -at the specified namespace. - -:::tip -Make sure your kubeconfig's current context is pointed at the app cluster where -you want to uninstall Control Plane Connector from. -::: - - - - -It's recommended you create a values file called `connector-values.yaml` and -provide the following below. 
Select the tab according to which environment your -control plane is running in. - - - - - - -```yaml -upbound: - # This is your org account in Upbound e.g. the name displayed after executing `up org list` - account: - # This is a personal access token generated in the Upbound Console - token: - -spaces: - # Upbound GCP US-West-1 upbound-gcp-us-west-1.spaces.upbound.io - # Upbound AWS US-East-1 upbound-aws-us-east-1.spaces.upbound.io - # Upbound GCP US-Central-1 upbound-gcp-us-central-1.spaces.upbound.io - host: "" - insecureSkipTLSVerify: true - controlPlane: - # The name of the control plane you want the Connector to attach to - name: - # The control plane group the control plane resides in - group: - # The namespace within the control plane to sync claims from the app cluster to. - # NOTE: This must be created before you install the connector. - claimNamespace: -``` - - - - -Create a [kubeconfig][kubeconfig-1] for the -control plane. Write it to a secret in the cluster where you plan to -install the Control Plane Connector to. Reference this secret in the -`spaces.controlPlane.kubeconfigSecret` field below. - -```yaml -spaces: - controlPlane: - # The namespace within the control plane to sync claims from the app cluster to. NOTE: This must be created before you install the connector. - claimNamespace: - kubeconfigSecret: - name: my-controlplane-kubeconfig - key: kubeconfig -``` - - - - - - -Provide the values file above when you `helm install` the Control Plane Connector: - - -```bash -helm install --wait mcp-connector oci://xpkg.upbound.io/spaces-artifacts/mcp-connector -n kube-system -f connector-values.yaml -``` -:::tip -Create an API token from the Upbound user account settings page in the console by following [these instructions][these-instructions]. -::: - -### Uninstall - -You can uninstall Control Plane Connector with Helm by running the following: - -```bash -helm uninstall mcp-connector -``` - - - - - -### Example usage - -This example creates a control plane using [Configuration -EKS][configuration-eks]. `KubernetesCluster` is -available as a claim API in your control plane. The following is [an -example][an-example] -object you can create in your control plane. - -```yaml -apiVersion: k8s.starter.org/v1alpha1 -kind: KubernetesCluster -metadata: - name: my-cluster - namespace: default -spec: - id: my-cluster - parameters: - nodes: - count: 3 - size: small - services: - operators: - prometheus: - version: "34.5.1" - writeConnectionSecretToRef: - name: my-cluster-kubeconfig -``` - -After connecting your Kubernetes app cluster to the control plane, you -can create the `KubernetesCluster` object in your app cluster. Although your -local cluster has an Object, the actual resources is in your managed control -plane inside Upbound. - -```bash {copy-lines="3"} -# Applying the claim YAML above. -# kubectl is set up to talk with your Kubernetes cluster. -kubectl apply -f claim.yaml - - -kubectl get claim -A -NAME SYNCED READY CONNECTION-SECRET AGE -my-cluster True True my-cluster-kubeconfig 2m -``` - -Once Kubernetes creates the object, view the console to see your object. - -![Claim by connector in console](/img/ClaimInConsole.png) - -You can interact with the object through your cluster just as if it -lives in your cluster. - -### Migration to control planes - -This guide details the migration of a Crossplane installation to Upbound-managed -control planes using the Control Plane Connector to manage claims on an application -cluster. 
-
-![migration flow application cluster to control plane](/img/ConnectorMigration.png)
-
-#### Export all resources
-
-Before proceeding, ensure that you have set the correct kubecontext for your application
-cluster.
-
-```bash
-up controlplane migration export --pause-before-export --output=my-export.tar.gz --yes
-```
-
-This command performs the following:
-- Pauses all claim, composite, and managed resources before export.
-- Scans the control plane for resource types.
-- Exports Crossplane and native resources.
-- Archives the exported state into `my-export.tar.gz`.
-
-Example output:
-```bash
-Exporting control plane state...
-  ✓ Pausing all claim resources before export... 1 resources paused! ⏸️
-  ✓ Pausing all composite resources before export... 7 resources paused! ⏸️
-  ✓ Pausing all managed resources before export... 34 resources paused! ⏸️
-  ✓ Scanning control plane for types to export... 231 types found! 👀
-  ✓ Exporting 231 Crossplane resources...125 resources exported! 📤
-  ✓ Exporting 3 native resources...19 resources exported! 📤
-  ✓ Archiving exported state... archived to "my-export.tar.gz"! 📦
-
-Successfully exported control plane state!
-```
-
-#### Import all resources
-
-Next, import the exported resources into the target control plane, which serves
-as the destination for the Control Plane Connector.
-
-
-Log in to Upbound and select the correct context:
-
-```bash
-up login
-up ctx
-up ctp create ctp-a
-```
-
-Output:
-```bash
-ctp-a created
-```
-
-Verify that the core Crossplane version on the application cluster matches the
-version on the new managed control plane.
-
-Use the following command to import the resources:
-```bash
-up controlplane migration import -i my-export.tar.gz \
-  --unpause-after-import \
-  --mcp-connector-cluster-id=my-appcluster \
-  --mcp-connector-claim-namespace=my-appcluster
-```
-
-This command:
-- Note: `--mcp-connector-cluster-id` needs to be unique per application cluster
-- Note: `--mcp-connector-claim-namespace` is the namespace the system creates
-  during the import
-- Restores base resources
-- Waits for XRDs and packages to establish
-- Imports claim and XR resources
-- Finalizes the import and resumes managed resources
-
-Example output:
-```bash
-Importing control plane state...
-  ✓ Reading state from the archive... Done! 👀
-  ✓ Importing base resources... 56 resources imported!📥
-  ✓ Waiting for XRDs... Established! ⏳
-  ✓ Waiting for Packages... Installed and Healthy! ⏳
-  ✓ Importing remaining resources... 88 resources imported! 📥
-  ✓ Finalizing import... Done! 🎉
-  ✓ Unpausing managed resources ... Done! ▶️
-
-Successfully imported control plane state!
-```
-
-#### Verify imported claims
-
-
-The Control Plane Connector renames all claims and adds additional labels to them.
-
-```bash
-kubectl get claim -A
-```
-
-Example output:
-```bash
-NAMESPACE       NAME                                                         SYNCED   READY   CONNECTION-SECRET             AGE
-my-appcluster   cluster.aws.platformref.upbound.io/claim-e708ff592b974f51   True     True    platform-ref-aws-kubeconfig   3m17s
-```
-
-Inspect the labels:
-```bash
-kubectl get -n my-appcluster cluster.aws.platformref.upbound.io/claim-e708ff592b974f51 -o yaml | yq .metadata.labels
-```
-
-Example output:
-```bash
-mcp-connector.upbound.io/app-cluster: my-appcluster
-mcp-connector.upbound.io/app-namespace: default
-mcp-connector.upbound.io/app-resource-name: example
-```
-
-#### Cleanup the app cluster
-
-Remove all Crossplane-related resources from the application cluster, including:
-
-- Managed Resources
-- Claims
-- Compositions
-- XRDs
-- Packages (Functions, Configurations, Providers)
-- Crossplane and all associated CRDs
-
-
-#### Install Control Plane Connector
-
-
-Follow the preceding installation guide and configure the `connector-values.yaml`:
-
-```yaml
-# NOTE: clusterID needs to match --mcp-connector-cluster-id used in the import on the managed control plane
-clusterID: my-appcluster
-upbound:
-  account: <account>
-  token: <token>
-
-spaces:
-  host: "<spaces-host>"
-  insecureSkipTLSVerify: true
-  controlPlane:
-    name: <control-plane-name>
-    group: <group>
-    # NOTE: This is the --mcp-connector-claim-namespace used during the import to the control plane
-    claimNamespace: <claim-namespace>
-```
-Once the Control Plane Connector installs, verify that resources exist in the application
-cluster:
-
-```bash
-kubectl api-resources | grep platform
-```
-
-Example output:
-```bash
-awslbcontrollers   aws.platform.upbound.io/v1alpha1       true   AWSLBController
-podidentities      aws.platform.upbound.io/v1alpha1       true   PodIdentity
-sqlinstances       aws.platform.upbound.io/v1alpha1       true   SQLInstance
-clusters           aws.platformref.upbound.io/v1alpha1    true   Cluster
-osss               observe.platform.upbound.io/v1alpha1   true   Oss
-apps               platform.upbound.io/v1alpha1           true   App
-```
-
-Confirm that the claims now surface from the control plane into the application cluster:
-
-```bash
-kubectl get claim -A
-```
-
-Example output:
-```bash
-NAMESPACE   NAME                                         SYNCED   READY   CONNECTION-SECRET             AGE
-default     cluster.aws.platformref.upbound.io/example   True     True    platform-ref-aws-kubeconfig   127m
-```
-
-With this guide, you migrated your Crossplane installation to
-Upbound-managed control planes. This ensures seamless integration with your
-application cluster using the Control Plane Connector.
-
-### Connect multiple app clusters to a control plane
-
-Claims are stored in a unique namespace in the Upbound control plane.
-Every connected cluster creates a new namespace in the control plane.
-
-![Multi-cluster architecture with control plane connector](/img/ConnectorMulticlusterArch.png)
-
-There's no limit on the number of clusters connected to a single control plane.
-Control plane operators can see all their infrastructure in a central control
-plane.
-
-Without using control planes and Control Plane Connector, users have to install
-Crossplane and providers for each cluster. Each cluster requires provider
-configuration with the necessary credentials. With a single control plane that
-multiple clusters connect to through Upbound tokens, you don't need to give out
-any cloud credentials to the clusters.
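-
-On the control plane side, each connected cluster shows up as its own claim namespace. A quick sketch (names are illustrative):
-
-```bash
-# Each connected app cluster has its own namespace on the control plane
-kubectl get namespaces
-kubectl get claim -n my-appcluster
-```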
-
-
-[kubeconfig]: /manuals/cli/howtos/context-config/#generate-a-kubeconfig-for-a-control-plane-in-a-group
-[kubeconfig-1]:/spaces/concepts/control-planes/#connect-directly-to-your-control-plane
-[these-instructions]:/manuals/console/#create-a-personal-access-token
-[kubernetes-api-aggregationlayer]: https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation/
-[configuration-eks]: https://github.com/upbound/configuration-eks
-[an-example]: https://github.com/upbound/configuration-eks/blob/9f86b6d/.up/examples/cluster.yaml
diff --git a/spaces_versioned_docs/version-v1.15/howtos/debugging-a-ctp.md b/spaces_versioned_docs/version-v1.15/howtos/debugging-a-ctp.md
deleted file mode 100644
index 521271e40..000000000
--- a/spaces_versioned_docs/version-v1.15/howtos/debugging-a-ctp.md
+++ /dev/null
@@ -1,128 +0,0 @@
----
-title: Debugging issues on a control plane
-sidebar_position: 70
-description: A guide for how to debug resources on a control plane running in Upbound.
----
-
-This guide explains how to identify and fix issues on a control plane.
-
-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+). Debugging methods are consistent across versions.
-
-For related control plane specifications, see the [Spaces API Reference](../../../reference/apis/spaces-api/). For version-specific features, see the API reference for your version.
-:::
-
-## Start from Upbound Console
-
-The Upbound [Console][console] has a built-in control plane explorer experience
-that surfaces status and events for the resources on your control plane. The
-explorer is claim-based. Resources in this view exist only if they exist in the
-reference chain originating from a claim. This view is a helpful starting point
-if you are attempting to debug an issue originating from a claim.
-
-:::tip
-If you directly create Crossplane Managed Resources (`MR`s) or Composite
-Resources (`XR`s), they won't render in the explorer.
-:::
-
-### Example
-
-The example below uses the control plane explorer view to inspect why a claim for an EKS Cluster isn't healthy.
-
-#### Check the health status of claims
-
-From the API type card, two claims branch from it: one shows a healthy green icon, while the other shows an unhealthy red icon.
-
-![Use control plane explorer view to see status of claims](/img/debug-overview.png)
-
-Select `More details` on the unhealthy claim card and Upbound shows details for the claim.
-
-![Use control plane explorer view to see details of claims](/img/debug-claim-more-details.png)
-
-Looking at the three events for this claim:
-
-- **ConfigureCompositeResource**: this event indicates Upbound created the claimed Composite Resource (`XR`).
-
-- **BindCompositeResource**: this indicates the Composite Resource (`XR`) that's being "claimed" isn't ready yet. A claim doesn't show `HEALTHY` until the XR it references is ready.
-
-- **ConfigureCompositeResource**: the error saying `cannot apply composite resource...the object has been modified; please apply your changes to the latest version and try again` is a generic event from Crossplane resources. It's safe to ignore this error.
-
-Next, look at the `status` field of the rendered YAML for the resource.
-
-![Use control plane explorer view to see status details of claims](/img/debug-claim-status.png)
-
-The status reports a similar message as the event stream: this claim is waiting for a Composite Resource to be ready.
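-
-If you prefer the CLI for this check, you can read the same conditions directly. A sketch, assuming your kubeconfig points at the control plane and using the claim kind from this example:
-
-```bash
-kubectl get cluster.aws.platformref.upbound.io <claim-name> -n <namespace> \
-  -o jsonpath='{range .status.conditions[*]}{.type}={.status} ({.reason}){"\n"}{end}'
-```
-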
-Based on this, investigate the Composite Resource referenced by this claim next.
-
-#### Check the health status of the Composite Resource
-
-The control plane explorer only shows the claim cards by default. Selecting the claim card renders the rest of the Crossplane resource tree associated with the selected claim.
-
-The previous claim expands into this screenshot:
-
-![Use control plane explorer view to expand tree of claim](/img/debug-claim-expansion.png)
-
-This renders the XR referenced by the claim (along with all its references). You can see the XR is showing the same unhealthy status icon in its card. Notice the XR itself has two nested XRs. One of the nested XRs shows a healthy green icon on its card, while the other shows an unhealthy red icon. Like the claim, a Composite Resource doesn't show healthy until all referenced resources also show healthy.
-
-#### Inspecting Managed Resources
-
-Selecting `More details` on one of the unhealthy Managed Resources shows the following:
-
-![Use control plane explorer view to view events for an MR](/img/debug-mr-event.png)
-
-This event reveals it's unhealthy because it's waiting on a reference to another Managed Resource. Searching the rendered YAML of the MR for this resource shows the following:
-
-![Use control plane explorer view to view status for an MR](/img/debug-mr-status.png)
-
-The rendered YAML shows this MR is referencing a sibling MR that shares the same controller. The same parent XR created both of these managed resources. Inspect the sibling MR to see what its status is.
-
-![Use control plane explorer view to view status for a sibling MR](/img/debug-mr-dependency-status.png)
-
-The sibling MR event stream shows the Provider processed the resource create request. Ignore the `CannotInitializeManagedResource` event. EKS clusters can take 15 minutes or more to provision in AWS. There's no root cause to fix here: all the resources are still provisioning. Waiting longer and then looking at the control plane explorer again shows all resources are healthy. For reference, below is an example status field for a resource that's healthy and provisioned.
-
-```yaml
-...
-status:
-  atProvider:
-    id: team-b-app-cluster-bhwfb-hwtgs-20230403135452772300000008
-  conditions:
-    - lastTransitionTime: '2023-04-03T13:56:35Z'
-      reason: Available
-      status: 'True'
-      type: Ready
-    - lastTransitionTime: '2023-04-03T13:54:02Z'
-      reason: ReconcileSuccess
-      status: 'True'
-      type: Synced
-    - lastTransitionTime: '2023-04-03T13:54:53Z'
-      reason: Success
-      status: 'True'
-      type: LastAsyncOperation
-    - lastTransitionTime: '2023-04-03T13:54:53Z'
-      reason: Finished
-      status: 'True'
-      type: AsyncOperation
-```
-
-### Control plane explorer limitations
-
-The control plane explorer view is currently designed around claims (`XRC`s). It doesn't inspect other Crossplane resources. To inspect those, use the `up` CLI.
-
-Some examples of Crossplane resources that require the `up` CLI:
-
-- Managed Resources that aren't associated with a claim
-- Composite Resources that aren't associated with a claim
-- The status of _deleting_ resources
-- ProviderConfigs
-- Provider events
-
-## Use direct CLI access
-
-If you prefer a terminal to a GUI, Upbound supports direct access to the API server of the control plane. Use [`up ctx`][up-ctx] to connect directly to your control plane.
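-
-For example, a minimal session (the context path is illustrative; substitute your own organization, Space, and group):
-
-```bash
-# Point kubectl at the control plane, then inspect resources the
-# explorer doesn't render, such as ProviderConfigs and orphaned MRs.
-up ctx "<org>/<space>/<group>/<control-plane>"
-kubectl get providerconfigs
-kubectl get managed
-kubectl get events --field-selector type=Warning
-```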
-
-
-[console]: /manuals/console/upbound-console
-[up-ctx]: /reference/cli-reference
diff --git a/spaces_versioned_docs/version-v1.15/howtos/managed-service.md b/spaces_versioned_docs/version-v1.15/howtos/managed-service.md
deleted file mode 100644
index 40b983a76..000000000
--- a/spaces_versioned_docs/version-v1.15/howtos/managed-service.md
+++ /dev/null
@@ -1,23 +0,0 @@
----
-title: Managed Upbound control planes
-description: "Learn about the managed service capabilities of a Space"
-sidebar_position: 10
----
-
-Control planes in Upbound are fully isolated [Upbound Crossplane][uxp] instances
-that Upbound manages for you. This means Upbound handles:
-
-- the lifecycle of the underlying infrastructure (compute, memory, and storage) required to power your instance.
-- scaling of that infrastructure.
-- maintenance of the core Upbound Crossplane components that make up a control plane.
-
-This lets users focus on building their APIs and operating their control planes,
-while Upbound handles the rest. Each control plane has its own dedicated API
-server connecting users to their control plane.
-
-## Learn about Upbound control planes
-
-Read the [concept][ctp-concept] documentation to learn about Upbound control planes.
-
-[uxp]: /manuals/uxp/overview
-[ctp-concept]: /spaces/concepts/control-planes
\ No newline at end of file
diff --git a/spaces_versioned_docs/version-v1.15/howtos/mcp-connector-guide.md b/spaces_versioned_docs/version-v1.15/howtos/mcp-connector-guide.md
deleted file mode 100644
index 8a3866d07..000000000
--- a/spaces_versioned_docs/version-v1.15/howtos/mcp-connector-guide.md
+++ /dev/null
@@ -1,169 +0,0 @@
----
-title: Consume control plane APIs in an app cluster with control plane connector
-sidebar_position: 99
-description: A tutorial to configure a Kubernetes app cluster to consume control
-  plane APIs with the control plane connector
----
-
-In this tutorial, you learn how to configure a Kubernetes app cluster to communicate with a control plane in an Upbound self-hosted Space.
-
-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Control plane connector functionality is consistent across versions.
-
-For related API specifications and resources, see the [Spaces API Reference](../../../../../reference/apis/spaces-api/). For version compatibility details, see the API reference for your version.
-:::
-
-The [control plane connector][control-plane-connector] bridges your Kubernetes application clusters, running outside of Upbound, to your control planes running in Upbound. This allows you to interact with your control plane's API right from the app cluster. The claim APIs you define via `CompositeResourceDefinitions` are available alongside Kubernetes workload APIs like `Pod`. In effect, control plane connector provides the same experience as a locally installed Crossplane.
-
-## Prerequisites
-
-To complete this tutorial, you need the following:
-
-- Have already deployed an Upbound Space.
-- Have already deployed a Kubernetes cluster (referred to as `app cluster`).
-
-## Create a control plane
-
-Create a new control plane in your self-hosted Space. Run the following command in a terminal:
-
-```bash
-up ctp create my-control-plane
-```
-
-Once the control plane is ready, connect to it.
-
-```bash
-up ctp connect my-control-plane
-```
-
-For convenience, install an Upbound [platform reference Configuration][platform-reference-configuration] from the marketplace. For production scenarios, replace this with your own Crossplane Configurations or compositions.
-
-```bash
-up ctp configuration install xpkg.upbound.io/upbound/platform-ref-aws:v1.4.0
-```
-
-## Fetch the control plane's connection details
-
-Run the following command in a terminal:
-
-```shell
-kubectl get secret kubeconfig-my-control-plane -n default -o jsonpath='{.data.kubeconfig}' | base64 -d > kubeconfig-my-control-plane.yaml
-```
-
-This command saves the kubeconfig for the control plane to a file in your working directory.
-
-## Install control plane connector in your app cluster
-
-Switch contexts to your Kubernetes app cluster. To install the control plane connector in your app cluster, you must first provide a secret containing your control plane's kubeconfig at install time. Run the following command in a terminal:
-
-:::important
-Make sure the following commands are executed against your **app cluster**, not your control plane.
-:::
-
-```bash
-kubectl create secret generic kubeconfig-my-control-plane -n kube-system --from-file=kubeconfig=./kubeconfig-my-control-plane.yaml
-```
-
-Set the environment variable below to configure which namespace _in your control plane_ you wish to sync the app cluster's claims to.
-
-```shell
-export CONNECTOR_CTP_NAMESPACE=app-cluster-1
-```
-
-Install the Control Plane Connector in the app cluster and point it to your control plane.
-
-```bash
-up ctp connector install my-control-plane $CONNECTOR_CTP_NAMESPACE --control-plane-secret=kubeconfig-my-control-plane
-```
-
-## Inspect your app cluster
-
-After you install Control Plane Connector in the app cluster, you can see the APIs that live on the control plane. You can confirm this by running the following command on your app cluster:
-
-```bash {copy-lines="1"}
-kubectl api-resources | grep upbound
-
-# The output should look like this:
-sqlinstances   aws.platform.upbound.io/v1alpha1       true   SQLInstance
-clusters       aws.platformref.upbound.io/v1alpha1    true   Cluster
-osss           observe.platform.upbound.io/v1alpha1   true   Oss
-apps           platform.upbound.io/v1alpha1           true   App
-```
-
-## Claim a database instance on your app cluster
-
-Create a database claim against the `SQLInstance` API and observe resources get created by your control plane. Apply the following resource to your app cluster (the manifest below is an illustrative sketch; adjust the parameters to your API's schema):
-
-```yaml
-cat <<EOF | kubectl apply -f -
-apiVersion: aws.platform.upbound.io/v1alpha1
-kind: SQLInstance
-metadata:
-  name: example-database
-  namespace: default
-spec:
-  parameters:
-    engine: postgres
-    engineVersion: "13"
-    region: us-west-2
-    storageGB: 20
-  writeConnectionSecretToRef:
-    name: example-database-conn
-EOF
-```
-
-1. Use the export command to export the state of your existing Crossplane
-control plane:
-
-   ```bash
-   up controlplane migration export --output <output-file>
-   ```
-
-   The command exports your existing Crossplane control plane configuration/state into an archive file.
-
-:::note
-By default, the export command doesn't make any changes to your existing Crossplane control plane state, leaving it intact. Use the `--pause-before-export` flag to pause the reconciliation on managed resources before exporting the archive file.
-
-This safety mechanism ensures the control plane you migrate state to doesn't assume ownership of resources before you're ready.
-:::
-
-2. Use the control plane [create command][create-command] to create a managed
-control plane in Upbound:
-
-   ```bash
-   up controlplane create my-controlplane
-   ```
-
-3. Use [`up ctx`][up-ctx] to connect to the control plane created in the previous step:
-
-   ```bash
-   up ctx "<organization>/<space>/<group>/my-controlplane"
-   ```
-
-   The command configures your local `kubeconfig` to connect to the control plane.
-
-4. Run the following command to import the archive file into the control plane:
-
-   ```bash
-   up controlplane migration import --input <archive-file>
-   ```
-
-:::note
-By default, the import command leaves the control plane in an inactive state by pausing the reconciliation on managed
-resources. This pause gives you an opportunity to review the imported configuration/state before activating the control plane.
-Use the `--unpause-after-import` flag to change the default behavior and activate the control plane immediately after
-importing the archive file.
-:::
-
-5. Review and validate the imported configuration/state. When you are ready, activate your managed
-   control plane by running the following command:
-
-   ```bash
-   kubectl annotate managed --all crossplane.io/paused-
-   ```
-
-   At this point, you can delete the source Crossplane control plane.
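-
-After removing the annotation (or when using `--unpause-after-import`), you can confirm nothing is still paused. A sketch, assuming your kubeconfig still points at the new control plane:
-
-```bash
-# Shows each managed resource and whether it still carries the pause annotation.
-kubectl get managed -o custom-columns='NAME:.metadata.name,PAUSED:.metadata.annotations.crossplane\.io/paused'
-```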
-
-## CLI options
-
-### Filtering
-
-The migration tool captures the state of a Control Plane. The only filtering
-supported is Kubernetes namespace and Kubernetes resource type filtering.
-
-You can exclude namespaces using the `--exclude-namespaces` CLI option. This can prevent the CLI from including unwanted resources in the export.
-
-```bash
---exclude-namespaces=kube-system,kube-public,kube-node-lease,local-path-storage,...
-
-# A list of specific namespaces to exclude from the export. Defaults to 'kube-system', 'kube-public', 'kube-node-lease', and 'local-path-storage'.
-```
-
-You can exclude Kubernetes resource types by using the `--exclude-resources` CLI option:
-
-```bash
---exclude-resources=EXCLUDE-RESOURCES,...
-
-# A list of resource types to exclude from the export in "resource.group" format. No resources are excluded by default.
-```
-
-For example, to exclude the CRDs installed by Crossplane functions (since they're not needed):
-
-```bash
-up controlplane migration export \
-  --exclude-resources=gotemplates.gotemplating.fn.crossplane.io,kclinputs.template.fn.crossplane.io
-```
-
-:::warning
-You must specify resource names in lowercase "resource.group" format (for example, `gotemplates.gotemplating.fn.crossplane.io`). Using only the resource kind (for example, `GoTemplate`) isn't supported.
-:::
-
-:::tip Function Input CRDs
-
-Exclude function input CRDs (`inputs.template.fn.crossplane.io`, `resources.pt.fn.crossplane.io`, `gotemplates.gotemplating.fn.crossplane.io`, `kclinputs.template.fn.crossplane.io`) from migration exports. Upbound automatically recreates these resources during import. Function input CRDs typically have owner references to function packages and may have restricted RBAC access. Upbound installs these CRDs during the import when function packages are restored.
-
-:::
-
-After export, you can also edit the archive file to include only the necessary resources.
-
-### Export non-Crossplane resources
-
-Use the `--include-extra-resources=<resource.group>` CLI option to select other CRD types to include in the export.
-
-### Set the kubecontext
-
-Currently `--context` isn't supported in the migration CLI. You should be able to use the `--kubeconfig` CLI option to use a file that's set to the correct context. For example:
-
-```bash
-up controlplane migration export --kubeconfig <kubeconfig-file>
-```
-
-Use this in tandem with `up ctx` to export a control plane's kubeconfig:
-
-```bash
-up ctx --kubeconfig ~/.kube/config
-
-# To list the current context
-up ctx . --kubeconfig ~/.kube/config
-```
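-
-Putting these options together, a sketch of an export that pins the kubeconfig, trims system namespaces, and excludes function input CRDs (file names are illustrative):
-
-```bash
-up controlplane migration export \
-  --kubeconfig ./source-cluster-kubeconfig.yaml \
-  --exclude-namespaces=kube-system,kube-public,kube-node-lease,local-path-storage \
-  --exclude-resources=gotemplates.gotemplating.fn.crossplane.io,kclinputs.template.fn.crossplane.io \
-  --output=xp-state.tar.gz
-```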
-
-## Export archive
-
-The migration CLI exports an archive upon successful completion. Below is an example export of a control plane that excludes several CRD types and skips the confirmation prompt. A file gets written to the working directory, unless you select another output file:
-
-<details>
-
-<summary>View the example export</summary>
-
-```bash
-$ up controlplane migration export --exclude-resources=gotemplates.gotemplating.fn.crossplane.io,kclinputs.template.fn.crossplane.io --yes
-Exporting control plane state...
-✓ Scanning control plane for types to export... 121 types found! 👀
-✓ Exporting 121 Crossplane resources...60 resources exported! 📤
-✓ Exporting 3 native resources...8 resources exported! 📤
-✓ Archiving exported state... archived to "xp-state.tar.gz"! 📦
-```
-
-</details>
-
-When an export occurs, a file named `xp-state.tar.gz` by default gets created in the working directory. You can unzip the file; the contents of the export are all plain-text YAML files.
-
-- Each CRD (for example `vpcs.ec2.aws.upbound.io`) gets its own directory
-which contains:
-  - A `metadata.yaml` file that contains Kubernetes Object Metadata
-  - A list of Kubernetes Categories the resource belongs to
-- A `cluster` directory that contains YAML manifests for all resources provisioned
-using the CRD.
-
-Sample contents for a Cluster with a single `XNetwork` Composite from
-[configuration-aws-network][configuration-aws-network] are shown below:
-
-<details>
-
-<summary>View the example cluster content</summary>
-
-```bash
-├── compositionrevisions.apiextensions.crossplane.io
-│   ├── cluster
-│   │   ├── kcl.xnetworks.aws.platform.upbound.io-4ca6a8a.yaml
-│   │   └── xnetworks.aws.platform.upbound.io-9859a34.yaml
-│   └── metadata.yaml
-├── configurations.pkg.crossplane.io
-│   ├── cluster
-│   │   └── configuration-aws-network.yaml
-│   └── metadata.yaml
-├── deploymentruntimeconfigs.pkg.crossplane.io
-│   ├── cluster
-│   │   └── default.yaml
-│   └── metadata.yaml
-├── export.yaml
-├── functions.pkg.crossplane.io
-│   ├── cluster
-│   │   ├── crossplane-contrib-function-auto-ready.yaml
-│   │   ├── crossplane-contrib-function-go-templating.yaml
-│   │   └── crossplane-contrib-function-kcl.yaml
-│   └── metadata.yaml
-├── internetgateways.ec2.aws.upbound.io
-│   ├── cluster
-│   │   └── borrelli-backup-test-xgl4q.yaml
-│   └── metadata.yaml
-├── mainroutetableassociations.ec2.aws.upbound.io
-│   ├── cluster
-│   │   └── borrelli-backup-test-t2qh7.yaml
-│   └── metadata.yaml
-├── namespaces
-│   └── cluster
-│       ├── crossplane-system.yaml
-│       ├── default.yaml
-│       └── upbound-system.yaml
-├── providerconfigs.aws.upbound.io
-│   ├── cluster
-│   │   └── default.yaml
-│   └── metadata.yaml
-├── providerconfigusages.aws.upbound.io
-│   ├── cluster
-│   │   ├── 0a2a3ec6-ef13-45f9-9cf0-63af7f4a6b6b.yaml
-...redacted
-│   │   └── f7092b0f-3a78-4bfe-82c8-57e5085a9b11.yaml
-│   └── metadata.yaml
-├── providers.pkg.crossplane.io
-│   ├── cluster
-│   │   ├── upbound-provider-aws-ec2.yaml
-│   │   └── upbound-provider-family-aws.yaml
-│   └── metadata.yaml
-├── routes.ec2.aws.upbound.io
-│   ├── cluster
-│   │   └── borrelli-backup-test-dt9cj.yaml
-│   └── metadata.yaml
-├── routetableassociations.ec2.aws.upbound.io
-│   ├── cluster
-│   │   ├── borrelli-backup-test-mr2sd.yaml
-│   │   ├── borrelli-backup-test-ngq5h.yaml
-│   │   ├── borrelli-backup-test-nrkgg.yaml
-│   │   └── borrelli-backup-test-wq752.yaml
-│   └── metadata.yaml
-├── routetables.ec2.aws.upbound.io
-│   ├── cluster
-│   │   └── borrelli-backup-test-dv4mb.yaml
-│   └── metadata.yaml
-├── secrets
-│   └── namespaces
-│       ├── crossplane-system
-│       │   ├── cert-token-signing-gateway-pub.yaml
-│       │   ├── mxp-hostcluster-certs.yaml
-│       │   ├── package-pull-secret.yaml
-│       │   └── xgql-tls.yaml
-│       └── upbound-system
-│           └── aws-creds.yaml
-├── securitygrouprules.ec2.aws.upbound.io
-│   ├── cluster
-│   │   ├── borrelli-backup-test-472f4.yaml
-│   │   └── borrelli-backup-test-qftmw.yaml
-│   └── metadata.yaml
-├── securitygroups.ec2.aws.upbound.io
-│   ├── cluster
-│   │   └── borrelli-backup-test-w5jch.yaml
-│   └── metadata.yaml
-├── storeconfigs.secrets.crossplane.io
-│   ├── cluster
-│   │   └── default.yaml
-│   └── metadata.yaml
-├── subnets.ec2.aws.upbound.io
-│   ├── cluster
-│   │   ├── borrelli-backup-test-8btj6.yaml
-│   │   ├── borrelli-backup-test-gbmrm.yaml
-│   │   ├── borrelli-backup-test-m7kh7.yaml
-│   │   └── borrelli-backup-test-nttt5.yaml
-│   └── metadata.yaml
-├── vpcs.ec2.aws.upbound.io
-│   ├── cluster
-│   │   └── borrelli-backup-test-7hwgh.yaml
-│   └── metadata.yaml
-└── xnetworks.aws.platform.upbound.io
-    ├── cluster
-    │   └── borrelli-backup-test.yaml
-    └── metadata.yaml
-
-43 directories, 87 files
-```
-
-</details>
-
-The `export.yaml` file contains metadata about the export, including the configuration of the export, Crossplane information, and what's included in the export bundle.
-
-<details>
-
-<summary>View the export</summary>
-
-```yaml
-version: v1alpha1
-exportedAt: 2025-01-06T17:39:53.173222Z
-options:
-  excludedNamespaces:
-    - kube-system
-    - kube-public
-    - kube-node-lease
-    - local-path-storage
-  includedResources:
-    - namespaces
-    - configmaps
-    - secrets
-  excludedResources:
-    - gotemplates.gotemplating.fn.crossplane.io
-    - kclinputs.template.fn.crossplane.io
-crossplane:
-  distribution: universal-crossplane
-  namespace: crossplane-system
-  version: 1.17.3-up.1
-  featureFlags:
-    - --enable-provider-identity
-    - --enable-environment-configs
-    - --enable-composition-functions
-    - --enable-usages
-stats:
-  total: 68
-  nativeResources:
-    configmaps: 0
-    namespaces: 3
-    secrets: 5
-  customResources:
-    amicopies.ec2.aws.upbound.io: 0
-    amilaunchpermissions.ec2.aws.upbound.io: 0
-    amis.ec2.aws.upbound.io: 0
-    availabilityzonegroups.ec2.aws.upbound.io: 0
-    capacityreservations.ec2.aws.upbound.io: 0
-    carriergateways.ec2.aws.upbound.io: 0
-    compositeresourcedefinitions.apiextensions.crossplane.io: 0
-    compositionrevisions.apiextensions.crossplane.io: 2
-    compositions.apiextensions.crossplane.io: 0
-    configurationrevisions.pkg.crossplane.io: 0
-    configurations.pkg.crossplane.io: 1
-...redacted
-```
-
-</details>
-
-### Skipped resources
-
-Along with the resources excluded via CLI options, the following resources aren't
-included in the backup:
-
-- The `kube-root-ca.crt` ConfigMap, since this is cluster-specific
-- Resources directly managed via Helm (Argo CD's Helm implementation, which templates
-Helm resources and then applies them, is the exception; those resources get included in the backup). The migration creates the exclusion list by looking for:
-  - Any Resource with the label `"app.kubernetes.io/managed-by" == "Helm"`
-  - Kubernetes Secrets with the label prefix `helm.sh/release`. For example, `helm.sh/release.v1`
-- Resources installed via a Crossplane package. These have an `ownerReference` with
-a prefix `pkg.crossplane.io`. The expectation is that during import, the Crossplane Package Manager bears responsibility for installing the resources.
-- Crossplane Locks: Any `Lock.pkg.crossplane.io` resource isn't included in the
-export.
-
-## Restore
-
-The following is an example of a successful import run. At the end of the import, all Managed Resources are in a paused state.
-
-<details>
-
-<summary>View the migration import</summary>
-
-```bash
-$ up controlplane migration import
-Importing control plane state...
-✓ Reading state from the archive... Done! 👀
-✓ Importing base resources... 18 resources imported! 📥
-✓ Waiting for XRDs... Established! ⏳
-✓ Waiting for Packages... Installed and Healthy! ⏳
-✓ Importing remaining resources... 50 resources imported! 📥
-✓ Finalizing import... Done! 🎉
-```
-
-</details>
-
-Your scenario may involve migrating resources which already exist through other automation on the platform. When executing an import in these circumstances, the importer applies the new manifests to the cluster. If the resource already exists, the restore sets its fields to what's in the backup.
-
-The importer restores all resources in the export archive. Managed Resources get imported with the `crossplane.io/paused: "true"` annotation set. Use the `--unpause-after-import` CLI argument to automatically un-pause resources that got
-paused during backup, or remove the annotation manually.
-
-### Restore order
-
-The importer restores based on Kubernetes types. The restore order doesn't include parent/child relationships.
-
-Because Crossplane Composites create new Managed Resources if not present on the cluster, all
-Claims, Composites and Managed Resources get imported in a paused state. You can un-pause them after the restore completes.
-
-The first step of import is installing Base Resources into the cluster. These resources (such as
-packages and XRDs) must be ready before proceeding with the import.
-Base Resources are:
-
-- Kubernetes Resources
-  - ConfigMaps
-  - Namespaces
-  - Secrets
-- Crossplane Resources
-  - ControllerConfigs: `controllerconfigs.pkg.crossplane.io`
-  - DeploymentRuntimeConfigs: `deploymentruntimeconfigs.pkg.crossplane.io`
-  - StoreConfigs: `storeconfigs.secrets.crossplane.io`
-- Crossplane Packages
-  - Providers: `providers.pkg.crossplane.io`
-  - Functions: `functions.pkg.crossplane.io`
-  - Configurations: `configurations.pkg.crossplane.io`
-
-Restore waits for the base resources to be `Ready` before moving on to the next step. Next, restore walks through the archive and restores all the manifests present.
-
-During import, the `crossplane.io/paused` annotation gets added to Managed Resources, Claims
-and Composites.
-
-To manually un-pause managed resources after an import, remove the annotation by running:
-
-```bash
-kubectl annotate managed --all crossplane.io/paused-
-```
-
-You can also run import again with the `--unpause-after-import` flag to remove the annotations.
-
-```bash
-up controlplane migration import --unpause-after-import
-```
-
-### Restoring resource status
-
-The importer applies the status of all resources during import. It determines whether the CRD version defines a status field based on the stored CRD version.
-
-[cli-command]: /reference/cli-reference
-[up-cli]: /reference/cli-reference
-[up-cli-1]: /manuals/cli/overview
-[create-command]: /reference/cli-reference
-[up-ctx]: /reference/cli-reference
-[configuration-aws-network]: https://marketplace.upbound.io/configurations/upbound/configuration-aws-network
diff --git a/spaces_versioned_docs/version-v1.15/howtos/observability.md b/spaces_versioned_docs/version-v1.15/howtos/observability.md
deleted file mode 100644
index 8fc5c3278..000000000
--- a/spaces_versioned_docs/version-v1.15/howtos/observability.md
+++ /dev/null
@@ -1,395 +0,0 @@
----
-title: Observability
-sidebar_position: 50
-description: A guide for how to use the integrated observability pipeline feature
-  in a Space.
-plan: "enterprise"
----
-
-This guide explains how to configure observability in Upbound Spaces. Upbound
-provides integrated observability features built on
-[OpenTelemetry][opentelemetry] to collect, process, and export logs, metrics,
-and traces.
-
-Upbound Spaces offers two levels of observability:
-
-1. **Space-level observability** - Observes the cluster infrastructure where Spaces software is installed (Self-Hosted only)
-2. **Control plane observability** - Observes workloads running within individual control planes
-
-:::info API Version Information & Version Selector
-This guide applies to all supported versions (v1.9-v1.15+). Observability features evolved:
-
-- **v1.9-v1.12**: SharedTelemetryConfig available (alpha until v1.11)
-- **v1.11+**: Observability promoted to stable with logs export support
-- **v1.14+**: Both space-level and control-plane observability GA
-
-**View API Reference for Your Version**:
-| Version | Status | Link |
-|---------|--------|------|
-| v1.15 | Latest | [Spaces API v1.15](../../../next/reference/apis/spaces-api/) |
-| v1.14 | Supported | [Spaces API v1.14](../../../1.14/reference/apis/spaces-api/) |
-| v1.13 | Supported | [Spaces API v1.13](../../../1.13/reference/apis/spaces-api/) |
-| v1.12 | Supported | [Spaces API v1.12](../../../1.12/reference/apis/spaces-api/) |
-| v1.11 | Supported | [Spaces API v1.11](../../../1.11/reference/apis/spaces-api/) |
-| v1.10 | Limited | [Spaces API v1.10](../../../1.10/reference/apis/spaces-api/) |
-| v1.9 | EOL | [Spaces API v1.9](../../../1.9/reference/apis/spaces-api/) |
-
-For the version support policy and feature availability, see the version support documentation.
-:::
-
-:::important
-**Space-level observability** (available since v1.6.0, GA in v1.14.0):
-- Disabled by default
-- Requires manual enablement and configuration
-- Self-Hosted Spaces only
-
-**Control plane observability** (available since v1.13.0, GA in v1.14.0):
-- Enabled by default
-- No additional configuration required
-:::
-
-## Prerequisites
-
-**Control plane observability** is enabled by default. No additional setup is
-required.
-
-### Self-hosted Spaces
-
-1. **Enable the observability feature** when installing Spaces:
-   ```bash
-   up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
-     ... \
-     --set "observability.enabled=true"
-   ```
-
-   Set `features.alpha.observability.enabled=true` instead if using a Spaces version
-   before `v1.14.0`.
-
-2. **Install OpenTelemetry Operator** (required for Space-level observability):
-   ```bash
-   kubectl apply -f https://github.com/open-telemetry/opentelemetry-operator/releases/download/v0.116.0/opentelemetry-operator.yaml
-   ```
-
-   :::important
-   If running Spaces `v1.11` or later, use OpenTelemetry Operator `v0.110.0` or later due to breaking changes.
-   :::
-
-## Space-level Observability
-
-Space-level observability is only available for self-hosted Spaces and allows
-administrators to observe the cluster infrastructure.
-
-### Configuration
-
-Configure Space-level observability using the `spacesCollector` value in your
-Spaces Helm chart:
-
-```yaml
-observability:
-  spacesCollector:
-    config:
-      exporters:
-        otlphttp:
-          endpoint: "<endpoint>"
-          headers:
-            api-key: YOUR_API_KEY
-      exportPipeline:
-        logs:
-          - otlphttp
-        metrics:
-          - otlphttp
-```
-
-This configuration exports metrics and logs from:
-
-- Crossplane installation
-- Spaces infrastructure (controller, API, router, etc.)
-
-### Router metrics
-
-The Spaces router uses Envoy as a reverse proxy and automatically exposes
-metrics when you enable Space-level observability.
-These metrics provide visibility into:
-
-- Traffic routing to control planes and services
-- Request status codes, timeouts, and retries
-- Circuit breaker state preventing cascading failures
-- Client connection patterns and request volume
-- Request latency (P50, P95, P99)
-
-For more information about available metrics, example queries, and how to enable
-this feature, see the [Space-level observability guide][space-level-o11y].
-
-## Control plane observability
-
-Control plane observability collects telemetry data from workloads running
-within individual control planes using `SharedTelemetryConfig` resources.
-
-The pipeline deploys [OpenTelemetry Collectors][opentelemetry-collectors] per
-control plane, defined by a `SharedTelemetryConfig` at the group level.
-Collectors pass data to external observability backends.
-
-:::important
-From Spaces `v1.13` and beyond, telemetry only includes user-facing control
-plane workloads (Crossplane, providers, functions).
-
-Self-hosted users can include system workloads (`api-server`, `etcd`) by setting
-`observability.collectors.includeSystemTelemetry=true` in Helm.
-:::
-
-:::important
-Spaces validates `SharedTelemetryConfig` resources before applying them by
-sending telemetry to configured exporters. For self-hosted Spaces, ensure that
-`spaces-controller` can reach the exporter endpoints.
-:::
-
-### `SharedTelemetryConfig`
-
-`SharedTelemetryConfig` is a group-scoped custom resource that defines telemetry
-configuration for control planes.
-
-#### New Relic example
-
-```yaml
-apiVersion: observability.spaces.upbound.io/v1alpha1
-kind: SharedTelemetryConfig
-metadata:
-  name: newrelic
-  namespace: default
-spec:
-  controlPlaneSelector:
-    labelSelectors:
-      - matchLabels:
-          org: foo
-  exporters:
-    otlphttp:
-      endpoint: https://otlp.nr-data.net
-      headers:
-        api-key: YOUR_API_KEY
-  exportPipeline:
-    metrics: [otlphttp]
-    traces: [otlphttp]
-    logs: [otlphttp]
-```
-
-#### Datadog example
-
-```yaml
-apiVersion: observability.spaces.upbound.io/v1alpha1
-kind: SharedTelemetryConfig
-metadata:
-  name: datadog
-  namespace: default
-spec:
-  controlPlaneSelector:
-    labelSelectors:
-      - matchLabels:
-          org: foo
-  exporters:
-    datadog:
-      api:
-        site: ${DATADOG_SITE}
-        key: ${DATADOG_API_KEY}
-  exportPipeline:
-    metrics: [datadog]
-    traces: [datadog]
-    logs: [datadog]
-```
-
-### Control plane selection
-
-Use `spec.controlPlaneSelector` to specify which control planes should use the
-telemetry configuration.
-
-#### Label-based selection
-
-```yaml
-spec:
-  controlPlaneSelector:
-    labelSelectors:
-      - matchLabels:
-          environment: production
-```
-
-#### Expression-based selection
-
-```yaml
-spec:
-  controlPlaneSelector:
-    labelSelectors:
-      - matchExpressions:
-          - { key: environment, operator: In, values: [production,staging] }
-```
-
-#### Name-based selection
-
-```yaml
-spec:
-  controlPlaneSelector:
-    names:
-      - controlplane-dev
-      - controlplane-staging
-      - controlplane-prod
-```
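-
-For a label selector to match, the control plane needs the corresponding label. A sketch of applying one from the group context (the control plane name is illustrative):
-
-```shell
-kubectl label controlplane controlplane-dev environment=production
-```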
-
-### Manage sensitive data
-
-:::important
-Available from Spaces `v1.10`
-:::
-
-Store sensitive data in Kubernetes secrets and reference them in your
-`SharedTelemetryConfig`:
-
-1. **Create the secret:**
-   ```bash
-   kubectl create secret generic sensitive -n <namespace> \
-     --from-literal=apiKey='YOUR_API_KEY'
-   ```
-
-2. **Reference in SharedTelemetryConfig:**
-   ```yaml
-   apiVersion: observability.spaces.upbound.io/v1alpha1
-   kind: SharedTelemetryConfig
-   metadata:
-     name: newrelic
-   spec:
-     configPatchSecretRefs:
-       - name: sensitive
-         key: apiKey
-         path: exporters.otlphttp.headers.api-key
-     controlPlaneSelector:
-       labelSelectors:
-         - matchLabels:
-             org: foo
-     exporters:
-       otlphttp:
-         endpoint: https://otlp.nr-data.net
-         headers:
-           api-key: dummy # Replaced by secret value
-     exportPipeline:
-       metrics: [otlphttp]
-       traces: [otlphttp]
-       logs: [otlphttp]
-   ```
-
-### Telemetry processing
-
-:::important
-Available from Spaces `v1.11`
-:::
-
-Configure processing pipelines to transform telemetry data using the [transform
-processor][transform-processor].
-
-#### Add labels to metrics
-
-```yaml
-spec:
-  processors:
-    transform:
-      error_mode: ignore
-      metric_statements:
-        - context: datapoint
-          statements:
-            - set(attributes["newLabel"], "someLabel")
-  processorPipeline:
-    metrics: [transform]
-```
-
-#### Remove labels
-
-From metrics:
-```yaml
-processors:
-  transform:
-    metric_statements:
-      - context: datapoint
-        statements:
-          - delete_key(attributes, "kubernetes_namespace")
-```
-
-From logs:
-```yaml
-processors:
-  transform:
-    log_statements:
-      - context: log
-        statements:
-          - delete_key(attributes, "log.file.name")
-```
-
-#### Modify log messages
-
-```yaml
-processors:
-  transform:
-    log_statements:
-      - context: log
-        statements:
-          - set(attributes["original"], body)
-          - set(body, Concat(["log message:", body], " "))
-```
-
-### Monitor status
-
-Check the status of your `SharedTelemetryConfig`:
-
-```bash
-kubectl get stc
-NAME      SELECTED   FAILED   PROVISIONED   AGE
-datadog   1          0        1             63s
-```
-
-- `SELECTED`: Number of control planes selected
-- `FAILED`: Number of control planes that failed provisioning
-- `PROVISIONED`: Number of successfully running collectors
-
-For detailed status information:
-
-```bash
-kubectl describe stc <name>
-```
-
-## Supported exporters
-
-Both Space-level and control plane observability support:
-- `datadog` - Datadog integration
-- `otlphttp` - General-purpose exporter (used by New Relic, among others)
-- `debug` - Troubleshooting
-
-## Considerations
-
-- **Control plane conflicts**: Each control plane can only use one `SharedTelemetryConfig`. Multiple configs selecting the same control plane conflict.
-- **Custom collector image**: Both Space-level and control plane observability use the same custom OpenTelemetry Collector image with supported exporters.
-- **Resource scope**: `SharedTelemetryConfig` resources are group-scoped, allowing different telemetry configurations per group.
-
-For more advanced configuration options, review the [Helm chart
-reference][helm-chart-reference] and [OpenTelemetry Transformation Language
-documentation][opentelemetry-transformation-language].
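-
-When bringing up a new pipeline, it can help to start with the supported `debug` exporter before wiring up a real backend. A minimal sketch (the name and selector are illustrative):
-
-```yaml
-apiVersion: observability.spaces.upbound.io/v1alpha1
-kind: SharedTelemetryConfig
-metadata:
-  name: debug-pipeline
-  namespace: default
-spec:
-  controlPlaneSelector:
-    names:
-      - controlplane-dev
-  exporters:
-    debug: {}
-  exportPipeline:
-    metrics: [debug]
-    logs: [debug]
-```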
-
-
-[opentelemetry]: https://opentelemetry.io/
-[opentelemetry-collectors]: https://opentelemetry.io/docs/collector/
-[opentelemetry-collector-configuration]: https://opentelemetry.io/docs/collector/configuration/#exporters
-[opentelemetry-operator]: https://opentelemetry.io/docs/kubernetes/operator/
-[transform-processor]: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/transformprocessor/README.md
-[opentelemetry-transformation-language]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl
-[space-level-o11y]: /spaces/howtos/self-hosted/space-observability
-[helm-chart-reference]: /reference/helm-reference
-[opentelemetry-transformation-language-functions]: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/ottl/ottlfuncs/README.md
-[opentelemetry-transformation-language-contexts]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl/contexts
-[guide-on-ottl]: https://betterstack.com/community/guides/observability/ottl/#a-brief-overview-of-the-ottl-grammar
diff --git a/spaces_versioned_docs/version-v1.15/howtos/query-api.md b/spaces_versioned_docs/version-v1.15/howtos/query-api.md
deleted file mode 100644
index 78163de2f..000000000
--- a/spaces_versioned_docs/version-v1.15/howtos/query-api.md
+++ /dev/null
@@ -1,320 +0,0 @@
----
-title: Query API
-sidebar_position: 40
-description: Use the `up` CLI to query objects and resources
----
-
-Upbound's Query API allows users to inspect objects and resources within their control planes. The read-only `up alpha query` and `up alpha get` CLI commands let you gather information about your control planes quickly and efficiently. These commands follow the [`kubectl` conventions][kubectl-conventions] for filtering, sorting, and retrieving information from your Space.
-
-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+). Query API is available in Cloud Spaces since v1.6 and Self-Hosted since v1.8.
-
-For detailed deployment procedures, see [Deploy Query API infrastructure - Self-Hosted](./self-hosted/query-api.md). For version compatibility details, see the API reference for your version.
-:::
-
-## Using the Query API
-
-The Query API allows you to retrieve control plane information faster than traditional `kubectl` commands. This feature lets you debug your Crossplane resources with the CLI or within the Upbound Console's enhanced management views.
-
-### Query within a single control plane
-
-Use the `up alpha get` command to retrieve information about objects within the current control plane context. This command uses the **Query** endpoint and targets the current control plane.
-
-To switch between control plane groups, use the [`up ctx`][up-ctx] command and change to your desired context with the interactive prompt, or specify your control plane path:
-
-```shell
-up ctx <organization>/<space>/<group>/<control-plane>
-```
-
-You can query within a single control plane with the [`up alpha get` command][up-alpha-get-command] to return more information about a given object within the current kubeconfig context.
-
-The `up alpha get` command can query resource types and aliases to return objects in your control plane.
-
-```shell
-up alpha get managed
-NAME                             READY   SYNCED   AGE
-custom-account1-5bv5j-sa         True    True     15m
-custom-cluster1-bq6dk-net        True    True     15m
-custom-account1-5bv5j-subnet     True    True     15m
-custom-cluster1-bq6dk-nodepool   True    True     15m
-custom-cluster1-bq6dk-cluster    True    True     15m
-custom-account1-5bv5j-net        True    True     15m
-custom-cluster1-bq6dk-subnet     True    True     15m
-custom-cluster1-bq6dk-sa         True    True     15m
-```
-
-The [`-A` flag][a-flag] queries for objects across all namespaces.
-
-```shell
-up alpha get configmaps -A
-NAMESPACE           NAME                                                   AGE
-crossplane-system   uxp-versions-config                                    18m
-crossplane-system   universal-crossplane-config                            18m
-crossplane-system   kube-root-ca.crt                                       18m
-upbound-system      kube-root-ca.crt                                       18m
-kube-system         kube-root-ca.crt                                       18m
-kube-system         coredns                                                18m
-default             kube-root-ca.crt                                       18m
-kube-node-lease     kube-root-ca.crt                                       18m
-kube-public         kube-root-ca.crt                                       18m
-kube-system         kube-apiserver-legacy-service-account-token-tracking   18m
-kube-system         extension-apiserver-authentication                     18m
-```
-
-To query for [multiple resource types][multiple-resource-types], you can add the name or alias for the resource as a comma-separated string.
-
-```shell
-up alpha get providers,providerrevisions
-
-NAME                                                                               HEALTHY   REVISION   IMAGE                                                     STATE    DEP-FOUND   DEP-INSTALLED   AGE
-providerrevision.pkg.crossplane.io/crossplane-contrib-provider-nop-ecc25c121431   True      1          xpkg.upbound.io/crossplane-contrib/provider-nop:v0.2.1   Active                               18m
-NAME                                                          INSTALLED   HEALTHY   PACKAGE                                                   AGE
-provider.pkg.crossplane.io/crossplane-contrib-provider-nop   True        True      xpkg.upbound.io/crossplane-contrib/provider-nop:v0.2.1   18m
-```
-
-### Query multiple control planes
-
-The [`up alpha query` command][up-alpha-query-command] returns a list of objects of any kind within all the control planes in your Space. This command uses either the **SpaceQuery** or **GroupQuery** endpoints depending on your query scope. The `-A` flag switches the query context from the group level to the entire Space.
-
-The `up alpha query` command accepts resources and aliases to return objects across your group or Space.
-
-```shell
-up alpha query crossplane
-
-NAME                                                                                          ESTABLISHED   OFFERED   AGE
-compositeresourcedefinition.apiextensions.crossplane.io/xnetworks.platform.acme.co           True          True      20m
-compositeresourcedefinition.apiextensions.crossplane.io/xaccountscaffolds.platform.acme.co   True          True      20m
-
-
-NAME                                                                          XR-KIND            XR-APIVERSION               AGE
-composition.apiextensions.crossplane.io/xaccountscaffolds.platform.acme.co   XAccountScaffold   platform.acme.co/v1alpha1   20m
-composition.apiextensions.crossplane.io/xnetworks.platform.acme.co           XNetwork           platform.acme.co/v1alpha1   20m
-
-
-NAME                                                                                          REVISION   XR-KIND            XR-APIVERSION               AGE
-compositionrevision.apiextensions.crossplane.io/xaccountscaffolds.platform.acme.co-5ae9da5   1          XAccountScaffold   platform.acme.co/v1alpha1   20m
-compositionrevision.apiextensions.crossplane.io/xnetworks.platform.acme.co-414ce80           1          XNetwork           platform.acme.co/v1alpha1   20m
-
-NAME                                                         READY   SYNCED   AGE
-nopresource.nop.crossplane.io/custom-cluster1-bq6dk-subnet   True    True     19m
-nopresource.nop.crossplane.io/custom-account1-5bv5j-net      True    True     19m
-
-## Output truncated...
-
-```
-
-The [`--sort-by` flag][sort-by-flag] lets you sort the returned information. You can specify the sort order as a JSONPath expression that resolves to a string or integer.
- - -```shell -up alpha query crossplane -A --sort-by="{.metadata.name}" - -CONTROLPLANE NAME AGE -default/test deploymentruntimeconfig.pkg.crossplane.io/default 10m - -CONTROLPLANE NAME AGE TYPE DEFAULT-SCOPE -default/test storeconfig.secrets.crossplane.io/default 10m Kubernetes crossplane-system -``` - -To query for multiple resource types, you can add the name or alias for the resource as a comma separated string. - -```shell -up alpha query namespaces,configmaps -A - -CONTROLPLANE NAME AGE -default/test namespace/upbound-system 15m -default/test namespace/crossplane-system 15m -default/test namespace/kube-system 16m -default/test namespace/default 16m - -CONTROLPLANE NAMESPACE NAME AGE -default/test crossplane-system configmap/uxp-versions-config 15m -default/test crossplane-system configmap/universal-crossplane-config 15m -default/test crossplane-system configmap/kube-root-ca.crt 15m -default/test upbound-system configmap/kube-root-ca.crt 15m -default/test kube-system configmap/coredns 16m -default/test default configmap/kube-root-ca.crt 16m - -## Output truncated... - -``` - -The Query API also allows you to return resource types with specific [label columns][label-columns]. - -```shell -up alpha query composite -A --label-columns=crossplane.io/claim-namespace - -CONTROLPLANE NAME SYNCED READY COMPOSITION AGE CLAIM-NAMESPACE -query-api-test/test xeks.argo.discover.upbound.io/test-k7xbk False xeks.argo.discover.upbound.io 51d default - -CONTROLPLANE NAME EXTERNALDNS SYNCED READY COMPOSITION AGE CLAIM-NAMESPACE -spaces-clusters/controlplane-query-api-test-spaces-playground xexternaldns.externaldns.platform.upbound.io/spaces-cluster-0-xd8v2-lhnl7 6.34.2 True True xexternaldns.externaldns.platform.upbound.io 19d default -default/query-api-test xexternaldns.externaldns.platform.upbound.io/space-awg-kine-f7dxq-nkk2q 6.34.2 True True xexternaldns.externaldns.platform.upbound.io 55d default - -## Output truncated... - -``` - -### Query API request format - -The CLI can also return a version of your query request with the [`--debug` flag][debug-flag]. This flag returns the API spec request for your query. - -```shell -up alpha query composite -A -d - -apiVersion: query.spaces.upbound.io/v1alpha1 -kind: SpaceQuery -metadata: - creationTimestamp: null -spec: - cursor: true - filter: - categories: - - composite - controlPlane: {} - limit: 500 - objects: - controlPlane: true - table: {} - page: {} -``` - -For more complex queries, you can interact with the Query API like a Kubernetes-style API by creating a query and applying it with `kubectl`. - -The example below is a query for `claim` resources in every control plane from oldest to newest and returns specific information about those claims. - - -```yaml -apiVersion: query.spaces.upbound.io/v1alpha1 -kind: SpaceQuery -spec: - filter: - categories: - - claim - order: - - creationTimestamp: Asc - cursor: true - count: true - objects: - id: true - controlPlane: true - object: - kind: true - apiVersion: true - metadata: - name: true - uid: true - spec: - containers: - image: true -``` - - -The Query API is served by the Spaces API endpoint. You can use `up ctx` to -switch the kubectl context to the Spaces API ingress. After that, you can use -`kubectl create` and receive the `response` for your query parameters. 
- - -```shell -kubectl create -f spaces-query.yaml -o yaml -``` - -Your `response` should look similar to this example: - -```yaml {copy-lines="none"} -apiVersion: query.spaces.upbound.io/v1alpha1 -kind: SpaceQuery -metadata: - creationTimestamp: "2024-08-08T14:41:46Z" - name: default -response: - count: 3 - cursor: - next: "" - page: 0 - pageSize: 100 - position: 0 - objects: - - controlPlane: - name: query-api-test - namespace: default - id: default/query-api-test/823b2781-7e70-4d91-a6f0-ee8f455d67dc - object: - apiVersion: spaces.platform.upbound.io/v1alpha1 - kind: Space - metadata: - name: space-awg-kine - resourceVersion: "803868" - uid: 823b2781-7e70-4d91-a6f0-ee8f455d67dc - spec: {} - - controlPlane: - name: test-1 - namespace: test - id: test/test-1/08a573dd-851a-42cc-a600-b6f6ed37ee8d - object: - apiVersion: argo.discover.upbound.io/v1alpha1 - kind: EKS - metadata: - name: test-1 - resourceVersion: "4270320" - uid: 08a573dd-851a-42cc-a600-b6f6ed37ee8d - spec: {} - - controlPlane: - name: controlplane-query-api-test-spaces-playground - namespace: spaces-clusters - id: spaces-clusters/controlplane-query-api-test-spaces-playground/b5a6770f-1f85-4d09-8990-997c84bd4159 - object: - apiVersion: spaces.platform.upbound.io/v1alpha1 - kind: Space - metadata: - name: spaces-cluster-0 - resourceVersion: "1408337" - uid: b5a6770f-1f85-4d09-8990-997c84bd4159 - spec: {} -``` - - -## Query API Explorer - - - -import CrdDocViewer from '@site/src/components/CrdViewer'; - -### Query - -The Query resource allows you to query objects in a single control plane. - - - -### GroupQuery - -The GroupQuery resource allows you to query objects across a group of control planes. - - - -### SpaceQuery - -The SpaceQuery resource allows you to query objects across all control planes in a space. - - - - - - -[documentation]: /spaces/howtos/self-hosted/query-api -[up-ctx]: /reference/cli-reference -[up-alpha-get-command]: /reference/cli-reference -[a-flag]: /reference/cli-reference -[multiple-resource-types]: /reference/cli-reference -[up-alpha-query-command]: /reference/cli-reference -[sort-by-flag]: /reference/cli-reference -[label-columns]: /reference/cli-reference -[debug-flag]: /reference/cli-reference -[kubectl-conventions]: https://kubernetes.io/docs/reference/kubectl/generated/kubectl_get/ diff --git a/spaces_versioned_docs/version-v1.15/howtos/secrets-management.md b/spaces_versioned_docs/version-v1.15/howtos/secrets-management.md deleted file mode 100644 index 88e730ae5..000000000 --- a/spaces_versioned_docs/version-v1.15/howtos/secrets-management.md +++ /dev/null @@ -1,719 +0,0 @@ ---- -title: Secrets Management -sidebar_position: 20 -description: A guide for how to configure synchronizing external secrets into control - planes in a Space. ---- - -Upbound's _Shared Secrets_ is a built in secrets management feature that -provides an integrated way to manage secrets across your platform. It allows you -to store sensitive data like passwords and certificates for your managed control -planes as secrets in an external secret store. - -This feature is a wrapper around the [External Secrets Operator (ESO)][external-secrets-operator-eso] that pulls secrets from external vaults and distributes them across your platform. - -:::info API Version Information -This guide applies to all supported versions (v1.9-v1.14+). The SharedSecretStore and SharedExternalSecret CRDs have been stable since v1.9. - -For API specifications and configuration details, see the [Spaces API Reference](../../../reference/apis/spaces-api/). 
-For version compatibility details, see the API reference for your version.
-:::
-
-## Benefits
-
-The Shared Secrets feature allows you to:
-
-* Access secrets from a variety of external secret stores without operational overhead
-* Configure synchronization for multiple control planes in a group
-* Store and manage all your secrets centrally
-* Use Shared Secrets across all Upbound environments (Cloud and Disconnected Spaces)
-* Synchronize secrets across groups of control planes while maintaining clear security boundaries
-* Manage secrets at scale programmatically while ensuring proper isolation and access control
-
-## Understanding the Architecture
-
-The Shared Secrets feature uses a hierarchical approach to centrally manage
-secrets and effectively control their distribution.
-
-![Shared Secrets workflow diagram](/img/shared-secrets-workflow.png)
-
-1. The flow begins at the group level, where you define your secret sources and distribution rules
-2. These rules automatically create corresponding resources in your control planes
-3. In each control plane, specific namespaces receive the secrets
-4. Changes at the group level automatically propagate through this chain
-
-## Component configuration
-
-Upbound Shared Secrets consists of two components:
-
-1. **SharedSecretStore**: Defines connections to external secret providers
-2. **SharedExternalSecret**: Specifies which secrets to synchronize and where
-
-### Connect to an External Vault
-
-The `SharedSecretStore` component is the connection point to your external
-secret vaults. It provisions ClusterSecretStore resources into control planes
-within the group.
-
-#### AWS Secrets Manager
-
-In this example, you'll create a `SharedSecretStore` to connect to AWS
-Secrets Manager in `us-west-2`. Then you'll grant access to all control planes labeled with
-`environment: production` and make these secrets available in the `default` and
-`crossplane-system` namespaces.
-
-You can configure access to AWS Secrets Manager using static credentials or
-workload identity.
-
-:::important
-While the underlying ESO API supports more auth methods, static credentials are currently the only supported auth method in Cloud Spaces.
-:::
-
-##### Static credentials
-
-1. Use the AWS CLI to create access credentials.
-
-2. Note the access key ID and secret access key for the credentials.
-
-3. Next, store the access credentials in a secret in the namespace you want to have access to the `SharedSecretStore`:
-```shell
-kubectl create secret \
-  generic aws-credentials \
-  -n default \
-  --from-literal=access-key-id=<access-key-id> \
-  --from-literal=secret-access-key=<secret-access-key>
-```
-
-4. Create a `SharedSecretStore` custom resource file called `secretstore.yaml`.
-   Paste the following configuration:
-```yaml
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedSecretStore
-metadata:
-  name: aws-secrets
-spec:
-  # Define which control planes should receive this configuration
-  controlPlaneSelector:
-    labelSelectors:
-      - matchLabels:
-          environment: production
-
-  # Define which namespaces within those control planes can access secrets
-  namespaceSelector:
-    names:
-      - default
-      - crossplane-system
-
-  # Configure the connection to AWS Secrets Manager
-  provider:
-    aws:
-      service: SecretsManager
-      region: us-west-2
-      auth:
-        secretRef:
-          accessKeyIDSecretRef:
-            name: aws-credentials
-            key: access-key-id
-          secretAccessKeySecretRef:
-            name: aws-credentials
-            key: secret-access-key
-```
-
-##### Workload Identity with IRSA
-
-You can also use AWS IAM Roles for Service Accounts (IRSA) depending on your
-organization's needs:
-
-1. Ensure you have deployed the Spaces software into an IRSA-enabled EKS cluster.
-2. Follow the AWS instructions to create an IAM OIDC provider with your EKS OIDC
-   provider URL.
-3. Determine the Spaces-generated `controlPlaneID` of your control plane:
-```shell
-kubectl get controlplane <control-plane-name> -o jsonpath='{.status.controlPlaneID}'
-```
-
-4. Create an IAM trust policy in your AWS account to match the control plane.
-```yaml
-{
-  "Version": "2012-10-17",
-  "Statement": [
-    {
-      "Effect": "Allow",
-      "Principal": {
-        "Federated": "arn:aws:iam::<account-id>:oidc-provider/<oidc-provider>"
-      },
-      "Action": "sts:AssumeRoleWithWebIdentity",
-      "Condition": {
-        "StringEquals": {
-          "<oidc-provider>:aud": "sts.amazonaws.com",
-          "<oidc-provider>:sub": [
-            "system:serviceaccount:mxp-<controlplane-id>-system:external-secrets-controller"]
-        }
-      }
-    }
-  ]
-}
-```
-
-5. Update your Spaces deployment to annotate the SharedSecrets service account
-   with the role ARN.
-```shell
-up space upgrade ... \
-  --set controlPlanes.sharedSecrets.serviceAccount.customAnnotations."eks\.amazonaws\.com/role-arn"="<role-arn>"
-```
-
-6. Create a SharedSecretStore and reference the SharedSecrets service account:
-```yaml {copy-lines="all"}
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedSecretStore
-metadata:
-  name: aws-sm
-  namespace: default
-spec:
-  provider:
-    aws:
-      service: SecretsManager
-      region: <region>
-      auth:
-        jwt:
-          serviceAccountRef:
-            name: external-secrets-controller
-  controlPlaneSelector:
-    names:
-      - <control-plane-name>
-  namespaceSelector:
-    names:
-      - default
-```
-
-When you create a `SharedSecretStore`, the underlying mechanism:
-
-1. Applies at the group level
-2. Determines which control planes should receive this configuration by the `controlPlaneSelector`
-3. Automatically creates a ClusterSecretStore inside each identified control plane
-4. Maintains a connection in each control plane with the ClusterSecretStore
-   credentials and configuration from the parent SharedSecretStore
-
-Upbound automatically generates a ClusterSecretStore in each matching control
-plane when you create a SharedSecretStore.
-
-```yaml {copy-lines="none"}
-# Automatically created in each matching control plane
-apiVersion: external-secrets.io/v1beta1
-kind: ClusterSecretStore
-metadata:
-  name: aws-secrets   # Name matches the parent SharedSecretStore
-spec:
-  provider:
-    upboundspaces:
-      storeRef:
-        name: aws-secrets
-```
-
-When you create a SharedSecretStore, the controller replaces the provider with
-a special provider called `upboundspaces`. This provider references the
-SharedSecretStore object in the Spaces API. This avoids copying the actual cloud
-credentials from Spaces to each control plane.
-
-This workflow lets you configure the store connection once at the group level
-and automatically propagate it to each control plane. Individual control planes
-can use the store without exposure to the group-level configuration, and
-updates to the parent SharedSecretStore propagate to all child
-ClusterSecretStores.
-
-
-#### Azure Key Vault
-
-
-:::important
-While the underlying ESO API supports more auth methods, static credentials are currently the only supported auth method in Cloud Spaces.
-:::
-
-##### Static credentials
-
-1. Use the Azure CLI to create a service principal and an authentication file.
-2. Save the service principal credentials in a file, for example `azure-credentials.json`:
-```json
-{
-  "appId": "myAppId",
-  "displayName": "myServicePrincipalName",
-  "password": "myServicePrincipalPassword",
-  "tenant": "myTenantId"
-}
-```
-
-3. Store the credentials as a Kubernetes secret:
-```shell
-kubectl create secret \
-  generic azure-secret-sp \
-  -n default \
-  --from-file=creds=./azure-credentials.json
-```
-
-4. Create a SharedSecretStore referencing these credentials:
-```yaml
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedSecretStore
-metadata:
-  name: azure-kv
-spec:
-  provider:
-    azurekv:
-      tenantId: ""
-      vaultUrl: ""
-      authSecretRef:
-        clientId:
-          name: azure-secret-sp
-          key: ClientID
-        clientSecret:
-          name: azure-secret-sp
-          key: ClientSecret
-  controlPlaneSelector:
-    names:
-    - 
-  namespaceSelector:
-    names:
-    - default
-```
-
-##### Workload Identity
-
-
-You can also use Entra Workload Identity Federation to access Azure Key Vault
-without needing to manage secrets.
-
-To use Entra Workload ID with AKS:
-
-
-1. Deploy the Spaces software into a [workload identity-enabled AKS cluster][workload-identity-enabled-aks-cluster].
-2. Retrieve the OIDC issuer URL of the AKS cluster:
-```shell
-az aks show --name "" \
-  --resource-group "" \
-  --query "oidcIssuerProfile.issuerUrl" \
-  --output tsv
-```
-
-3. Use the Azure CLI to create a managed identity:
-```shell
-az identity create \
-  --name "" \
-  --resource-group "" \
-  --location "" \
-  --subscription ""
-```
-
-4. Look up the managed identity's client ID:
-```shell
-az identity show \
-  --resource-group "" \
-  --name "" \
-  --query 'clientId' \
-  --output tsv
-```
-
-5. Update your Spaces deployment to annotate the SharedSecrets service account with the associated Entra application client ID from the previous step:
-```shell
-up space upgrade ... \
-  --set controlPlanes.sharedSecrets.serviceAccount.customAnnotations."azure\.workload\.identity/client-id"="" \
-  --set-string controlPlanes.sharedSecrets.pod.customLabels."azure\.workload\.identity/use"="true"
-```
-
-6. Determine the Spaces-generated `controlPlaneID` of your control plane. When you deploy a `kind: controlplane` in a Space, the Spaces software deploys a set of pods in a new namespace following the format `mxp--system`.
-```shell
-kubectl get controlplane -o jsonpath='{.status.controlPlaneID}'
-```
-
-7. Create a federated identity credential:
-```shell
-FEDERATED_IDENTITY_CREDENTIAL_NAME=
-USER_ASSIGNED_IDENTITY_NAME=
-RESOURCE_GROUP=
-AKS_OIDC_ISSUER=
-CONTROLPLANE_ID=
-az identity federated-credential create \
-  --name "${FEDERATED_IDENTITY_CREDENTIAL_NAME}" \
-  --identity-name "${USER_ASSIGNED_IDENTITY_NAME}" \
-  --resource-group "${RESOURCE_GROUP}" \
-  --issuer "${AKS_OIDC_ISSUER}" \
-  --subject "system:serviceaccount:mxp-${CONTROLPLANE_ID}-system:external-secrets-controller" \
-  --audience api://AzureADTokenExchange
-```
-
-8. Assign the `Key Vault Secrets User` role to the user-assigned managed identity that you created earlier.
This step gives the managed identity permission to read secrets from the key vault:
-```shell
-# Set IDENTITY_PRINCIPAL_ID and KEYVAULT_RESOURCE_ID for your environment first
-az role assignment create \
-  --assignee-object-id "${IDENTITY_PRINCIPAL_ID}" \
-  --role "Key Vault Secrets User" \
-  --scope "${KEYVAULT_RESOURCE_ID}" \
-  --assignee-principal-type ServicePrincipal
-```
-
-:::important
-You must manually restart a workload's pod when you add the annotation to the running pod's service account. The Entra workload identity mutating admission webhook requires a restart to inject the necessary environment.
-:::
-
-9. Create a `SharedSecretStore`. Replace `vaultUrl` with the URL of your Azure Key Vault instance and use the client ID of the managed identity created earlier:
-```yaml {copy-lines="all"}
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedSecretStore
-metadata:
-  name: azure-kv
-spec:
-  provider:
-    azurekv:
-      authType: WorkloadIdentity
-      vaultUrl: ""
-  controlPlaneSelector:
-    names:
-    - 
-  namespaceSelector:
-    names:
-    - default
-```
-
-
-
-
-#### Google Cloud Secret Manager
-
-
-
-You can configure access to Google Cloud Secret Manager using static credentials or workload identity. Below are instructions for configuring either. See the [ESO provider API][eso-provider-api] for more information.
-
-:::important
-While the underlying ESO API supports more auth methods, static credentials are currently the only supported auth method in Cloud Spaces.
-:::
-
-##### Static credentials
-
-1. Use the [GCP CLI][gcp-cli] to create access credentials.
-2. Save the output in a file called `gcp-credentials.json`.
-3. Store the access credentials in a secret in the same namespace as the `SharedSecretStore`:
-   ```shell {label="kube-create-secret",copy-lines="all"}
-   kubectl create secret \
-     generic gcpsm-secret \
-     -n default \
-     --from-file=creds=./gcp-credentials.json
-   ```
-
-4. Create a `SharedSecretStore`, referencing the secret created earlier. Replace `projectID` with your GCP Project ID:
-```yaml
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedSecretStore
-metadata:
-  name: gcp-sm
-spec:
-  provider:
-    gcpsm:
-      auth:
-        secretRef:
-          secretAccessKeySecretRef:
-            name: gcpsm-secret
-            key: creds
-      projectID: 
-  controlPlaneSelector:
-    names:
-    - 
-  namespaceSelector:
-    names:
-    - default
-```
-
-:::tip
-The example above maps a Shared Secret Store into a single namespace of a single control plane. Read [control plane selection][control-plane-selection] and [namespace selection][namespace-selection] to learn how to map into one or more namespaces of one or more control planes.
-:::
-
-
-##### Workload identity with Service Accounts to IAM Roles
-
-
-To configure workload identity, grant the `roles/iam.workloadIdentityUser` role to the Kubernetes
-service account in the control plane namespace so it can impersonate the IAM service
-account.
-
-1. Ensure you've deployed Spaces on a [Workload Identity Federation-enabled][workload-identity-federation-enabled] GKE cluster.
-2. Determine the Spaces-generated `controlPlaneID` of your control plane. When you deploy a `kind: controlplane` in a Space, the Spaces software deploys a set of pods in a new namespace following the format `mxp--system`.
-```shell
-kubectl get controlplane -o jsonpath='{.status.controlPlaneID}'
-```
-
-3. Create a GCP IAM service account with the [GCP CLI][gcp-cli-1]:
-```shell
-gcloud iam service-accounts create \
-  --project=
-```
-
-4. Grant the IAM service account the role to access GCP Secret Manager:
-```shell
-SA_NAME=
-IAM_SA_PROJECT_ID=
-gcloud projects add-iam-policy-binding "${IAM_SA_PROJECT_ID}" \
-  --member "serviceAccount:${SA_NAME}@${IAM_SA_PROJECT_ID}.iam.gserviceaccount.com" \
-  --role roles/secretmanager.secretAccessor
-```
-
-5. When you enable the Shared Secrets feature, a service account gets created in each control plane for the External Secrets Operator. Apply a [GCP IAM policy binding][gcp-iam-policy-binding] to associate this service account with the desired GCP IAM role.
-```shell
-PROJECT_ID=
-PROJECT_NUMBER=
-CONTROLPLANE_ID=
-gcloud projects add-iam-policy-binding projects/${PROJECT_ID} \
-  --role "roles/iam.workloadIdentityUser" \
-  --member=principal://iam.googleapis.com/projects/${PROJECT_NUMBER}/locations/global/workloadIdentityPools/${PROJECT_ID}.svc.id.goog/subject/ns/mxp-${CONTROLPLANE_ID}-system/sa/external-secrets-controller
-```
-
-6. Update your Spaces deployment to annotate the SharedSecrets service account with the GCP IAM service account's identifier:
-```shell
-up space upgrade ... \
-  --set controlPlanes.sharedSecrets.serviceAccount.customAnnotations."iam\.gke\.io/gcp-service-account"=""
-```
-
-7. Create a `SharedSecretStore`. Replace `projectID` with your GCP Project ID:
-```yaml
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedSecretStore
-metadata:
-  name: gcp-sm
-spec:
-  provider:
-    gcpsm:
-      projectID: 
-  controlPlaneSelector:
-    names:
-    - 
-  namespaceSelector:
-    names:
-    - default
-```
-
-:::tip
-The example above maps a Shared Secret Store into a single namespace of a single control plane. Read [control plane selection][control-plane-selection-1] and [namespace selection][namespace-selection-2] to learn how to map into one or more namespaces of one or more control planes.
-:::
-
-### Manage your secret distribution
-
-After you create your SharedSecretStore, you can define which secrets to
-distribute using SharedExternalSecret:
-
-```yaml
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedExternalSecret
-metadata:
-  name: database-credentials
-  namespace: default
-spec:
-  # Select the same control planes as your SharedSecretStore
-  controlPlaneSelector:
-    labelSelectors:
-      - matchLabels:
-          environment: production
-
-  externalSecretSpec:
-    refreshInterval: 1h
-    secretStoreRef:
-      name: aws-secrets # References the SharedSecretStore name
-      kind: ClusterSecretStore
-    target:
-      name: db-credentials
-    data:
-      - secretKey: username
-        remoteRef:
-          key: prod/database/credentials
-          property: username
-      - secretKey: password
-        remoteRef:
-          key: prod/database/credentials
-          property: password
-```
-
-This configuration:
-
-* Pulls database credentials from your external secret provider
-* Creates secrets in all production control planes
-* Refreshes the secrets every hour
-* Creates a secret called `db-credentials` in each control plane
-
-When you create a SharedExternalSecret at the group level, Upbound's system
-creates a template for the corresponding ClusterExternalSecrets in each selected
-control plane.
-
-The example below simulates the ClusterExternalSecret that Upbound creates:
-
-```yaml
-# Inside each matching control plane:
-apiVersion: external-secrets.io/v1beta1
-kind: ClusterExternalSecret
-metadata:
-  name: database-credentials
-spec:
-  refreshInterval: 1h
-  secretStoreRef:
-    name: aws-secrets
-    kind: ClusterSecretStore
-  data:
-    - secretKey: username
-      remoteRef:
-        key: prod/database/credentials
-        property: username
-```
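-
-To verify the distribution end to end, you can check for the rendered
-resources from inside a matched control plane. This is a sketch reusing the
-names from the example above; the context path is illustrative:
-
-```shell
-# Point kubectl at a matched control plane (example context path)
-up ctx your-org/your-space/default/your-controlplane
-
-# The ClusterExternalSecret created from the SharedExternalSecret
-kubectl get clusterexternalsecret database-credentials
-
-# The synchronized secret written into each selected namespace
-kubectl get secret db-credentials -n default
-```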
-
-The hierarchy in this configuration is:
-
-1. SharedExternalSecret (group level) defines what secrets to distribute
-2. ClusterExternalSecret (control plane level) manages the distribution within
-   each control plane
-3. Kubernetes Secrets (namespace level) are created in specified namespaces
-
-
-#### Control plane selection
-
-To configure which control planes in a group you want to project a SecretStore into, use the `spec.controlPlaneSelector` field. You can either use `labelSelectors` or the `names` of a control plane directly. A control plane matches if any of the label selectors match.
-
-This example matches all control planes in the group that have `environment: production` as a label:
-
-```yaml
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedSecretStore
-metadata:
-  name: my-secret-store
-spec:
-  controlPlaneSelector:
-    labelSelectors:
-      - matchLabels:
-          environment: production
-```
-
-You can use the more complex `matchExpressions` to match labels based on an expression. This example matches control planes that have label `environment: production` or `environment: staging`:
-
-```yaml
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedSecretStore
-metadata:
-  name: my-secret-store
-spec:
-  controlPlaneSelector:
-    labelSelectors:
-      - matchExpressions:
-          - { key: environment, operator: In, values: [production,staging] }
-```
-
-You can also specify the names of control planes directly:
-
-```yaml
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedSecretStore
-metadata:
-  name: my-secret-store
-spec:
-  controlPlaneSelector:
-    names:
-      - controlplane-dev
-      - controlplane-staging
-      - controlplane-prod
-```
-
-
-#### Namespace selection
-
-To configure which namespaces **within each matched control plane** to project the secret store into, use the `spec.namespaceSelector` field. The projected secret store only appears in the namespaces matching the provided selector. You can either use `labelSelectors` or the `names` of namespaces directly. A namespace matches if any of the label selectors match.
-
-**For all control planes matched by** `spec.controlPlaneSelector`, this example matches all namespaces in each selected control plane that have `team: team1` as a label:
-
-```yaml
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedSecretStore
-metadata:
-  name: my-secret-store
-spec:
-  namespaceSelector:
-    labelSelectors:
-      - matchLabels:
-          team: team1
-```
-
-You can use the more complex `matchExpressions` to match labels based on an expression. This example matches namespaces that have label `team: team1` or `team: team2`:
-
-```yaml
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedSecretStore
-metadata:
-  name: my-secret-store
-spec:
-  namespaceSelector:
-    labelSelectors:
-      - matchExpressions:
-          - { key: team, operator: In, values: [team1,team2] }
-```
-
-You can also specify the names of namespaces directly:
-
-```yaml
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedSecretStore
-metadata:
-  name: my-secret-store
-spec:
-  namespaceSelector:
-    names:
-      - team1-namespace
-      - team2-namespace
-```
-
-## Configure secrets directly in a control plane
-
-
-The sections above explain how to use group-scoped resources to project secrets into multiple control planes. You can also use ESO API types directly in a control plane, as you would in standalone Crossplane or Kubernetes.
-
-
-See the [ESO documentation][eso-documentation] for a full guide on using the API types.
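-
-For example, a minimal `ExternalSecret` applied directly inside a control
-plane might look like the following sketch. It consumes the projected
-ClusterSecretStore from the AWS example earlier; the name, namespace, and
-remote key are illustrative:
-
-```yaml
-apiVersion: external-secrets.io/v1beta1
-kind: ExternalSecret
-metadata:
-  name: api-token
-  namespace: default
-spec:
-  refreshInterval: 1h
-  secretStoreRef:
-    name: aws-secrets # ClusterSecretStore projected by the SharedSecretStore
-    kind: ClusterSecretStore
-  target:
-    name: api-token # Kubernetes Secret to create in this namespace
-  data:
-    - secretKey: token
-      remoteRef:
-        key: prod/api/credentials # illustrative path in the external vault
-        property: token
-```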
-
-## Best practices
-
-When you configure secrets management in your Upbound environment, keep the
-following best practices in mind:
-
-**Use consistent labeling schemes** across your control planes for predictable
-and manageable secret distribution.
-
-**Organize your secrets** in your external provider using a hierarchical
-structure that mirrors your control plane organization.
-
-**Set appropriate refresh intervals** based on your security requirements and the
-nature of the secrets.
-
-**Use namespace selection sparingly** to limit secret distribution to only the
-namespaces that need them.
-
-**Use separate tokens for each environment.** Keep them in distinct
-SharedSecretStores. Users could bypass SharedExternalSecret selectors by
-creating ClusterExternalSecrets directly in control planes. This grants access to all
-secrets available to that token.
-
-**Document your secret management architecture**, including which control planes
-should receive which secrets.
-
-[control-plane-selection]: #control-plane-selection
-[namespace-selection]: #namespace-selection
-[control-plane-selection-1]: #control-plane-selection
-[namespace-selection-2]: #namespace-selection
-
-[external-secrets-operator-eso]: https://external-secrets.io
-[workload-identity-enabled-aks-cluster]: https://learn.microsoft.com/en-us/azure/aks/workload-identity-deploy-cluster
-[eso-provider-api]: https://external-secrets.io/latest/provider/google-secrets-manager/
-[gcp-cli]: https://cloud.google.com/iam/docs/creating-managing-service-account-keys
-[workload-identity-federation-enabled]: https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity#enable_on_clusters_and_node_pools
-[gcp-cli-1]: https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity#kubernetes-sa-to-iam
-[gcp-iam-policy-binding]: https://cloud.google.com/sdk/gcloud/reference/iam/service-accounts/add-iam-policy-binding
-[eso-documentation]: https://external-secrets.io/latest/introduction/getting-started/
diff --git a/spaces_versioned_docs/version-v1.15/howtos/self-hosted/_category_.json b/spaces_versioned_docs/version-v1.15/howtos/self-hosted/_category_.json
deleted file mode 100644
index 5bf23bb0a..000000000
--- a/spaces_versioned_docs/version-v1.15/howtos/self-hosted/_category_.json
+++ /dev/null
@@ -1,11 +0,0 @@
-{
-  "label": "Self-Hosted Spaces",
-  "position": 2,
-  "collapsed": true,
-  "customProps": {
-    "plan": "business"
-  }
-
-}
-
-
diff --git a/spaces_versioned_docs/version-v1.15/howtos/self-hosted/administer-features.md b/spaces_versioned_docs/version-v1.15/howtos/self-hosted/administer-features.md
deleted file mode 100644
index ce878014e..000000000
--- a/spaces_versioned_docs/version-v1.15/howtos/self-hosted/administer-features.md
+++ /dev/null
@@ -1,121 +0,0 @@
----
-title: Administer features
-sidebar_position: 12
-description: Enable and disable features in Spaces
----
-
-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Feature flags and their availability may vary by version.
-
-For detailed feature availability across versions, see the .
-:::
-
-This guide shows how to enable or disable features in your self-hosted Space.
-
-## Shared secrets
-
-**Status:** Preview
-
-This feature is enabled by default in Cloud Spaces.
-
-To enable this feature in a self-hosted Space, set
-`features.alpha.sharedSecrets.enabled=true` when installing the Space:
-
-```bash
-up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
-  ...
-  --set "features.alpha.sharedSecrets.enabled=true"
-```
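-
-If you manage the installation with Helm instead of the `up` CLI, you can pass
-the same value through the chart. This sketch follows the Helm install pattern
-used elsewhere in these docs; adjust the release, chart version, and other
-values to match your deployment:
-
-```bash
-helm -n upbound-system upgrade --install spaces \
-  oci://xpkg.upbound.io/spaces-artifacts/spaces \
-  --reuse-values \
-  --set "features.alpha.sharedSecrets.enabled=true"
-```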
-
-
-## Observability
-
-**Status:** GA
-**Available from:** Spaces v1.13+
-
-This feature is enabled by default in Cloud Spaces.
-
-
-
-To enable this feature in a self-hosted Space, set
-`observability.enabled=true` (`features.alpha.observability.enabled=true` before `v1.14.0`) when installing the Space:
-
-```bash
-up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
-  ...
-  --set "observability.enabled=true"
-```
-
-The observability feature collects telemetry data from user-facing control
-plane workloads like:
-
-* Crossplane
-* Providers
-* Functions
-
-Self-hosted Spaces users can add control plane system workloads, such as the
-`api-server` and `etcd`, by setting the
-`observability.collectors.includeSystemTelemetry` Helm flag to true.
-
-### Sensitive data
-
-To avoid exposing sensitive data in the `SharedTelemetryConfig` resource, use
-Kubernetes secrets to store the sensitive data and reference the secret in the
-`SharedTelemetryConfig` resource.
-
-Create the secret in the same namespace/group as the `SharedTelemetryConfig`
-resource. The example below uses `kubectl create secret` to create a new secret:
-
-```bash
-kubectl create secret generic sensitive -n  \
-  --from-literal=apiKey='YOUR_API_KEY'
-```
-
-Next, reference the secret in the `SharedTelemetryConfig` resource:
-
-```yaml
-apiVersion: observability.spaces.upbound.io/v1alpha1
-kind: SharedTelemetryConfig
-metadata:
-  name: newrelic
-spec:
-  configPatchSecretRefs:
-    - name: sensitive
-      key: apiKey
-      path: exporters.otlphttp.headers.api-key
-  controlPlaneSelector:
-    labelSelectors:
-      - matchLabels:
-          org: foo
-  exporters:
-    otlphttp:
-      endpoint: https://otlp.nr-data.net
-      headers:
-        api-key: dummy # This value is replaced by the secret value and can be omitted
-  exportPipeline:
-    metrics: [otlphttp]
-    traces: [otlphttp]
-    logs: [otlphttp]
-```
-
-The `configPatchSecretRefs` field in the `spec` specifies the secret `name`,
-`key`, and `path` values used to inject the secret value into the
-`SharedTelemetryConfig` resource.
-
-## Shared backups
-
-As of Spaces `v1.12.0`, this feature is enabled by default.
-
-To disable it in a self-hosted Space, pass `features.alpha.sharedBackup.enabled=false` as a Helm chart value:
-`--set "features.alpha.sharedBackup.enabled=false"`
-
-## Query API
-
-**Status:** Preview
-
-The Query API is available in the Cloud Space offering and enabled by default.
-
-The Query API is required for self-hosted deployments with connected Spaces. See the
-related [documentation][documentation]
-to enable this feature.
-
-[documentation]: /spaces/howtos/query-api/
diff --git a/spaces_versioned_docs/version-v1.15/howtos/self-hosted/attach-detach.md b/spaces_versioned_docs/version-v1.15/howtos/self-hosted/attach-detach.md
deleted file mode 100644
index 1465921cf..000000000
--- a/spaces_versioned_docs/version-v1.15/howtos/self-hosted/attach-detach.md
+++ /dev/null
@@ -1,198 +0,0 @@
----
-title: Connect or disconnect a Space
-sidebar_position: 12
-description: Enable and connect self-hosted Spaces to the Upbound console
----
-:::info API Version Information
-This guide applies to **Spaces v1.8.0 and later** (Self-Hosted only). Connection to the Upbound console requires the Query API and RBAC to be enabled.
-
-For version-specific features and requirements, see the . For Query API setup details, see [Deploy Query API infrastructure](./query-api.md).
-:::
-
-:::important
-This feature is in preview.
Starting in Spaces `v1.8.0` and later, you must -deploy and [enable the Query API][enable-the-query-api] and [enable Upbound -RBAC][enable-upbound-rbac] to connect a Space to Upbound. -::: - -[Upbound][upbound] allows you to connect self-hosted Spaces and enables a streamlined operations and debugging experience in your Console. - -## Usage - -### Connect - -Before you begin, make sure you have: - -- An existing Upbound [organization][organization] in Upbound SaaS. -- The `up` CLI installed and logged into your organization -- `kubectl` installed with the kubecontext of your self-hosted Space cluster. -- A `token.json` license, provided by your Upbound account representative. -- You enabled the [Query API][query-api] in the self-hosted Space. - -Create a new `UPBOUND_SPACE_NAME`. If you don't create a name, `up` automatically generates one for you: - -```ini -export UPBOUND_SPACE_NAME=your-self-hosted-space -``` - -#### With up CLI - -:::tip -The command tries to connect the Space to the org account context pointed at by your `up` CLI profile. Make sure you've logged into Upbound SaaS with `up login -a ` before trying to connect the Space. -::: - -Connect the Space to the Console: - -```bash -up space connect "${UPBOUND_SPACE_NAME}" -``` - -This command installs a Connect agent, creates a service account, and configures permissions in your Upbound cloud organization in the `upbound-system` namespace of your Space. - -#### With Helm - -Export your Upbound org account name to an environment variable called `UPBOUND_ORG_NAME`. You can see this value by running `up org list` after logging on to Upbound. - -```ini -export UPBOUND_ORG_NAME=your-org-name -``` - -Create a new robot token and export it to an environment variable called `UPBOUND_TOKEN`: - -```bash -up robot create "${UPBOUND_SPACE_NAME}" --description="Robot used for authenticating Space '${UPBOUND_SPACE_NAME}' with Upbound Connect" -export UPBOUND_TOKEN=$(up robot token create "$UPBOUND_SPACE_NAME" "$UPBOUND_SPACE_NAME" --file - | jq -r '.token') -``` - -:::note -Follow the [`jq` installation guide][jq-install] if your machine doesn't include -it by default. -::: - -Create a secret containing the robot token: - -```bash -kubectl create secret -n upbound-system generic connect-token --from-literal=token=${UPBOUND_TOKEN} -``` - -Specify your username and password for the helm OCI registry: - -```bash -jq -r .token $SPACES_TOKEN_PATH | helm registry login xpkg.upbound.io -u $(jq -r .accessId $SPACES_TOKEN_PATH) --password-stdin -``` - -In the same cluster where you installed the Spaces software, install the Upbound connect agent with your token secret. - -```bash -helm -n upbound-system upgrade --install agent \ - oci://xpkg.upbound.io/spaces-artifacts/agent \ - --version "0.0.0-441.g68777b9" \ - --set "image.repository=xpkg.upbound.io/spaces-artifacts/agent" \ - --set "registration.image.repository=xpkg.upbound.io/spaces-artifacts/register-init" \ - --set "imagePullSecrets[0].name=upbound-pull-secret" \ - --set "registration.enabled=true" \ - --set "space=${UPBOUND_SPACE_NAME}" \ - --set "organization=${UPBOUND_ORG_NAME}" \ - --set "tokenSecret=connect-token" \ - --wait -``` - - -#### View your Space in the Console - - -Go to the [Upbound Console][upbound-console], log in, and choose the newly connected Space from the Space selector dropdown. - -![A screenshot of the Upbound Console space selector dropdown](/img/attached-space.png) - -:::note -You can only connect a self-hosted Space to a single organization at a time. 
-:::
-
-### Disconnect
-
-#### With up CLI
-
-To disconnect a self-hosted Space or a deleted self-hosted Space, run the following command:
-
-```bash
-up space disconnect "${UPBOUND_SPACE_NAME}"
-```
-
-If the Space still exists, this command uninstalls the Connect agent and deletes the associated service account and permissions.
-
-#### With Helm
-
-To disconnect a self-hosted Space or a deleted self-hosted Space, run the following command:
-
-```bash
-helm delete -n upbound-system agent
-```
-
-Clean up the robot token you created for this self-hosted Space:
-
-```bash
-up robot delete "${UPBOUND_SPACE_NAME}" --force
-```
-
-## Security model
-
-### Architecture
-
-![An architectural diagram of a self-hosted Space attached to Upbound](/img/console-attach-architecture.jpg)
-
-:::note
-This diagram illustrates a self-hosted Space running in AWS connected to the global Upbound Console. The same model applies to a Space running in AKS, GKE, or other Kubernetes environments.
-:::
-
-### Data path
-
-Upbound uses a Pub/Sub model over TLS to communicate between Upbound's global
-console and your self-hosted Space. A self-hosted Space establishes a secure
-connection with `connect.upbound.io`, `api.upbound.io`, and `auth.upbound.io` and subscribes to an
-endpoint.
-
-:::important
-Add `connect.upbound.io`, `api.upbound.io`, and `auth.upbound.io` to your organization's list of
-allowed endpoints.
-:::
-
-The Upbound Console communicates with the Space through that endpoint. The data flow
-is:
-
-1. Users sign in to the Upbound Console, which redirects them to authenticate with the organization's configured Identity Provider via SSO.
-2. Once authenticated, actions in the Console, like listing control planes or specific resource types from a control plane, post as messages to the Upbound Connect service.
-3. A user's self-hosted Space polls the Upbound Connect service periodically for new messages, verifies the authenticity of each message, and fulfills the contained request.
-4. A user's self-hosted Space returns the results of the request to the Upbound Connect service, and the Console renders the results in the user's browser session.
-
-**Upbound never stores data originating from a self-hosted Space.** The data is transient and only exposed in the user's browser session. The Console needs this data to render your resources and control planes in the UI.
-
-### Data transmitted
-
-Users interact with the Upbound Console to generate request queries to the Upbound Connect Service while exploring, managing, or debugging a self-hosted Space. These requests send data back to the user's browser session in the Console, including:
-
-* Metadata for the Space
-* Metadata for control planes in the Space
-* Configuration manifests for various resource types within your Space: Crossplane managed resources, composite resources, composite resource claims, Upbound shared secrets, Upbound shared backups, Crossplane providers, ProviderConfigs, Configurations, and Crossplane Composite Functions.
-
-:::important
-This data only concerns resource configuration. The data _inside_ the managed
-resources in your Space isn't visible at any point.
-:::
-
-**Upbound can't see your data.** Upbound doesn't have access to session-based data rendered for your users in the Upbound Console. Upbound has no information about your self-hosted Space, other than that you've connected a self-hosted Space.
-
-### Threat vectors
-
-Only users with editor or administrative permissions can make changes through the Console, like creating or deleting control planes or groups.
-
-
-[enable-the-query-api]: /spaces/howtos/self-hosted/query-api
-[enable-upbound-rbac]: /manuals/platform/concepts/authorization/upbound-rbac
-[upbound]: /manuals/console/upbound-console
-[organization]: /manuals/platform/concepts/identity-management/organizations
-[query-api]: /spaces/howtos/self-hosted/query-api
-[jq-install]: https://jqlang.org/download/
-
-[upbound-console]: https://console.upbound.io
diff --git a/spaces_versioned_docs/version-v1.15/howtos/self-hosted/billing.md b/spaces_versioned_docs/version-v1.15/howtos/self-hosted/billing.md
deleted file mode 100644
index 145ff9f03..000000000
--- a/spaces_versioned_docs/version-v1.15/howtos/self-hosted/billing.md
+++ /dev/null
@@ -1,307 +0,0 @@
----
-title: Self-Hosted Space Billing
-sidebar_position: 50
-description: A guide for how billing works in an Upbound Space
----
-
-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Billing models may evolve between versions; see Capacity Licensing for alternative models.
-
-For version-specific features and capacity-based licensing details, see the . For reference specifications, see [Capacity Licensing](/spaces/howtos/self-hosted/capacity-licensing).
-:::
-
-Spaces are a self-hosting feature of Upbound's [flagship product][flagship-product] for platform teams to deploy control planes in their self-managed environments. You can install Spaces into any Kubernetes cluster in your own cloud account, on-premises data center, or on the edge. The pricing is usage-based and requires an Upbound account and subscription. The billing unit is a `Loop`.
-
-
-:::info
-This guide describes the traditional usage-based billing model using object storage. For disconnected or air-gapped environments, consider [Capacity Licensing](/spaces/howtos/self-hosted/capacity-licensing), which provides a simpler fixed-capacity model with local usage tracking.
-:::
-
-## Billing details
-
-Spaces **aren't connected** to Upbound's global service. To enable proper billing, the Spaces software ships a controller whose responsibility is to collect billing data from your Spaces deployment. The collection and storage of your billing data happens entirely within your environment; no data is automatically emitted back to Upbound's global service. This data gets written to object storage of your choice. AWS, Azure, and GCP are currently supported. The Spaces software exports billing usage data every ~15 seconds.
-
-Spaces customers must periodically provide the billing data to Upbound. Contact your Upbound sales representative to learn more.
-
-
-
-## AWS S3
-
-
-
-Configure billing to write to an S3 bucket by providing the following values at install-time. Create an S3 bucket if you don't already have one.
-
-### IAM policy
-
-You must create an IAM policy and attach it to the IAM user (for static credentials) or IAM role (for assumed
-roles).
- -The policy example below enables the necessary S3 permissions: - -```json -{ - "Sid":"EnableS3Permissions", - "Effect":"Allow", - "Action": [ - "s3:PutObject", - "s3:GetObject", - "s3:ListBucket", - "s3:DeleteObject" - ], - "Resource": [ - "arn:aws:s3:::your-bucket-name/*", - "arn:aws:s3:::your-bucket-name" - ] -}, -{ - "Sid": "ListBuckets", - "Effect": "Allow", - "Action": "s3:ListAllMyBuckets", - "Resource": "*" -} -``` - -### Authentication with static credentials - -In your Spaces install cluster, create a secret in the `upbound-system` -namespace. This secret must contain keys `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`. - -```bash -kubectl create secret generic billing-credentials -n upbound-system \ - --from-literal=AWS_ACCESS_KEY_ID= \ - --from-literal=AWS_SECRET_ACCESS_KEY= -``` - -Install the Space software, providing the billing details to the other required values. - - - - - - -```bash {hl_lines="2-6"} -helm -n upbound-system upgrade --install spaces ... \ - --set "billing.enabled=true" \ - --set "billing.storage.provider=aws" \ - --set "billing.storage.aws.region=" \ - --set "billing.storage.aws.bucket=" \ - --set "billing.storage.secretRef.name=billing-credentials" - ... -``` - - - - - -```bash {hl_lines="2-6"} -up space init ... \ - --set "billing.enabled=true" \ - --set "billing.storage.provider=aws" \ - --set "billing.storage.aws.region=" \ - --set "billing.storage.aws.bucket=" \ - --set "billing.storage.secretRef.name=billing-credentials" - ... -``` - - - - - - - -### Authentication with an IAM role - - -To use short-lived credentials with an assumed IAM role, create an IAM role with -established trust to the `vector`-serviceaccount in all `mxp-*-system` -namespaces. - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": { - "Federated": "arn:aws:iam::12345678912:oidc-provider/oidc.eks.eu-west-2.amazonaws.com/id/YOUROIDCPROVIDERID" - }, - "Action": "sts:AssumeRoleWithWebIdentity", - "Condition": { - "StringLike": { - "oidc.eks.eu-west-2.amazonaws.com/id/YOUROIDCPROVIDERID:sub": "system:serviceaccount:mxp-*-system:vector" - } - } - } - ] -} -``` - -For more information about workload identities, review the [Workload-identity -Configuration documentation][workload-identity-configuration-documentation] - - - - - - -```bash {hl_lines="2-7"} -helm -n upbound-system upgrade --install spaces ... \ - --set "billing.enabled=true" \ - --set "billing.storage.provider=aws" \ - --set "billing.storage.aws.region=" \ - --set "billing.storage.aws.bucket=" \ - --set "billing.storage.secretRef.name=" \ - --set "controlPlanes.vector.serviceAccount.customAnnotations[eks.amazonaws.com/role-arn]=" - ... -``` - - - - - -```bash {hl_lines="2-7"} -up space init ... \ - --set "billing.enabled=true" \ - --set "billing.storage.provider=aws" \ - --set "billing.storage.aws.region=" \ - --set "billing.storage.aws.bucket=" \ - --set "billing.storage.secretRef.name=" \ - --set "controlPlanes.vector.serviceAccount.customAnnotations[eks.amazonaws.com/role-arn]=" - ... -``` - - - - - - -*Note*: You must set `billing.storage.secretRef.name` to an empty string when using an assumed role. - - -## Azure blob storage - -Configure billing to write to a blob in Azure by providing the following values at install-time. Create a storage account and container if you don't already have one. - -Then, on the cluster where you installed the Spaces software, create a secret in `upbound-system`. 
This secret must contain keys `AZURE_TENANT_ID`, `AZURE_CLIENT_ID`, and `AZURE_CLIENT_SECRET`. Make sure to replace the values with details generated from your Azure account. - -```bash -kubectl create secret generic billing-credentials -n upbound-system \ - --from-literal=AZURE_TENANT_ID= \ - --from-literal=AZURE_CLIENT_ID= \ - --from-literal=AZURE_CLIENT_SECRET= -``` - -Install the Space software, providing the billing details to the other required values. - - - - - - -```bash {hl_lines="2-6"} -helm -n upbound-system upgrade --install spaces ... \ - --set "billing.enabled=true" \ - --set "billing.storage.provider=azure" \ - --set "billing.storage.azure.storageAccount=" \ - --set "billing.storage.azure.container=" \ - --set "billing.storage.secretRef.name=billing-credentials" - ... -``` - - - - - -```bash {hl_lines="2-6"} -up space init ... \ - --set "billing.enabled=true" \ - --set "billing.storage.provider=azure" \ - --set "billing.storage.azure.storageAccount=" \ - --set "billing.storage.azure.container=" \ - --set "billing.storage.secretRef.name=billing-credentials" - ... -``` - - - - - - - -## GCP Cloud Storage Buckets - - -Configure billing to write to a Cloud Storage bucket in GCP by providing the following values at install-time. Create a bucket if you don't already have one. - -Then, on the cluster where you installed the Spaces software, create a secret in `upbound-system`. This secret must contain the key `google_application_credentials`. Make sure to replace the value with a GCP service account key JSON generated from your GCP account. - -```bash -kubectl create secret generic billing-credentials -n upbound-system \ - --from-literal=google_application_credentials= -``` - -Install the Space software, providing the billing details to the other required values. - - - - - - -```bash {hl_lines="2-5"} -helm -n upbound-system upgrade --install spaces ... \ - --set "billing.enabled=true" \ - --set "billing.storage.provider=gcp" \ - --set "billing.storage.gcp.bucket=" \ - --set "billing.storage.secretRef.name=billing-credentials" - ... -``` - - - - - -```bash {hl_lines="2-5"} -up space init ... \ - --set "billing.enabled=true" \ - --set "billing.storage.provider=gcp" \ - --set "billing.storage.gcp.bucket=" \ - --set "billing.storage.secretRef.name=billing-credentials" - ... -``` - - - - - -## Export billing data to send to Upbound - -To prepare the billing data to send to Upbound, do the following: - -Ensure the current context of your kubeconfig points at the Spaces cluster. Then, run the [export][export] command. - - -:::important -Your current CLI must have read access to the bucket to run this command. -::: - - -The example below exports billing data stored in AWS: - -```bash -up space billing export --provider=aws \ - --bucket=spaces-billing-bucket \ - --account=your-upbound-org \ - --billing-month=2024-07 \ - --force-incomplete -``` - -The command creates a billing report that's zipped up in your current working directory. Send the output to your Upbound sales representative. - - -You can find full instructions and command options in the up [CLI reference][cli-reference] docs. 
- - -[export]: /reference/cli-reference -[cli-reference]: /reference/cli-reference -[flagship-product]: https://www.upbound.io/platform -[workload-identity-configuration-documentation]: https://docs.upbound.io/operate/accounts/authentication/oidc-configuration diff --git a/spaces_versioned_docs/version-v1.15/howtos/self-hosted/capacity-licensing.md b/spaces_versioned_docs/version-v1.15/howtos/self-hosted/capacity-licensing.md deleted file mode 100644 index a1dc6c101..000000000 --- a/spaces_versioned_docs/version-v1.15/howtos/self-hosted/capacity-licensing.md +++ /dev/null @@ -1,591 +0,0 @@ ---- -title: Capacity Licensing -sidebar_position: 60 -description: A guide for capacity-based licensing in self-hosted Spaces -plan: "enterprise" ---- - - - - - -This guide explains how to configure and monitor capacity-based licensing in -self-hosted Upbound Spaces. Capacity licensing provides a simplified billing -model for disconnected or air-gapped environments where automated usage -reporting isn't possible. - -:::info -Spaces `v1.15` and later support Capacity Licensing as an -alternative to the traditional usage-based billing model described in the -[Self-Hosted Space Billing][space-billing] guide. -::: - -## Overview - -Capacity licensing allows organizations to purchase a fixed capacity of -resources upfront. The Spaces software tracks usage locally and provides -visibility into consumption against your purchased capacity, all without -requiring external connectivity to Upbound's services. - -### Key concepts - -- **Resource Hours**: The primary billing unit representing all resources - managed by Crossplane over time. This includes managed resources, - composites (XRs), claims (XRCs), and all composed resources - essentially - everything Crossplane manages. The system aggregates resource counts over each - hour using trapezoidal integration to accurately account for changes in - resource count throughout the hour. -- **Operations**: The number of Operations invoked by Crossplane. -- **License Capacity**: The total amount of resource hours and operations included in your license. -- **Usage Tracking**: Continuous monitoring of consumption with real-time utilization percentages. - -### How it works - -1. Upbound provides you with a license file containing your purchased capacity -2. You configure a `SpaceLicense` in your Spaces cluster -3. The metering system automatically: - - Collects measurements from all control planes every minute - - Aggregates usage data into hourly intervals - - Stores usage data in a local PostgreSQL database - - Updates the `SpaceLicense` status with current consumption - -## Prerequisites - -### PostgreSQL database - -Capacity licensing requires a PostgreSQL database to store usage measurements. You can use: - -- An existing PostgreSQL instance -- A managed PostgreSQL service (AWS RDS, Azure Database, Google Cloud SQL) -- A PostgreSQL instance deployed in your cluster - -The database must be: - -- Accessible from the Spaces cluster -- Configured with a dedicated database and credentials - -#### Example: Deploy PostgreSQL with CloudNativePG - -If you don't have an existing PostgreSQL instance, you can deploy one in your -cluster using [CloudNativePG] (CNPG). CNPG is a Kubernetes operator that -manages PostgreSQL clusters. - -1. Install the CloudNativePG operator: - -```bash -kubectl apply -f https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/release-1.24/releases/cnpg-1.24.1.yaml -``` - -2. 
Create a PostgreSQL cluster for metering: - -```yaml -apiVersion: postgresql.cnpg.io/v1 -kind: Cluster -metadata: - name: metering-postgres - namespace: upbound-system -spec: - instances: 1 - imageName: ghcr.io/cloudnative-pg/postgresql:16 - bootstrap: - initdb: - database: metering - owner: metering - postInitApplicationSQL: - - ALTER ROLE "metering" CREATEROLE; - storage: - size: 5Gi - # Optional: Configure resources for production use - # resources: - # requests: - # memory: "512Mi" - # cpu: "500m" - # limits: - # memory: "1Gi" - # cpu: "1000m" ---- -apiVersion: v1 -kind: Secret -metadata: - name: metering-postgres-app - namespace: upbound-system - labels: - cnpg.io/reload: "true" -stringData: - username: metering - password: "your-secure-password-here" -type: kubernetes.io/basic-auth -``` - -```bash -kubectl apply -f metering-postgres.yaml -``` - -3. Wait for the cluster to be ready: - -```bash -kubectl wait --for=condition=ready cluster/metering-postgres -n upbound-system --timeout=5m -``` - -4. You can access the PostgreSQL cluster at `metering-postgres-rw.upbound-system.svc.cluster.local:5432`. - -:::tip -For production deployments, consider: -- Increasing `instances` to 3 for high availability -- Configuring [backups] to object storage -- Setting appropriate resource requests and limits -- Using a dedicated storage class with good I/O performance -::: - -### License file - -Contact your Upbound sales representative to obtain a license file for your organization. The license file contains: -- Your unique license ID -- Purchased capacity (resource hours and operations) -- License validity period -- Any usage restrictions (such as cluster UUID pinning) - -## Configuration - -### Step 1: Create database credentials secret - -Create a Kubernetes secret containing your PostgreSQL password using the pgpass format: - -```bash -# Create a pgpass file with format: hostname:port:database:username:password -# Note: The database name and username must be 'metering' -# For CNPG clusters, use the read-write service endpoint: -rw..svc.cluster.local -echo "metering-postgres-rw.upbound-system.svc.cluster.local:5432:metering:metering:your-secure-password-here" > pgpass - -# Create the secret -kubectl create secret generic metering-postgres-credentials \ - -n upbound-system \ - --from-file=pgpass=pgpass - -# Clean up the pgpass file -rm pgpass -``` - -The secret must contain a single key: -- **`pgpass`**: PostgreSQL password file in the format `hostname:port:metering:metering:password` - -:::note -The database name and username are fixed as `metering`. Ensure your PostgreSQL instance has a database named `metering` with a user `metering` that has appropriate permissions. - -If you deployed PostgreSQL using CNPG as shown in the example above, the password should match what you set in the `metering-postgres-app` secret. -::: - -:::tip -For production environments, consider using external secret management solutions: -- [External Secrets Operator][eso] -- Cloud-specific secret managers (AWS Secrets Manager, Azure Key Vault, GCP Secret Manager) -::: - -### Step 2: Enable metering in Spaces - -Enable the metering feature when installing or upgrading Spaces: - - - - - -```bash {hl_lines="2-7"} -helm -n upbound-system upgrade --install spaces ... 
\ - --set "metering.enabled=true" \ - --set "metering.storage.postgres.connection.url=metering-postgres-rw.upbound-system.svc.cluster.local:5432" \ - --set "metering.storage.postgres.connection.credentials.secret.name=metering-postgres-credentials" \ - --set "metering.interval=1m" \ - --set "metering.workerCount=10" \ - --set "metering.aggregationInterval=1h" \ - --set "metering.measurementRetentionDays=30" - ... -``` - - - - - -```bash {hl_lines="2-7"} -up space init ... \ - --set "metering.enabled=true" \ - --set "metering.storage.postgres.connection.url=metering-postgres-rw.upbound-system.svc.cluster.local:5432" \ - --set "metering.storage.postgres.connection.credentials.secret.name=metering-postgres-credentials" \ - --set "metering.interval=1m" \ - --set "metering.workerCount=10" \ - --set "metering.aggregationInterval=1h" \ - --set "metering.measurementRetentionDays=30" - ... -``` - - - - - -#### Configuration options - -| Option | Default | Description | -|--------|---------|-------------| -| `metering.enabled` | `false` | Enable the metering feature | -| `metering.storage.postgres.connection.url` | - | PostgreSQL host and port (format: `host:port`, required) | -| `metering.storage.postgres.connection.credentials.secret.name` | - | Name of the secret containing PostgreSQL credentials (required) | -| `metering.storage.postgres.connection.sslmode` | `require` | SSL mode for PostgreSQL connection (`disable`, `allow`, `prefer`, `require`, `verify-ca`, `verify-full`) | -| `metering.storage.postgres.connection.ca.name` | - | Name of the secret containing CA certificate for TLS connections (optional) | -| `metering.interval` | `1m` | How often to collect measurements from control planes | -| `metering.workerCount` | `10` | Number of parallel workers for measurement collection | -| `metering.aggregationInterval` | `1h` | How often to aggregate measurements into hourly usage data | -| `metering.measurementRetentionDays` | `30` | Days to retain raw measurements (0 = indefinite) | - - -#### Database sizing and retention - -The metering system uses two PostgreSQL tables to track usage: - -**Raw measurements table** (`measurements`): -- Stores point-in-time snapshots collected every measurement interval (default: 1 minute) -- One row per control plane per interval -- Affected by the `measurementRetentionDays` setting -- Used for detailed auditing and troubleshooting - -**Aggregated usage table** (`hourly_usage`): -- Stores hourly aggregated resource hours and operations per license -- One row per hour per license -- Never deleted (required for accurate license tracking) -- Grows much slower than raw measurements - -##### Storage sizing guidelines - -Estimate your PostgreSQL storage needs based on these factors: - - -| Deployment Size | Control Planes | Measurement Interval | Retention Days | Raw Measurements | Indexes & Overhead | Total Storage | -|----------------|----------------|---------------------|----------------|------------------|-------------------|---------------| -| Small | 10 | 1m | 30 | ~85 MB | ~40 MB | **~125 MB** | -| Medium | 50 | 1m | 30 | ~430 MB | ~215 MB | **~645 MB** | -| Large | 200 | 1m | 30 | ~1.7 GB | ~850 MB | **~2.5 GB** | -| Large (90-day retention) | 200 | 1m | 90 | ~5.2 GB | ~2.6 GB | **~7.8 GB** | - -The aggregated hourly usage table adds minimal overhead (~50 KB per year per license). 
- -**Formula for custom calculations**: -``` -Daily measurements per control plane = (24 * 60) / interval_minutes -Total rows = control_planes × daily_measurements × retention_days -Storage (MB) ≈ (total_rows × 200 bytes) / 1,048,576 × 1.5 (with indexes) -``` - -##### Retention behavior - -The `measurementRetentionDays` setting controls retention of raw measurement data: - -- **Default: 30 days** - Balances audit capabilities with storage efficiency -- **Set to 0**: Disables cleanup, retains all raw measurements indefinitely -- **Cleanup runs**: Every aggregation interval (default: hourly) -- **What's kept forever**: Aggregated hourly usage data (needed for license tracking) -- **What's cleaned up**: Raw point-in-time measurements older than retention period - -**Recommendations**: -- **30 days**: For most troubleshooting and short-term auditing -- **60 to 90 days**: For environments requiring extended audit trails -- **Unlimited (0)**: Only for environments with ample storage or specific compliance requirements - -:::note -Increasing retention period linearly increases storage requirements for raw measurements. The aggregated hourly data is always retained regardless of this setting. -::: - -### Step 3: Apply your license - -Use the `up` CLI to apply your license file: - -```bash -up space license apply /path/to/license.json -``` - -This command automatically: -- Creates a secret containing your license file in the `upbound-system` namespace -- Creates the `SpaceLicense` resource configured to use that secret - -:::tip -You can specify a different namespace for the license secret using the `--namespace` flag: -```bash -up space license apply /path/to/license.json --namespace my-namespace -``` -::: - -
-Alternative: Manual kubectl approach - -If you prefer not to use the `up` CLI, you can manually create the resources: - -1. Create the license secret: - -```bash -kubectl create secret generic space-license \ - -n upbound-system \ - --from-file=license.json=/path/to/license.json -``` - -2. Create the SpaceLicense resource: - -```yaml -apiVersion: admin.spaces.upbound.io/v1alpha1 -kind: SpaceLicense -metadata: - name: space -spec: - secretRef: - name: space-license - namespace: upbound-system - key: license.json -``` - -```bash -kubectl apply -f spacelicense.yaml -``` - -:::important -You **must** name the `SpaceLicense` resource `space`. This resource is a singleton and only one can exist in the cluster. -::: - -
- -## Monitoring usage - -### Check license status - -Use the `up` CLI to view your license details and current usage: - -```bash -up space license show -``` - -Example output: - -``` -Spaces License Status: Valid (License is valid) - -Created: 2024-01-01T00:00:00Z -Expires: 2025-01-01T00:00:00Z - -Plan: enterprise - -Resource Hour Limit: 1000000 -Operation Limit: 500000 - -Enabled Features: -- spaces -- query-api -- backup-restore -``` - -The output shows: -- License validity status and any validation messages -- Creation and expiration dates -- Your commercial plan tier -- Capacity limits for resource hours and operations -- Enabled features in your license -- Any restrictions (such as cluster UUID pinning) - -
-Alternative: View detailed status with kubectl - -For detailed information including usage statistics, use kubectl: - -```bash -kubectl get spacelicense space -o yaml -``` - -Example output showing usage data: - -```yaml -apiVersion: admin.spaces.upbound.io/v1alpha1 -kind: SpaceLicense -metadata: - name: space -spec: - secretRef: - name: space-license - namespace: upbound-system -status: - conditions: - - type: LicenseValid - status: "True" - reason: Valid - message: "License is valid" - id: "lic_abc123xyz" - plan: "enterprise" - capacity: - resourceHours: 1000000 - operations: 500000 - usage: - resourceHours: 245680 - operations: 12543 - resourceHoursUtilization: "24.57%" - operationsUtilization: "2.51%" - firstMeasurement: "2024-01-15T10:00:00Z" - lastMeasurement: "2024-02-10T14:30:00Z" - createdAt: "2024-01-01T00:00:00Z" - expiresAt: "2025-01-01T00:00:00Z" - enabledFeatures: - - "spaces" - - "query-api" - - "backup-restore" -``` - -
-
-### Understanding the status fields
-
-| Field | Description |
-|-------|-------------|
-| `status.id` | Unique license identifier |
-| `status.plan` | Your commercial plan (community, standard, enterprise) |
-| `status.capacity` | Total capacity included in your license |
-| `status.usage.resourceHours` | Total resource hours consumed |
-| `status.usage.operations` | Total operations performed |
-| `status.usage.resourceHoursUtilization` | Percentage of resource hours capacity used |
-| `status.usage.operationsUtilization` | Percentage of operations capacity used |
-| `status.usage.firstMeasurement` | When usage tracking began |
-| `status.usage.lastMeasurement` | Most recent usage update |
-| `status.expiresAt` | License expiration date |
-
-### Monitor with kubectl
-
-Watch your license utilization in real-time:
-
-```bash
-kubectl get spacelicense space -w
-```
-
-Short output format:
-
-```
-NAME    PLAN         VALID   REASON   AGE
-space   enterprise   True    Valid    45d
-```
-
-## Managing licenses
-
-### Updating your license
-
-To update your license with a new license file (for example, when renewing or upgrading capacity), apply the new license:
-
-```bash
-up space license apply /path/to/new-license.json
-```
-
-This command replaces the existing license secret and updates the SpaceLicense resource.
-
-### Removing a license
-
-To remove a license:
-
-```bash
-up space license remove
-```
-
-This command:
-- Prompts for confirmation before proceeding
-- Removes the license secret
-
-To skip the confirmation prompt, use the `--force` flag:
-
-```bash
-up space license remove --force
-```
-
-## Troubleshooting
-
-### License not updating
-
-If the license status doesn't update with usage data:
-
-1. **Check metering controller logs**:
-   ```bash
-   kubectl logs -n upbound-system deployment/spaces-controller -c metering
-   ```
-
-2. **Check if the system captures your measurements**:
-
-   ```bash
-   # Connect to PostgreSQL and query the measurements table
-   kubectl exec -it  -- psql -U  -d  \
-     -c "SELECT COUNT(*) FROM measurements WHERE timestamp > NOW() - INTERVAL '1 hour';"
-   ```
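-
-   You can run a similar spot check against the aggregated data that license
-   accounting uses. This is a sketch that assumes the CNPG deployment from the
-   prerequisites; substitute your own pod name and credentials:
-
-   ```bash
-   # Inspect the most recent hourly aggregates (the hourly_usage table
-   # described under "Database sizing and retention")
-   kubectl exec -it metering-postgres-1 -n upbound-system -- \
-     psql -U metering -d metering \
-     -c "SELECT * FROM hourly_usage ORDER BY 1 DESC LIMIT 24;"
-   ```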
-
-### High utilization warnings
-
-If you're approaching your capacity limits:
-
-1. **Review resource usage** by control plane to identify high consumers
-2. **Contact your Upbound sales representative** to discuss capacity expansion
-3. **Optimize managed resources** by cleaning up unused resources
-
-### License validation failures
-
-If your license shows as invalid:
-
-1. **Check expiration date**: `kubectl get spacelicense space -o jsonpath='{.status.expiresAt}'`
-2. **Verify license file integrity**: Ensure the secret contains valid JSON
-3. **Check for cluster UUID restrictions**: Upbound pins some licenses to
-   specific clusters
-4. **Review controller logs** for detailed error messages
-
-## Differences from traditional billing
-
-### Capacity licensing
-
-- ✅ Works in disconnected environments
-- ✅ Provides real-time usage visibility
-- ✅ No manual data export required
-- ❌ Requires PostgreSQL database
-- ✅ Fixed capacity model
-
-### Traditional billing (object storage)
-
-
-- ❌ Requires periodic manual export
-- ❌ Delayed visibility into usage
-- ✅ Works with S3/Azure Blob/GCS
-- ❌ Requires cloud storage access
-- ✅ Pay-as-you-go model
-
-## Best practices
-
-### Database management
-
-1. **Regular backups**: Back up your metering database regularly to preserve usage history
-2. **Monitor database size**: Set appropriate retention periods to manage storage growth
-3. **Use managed databases**: Consider managed PostgreSQL services for production
-4. **Connection pooling**: Use connection pooling for better performance at scale
-
-### License management
-
-1. **Monitor utilization**: Set up alerts before reaching 80% capacity
-2. **Plan renewals early**: Start renewal discussions 60 days before expiration
-3. **Track grace periods**: Note the `gracePeriodEndsAt` date for planning
-4. **Secure license files**: Treat license files as sensitive credentials
-
-### Operational monitoring
-
-1. **Set up dashboards**: Create Grafana dashboards for usage trends
-2. **Enable alerting**: Configure alerts for high utilization and expiration
-3. **Regular audits**: Periodically review usage patterns across control planes
-4. **Capacity planning**: Use historical data to predict future capacity needs
-
-## Next steps
-
-- Learn about [Observability] to monitor your Spaces deployment
-- Explore [Backup and Restore][backup-restore] to protect your control plane data
-- Review [Self-Hosted Space Billing][space-billing] for the traditional billing model
-- Contact [Upbound Sales][sales] to discuss capacity licensing options
-
-
-[space-billing]: /spaces/howtos/self-hosted/billing
-[CloudNativePG]: https://cloudnative-pg.io/
-[backups]: https://cloudnative-pg.io/documentation/current/backup_recovery/
-[backup-restore]: /spaces/howtos/backup-and-restore
-[sales]: https://www.upbound.io/contact
-[eso]: https://external-secrets.io/
-[Observability]: /spaces/howtos/observability
-
-
diff --git a/spaces_versioned_docs/version-v1.15/howtos/self-hosted/certs.md b/spaces_versioned_docs/version-v1.15/howtos/self-hosted/certs.md
deleted file mode 100644
index e517c250e..000000000
--- a/spaces_versioned_docs/version-v1.15/howtos/self-hosted/certs.md
+++ /dev/null
@@ -1,274 +0,0 @@
----
-title: Istio Ingress Gateway With Custom Certificates
-sidebar_position: 20
-description: Install self hosted spaces using istio ingress gateway in a Kind cluster
----
-
-:::important
-Prerequisites
-
-- A Spaces token available in a file
-- `docker login xpkg.upbound.io -u  -p `
-- [`istioctl`][istioctl] installation
-- `jq` installation
-:::
-
-This document describes the installation of a self-hosted Space on an example `kind`
-cluster along with an Istio Ingress Gateway and certificates. The service mesh and
-certificate installation is transferable to self-hosted Spaces in arbitrary clouds.
-
-## Create a kind cluster
-
-```shell
-# A minimal cluster configuration matching the Istio overlay below:
-# an ingress-ready node that exposes host ports 80 and 443.
-cat <<EOF | kind create cluster --config=-
-kind: Cluster
-apiVersion: kind.x-k8s.io/v1alpha4
-nodes:
-- role: control-plane
-  kubeadmConfigPatches:
-  - |
-    kind: InitConfiguration
-    nodeRegistration:
-      kubeletExtraArgs:
-        node-labels: "ingress-ready=true"
-  extraPortMappings:
-  - containerPort: 80
-    hostPort: 80
-    protocol: TCP
-  - containerPort: 443
-    hostPort: 443
-    protocol: TCP
-EOF
-```
-
-## Install Istio
-
-
-:::important
-This is an example and not recommended for use in production.
-:::
-
-
-1. Create the `istio-values.yaml` file
-
-```shell
-cat > istio-values.yaml << 'EOF'
-apiVersion: install.istio.io/v1alpha1
-kind: IstioOperator
-spec:
-  hub: gcr.io/istio-release
-  components:
-    ingressGateways:
-    - enabled: true
-      name: istio-ingressgateway
-      k8s:
-        nodeSelector:
-          ingress-ready: "true"
-        overlays:
-        - apiVersion: apps/v1
-          kind: Deployment
-          name: istio-ingressgateway
-          patches:
-          - path: spec.template.spec.containers.[name:istio-proxy].ports
-            value:
-            - containerPort: 8080
-              hostPort: 80
-            - containerPort: 8443
-              hostPort: 443
-EOF
-```
-
-2. Install istio via `istioctl`
-
-```shell
-istioctl install -f istio-values.yaml
-```
-
-## Create a self-signed Certificate via cert-manager
-
-:::important
-This Certificate manifest creates a self-signed certificate for a proof of concept
-environment and isn't recommended for production use cases.
-:::
-
-1.
-1. Create the upbound-system namespace
-
-```shell
-kubectl create namespace upbound-system
-```
-
-2. Create a self-signed certificate
-
-```shell
-# Assumed example: a self-signed Issuer and a Certificate that produce the
-# `example-tls-secret` referenced later in this guide (adjust names and hosts).
-cat <<EOF | kubectl apply -f -
-apiVersion: cert-manager.io/v1
-kind: Issuer
-metadata:
-  name: example-selfsigned
-  namespace: upbound-system
-spec:
-  selfSigned: {}
----
-apiVersion: cert-manager.io/v1
-kind: Certificate
-metadata:
-  name: example-tls
-  namespace: upbound-system
-spec:
-  secretName: example-tls-secret
-  commonName: proxy.upbound-127.0.0.1.nip.io
-  dnsNames:
-  - proxy.upbound-127.0.0.1.nip.io
-  issuerRef:
-    name: example-selfsigned
-    kind: Issuer
-EOF
-```
-
-## Create an Istio Gateway and VirtualService
-
-Configure an Istio Gateway and VirtualService to use TLS passthrough.
-
-```shell
-# Assumed example: pass TLS through the ingress gateway to the spaces-router
-# service (the destination port may differ in your installation).
-cat <<EOF | kubectl apply -f -
-apiVersion: networking.istio.io/v1beta1
-kind: Gateway
-metadata:
-  name: spaces-gateway
-  namespace: istio-system
-spec:
-  selector:
-    istio: ingressgateway
-  servers:
-  - port:
-      number: 443
-      name: tls
-      protocol: TLS
-    tls:
-      mode: PASSTHROUGH
-    hosts:
-    - proxy.upbound-127.0.0.1.nip.io
----
-apiVersion: networking.istio.io/v1beta1
-kind: VirtualService
-metadata:
-  name: spaces-router
-  namespace: upbound-system
-spec:
-  hosts:
-  - proxy.upbound-127.0.0.1.nip.io
-  gateways:
-  - istio-system/spaces-gateway
-  tls:
-  - match:
-    - port: 443
-      sniHosts:
-      - proxy.upbound-127.0.0.1.nip.io
-    route:
-    - destination:
-        host: spaces-router
-        port:
-          number: 8443
-EOF
-```
-
-## Install Spaces
-
-1. Create the `spaces-values.yaml` file
-
-```shell
-cat > spaces-values.yaml << 'EOF'
-# Configure spaces-router to use the TLS secret created by cert-manager.
-externalTLS:
-  tlsSecret:
-    name: example-tls-secret
-  caBundleSecret:
-    name: example-tls-secret
-    key: ca.crt
-ingress:
-  provision: false
-  # Allow Istio Ingress Gateway to communicate to the spaces-router
-  namespaceLabels:
-    kubernetes.io/metadata.name: istio-system
-  podLabels:
-    app: istio-ingressgateway
-    istio: ingressgateway
-EOF
-```
-
-2. Set the required environment variables
-
-```shell
-# Update these according to your account/token file
-export SPACES_TOKEN_PATH=<path-to-token-file>
-export UPBOUND_ACCOUNT=<your-account>
-# Replace SPACES_ROUTER_HOST with your Spaces ingress hostname
-export SPACES_ROUTER_HOST="proxy.upbound-127.0.0.1.nip.io"
-export SPACES_VERSION="1.14.1"
-```
-
-3. Create an image pull secret for Spaces
-
-```shell
-kubectl -n upbound-system create secret docker-registry upbound-pull-secret \
-  --docker-server=https://xpkg.upbound.io \
-  --docker-username="$(jq -r .accessId $SPACES_TOKEN_PATH)" \
-  --docker-password="$(jq -r .token $SPACES_TOKEN_PATH)"
-```
-
-4. Install the Spaces helm chart
-
-```shell
-# Login to xpkg.upbound.io
-jq -r .token $SPACES_TOKEN_PATH | helm registry login xpkg.upbound.io -u $(jq -r .accessId $SPACES_TOKEN_PATH) --password-stdin
-
-# Install spaces helm chart
-helm -n upbound-system upgrade --install spaces \
-  oci://xpkg.upbound.io/spaces-artifacts/spaces \
-  --version "${SPACES_VERSION}" \
-  --set "ingress.host=${SPACES_ROUTER_HOST}" \
-  --set "account=${UPBOUND_ACCOUNT}" \
-  --set "authentication.hubIdentities=true" \
-  --set "authorization.hubRBAC=true" \
-  --wait -f spaces-values.yaml
-```
-
-## Validate the installation
-
-Successful access from the `up` CLI to your self-hosted Space validates the
-certificate installation.
-
-- `up ctx .`
-
-You can also issue control plane creation, list, and deletion commands.
-
-- `up ctp create cert-test`
-- `up ctp list`
-- `up ctx disconnected/kind-kind/default/cert-test && kubectl get namespace`
-- `up ctp delete cert-test`
-
-:::note
-If `up` can't connect to your control plane, follow [this guide to create a new profile][up-profile].
-:::
-
-## Troubleshooting
-
-Examine your certificate with `openssl`:
-
-```shell
-openssl s_client -connect proxy.upbound-127.0.0.1.nip.io:443 -showcerts
-```
-
-[istioctl]: https://istio.io/latest/docs/ops/diagnostic-tools/istioctl/
-[up-profile]: /manuals/cli/howtos/profile-config/
diff --git a/spaces_versioned_docs/version-v1.15/howtos/self-hosted/configure-ha.md b/spaces_versioned_docs/version-v1.15/howtos/self-hosted/configure-ha.md
deleted file mode 100644
index ddf36c55e..000000000
--- a/spaces_versioned_docs/version-v1.15/howtos/self-hosted/configure-ha.md
+++ /dev/null
@@ -1,450 +0,0 @@
----
-title: Production Scaling and High Availability
-description: Configure your Self-Hosted Space for production
-sidebar_position: 5
----
-
-This guide explains how to configure an existing Upbound Space deployment for
-production operation at scale.
-
-Use this guide when you're ready to deploy production scaling, high availability,
-and monitoring in your Space.
-
-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments.
-
-For API specifications on ControlPlane resources and configurations, see the [Spaces API Reference](../../../../../reference/apis/spaces-api/).
-:::
-
-## Prerequisites
-
-Before you begin scaling your Spaces deployment, make sure you have:
-
-* A working Space deployment
-* Cluster administrator access
-* An understanding of load patterns and growth in your organization
-* A familiarity with node affinity, tainting, and Horizontal Pod Autoscaling
-  (HPA)
-
-## Production scaling strategy
-
-In this guide, you will:
-
-* Create dedicated node pools for different component types
-* Configure high availability to ensure there are no single points of failure
-* Set dynamic scaling for variable workloads
-* Optimize your storage and component operations
-* Monitor your deployment health and performance
-
-## Spaces architecture
-
-The basic Spaces workflow follows the pattern below:
-
-![Spaces workflow][spaces-workflow]
-
-## Node architecture
-
-You can mitigate resource contention and improve reliability by separating system
-components into dedicated node pools.
-
-### `etcd` dedicated nodes
-
-`etcd` performance directly impacts your entire Space, so isolate it for
-consistent performance.
-
-1. Create a dedicated `etcd` node pool
-
-   **Requirements:**
-   - **Minimum**: 3 nodes for HA
-   - **Instance type**: General purpose with high network throughput/low latency
-   - **Storage**: High performance storage (`etcd` is I/O sensitive)
-
-2. Taint `etcd` nodes to reserve them
-
-   ```bash
-   kubectl taint nodes <node-name> target=etcd:NoSchedule
-   ```
-
-3. Configure `etcd` storage
-
-   `etcd` is sensitive to storage I/O performance. Review the [`etcd` scaling
-   documentation][scaling] for specific storage guidance.
-
-### API server dedicated nodes
-
-API servers handle all control plane requests and should run on dedicated
-infrastructure.
-
-1. Create dedicated API server nodes
-
-   **Requirements:**
-   - **Minimum**: 2 nodes for HA
-   - **Instance type**: Compute-optimized, memory-optimized, or general-purpose
-   - **Scaling**: Scale vertically based on API server load patterns
-
-2. Taint API server nodes
-
-   ```bash
-   kubectl taint nodes <node-name> target=apiserver:NoSchedule
-   ```
-
-### Configure cluster autoscaling
-
-Enable cluster autoscaling for all node pools.
-
-For AWS EKS clusters, Upbound recommends using [`Karpenter`][karpenter] for
-improved bin-packing and instance type selection.
-
-For GCP GKE clusters, follow the [GKE autoscaling][gke-autoscaling] guide.
-
-For Azure AKS clusters, follow the [AKS autoscaling][aks-autoscaling] guide.
-
-## Configure high availability
-
-Ensure control plane components can survive node and zone failures.
-
-### Enable high availability mode
-
-1. Configure control planes for high availability
-
-   ```yaml
-   controlPlanes:
-     ha:
-       enabled: true
-   ```
-
-   This configures control plane pods to run with multiple replicas and
-   associated pod disruption budgets.
-
-### Configure component distribution
-1. Set up API server pod distribution
-
-   ```yaml
-   controlPlanes:
-     vcluster:
-       affinity:
-         nodeAffinity:
-           requiredDuringSchedulingIgnoredDuringExecution:
-             nodeSelectorTerms:
-             - matchExpressions:
-               - key: target
-                 operator: In
-                 values:
-                 - apiserver
-         podAntiAffinity:
-           requiredDuringSchedulingIgnoredDuringExecution:
-           - labelSelector:
-               matchExpressions:
-               - key: app
-                 operator: In
-                 values:
-                 - vcluster
-             topologyKey: "kubernetes.io/hostname"
-           preferredDuringSchedulingIgnoredDuringExecution:
-           - podAffinityTerm:
-               labelSelector:
-                 matchExpressions:
-                 - key: app
-                   operator: In
-                   values:
-                   - vcluster
-               topologyKey: topology.kubernetes.io/zone
-             weight: 100
-   ```
-
-2. Configure `etcd` pod distribution
-
-   ```yaml
-   controlPlanes:
-     etcd:
-       affinity:
-         nodeAffinity:
-           requiredDuringSchedulingIgnoredDuringExecution:
-             nodeSelectorTerms:
-             - matchExpressions:
-               - key: target
-                 operator: In
-                 values:
-                 - etcd
-         podAntiAffinity:
-           requiredDuringSchedulingIgnoredDuringExecution:
-           - labelSelector:
-               matchExpressions:
-               - key: app
-                 operator: In
-                 values:
-                 - vcluster-etcd
-             topologyKey: "kubernetes.io/hostname"
-           preferredDuringSchedulingIgnoredDuringExecution:
-           - podAffinityTerm:
-               labelSelector:
-                 matchExpressions:
-                 - key: app
-                   operator: In
-                   values:
-                   - vcluster-etcd
-               topologyKey: topology.kubernetes.io/zone
-             weight: 100
-   ```
-
-### Configure tolerations
-
-Allow control plane pods to schedule on the tainted dedicated nodes (available
-in Spaces v1.14+).
-
-1. Add tolerations for `etcd` pods
-
-   ```yaml
-   controlPlanes:
-     etcd:
-       tolerations:
-       - key: "target"
-         operator: "Equal"
-         value: "etcd"
-         effect: "NoSchedule"
-   ```
-
-2. Add tolerations for API server pods
-
-   ```yaml
-   controlPlanes:
-     vcluster:
-       tolerations:
-       - key: "target"
-         operator: "Equal"
-         value: "apiserver"
-         effect: "NoSchedule"
-   ```
-
-## Configure autoscaling for Spaces components
-
-Set up the Spaces system components to handle variable load automatically.
-
-### Scale API and `apollo` services
-
-1. Configure minimum replicas for availability
-
-   ```yaml
-   api:
-     replicaCount: 2
-
-   features:
-     alpha:
-       apollo:
-         enabled: true
-         replicaCount: 2
-   ```
-
-   Both services support horizontal and vertical scaling based on load patterns.
-
-### Configure router autoscaling
-
-The `spaces-router` is the entry point for all traffic and needs intelligent
-scaling.
-
-1. Enable Horizontal Pod Autoscaler
-
-   ```yaml
-   router:
-     hpa:
-       enabled: true
-       minReplicas: 2
-       maxReplicas: 8
-       targetCPUUtilizationPercentage: 80
-       targetMemoryUtilizationPercentage: 80
-   ```
-
-2. Monitor scaling factors
-
-   **Router scaling behavior:**
-   - **Vertical scaling**: Scales based on number of control planes
-   - **Horizontal scaling**: Scales based on request volume
-   - **Resource monitoring**: Monitor CPU and memory usage
-
-### Configure controller scaling
-
-The `spaces-controller` manages Space-level resources and requires vertical
-scaling.
-
-1. Configure adequate resources with headroom
-
-   ```yaml
-   controller:
-     resources:
-       requests:
-         cpu: "500m"
-         memory: "1Gi"
-       limits:
-         cpu: "2000m"
-         memory: "4Gi"
-   ```
-
-   **Important**: The controller can spike when reconciling large numbers of
-   control planes, so provide adequate headroom for resource spikes.
-
-## Set up production storage
-
-### Configure Query API database
-1. Use a managed PostgreSQL database
-
-   **Recommended services:**
-   - [AWS RDS][rds]
-   - [Google Cloud SQL][gke-sql]
-   - [Azure Database for PostgreSQL][aks-sql]
-
-   **Requirements:**
-   - Minimum 400 IOPS performance
-
-## Monitoring
-
-Monitor key metrics to ensure healthy scaling and identify issues quickly.
-
-### Control plane health
-
-Track these `spaces-controller` metrics:
-
-1. **Total control planes**
-
-   ```
-   spaces_control_plane_exists
-   ```
-
-   Tracks the total number of control planes in the system.
-
-2. **Degraded control planes**
-
-   ```
-   spaces_control_plane_degraded
-   ```
-
-   Returns control planes that don't have a `Synced`, `Ready`, and
-   `Healthy` state.
-
-3. **Stuck control planes**
-
-   ```
-   spaces_control_plane_stuck
-   ```
-
-   Control planes stuck in a provisioning state.
-
-4. **Deletion issues**
-
-   ```
-   spaces_control_plane_deletion_stuck
-   ```
-
-   Control planes stuck during deletion.
-
-### Alerting
-
-Configure alerts for critical scaling and health metrics:
-
-- **High error rates**: Alert when 4xx/5xx response rates exceed thresholds
-- **Control plane health**: Alert when degraded or stuck control planes exceed acceptable counts
-
-## Architecture overview
-
-**Spaces System Components:**
-
-- **`spaces-router`**: Entry point for all endpoints, dynamically builds routes to control plane API servers
-- **`spaces-controller`**: Reconciles Space-level resources, serves webhooks, works with `mxp-controller` for provisioning
-- **`spaces-api`**: API for managing groups, control planes, shared secrets, and telemetry objects (accessed only through spaces-router)
-- **`spaces-apollo`**: Hosts the Query API, connects to PostgreSQL database populated by `apollo-syncer` pods
-
-**Control Plane Components (per control plane):**
-
-- **`mxp-controller`**: Handles provisioning tasks, serves webhooks, installs UXP and `XGQL`
-- **`XGQL`**: GraphQL API powering console views
-- **`kube-state-metrics`**: Collects usage metrics for billing (updated by `mxp-controller` when CRDs change)
-- **`vector`**: Works with `kube-state-metrics` to send usage data to external storage for billing
-- **`apollo syncer`**: Syncs `etcd` data into PostgreSQL for the Query API
-
-### `up ctx` workflow
-
-![up ctx workflow diagram][up-ctx-workflow]
-
-### Access a control plane API server via kubectl
-
-![kubectl workflow diagram][kubectl]
-
-### Query API/Apollo
-
-![query API workflow diagram][query-api]
-
-## See also
-
-* [Upbound Spaces deployment requirements][deployment]
-* [Upbound `etcd` scaling resources][scaling]
-
-[up-ctx-workflow]: /img/up-ctx-workflow.png
-[kubectl]: /img/kubectl-workflow.png
-[query-api]: /img/query-api-workflow.png
-[spaces-workflow]: /img/up-basic-flow.png
-[rds]: https://aws.amazon.com/rds/postgresql/
-[gke-sql]: https://cloud.google.com/kubernetes-engine/docs/tutorials/stateful-workloads/postgresql
-[aks-sql]: https://learn.microsoft.com/en-us/azure/aks/deploy-postgresql-ha?tabs=azuredisk
-[deployment]: https://docs.upbound.io/spaces/howtos/self-hosted/deployment-reqs/
-[karpenter]: https://docs.aws.amazon.com/eks/latest/best-practices/karpenter.html
-[gke-autoscaling]: https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-autoscaler
-[aks-autoscaling]: https://learn.microsoft.com/en-us/azure/aks/cluster-autoscaler-overview
-[scaling]: https://docs.upbound.io/deploy/self-hosted-spaces/scaling-resources#scaling-etcd-storage
diff --git a/spaces_versioned_docs/version-v1.15/howtos/self-hosted/controllers.md b/spaces_versioned_docs/version-v1.15/howtos/self-hosted/controllers.md
deleted file mode 100644
index 692740638..000000000
--- a/spaces_versioned_docs/version-v1.15/howtos/self-hosted/controllers.md
+++ /dev/null
@@ -1,389 +0,0 @@
----
-title: Controllers
-weight: 250
-description: A guide to how to wrap and deploy an Upbound controller into control planes on Upbound.
----
-
-:::important
-This feature is in private preview for select customers in Upbound Spaces. If you're interested in this feature, please [contact us](https://www.upbound.io/contact-us).
-:::
-
-Upbound's _Controllers_ feature lets you build and deploy control plane software from the Kubernetes ecosystem. With the _Controllers_ feature, you're not limited to managing only the resource types Crossplane defines. You can create resources from _CustomResourceDefinitions_ defined by other Kubernetes ecosystem tooling.
-
-This guide explains how to bundle and deploy control plane software from the Kubernetes ecosystem on a control plane in Upbound.
-
-## Benefits
-
-The Controllers feature provides the following benefits:
-
-* Deploy control plane software from the Kubernetes ecosystem.
-* Use your control plane's package manager to handle the lifecycle of the control plane software and define dependencies between packages.
-* Build powerful compositions that combine both Crossplane and Kubernetes _CustomResources_.
-
-## How it works
-
-A _Controller_ is a package type that bundles control plane software from the Kubernetes ecosystem. Examples of such software include:
-
-- Kubernetes policy engines
-- CI/CD tooling
-- Your own private custom controllers defined by your organization
-
-You build a _Controller_ package by wrapping a Helm chart along with its requisite _CustomResourceDefinitions_. Your _Controller_ package gets pushed to an OCI registry, and from there you can apply it to a control plane like you would any other Crossplane package. Your control plane's package manager is responsible for managing the lifecycle of the software once applied.
-
-## Prerequisites
-
-Enable the Controllers feature in the Space you plan to run your control plane in:
-
-- Cloud Spaces: Not available yet
-- Connected Spaces: Space administrator must enable this feature
-- Disconnected Spaces: Space administrator must enable this feature
-
-Packaging a _Controller_ requires [up CLI][cli] `v0.39.0` or later.
-
-## Build a _Controller_ package
-
-_Controllers_ are a package type that your control plane's package manager administers.
-
-### Prepare the package
-
-To define a _Controller_, you need a Helm chart. This guide assumes the control plane software you want to build into a _Controller_ already has a Helm chart available.
-
-Start by making a working directory to assemble the necessary parts:
-
-```ini
-mkdir controller-package
-cd controller-package
-```
-
-Inside the working directory, pull the Helm chart:
-
-```shell
-export CHART_REPOSITORY=<chart-repository-url>
-export CHART_NAME=<chart-name>
-export CHART_VERSION=<chart-version>
-
-helm pull $CHART_NAME --repo $CHART_REPOSITORY --version $CHART_VERSION
-```
-
-Be sure to update the Helm chart repository, name, and version with your own.
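-
-For example, to package Argo CD (the example used later in this guide), the
-variables might look like the following. The repository URL and chart version
-here are assumptions; check the chart's documentation for current values:
-
-```shell
-export CHART_REPOSITORY=https://argoproj.github.io/argo-helm
-export CHART_NAME=argo-cd
-export CHART_VERSION=7.8.8
-
-# Downloads argo-cd-7.8.8.tgz into the working directory
-helm pull $CHART_NAME --repo $CHART_REPOSITORY --version $CHART_VERSION
-```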
-
-Move the Helm chart into its own folder:
-
-```ini
-mkdir helm
-mv $CHART_NAME-$CHART_VERSION.tgz helm/chart.tgz
-```
-
-Unpack the CRDs from the Helm chart into their own directory:
-
-```shell
-export RELEASE_NAME=<release-name>
-export RELEASE_NAMESPACE=<release-namespace>
-
-mkdir crds
-helm template $RELEASE_NAME helm/chart.tgz -n $RELEASE_NAMESPACE --include-crds | \
-  yq e 'select(.kind == "CustomResourceDefinition")' - | \
-  yq -s '("crds/" + .metadata.name + ".yaml")' -
-```
-
-Be sure to update the Helm release name and namespace with your own.
-
-:::info
-The instructions above assume your CRDs get deployed as part of your Helm chart. If they're deployed another way, you need to manually copy your CRDs instead.
-:::
-
-Create a `crossplane.yaml` with your controller metadata:
-
-```yaml
-cat <<EOF > crossplane.yaml
-apiVersion: meta.pkg.upbound.io/v1alpha1
-kind: Controller
-metadata:
-  annotations:
-    friendly-name.meta.crossplane.io: Controller
-    meta.crossplane.io/description: |
-      A brief description of what the controller does.
-    meta.crossplane.io/license: Apache-2.0
-    meta.crossplane.io/maintainer: <maintainer>
-    meta.crossplane.io/readme: |
-      An explanation of your controller.
-    meta.crossplane.io/source: <source-repository>
-  name: <controller-name>
-spec:
-  packagingType: Helm
-  helm:
-    releaseName: <release-name>
-    releaseNamespace: <release-namespace>
-    # Value overrides for the helm release can be provided below.
-    # values:
-    #   foo: bar
-EOF
-```
-
-Your controller's file structure should look like this:
-
-```ini
-.
-├── crds
-│   ├── your-crd.yaml
-│   ├── second-crd.yaml
-│   └── another-crd.yaml
-├── crossplane.yaml
-└── helm
-    └── chart.tgz
-```
-
-### Package and push the _Controller_
-
-At the root of your controller's working directory, build the contents into an xpkg:
-
-```ini
-up xpkg build
-```
-
-This causes an xpkg to get saved to your current directory with a name like `controller-f7091386b4c0.xpkg`.
-
-Push the package to your desired OCI registry:
-
-```shell
-export UPBOUND_ACCOUNT=<account>
-export CONTROLLER_NAME=<controller-name>
-export CONTROLLER_VERSION=<version>
-export XPKG_FILENAME=<xpkg-filename>
-
-up xpkg push xpkg.upbound.io/$UPBOUND_ACCOUNT/$CONTROLLER_NAME:$CONTROLLER_VERSION -f $XPKG_FILENAME
-```
-
-## Deploy a _Controller_ package
-
-:::important
-_Controllers_ are only installable on control planes running Crossplane `v1.19.0` or later.
-:::
-
-Set your kubecontext to the desired control plane in Upbound. Change the package path to the OCI registry you pushed it to. Then, deploy the _Controller_ directly:
-
-```shell
-export CONTROLLER_NAME=<controller-name>
-export CONTROLLER_VERSION=<version>
-
-# Assumed shape of the in-cluster Controller package manifest; confirm the
-# API group and version against your Spaces release.
-cat <<EOF | kubectl apply -f -
-apiVersion: pkg.upbound.io/v1alpha1
-kind: Controller
-metadata:
-  name: ${CONTROLLER_NAME}
-spec:
-  package: xpkg.upbound.io/<account>/${CONTROLLER_NAME}:${CONTROLLER_VERSION}
-EOF
-```
-
-## Example: controller-argocd
-
-The following example walks through the preceding steps with the Argo CD Helm chart. Create a `crossplane.yaml` with the controller metadata:
-
-```yaml
-cat <<EOF > crossplane.yaml
-apiVersion: meta.pkg.upbound.io/v1alpha1
-kind: Controller
-metadata:
-  annotations:
-    friendly-name.meta.crossplane.io: Controller ArgoCD
-    meta.crossplane.io/description: |
-      The ArgoCD Controller enables continuous delivery and declarative configuration
-      management for Kubernetes applications using GitOps principles.
-    meta.crossplane.io/license: Apache-2.0
-    meta.crossplane.io/maintainer: Upbound Maintainers
-    meta.crossplane.io/readme: |
-      ArgoCD is a declarative GitOps continuous delivery tool for Kubernetes that
-      follows the GitOps methodology to manage infrastructure and application
-      configurations.
-    meta.crossplane.io/source: https://github.com/argoproj/argo-cd
-  name: argocd
-spec:
-  packagingType: Helm
-  helm:
-    releaseName: argo-cd
-    releaseNamespace: argo-system
-    # values:
-    #   foo: bar
-EOF
-```
-
-Your controller's file structure should look like this:
-
-```ini
-.
-├── crds
-│   ├── applications.argoproj.io.yaml
-│   ├── applicationsets.argoproj.io.yaml
-│   └── appprojects.argoproj.io.yaml
-├── crossplane.yaml
-└── helm
-    └── chart.tgz
-```
-
-### Package and push controller-argocd
-
-At the root of your controller's working directory, build the contents into an xpkg:
-
-```ini
-up xpkg build
-```
-
-This causes an xpkg to get saved to your current directory with a name like `argocd-f7091386b4c0.xpkg`.
-
-Push the package to your desired OCI registry:
-
-```shell
-export UPBOUND_ACCOUNT=<account>
-export CONTROLLER_NAME=controller-argocd
-export CONTROLLER_VERSION=v7.8.8
-export XPKG_FILENAME=<xpkg-filename>
-
-up xpkg push --create xpkg.upbound.io/$UPBOUND_ACCOUNT/$CONTROLLER_NAME:$CONTROLLER_VERSION -f $XPKG_FILENAME
-```
-
-### Deploy controller-argocd to a control plane
-
-Set your kubecontext to the desired control plane in Upbound. Change the package path to the OCI registry you pushed it to. Then, deploy the _Controller_ directly:
-
-```ini
-# Assumed manifest shape; confirm the API group and version against your
-# Spaces release.
-cat <<EOF | kubectl apply -f -
-apiVersion: pkg.upbound.io/v1alpha1
-kind: Controller
-metadata:
-  name: controller-argocd
-spec:
-  package: xpkg.upbound.io/$UPBOUND_ACCOUNT/controller-argocd:v7.8.8
-EOF
-```
-
-## Frequently asked questions
-
-<details>
-<summary>Can I package any software or are there any prerequisites to be a Controller?</summary>
-
-We define a *Controller* as software that has at least one Custom Resource Definition (CRD) and a Kubernetes controller for that CRD. This is the minimum requirement to be a *Controller*. We have some checks to enforce this at packaging time.
-
-</details>
-
-<details>
-<summary>How can I package my software as a Controller?</summary>
-
-Currently, we support Helm charts as the underlying package format for *Controllers*. As long as you have a Helm chart, you can package it as a *Controller*.
-
-If you don't have a Helm chart, you can't deploy the software. We may extend this to support other packaging formats like Kustomize in the future.
-
-</details>
-
-<details>
-<summary>Can I package Crossplane XRDs/Compositions as a Helm chart to deploy as a Controller?</summary>
-
-This is not recommended. For packaging Crossplane XRDs and Compositions, we recommend using the `Configuration` package format. A Helm chart containing only Crossplane XRDs and Compositions doesn't qualify as a *Controller*.
-
-</details>
-
-<details>
-<summary>How can I override the Helm values when deploying a Controller?</summary>
-
-Overriding the Helm values is possible at two levels:
-- During packaging time, in the package manifest file.
-- At runtime, using a `ControllerRuntimeConfig` resource (similar to Crossplane `DeploymentRuntimeConfig`).
-
-</details>
-
-<details>
-<summary>How can I configure the helm release name and namespace for the controller?</summary>
-
-Right now, it's not possible to configure this at runtime. The package author configures the release name and namespace during packaging, so they're hardcoded inside the package. Unlike a regular application deployed by a Helm chart, *Controllers* can only be deployed once in a given control plane, so relying on predefined release names and namespaces should be acceptable. We may consider exposing these in `ControllerRuntimeConfig` later, but we would like to keep it opinionated unless there are strong reasons to do so.
-
-</details>
-
-<details>
-<summary>Can I deploy more than one instance of a Controller package?</summary>
-
-No, this is not possible. Remember, a *Controller* package introduces CRDs, which are cluster-scoped objects. Just like one can't deploy more than one instance of the same Crossplane Provider package today, it's not possible to deploy more than one instance of a *Controller*.
-
-</details>
-
-<details>
-<summary>Do I need a specific Crossplane version to run Controllers?</summary>
-
-Yes, you need to use Crossplane v1.19.0 or later to use *Controllers*. This is because of the changes in the Crossplane codebase to support third-party package formats in dependencies.
-
-Spaces `v1.12.0` supports Crossplane `v1.19` in the *Rapid* release channel.
-
-</details>
-
-<details>
-<summary>Can I deploy Controllers outside of an Upbound control plane? With UXP?</summary>
-
-No, *Controllers* are a proprietary package format and are only available for control planes running in Spaces hosting environments in Upbound.
-
-</details>
-
-
-[cli]: /manuals/uxp/overview
-
diff --git a/spaces_versioned_docs/version-v1.15/howtos/self-hosted/ctp-audit-logs.md b/spaces_versioned_docs/version-v1.15/howtos/self-hosted/ctp-audit-logs.md
deleted file mode 100644
index 52f52c776..000000000
--- a/spaces_versioned_docs/version-v1.15/howtos/self-hosted/ctp-audit-logs.md
+++ /dev/null
@@ -1,549 +0,0 @@
----
-title: Control plane audit logging
----
-
-This guide explains how to enable and configure audit logging for control planes
-in Self-Hosted Upbound Spaces.
-
-Starting in Spaces `v1.14.0`, each control plane contains an API server that
-supports audit log collection. You can use audit logging to track creation,
-updates, and deletions of Crossplane resources. Control plane audit logs
-use observability features to collect audit logs with `SharedTelemetryConfig` and
-send logs to an OpenTelemetry (`OTEL`) collector.
-
-:::info API Version Information
-This guide applies to **Spaces v1.14.0 and later** (Self-Hosted only). Audit logging is not available in earlier versions.
-
-For API specifications on observability resources, see the [Spaces API Reference - v1.14](../../../../../1.14/reference/apis/spaces-api/).
-:::
-
-## Prerequisites
-
-Before you begin, make sure you have:
-
-* Spaces `v1.14.0` or greater
-* Admin access to your Spaces host cluster
-* `kubectl` configured to access the host cluster
-* `helm` installed
-* `yq` installed
-* `up` CLI installed and logged in to your organization
-
-## Enable observability
-
-Observability graduated to Generally Available in `v1.14.0` but is disabled by
-default.
-
-### Before `v1.14`
-
-To enable the GA Observability feature, upgrade your Spaces installation to `v1.14.0`
-or later and update your installation setting to the new flag:
-
-```diff
-helm upgrade spaces upbound/spaces -n upbound-system \
-- --set "features.alpha.observability.enabled=true"
-+ --set "observability.enabled=true"
-```
-
-### After `v1.14`
-
-To enable the GA Observability feature for `v1.14.0` and later, pass the feature
-flag:
-
-```sh
-helm upgrade spaces upbound/spaces -n upbound-system \
-  --set "observability.enabled=true"
-```
-
-To confirm Observability is enabled, run the `helm get values` command:
-
-```shell
-helm get values --namespace upbound-system spaces | yq .observability
-```
-
-Your output should return:
-
-```shell-noCopy
-enabled: true
-```
-
-## Install an observability backend
-
-:::note
-If you already have an observability backend in your environment, skip to the
-next section.
-:::
-
-For this guide, you'll use Grafana's `docker-otel-lgtm` bundle to validate audit log
-generation. For production environments, configure a dedicated observability
-backend like Datadog, Splunk, or an enterprise-grade Grafana stack.
-
-First, make sure your `kubectl` context points to your Spaces host cluster:
-
-```shell
-kubectl config current-context
-```
-
-The output should return your cluster name.
-
-Next, install `docker-otel-lgtm` as a deployment using port-forwarding to
-connect to Grafana.
-Create a manifest file and paste the following configuration:
-
-```yaml title="otel-lgtm.yaml"
-apiVersion: v1
-kind: Namespace
-metadata:
-  name: observability
----
-apiVersion: v1
-kind: Service
-metadata:
-  labels:
-    app: otel-lgtm
-  name: otel-lgtm
-  namespace: observability
-spec:
-  ports:
-  - name: grpc
-    port: 4317
-    protocol: TCP
-    targetPort: 4317
-  - name: http
-    port: 4318
-    protocol: TCP
-    targetPort: 4318
-  - name: grafana
-    port: 3000
-    protocol: TCP
-    targetPort: 3000
-  selector:
-    app: otel-lgtm
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: otel-lgtm
-  labels:
-    app: otel-lgtm
-  namespace: observability
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: otel-lgtm
-  template:
-    metadata:
-      labels:
-        app: otel-lgtm
-    spec:
-      containers:
-      - name: otel-lgtm
-        image: grafana/otel-lgtm
-        ports:
-        - containerPort: 4317
-        - containerPort: 4318
-        - containerPort: 3000
-```
-
-Next, apply the manifest:
-
-```shell
-kubectl apply --filename otel-lgtm.yaml
-```
-
-Your output should return the resources:
-
-```shell
-namespace/observability created
-service/otel-lgtm created
-deployment.apps/otel-lgtm created
-```
-
-To verify your resources deployed, use `kubectl get` to display resources with
-an `ACTIVE` or `READY` status.
-
-Next, forward the Grafana port:
-
-```shell
-kubectl port-forward svc/otel-lgtm --namespace observability 3000:3000
-```
-
-Now you can access the Grafana UI at http://localhost:3000.
-
-## Create an audit-enabled control plane
-
-To enable audit logging for a control plane, label it so the
-`SharedTelemetryConfig` can identify it and apply audit settings. This section
-creates a new control plane with the `audit-enabled: "true"` label. The
-`SharedTelemetryConfig` (created in the next section) finds control planes with
-this label and enables audit logging on them.
-
-Create a new manifest file and paste the configuration below:
-
-```yaml title="ctp-audit.yaml" -apiVersion: v1 -kind: Namespace -metadata: - name: audit-test ---- -apiVersion: spaces.upbound.io/v1beta1 -kind: ControlPlane -metadata: - labels: - audit-enabled: "true" - name: ctp1 - namespace: audit-test -spec: - writeConnectionSecretToRef: - name: kubeconfig-ctp1 - namespace: audit-test -``` -
-
-The `metadata.labels` section contains the `audit-enabled` setting.
-
-Apply the manifest:
-
-```shell
-kubectl apply --filename ctp-audit.yaml
-```
-
-Confirm your control plane reaches the `READY` status:
-
-```shell
-kubectl get --filename ctp-audit.yaml
-```
-
-## Create a `SharedTelemetryConfig`
-
-The `SharedTelemetryConfig` applies to all control plane objects in a namespace,
-enables audit logging, and routes logs to your `OTEL` endpoint.
-
-Create a `SharedTelemetryConfig` manifest file and paste the configuration
-below:
-
-```yaml title="sharedtelemetryconfig.yaml" -apiVersion: observability.spaces.upbound.io/v1alpha1 -kind: SharedTelemetryConfig -metadata: - name: apiserver-audit - namespace: audit-test -spec: - apiServer: - audit: - enabled: true - exporters: - otlphttp: - endpoint: http://otel-lgtm.observability:4318 - exportPipeline: - logs: [otlphttp] - controlPlaneSelector: - labelSelectors: - - matchLabels: - audit-enabled: "true" -``` -
-
-This configuration:
-
-* Sets `apiServer.audit.enabled` to `true`
-* Configures the `otlphttp` exporter to point to the `docker-otel-lgtm` service
-* Uses `controlPlaneSelector` to match any control plane in the namespace with the `audit-enabled` label set to `true`
-
-:::note
-You can configure the `SharedTelemetryConfig` to select control planes in
-several ways. For more information on control plane selection, see the [control
-plane selection][ctp-selection] documentation.
-:::
-
-Apply the `SharedTelemetryConfig`:
-
-```shell
-kubectl apply --filename sharedtelemetryconfig.yaml
-```
-
-Confirm the configuration selected the control plane:
-
-```shell
-kubectl get --filename sharedtelemetryconfig.yaml
-```
-
-The output should return `SELECTED` as `1` and `VALIDATED` as `TRUE`.
-
-For more detailed status information, use `kubectl get`:
-
-```shell
-kubectl get --filename sharedtelemetryconfig.yaml --output yaml | yq .status
-```
-
-## Generate and monitor audit events
-
-You enabled telemetry on your new control plane and can now generate events to
-test the audit logging. This guide uses the `nop-provider` to simulate resource
-operations.
-
-Switch your `up` context to the new control plane:
-
-```shell
-up ctx <profile>/<space>/<group>/<control-plane>
-```
-
-Create a new Provider manifest:
-
-```yaml title="provider-nop.yaml"
-apiVersion: pkg.crossplane.io/v1
-kind: Provider
-metadata:
-  name: crossplane-contrib-provider-nop
-spec:
-  package: xpkg.upbound.io/crossplane-contrib/provider-nop:v0.4.0
-```
-
-Apply the provider manifest:
-
-```shell
-kubectl apply --filename provider-nop.yaml
-```
-
-Verify the provider installed and returns a `HEALTHY` status of `TRUE`.
-
-Apply an example resource to kick off event generation:
-
-```shell
-kubectl apply --filename https://raw.githubusercontent.com/crossplane-contrib/provider-nop/refs/heads/main/examples/nopresource.yaml
-```
-
-In your Grafana dashboard, navigate to **Drilldown** > **Logs** under the
-Grafana menu.
-
-Filter for `controlplane-audit` log messages.
-
-Create a query to find `create` events on `nopresources` by filtering:
-
-* The `verb` field for `create` events
-* The `objectRef_resource` field to match the Kind `nopresources`
-
-Review the audit log results. The log stream displays:
-
-* The client applying the create operation
-* The resource kind
-* Client details
-* The response code
-
-Expand the example below for an audit log entry:
-
-<details>
-<summary>Audit log entry</summary>
-
-```json
-{
-  "level": "Metadata",
-  "auditID": "51bbe609-14ad-4874-be78-1289c10d506a",
-  "stage": "ResponseComplete",
-  "requestURI": "/apis/nop.crossplane.io/v1alpha1/nopresources?fieldManager=kubectl-client-side-apply&fieldValidation=Strict",
-  "verb": "create",
-  "user": {
-    "username": "kubernetes-admin",
-    "groups": ["system:masters", "system:authenticated"]
-  },
-  "impersonatedUser": {
-    "username": "upbound:spaces:host:masterclient",
-    "groups": [
-      "system:authenticated",
-      "upbound:controlplane:admin",
-      "upbound:spaces:host:system:masters"
-    ]
-  },
-  "sourceIPs": ["10.244.0.135", "127.0.0.1"],
-  "userAgent": "kubectl/v1.32.2 (darwin/arm64) kubernetes/67a30c0",
-  "objectRef": {
-    "resource": "nopresources",
-    "name": "example",
-    "apiGroup": "nop.crossplane.io",
-    "apiVersion": "v1alpha1"
-  },
-  "responseStatus": { "metadata": {}, "code": 201 },
-  "requestReceivedTimestamp": "2025-09-19T23:03:24.540067Z",
-  "stageTimestamp": "2025-09-19T23:03:24.557583Z",
-  "annotations": {
-    "authorization.k8s.io/decision": "allow",
-    "authorization.k8s.io/reason": "RBAC: allowed by ClusterRoleBinding \"controlplane-admin\" of ClusterRole \"controlplane-admin\" to Group \"upbound:controlplane:admin\""
-  }
-}
-```
-</details>
-
-## Customize the audit policy
-
-Spaces `v1.14.0` includes a default audit policy. You can customize this policy
-by creating a configuration file and passing the values to
-`observability.controlPlanes.apiServer.auditPolicy` in the helm values file.
-
-An example custom audit policy:
-
-```yaml
-observability:
-  controlPlanes:
-    apiServer:
-      auditPolicy: |
-        apiVersion: audit.k8s.io/v1
-        kind: Policy
-        rules:
-          # ====================================================================
-          # RULE 1: Exclude health check and version endpoints
-          # ====================================================================
-          - level: None
-            nonResourceURLs:
-              - '/healthz*'
-              - '/readyz*'
-              - /version
-          # ====================================================================
-          # RULE 2: ConfigMaps - Write operations only
-          # ====================================================================
-          - level: Metadata
-            resources:
-              - group: ""
-                resources:
-                  - configmaps
-            verbs:
-              - create
-              - update
-              - patch
-              - delete
-            omitStages:
-              - RequestReceived
-              - ResponseStarted
-          # ====================================================================
-          # RULE 3: Secrets - ALL operations
-          # ====================================================================
-          - level: Metadata
-            resources:
-              - group: ""
-                resources:
-                  - secrets
-            verbs:
-              - get
-              - list
-              - watch
-              - create
-              - update
-              - patch
-              - delete
-            omitStages:
-              - RequestReceived
-              - ResponseStarted
-          # ====================================================================
-          # RULE 4: Global exclusion of read-only operations
-          # ====================================================================
-          - level: None
-            verbs:
-              - get
-              - list
-              - watch
-          # ====================================================================
-          # RULE 5: Exclude standard Kubernetes resources from write operation logging
-          # ====================================================================
-          - level: None
-            resources:
-              - group: ""
-              - group: "apps"
-              - group: "networking.k8s.io"
-              - group: "policy"
-              - group: "rbac.authorization.k8s.io"
-              - group: "storage.k8s.io"
-              - group: "batch"
-              - group: "autoscaling"
-              - group: "metrics.k8s.io"
-              - group: "node.k8s.io"
-              - group: "scheduling.k8s.io"
-              - group: "coordination.k8s.io"
-              - group: "discovery.k8s.io"
-              - group: "events.k8s.io"
-              - group: "flowcontrol.apiserver.k8s.io"
-              - group: "internal.apiserver.k8s.io"
-              - group: "authentication.k8s.io"
-              - group: "authorization.k8s.io"
-              - group: "admissionregistration.k8s.io"
-            verbs:
-              - create
-              - update
-              - patch
-              - delete
-          # ====================================================================
-          # RULE 6: Catch-all for ALL custom resources and any missed resources
-          # ====================================================================
-          - level: Metadata
-            verbs:
-              - create
-              - update
-              - patch
-              - delete
-            omitStages:
-              - RequestReceived
-              - ResponseStarted
-          # ====================================================================
-          # RULE 7: Final catch-all - exclude everything else
-          # ====================================================================
-          - level: None
-            omitStages:
-              - RequestReceived
-              - ResponseStarted
-```
-
-You can apply this policy during Spaces installation or upgrade using the helm values file.
-
-Audit policies use rules evaluated in order from top to bottom, where the first
-matching rule applies.
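-
-Because the first matching rule wins, rule order matters. As a minimal sketch
-(not the shipped default policy), a policy that records Secret writes but
-silences everything else must list the Secret rule before the catch-all:
-
-```yaml
-apiVersion: audit.k8s.io/v1
-kind: Policy
-rules:
-  # Matched first: write operations on Secrets get logged at Metadata level
-  - level: Metadata
-    resources:
-      - group: ""
-        resources:
-          - secrets
-    verbs:
-      - create
-      - update
-      - patch
-      - delete
-  # Everything else falls through to this rule and isn't logged
-  - level: None
-```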
-Control plane audit policies follow Kubernetes conventions and use the
-following logging levels:
-
-* **None** - Don't log events matching this rule
-* **Metadata** - Log request metadata (user, timestamp, resource, verb) but not request or response bodies
-* **Request** - Log metadata and request body but not response body
-* **RequestResponse** - Log metadata, request body, and response body
-
-For more information, review the Kubernetes [Auditing] documentation.
-
-## Disable audit logging
-
-You can disable audit logging on a control plane by removing it from the
-`SharedTelemetryConfig` selector or by deleting the `SharedTelemetryConfig`.
-
-### Disable for specific control planes
-
-Remove the `audit-enabled` label from control planes that should stop sending audit logs:
-
-```bash
-kubectl label controlplane <control-plane-name> --namespace <namespace> audit-enabled-
-```
-
-The `SharedTelemetryConfig` no longer selects this control plane, and audit log collection stops.
-
-### Disable for all control planes
-
-Delete the `SharedTelemetryConfig` to stop audit logging for all control planes it manages:
-
-```bash
-kubectl delete sharedtelemetryconfig <name> --namespace <namespace>
-```
-
-[ctp-selection]: /spaces/howtos/observability/#control-plane-selection
-[Auditing]: https://kubernetes.io/docs/tasks/debug/debug-cluster/audit/
diff --git a/spaces_versioned_docs/version-v1.15/howtos/self-hosted/declarative-ctps.md b/spaces_versioned_docs/version-v1.15/howtos/self-hosted/declarative-ctps.md
deleted file mode 100644
index 2c3e5331b..000000000
--- a/spaces_versioned_docs/version-v1.15/howtos/self-hosted/declarative-ctps.md
+++ /dev/null
@@ -1,110 +0,0 @@
----
-title: Declaratively create control planes
-sidebar_position: 99
-description: A tutorial to configure a Space with Argo to declaratively create and
-  manage control planes
----
-
-In this tutorial, you learn how to configure [Argo CD][argo-cd] to communicate with a self-hosted Space. This flow allows you to declaratively create and manage control planes from Git. Argo CD is a continuous delivery tool for Kubernetes that you can use to drive GitOps flows for your control plane infrastructure.
-
-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments.
-
-For API specifications on ControlPlane resources and their declarative creation, see the [Spaces API Reference](../../../../../reference/apis/spaces-api/).
-:::
-
-## Prerequisites
-
-To complete this tutorial, you need the following:
-
-- Have already deployed an Upbound Space.
-- Have already deployed an instance of Argo CD on a Kubernetes cluster.
-
-## Connect your Space to Argo CD
-
-Fetch the kubeconfig for the Space cluster, the Kubernetes cluster where you installed the Upbound Spaces software. You must add the Space cluster as a context to Argo.
-
-```ini
-export SPACES_CLUSTER_SERVER="https://url"
-export SPACES_CLUSTER_NAME="cluster"
-```
-
-Switch contexts to the Kubernetes cluster where you've installed Argo. Create a secret on the Argo cluster whose data contains the connection details of the Space cluster.
-
-:::important
-Make sure the following commands are executed against your **Argo** cluster, not your Space cluster.
-:::
-
-Run the following command in a terminal:
-
-```yaml
-# Assumed example of a declarative Argo CD cluster secret; supply the TLS
-# configuration for your Space cluster in the config field.
-cat <<EOF | kubectl apply -f -
-apiVersion: v1
-kind: Secret
-metadata:
-  name: spaces-cluster
-  namespace: argocd
-  labels:
-    argocd.argoproj.io/secret-type: cluster
-type: Opaque
-stringData:
-  name: ${SPACES_CLUSTER_NAME}
-  server: ${SPACES_CLUSTER_SERVER}
-  config: |
-    {
-      "tlsClientConfig": {
-        "insecure": false
-      }
-    }
-EOF
-```
-
diff --git a/spaces_versioned_docs/version-v1.15/howtos/self-hosted/deployment-reqs.md b/spaces_versioned_docs/version-v1.15/howtos/self-hosted/deployment-reqs.md
deleted file mode 100644
--- a/spaces_versioned_docs/version-v1.15/howtos/self-hosted/deployment-reqs.md
+++ /dev/null
-
-### Memory considerations
-
-#### Provider memory usage
-
-When you install a Crossplane provider on a control plane, memory gets consumed
-according to the number of custom resources it defines.
-Upbound [Official Provider families][official-provider-families] provide higher fidelity control
-to platform teams to install providers for only the resources they need,
-reducing the bloat of needlessly installing unused custom resources. Still, you
-must factor provider memory usage into your calculations to ensure you've
-rightsized the memory available in your Spaces cluster.
-
-:::important
-Be careful not to conflate `managed resource` with `custom resource definition`.
-The former is an "instance" of an external resource in Crossplane, while the
-latter defines the API schema of that resource.
-:::
-
-It's estimated that each custom resource definition consumes ~3 MB of memory.
-The calculation is:
-
-```bash
-number_of_managed_resources_defined_in_provider x 3 MB = memory_required
-```
-
-For example, if you plan to use [provider-aws-ec2][provider-aws-ec2], [provider-aws-s3][provider-aws-s3], and [provider-aws-iam][provider-aws-iam], the resulting calculation is:
-
-```bash
-provider-aws-ec2: 98 x 3 MB = 294 MB
-provider-aws-s3:  23 x 3 MB = 69 MB
-provider-aws-iam: 22 x 3 MB = 66 MB
----
-total memory: 429 MB
-```
-
-In this scenario, you should budget ~430 MB of memory for provider usage on this control plane.
-
-:::tip
-Do this calculation for each provider you plan to install on your control plane.
-Then do this calculation for each control plane you plan to run in your Space.
-:::
-
-#### Total memory usage
-
-Add the memory usage from the previous sections. Given the preceding examples,
-they result in a recommendation to budget ~1 GB memory for each control plane
-you plan to run in the Space.
-
-:::important
-The 1 GB recommendation is an example.
-You should input your own provider requirements to arrive at a final number for
-your own deployment.
-:::
-
-### CPU considerations
-
-#### Managed resource CPU usage
-
-The number of managed resources under management by a control plane is the largest contributing factor for CPU usage in a Space. CPU usage scales linearly according to the number of managed resources under management by your control plane. In Upbound's testing, CPU usage requirements _do_ vary from provider to provider. Using the Upbound Official Provider families as a baseline:
-
-| Provider | MR create operation (CPU core seconds) | MR update or reconciliation operation (CPU core seconds) |
-| ---- | ---- | ---- |
-| provider-family-aws | 10 | 2 to 3 |
-| provider-family-gcp | 7 | 1.5 |
-| provider-family-azure | 7 to 10 | 1.5 to 3 |
-
-When resources are in a non-ready state, Crossplane providers reconcile often (as fast as every 15 seconds). Once a resource reaches `READY`, each Crossplane provider defaults to a 10 minute poll interval. Given this, a 16-core machine has `16x10x60 = 9600` CPU core seconds available per 10 minute poll interval. Interpreting this table:
-
-- A single control plane that needs to create 100 AWS MRs concurrently would consume 1000 CPU core seconds, or about 1.5 cores.
-- A single control plane that continuously reconciles 100 AWS MRs once they've reached a `READY` state would consume 300 CPU core seconds, or a little under half a core.
-
-Since `provider-family-aws` has the highest recorded numbers for CPU time required, you can use that as an upper limit in your calculations.
-
-Using these calculations and extrapolating values, given a 16 core machine, it's recommended you don't exceed a single control plane managing 1000 MRs. Suppose you plan to run 10 control planes, each managing 1000 MRs.
-You want to make sure your node pool has capacity for 160 cores. If you are using a machine type that has 16 cores per machine, that would mean having a node pool of size 10. If you are using a machine type that has 32 cores per machine, that would mean having a node pool of size 5.
-
-#### Cloud API latency
-
-Oftentimes, you are using Crossplane providers to talk to external cloud APIs. Those external cloud APIs often have global API rate limits (examples: [Azure limits][azure-limits], [AWS EC2 limits][aws-ec2-limits]).
-
-For Crossplane providers built on [Upjet][upjet] (such as Upbound Official Provider families), these providers use Terraform under the covers. They expose some knobs (such as `--max-reconcile-rate`) you can use to tweak reconciliation rates.
-
-### Resource buffers
-
-The guidance in the preceding sections explains how to calculate CPU and memory usage requirements for:
-
-- a set of control planes in a Space
-- tuned to the number of providers you plan to use
-- according to the number of managed resource instances you plan to have managed by your control planes
-
-Upbound recommends budgeting an extra buffer of 20% on top of your resource capacity calculations. The numbers shared in the preceding sections don't account for peaks or surges since they're based on average measurements, so this buffer absorbs them.
-
-## Deploying more than one Space
-
-You are welcome to deploy more than one Space. You just need to make sure you have a 1:1 mapping of Space to Kubernetes clusters. Spaces are by their nature constrained to a single Kubernetes cluster, which is a regional entity. If you want to offer control planes in multiple cloud environments or multiple public clouds entirely, these are justifications for deploying more than one Space.
-
-## Cert-manager
-
-A Spaces deployment uses the [Certificate Custom Resource] from cert-manager to
-provision certificates within the Space. This establishes a clean API boundary
-between what your platform may need and the Certificate requirements of a
-Space.
-
-In the event you would like more control over the issuing Certificate Authority
-for your deployment or the deployment of cert-manager itself, this guide is for
-you.
-
-### Deploying
-
-An Upbound Space deployment doesn't have any special requirements for the
-cert-manager deployment itself. The only expectation is that cert-manager and
-the corresponding Custom Resources exist in the cluster.
-
-You should be free to install cert-manager in the cluster in any way that makes
-sense for your organization. You can find some [installation ideas] in the
-cert-manager docs.
-
-### Issuers
-
-A default Upbound Space install includes a [ClusterIssuer]. This `ClusterIssuer`
-is a `selfSigned` issuer that other certificates are minted from. You have a
-couple of options available to you for changing the default deployment of the
-Issuer:
-
-1. Changing the issuer name.
-2. Providing your own ClusterIssuer.
-
-#### Changing the issuer name
-
-The `ClusterIssuer` name is controlled by the `certificates.space.clusterIssuer`
-Helm property. You can adjust this during installation by providing the
-following parameter (assuming your new name is 'SpaceClusterIssuer'):
-
-```shell
---set "certificates.space.clusterIssuer=SpaceClusterIssuer"
-```
-
-#### Providing your own ClusterIssuer
-
-To provide your own `ClusterIssuer`, you first need to set up your own
-`ClusterIssuer` in the cluster.
-The cert-manager docs have a variety of options
-for providing your own. See the [Issuer Configuration] docs for more details.
-
-Once you have your own `ClusterIssuer` set up in the cluster, you need to turn
-off the deployment of the `ClusterIssuer` included in the Spaces deployment.
-To do that, provide the following parameter during installation:
-
-```shell
---set "certificates.provision=false"
-```
-
-##### Considerations
-
-If your `ClusterIssuer` has a name that's different from the default name that
-the Spaces installation expects ('spaces-selfsigned'), you need to also specify
-your `ClusterIssuer` name during install using:
-
-```shell
---set "certificates.space.clusterIssuer=<your-cluster-issuer-name>"
-```
-
-## Ingress
-
-To route requests from an external client (kubectl, ArgoCD, and so on) to a
-control plane, a Spaces deployment includes a default [Ingress] manifest. In
-order to ease getting started scenarios, the current `Ingress` includes
-configurations (properties and annotations) that assume that you installed the
-commonly used [ingress-nginx ingress controller] in the cluster. This section
-walks you through using a different `Ingress`, if that's something that your
-organization needs.
-
-### Default manifest
-
-An example of the current `Ingress` manifest included in a Spaces install
-is below:
-
-```yaml
-apiVersion: networking.k8s.io/v1
-kind: Ingress
-metadata:
-  name: mxe-router-ingress
-  namespace: upbound-system
-  annotations:
-    nginx.ingress.kubernetes.io/use-regex: "true"
-    nginx.ingress.kubernetes.io/ssl-redirect: "false"
-    nginx.ingress.kubernetes.io/proxy-send-timeout: "3600"
-    nginx.ingress.kubernetes.io/proxy-read-timeout: "3600"
-    nginx.ingress.kubernetes.io/proxy-request-buffering: "off"
-    nginx.ingress.kubernetes.io/proxy-body-size: "0"
-    nginx.ingress.kubernetes.io/proxy-http-version: "1.1"
-    nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
-    nginx.ingress.kubernetes.io/proxy-ssl-verify: "on"
-    nginx.ingress.kubernetes.io/proxy-ssl-secret: "upbound-system/mxp-hostcluster-certs"
-    nginx.ingress.kubernetes.io/proxy-ssl-name: spaces-router
-    nginx.ingress.kubernetes.io/configuration-snippet: |
-      more_set_headers "X-Request-Id: $req_id";
-      more_set_headers "Request-Id: $req_id";
-      more_set_headers "Audit-Id: $req_id";
-spec:
-  ingressClassName: nginx
-  tls:
-  - hosts:
-    - {{ .Values.ingress.host }}
-    secretName: mxe-router-tls
-  rules:
-  - host: {{ .Values.ingress.host }}
-    http:
-      paths:
-      - path: "/v1/controlPlanes"
-        pathType: Prefix
-        backend:
-          service:
-            name: spaces-router
-            port:
-              name: http
-```
-
-The notable pieces are:
-
-1. Namespace
-
-This property represents the namespace that the spaces-router is deployed to.
-In most cases this is `upbound-system`.
-
-2. proxy-ssl-* annotations
-
-The spaces-router pod terminates TLS using certificates located in the
-mxp-hostcluster-certs `Secret` located in the `upbound-system` `Namespace`.
-
-3. proxy-* annotations
-
-Requests coming into the ingress-controller can be variable depending on what
-the client is requesting. For example, `kubectl get crds` has different
-requirements for the connection compared to a watch, such as
-`kubectl get pods -w`. The ingress-controller is configured to
-account for either scenario.
-
-4. configuration-snippets
-
-These commands add headers to the incoming requests that help with telemetry
-and diagnosing problems within the system.
-5. Rules
-
-Requests coming into the control planes use a `/v1/controlPlanes` prefix and
-need to be routed to the spaces-router.
-
-### Using a different ingress manifest
-
-Operators can choose to use an `Ingress` manifest and ingress controller that
-makes the most sense for their organization. To turn off deploying the default
-`Ingress` manifest, provide the following parameter during installation:
-
-```shell
---set "ingress.provision=false"
-```
-
-#### Considerations
-
-Operators need to take into account the following considerations when
-disabling the default `Ingress` deployment.
-
-1. Ensure the custom `Ingress` manifest is placed in the same namespace as the
-`spaces-router` pod.
-2. Ensure that the ingress is configured to use a `spaces-router` as a secure
-backend and that the secret used is the mxp-hostcluster-certs secret.
-3. Ensure that the ingress is configured to handle long-lived connections.
-4. Ensure that the routing rule sends requests prefixed with
-`/v1/controlPlanes` to the `spaces-router` using the `http` port.
-
-[cert-manager]: https://cert-manager.io/
-[Certificate Custom Resource]: https://cert-manager.io/docs/usage/certificate/
-[ClusterIssuer]: https://cert-manager.io/docs/concepts/issuer/
-[ingress-nginx ingress controller]: https://kubernetes.github.io/ingress-nginx/deploy/
-[installation ideas]: https://cert-manager.io/docs/installation/
-[Ingress]: https://kubernetes.io/docs/concepts/services-networking/ingress/
-[Issuer Configuration]: https://cert-manager.io/docs/configuration/
-[official-provider-families]: /manuals/packages/providers/provider-families
-[aws-eks]: https://aws.amazon.com/eks/
-[google-cloud-gke]: https://cloud.google.com/kubernetes-engine
-[microsoft-aks]: https://azure.microsoft.com/en-us/products/kubernetes-service
-[upbound-account]: https://www.upbound.io/register/?utm_source=docs&utm_medium=cta&utm_campaign=docs_spaces
-[provider-aws-ec2]: https://marketplace.upbound.io/providers/upbound/provider-aws-ec2
-[provider-aws-s3]: https://marketplace.upbound.io/providers/upbound/provider-aws-s3
-[provider-aws-iam]: https://marketplace.upbound.io/providers/upbound/provider-aws-iam
-[azure-limits]: https://learn.microsoft.com/en-us/azure/azure-resource-manager/management/request-limits-and-throttling
-[aws-ec2-limits]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/throttling.html#throttling-limits-rate-based
-[upjet]: https://github.com/upbound/upjet
diff --git a/spaces_versioned_docs/version-v1.15/howtos/self-hosted/dr.md b/spaces_versioned_docs/version-v1.15/howtos/self-hosted/dr.md
deleted file mode 100644
index 67ecbfecf..000000000
--- a/spaces_versioned_docs/version-v1.15/howtos/self-hosted/dr.md
+++ /dev/null
@@ -1,412 +0,0 @@
----
-title: Disaster Recovery
-sidebar_position: 13
-description: Configure Space-wide backups for disaster recovery.
----
-
-:::info API Version Information
-This guide applies to **Spaces v1.9.0 and later** (Self-Hosted only). Space Backups is enabled by default starting in v1.14.0.
-
-- **v1.9.0-v1.13.x**: Alpha feature (requires manual enablement)
-- **v1.14.0+**: GA (enabled by default)
-
-For control-plane backups, see [Backup and Restore](../backup-and-restore.md).
-:::
-
-:::important
-For Connected and Disconnected Spaces, this feature requires Spaces `v1.9.0` and, starting with `v1.14.0`, Spaces enables it by default.
-
-To enable it on versions earlier than `v1.14.0`, set `features.alpha.spaceBackup.enabled=true` when you install Spaces:
-
-```bash
-up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
-  ... \
-  --set "features.alpha.spaceBackup.enabled=true"
-```
-:::
-
-Upbound's _Space Backups_ is a built-in Space-wide backup and restore feature. This guide explains how to configure Space Backups and how to restore from one of them in case of disaster recovery.
-
-This feature is meant for Space administrators. Group or Control Plane users can leverage [Shared Backups][shared-backups] to back up and restore their ControlPlanes.
-
-## Benefits
-
-The Space Backups feature provides the following benefits:
-
-* Automatic backups for all resources in a Space and all resources in control planes, without any operational overhead.
-* Backup schedules.
-* Selectors to specify resources to backup.
-
-## Prerequisites
-
-Enable the Space Backups feature in the Space:
-
-- Cloud Spaces: Not accessible to users.
-- Connected Spaces: Space administrator must enable this feature.
-- Disconnected Spaces: Space administrator must enable this feature.
-
-## Configure a Space Backup Config
-
-[SpaceBackupConfig][spacebackupconfig] is a cluster-scoped resource. This resource configures the storage details and provider. Whenever a backup executes (either by schedule or manually initiated), it references a SpaceBackupConfig to tell it where to store the snapshot.
-
-### Backup config provider
-
-The `spec.objectStorage.provider` and `spec.objectStorage.config` fields configure:
-
-* The object storage provider
-* The path to the provider
-* The credentials needed to communicate with the provider
-
-You can only set one provider. Upbound currently supports AWS, Azure, and GCP as providers.
-
-`spec.objectStorage.config` is a freeform map of configuration options for the object storage provider. See [Thanos object storage][thanos-object-storage] for more information on the formats for each supported cloud provider. `spec.bucket` and `spec.provider` override the required values in the config.
-
-#### AWS as a storage provider
-
-This example demonstrates how to use AWS as a storage provider for your backups:
-
-```yaml
-apiVersion: admin.spaces.upbound.io/v1alpha1
-kind: SpaceBackupConfig
-metadata:
-  name: default
-spec:
-  objectStorage:
-    provider: AWS
-    bucket: spaces-backup-bucket
-    config:
-      endpoint: s3.eu-west-2.amazonaws.com
-      region: eu-west-2
-    credentials:
-      source: Secret
-      secretRef:
-        name: bucket-creds
-        namespace: upbound-system
-        key: creds
-```
-
-This example assumes you've already created an S3 bucket called
-`spaces-backup-bucket` in the `eu-west-2` AWS region. To access the bucket,
-define the account credentials as a Secret in the specified Namespace
-(`upbound-system` in this example).
-
-#### Azure as a storage provider
-
-This example demonstrates how to use Azure as a storage provider for your backups:
-
-```yaml
-apiVersion: admin.spaces.upbound.io/v1alpha1
-kind: SpaceBackupConfig
-metadata:
-  name: default
-spec:
-  objectStorage:
-    provider: Azure
-    bucket: upbound-backups
-    config:
-      storage_account: upbackupstore
-      container: upbound-backups
-      endpoint: blob.core.windows.net
-    credentials:
-      source: Secret
-      secretRef:
-        name: bucket-creds
-        namespace: upbound-system
-        key: creds
-```
-
-This example assumes you've already created an Azure storage account called
-`upbackupstore` and blob `upbound-backups`.
-
-#### GCP as a storage provider
-
-This example demonstrates how to use Google Cloud Storage as a storage provider for your backups:
-
-```yaml
-apiVersion: admin.spaces.upbound.io/v1alpha1
-kind: SpaceBackupConfig
-metadata:
-  name: default
-  namespace: default
-spec:
-  objectStorage:
-    provider: GCP
-    bucket: spaces-backup-bucket
-    credentials:
-      source: Secret
-      secretRef:
-        name: bucket-creds
-        namespace: upbound-system
-        key: creds
-```
-
-
-This example assumes you've already created a Cloud Storage bucket called
-`spaces-backup-bucket` and a service account with access to this bucket. Define the key file as a Secret in the specified Namespace
-(`upbound-system` in this example).
-
-
-## Configure a Space Backup Schedule
-
-
-[SpaceBackupSchedule][spacebackupschedule] is a cluster-scoped resource. This resource defines a backup schedule for the whole Space.
-
-Below is an example of a Space Backup Schedule that runs every day. It backs up all groups with the `environment: production` label and all control planes in those groups with the `backup: please` label.
-
-```yaml
-apiVersion: admin.spaces.upbound.io/v1alpha1
-kind: SpaceBackupSchedule
-metadata:
-  name: daily-schedule
-spec:
-  schedule: "@daily"
-  configRef:
-    kind: SpaceBackupConfig
-    name: default
-  match:
-    groups:
-      labelSelectors:
-      - matchLabels:
-          environment: production
-    controlPlanes:
-      labelSelectors:
-      - matchLabels:
-          backup: please
-```
-
-### Define a schedule
-
-The `spec.schedule` field is a [Cron-formatted][cron-formatted] string. Some common examples are below:
-
-| Entry             | Description                                                                                         |
-| ----------------- | --------------------------------------------------------------------------------------------------- |
-| `@hourly`         | Run once an hour.                                                                                   |
-| `@daily`          | Run once a day.                                                                                     |
-| `@weekly`         | Run once a week.                                                                                    |
-| `0 0/4 * * *`     | Run every 4 hours.                                                                                  |
-| `0/15 * * * 1-5`  | Run every fifteenth minute on Monday through Friday.                                                |
-| `@every 1h30m10s` | Run every 1 hour, 30 minutes, and 10 seconds. Hour is the largest measurement of time for `@every`. |
-
-### Suspend a schedule
-
-Use the `spec.suspend` field to suspend the schedule. A suspended schedule creates no new backups, but allows running backups to complete.
-
-```yaml
-apiVersion: admin.spaces.upbound.io/v1alpha1
-kind: SpaceBackupSchedule
-metadata:
-  name: daily-schedule
-spec:
-  suspend: true
-...
-```
-
-### Garbage collect backups when the schedule gets deleted
-
-Set `spec.useOwnerReferencesInBackup` to `true` to garbage collect the associated `SpaceBackup` objects when their `SpaceBackupSchedule` gets deleted.
-
-### Set the time to live
-
-Set the `spec.ttl` field to define the time to live for the backup. After this time, the backup is eligible for garbage collection. If this field isn't set, the backup isn't garbage collected.
-
-The time to live is a duration, for example, `168h` for 7 days.
-
-```yaml
-apiVersion: admin.spaces.upbound.io/v1alpha1
-kind: SpaceBackupSchedule
-metadata:
-  name: daily-schedule
-spec:
-  ttl: 168h # Backup is garbage collected after 7 days
-...
-```
-
-## Selecting space resources to backup
-
-By default, a SpaceBackup selects all groups and, for each of them, all control planes, secrets, and any other group-scoped resources.
-
-By setting `spec.match`, you can include only specific groups, control planes, secrets, or other Space resources in the backup.
-
-By setting `spec.exclude`, you can filter out some matched Space API resources from the backup.
-
-### Including space resources in a backup
-
-Different fields are available to include resources based on labels or names:
-- `spec.match.groups` to include only some groups in the backup.
-- `spec.match.controlPlanes` to include only some control planes in the backup.
-- `spec.match.secrets` to include only some secrets in the backup.
-- `spec.match.extras` to include only some extra resources in the backup.
-
-```yaml
-apiVersion: admin.spaces.upbound.io/v1alpha1
-kind: SpaceBackup
-metadata:
-  name: my-backup
-spec:
-  configRef:
-    kind: SpaceBackupConfig
-    name: default
-  match:
-    groups:
-      labelSelectors:
-      - matchLabels:
-          environment: production
-    controlPlanes:
-      labelSelectors:
-      - matchLabels:
-          backup: please
-    secrets:
-      names:
-      - my-secret
-    extras:
-    - apiGroup: "spaces.upbound.io"
-      kind: "SharedBackupConfig"
-      names:
-      - my-shared-backup
-```
-
-### Excluding Space resources from the backup
-
-Use the `spec.exclude` field to exclude matched Space API resources from the backup.
-
-Different fields are available to exclude resources based on labels or names:
-- `spec.exclude.groups` to exclude some groups from the backup.
-- `spec.exclude.controlPlanes` to exclude some control planes from the backup.
-- `spec.exclude.secrets` to exclude some secrets from the backup.
-- `spec.exclude.extras` to exclude some extra resources from the backup.
-
-```yaml
-apiVersion: admin.spaces.upbound.io/v1alpha1
-kind: SpaceBackup
-metadata:
-  name: my-backup
-spec:
-  ttl: 168h # Backup is garbage collected after 7 days
-  configRef:
-    kind: SpaceBackupConfig
-    name: default
-  match:
-    groups:
-      labelSelectors:
-      - matchLabels:
-          environment: production
-  exclude:
-    groups:
-      names:
-      - not-this-one-please
-```
-
-### Exclude resources in control planes' backups
-
-By default, a backup includes all resources in a selected control plane.
-
-Use the `spec.controlPlaneBackups.excludedResources` field to exclude resources from control planes' backups.
-
-```yaml
-apiVersion: admin.spaces.upbound.io/v1alpha1
-kind: SpaceBackup
-metadata:
-  name: my-backup
-spec:
-  ttl: 168h # Backup is garbage collected after 7 days
-  configRef:
-    kind: SpaceBackupConfig
-    name: default
-  controlPlaneBackups:
-    excludedResources:
-    - secrets
-    - buckets.s3.aws.upbound.io
-```
-
-## Create a manual backup
-
-[SpaceBackup][spacebackup] is a cluster-scoped resource that causes a single backup to occur for the whole Space.
-
-Below is an example of a manual SpaceBackup:
-
-```yaml
-apiVersion: admin.spaces.upbound.io/v1alpha1
-kind: SpaceBackup
-metadata:
-  name: my-backup
-spec:
-  configRef:
-    kind: SpaceBackupConfig
-    name: default
-  deletionPolicy: Delete
-```
-
-
-The `spec.deletionPolicy` field defines backup deletion actions,
-including the deletion of the backup file from the bucket. The `deletionPolicy`
-value defaults to `Orphan`. Set it to `Delete` to remove uploaded files
-in the bucket.
-For more information on the backup and restore process, review the [Spaces API documentation][spaces-api-documentation].
-
-### Set the time to live
-
-Set the `spec.ttl` field to define the time to live for the backup. After this time, the backup is eligible for garbage collection. If this field isn't set, the backup isn't garbage collected. The time to live is a duration, for example, `168h` for 7 days.
-
-```yaml
-apiVersion: admin.spaces.upbound.io/v1alpha1
-kind: SpaceBackup
-metadata:
-  name: my-backup
-spec:
-  ttl: 168h # Backup is garbage collected after 7 days
-...
-```
-
-## Restore from a space backup
-
-Space Backup and Restore focuses only on disaster recovery. The restore procedure assumes a new Space installation with no existing resources. The restore procedure is idempotent, so you can run it multiple times without any side effects in case of failures.
-
-To restore a Space from an existing Space Backup, follow these steps:
-
-1. Install Spaces from scratch as needed.
-2. Create a `SpaceBackupConfig` as needed to access the SpaceBackup from the object storage, for example named `my-backup-config`.
-3. Select the backup you want to restore from, for example `my-backup`.
-4. Run the following command to restore the Space:
-
-```shell
-export SPACE_BACKUP_CONFIG=my-backup-config
-export SPACE_BACKUP=my-backup
-kubectl exec -ti -n upbound-system deployments/spaces-controller -c spaces -- hyperspace restore $SPACE_BACKUP $SPACE_BACKUP_CONFIG
-```
-
-### Restore specific control planes
-
-:::important
-This feature is available from Spaces v1.11.
-:::
-
-Instead of restoring the whole Space, you can choose to restore specific control planes
-from a backup using the `--controlplanes` flag. You can also use
-the `--skip-space-restore` flag to skip restoring Space objects.
-This allows Spaces admins to restore individual control planes without
-needing to restore the entire Space.
-
-```shell
-export SPACE_BACKUP_CONFIG=my-backup-config
-export SPACE_BACKUP=my-backup
-kubectl exec -ti -n upbound-system deployments/spaces-controller -c spaces \
-  -- hyperspace restore $SPACE_BACKUP $SPACE_BACKUP_CONFIG --controlplanes default/ctp1,default/ctp2 --skip-space-restore
-```
-
-
-[shared-backups]: /spaces/howtos/self-hosted/workload-id/backup-restore-config/
-[spacebackupconfig]: /reference/apis/spaces-api/v1_9
-[thanos-object-storage]: https://thanos.io/tip/thanos/storage.md/
-[spacebackupschedule]: /reference/apis/spaces-api/v1_9
-[cron-formatted]: https://en.wikipedia.org/wiki/Cron
-[spacebackup]: /reference/apis/spaces-api/v1_9
-[spaces-api-documentation]: /reference/apis/spaces-api/v1_9
-
diff --git a/spaces_versioned_docs/version-v1.15/howtos/self-hosted/gitops-with-argocd.md b/spaces_versioned_docs/version-v1.15/howtos/self-hosted/gitops-with-argocd.md
deleted file mode 100644
index 004247a10..000000000
--- a/spaces_versioned_docs/version-v1.15/howtos/self-hosted/gitops-with-argocd.md
+++ /dev/null
@@ -1,142 +0,0 @@
----
-title: GitOps with ArgoCD in Self-Hosted Spaces
-sidebar_position: 80
-description: Set up GitOps workflows with Argo CD in self-hosted Spaces
-plan: "business"
----
-
-:::info Deployment Model
-This guide applies to **self-hosted Spaces** deployments. For Upbound Cloud Spaces, see [GitOps with Upbound Control Planes](/spaces/howtos/cloud-spaces/gitops-on-upbound/).
-:::
-
-GitOps is an approach for managing a system by declaratively describing desired resources' configurations in Git and using controllers to realize the desired state. Upbound's control planes are compatible with this pattern, and it's strongly recommended you integrate GitOps in the platforms you build on Upbound.
-
-
-## Integrate with Argo CD
-
-
-[Argo CD][argo-cd] is a project in the Kubernetes ecosystem commonly used for
-GitOps. You can use it in tandem with Upbound control planes to achieve GitOps
-flows. The sections below explain how to integrate these tools with Upbound.
-
-### Configure connection secrets for control planes
-
-You can configure control planes to write their connection details to a secret.
-Do this by setting the
-[`spec.writeConnectionSecretToRef`][spec-writeconnectionsecrettoref] field in a
-control plane manifest. For example:
-
-```yaml
-apiVersion: spaces.upbound.io/v1beta1
-kind: ControlPlane
-metadata:
-  name: ctp1
-  namespace: default
-spec:
-  writeConnectionSecretToRef:
-    name: kubeconfig-ctp1
-    namespace: default
-```
-
-
-### Configure Argo CD
-
-
-To configure Argo CD for Annotation resource tracking, edit the Argo CD
-ConfigMap in the Argo CD namespace. Add `application.resourceTrackingMethod:
-annotation` to the data section as below.
-
-Next, configure the [auto respect RBAC for the Argo CD
-controller][auto-respect-rbac-for-the-argo-cd-controller-1]. By default, Argo CD
-attempts to discover some Kubernetes resource types that don't exist in a
-control plane. You must configure Argo CD to respect the cluster's RBAC rules so
-that Argo CD can sync. Add `resource.respectRBAC: normal` to the data section as
-below.
-
-```yaml
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: argocd-cm
-data:
-  ...
-  application.resourceTrackingMethod: annotation
-  resource.respectRBAC: normal
-```
-
-:::tip
-The `resource.respectRBAC` configuration above tells Argo to respect RBAC for
-_all_ cluster contexts. If you're using an Argo CD instance to manage more than
-only control planes, you should consider changing the `clusters` string match
-for the configuration to apply only to control planes. For example, if every
-control plane context name followed the convention of being named
-`controlplane-`, you could set the string match to be `controlplane-*`.
-:::
-
-
-### Create a cluster context definition
-
-
-Once the control plane is ready, extract the following values from the secret
-containing the kubeconfig:
-
-```bash
-kubeconfig_content=$(kubectl get secrets kubeconfig-ctp1 -n default -o jsonpath='{.data.kubeconfig}' | base64 -d)
-server=$(echo "$kubeconfig_content" | grep 'server:' | awk '{print $2}')
-bearer_token=$(echo "$kubeconfig_content" | grep 'token:' | awk '{print $2}')
-ca_data=$(echo "$kubeconfig_content" | grep 'certificate-authority-data:' | awk '{print $2}')
-```
-
-Generate a new secret in the cluster where you installed Argo, using the prior
-values extracted:
-
-```yaml
-# A minimal Argo CD cluster Secret, following the documented
-# argocd.argoproj.io/secret-type format and using the values extracted above.
-cat <<EOF | kubectl apply -f -
-apiVersion: v1
-kind: Secret
-metadata:
-  name: controlplane-ctp1
-  namespace: argocd
-  labels:
-    argocd.argoproj.io/secret-type: cluster
-type: Opaque
-stringData:
-  name: controlplane-ctp1
-  server: ${server}
-  config: |
-    {
-      "bearerToken": "${bearer_token}",
-      "tlsClientConfig": {
-        "insecure": false,
-        "caData": "${ca_data}"
-      }
-    }
-EOF
-```
-
-import GlobalLanguageSelector, { CodeBlock } from '@site/src/components/GlobalLanguageSelector';
-
-
-
-
-:::important
-This feature is only available for select Business Critical customers. You can't
-set up your own Managed Space without the assistance of Upbound. If you're
-interested in this deployment mode, please [contact us][contact].
-:::
-
-
-
-A Managed Space deployed on AWS is a single-tenant deployment of a control plane
-space in your AWS organization in an isolated sub-account. With Managed Spaces,
-you can use the same API, CLI, and Console that Upbound offers, with the benefit
-of running entirely in a cloud account that you own and Upbound manages for you.
-
-The following guide walks you through setting up a Managed Space in your AWS
-organization. If you have any questions while working through this guide,
-contact your Upbound Account Representative for help.
-
-
-
-
-
-A Managed Space deployed on GCP is a single-tenant deployment of a control plane
-space in your GCP organization in an isolated project.
 With Managed Spaces, you
-can use the same API, CLI, and Console that Upbound offers, with the benefit of
-running entirely in a cloud account that you own and Upbound manages for you.
-
-The following guide walks you through setting up a Managed Space in your GCP
-organization. If you have any questions while working through this guide,
-contact your Upbound Account Representative for help.
-
-
-
-
-## Managed Space on your cloud architecture
-
-
-
-A Managed Space is a deployment of the Upbound Spaces software inside an
-Upbound-controlled sub-account in your AWS cloud environment. The Spaces
-software runs in this sub-account, orchestrated by Kubernetes. Backups and
-billing data get stored inside bucket or blob storage in the same sub-account.
-The control planes deployed and controlled by the Spaces software run on the
-Kubernetes cluster deployed into the sub-account.
-
-The diagram below illustrates the high-level architecture of Upbound Managed Spaces:
-
-![Upbound Managed Spaces arch](/img/managed-arch-aws.png)
-
-The Spaces software gets deployed on an EKS Cluster in the region of your
-choice. This EKS cluster is where your control planes ultimately run.
-Upbound also deploys two buckets: one for the collection of billing data and one for
-control plane backups.
-
-Upbound doesn't have access to other sub-accounts or your organization-level
-settings in your cloud environment. Outside of your cloud organization, Upbound
-runs the Upbound Console, which includes the Upbound API and web application,
-including the dashboard you see at `console.upbound.io`. By default, all
-connections are encrypted, but public. Optionally, you can use private network
-connectivity through [AWS PrivateLink][aws-privatelink].
-
-
-
-
-
-
-A Managed Space is a deployment of the Upbound Spaces software inside an
-Upbound-controlled project in your GCP cloud environment. The Spaces software
-runs in this project, orchestrated by Kubernetes. Backups and billing data get
-stored inside bucket or blob storage in the same project. The control planes
-deployed and controlled by the Spaces software run on the Kubernetes cluster
-deployed into the project.
-
-The diagram below illustrates the high-level architecture of Upbound Managed Spaces:
-
-![Upbound Managed Spaces arch](/img/managed-arch-gcp.png)
-
-The Spaces software gets deployed on a GKE Cluster in the region of your choice.
-This GKE cluster is where your control planes ultimately run. Upbound also
-deploys two cloud buckets: one for the collection of billing data and one for
-control plane backups.
-
-Upbound doesn't have access to other projects or your organization-level
-settings in your cloud environment. Outside of your cloud organization, Upbound
-runs the Upbound Console, which includes the Upbound API and web application,
-including the dashboard you see at `console.upbound.io`. By default, all
-connections are encrypted, but public. Optionally, you can use private network
-connectivity through [GCP Private Service
-Connect][gcp-private-service-connect].
-
-
-
-## Prerequisites
-
-- An organization created on Upbound
-
-
-
-- You should have a preexisting AWS organization to complete this guide.
-- You must create a new AWS sub-account. Read the [AWS documentation][aws-documentation] to learn how to create a new sub-account in an existing organization on AWS. A minimal CLI sketch follows this list.
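-
-As a sketch (assuming the AWS CLI with Organizations permissions; the account name, email, and request ID below are placeholders):
-
-```bash
-# Request a new member account in the organization
-aws organizations create-account \
-  --email aws-admin@example.com \
-  --account-name "upbound-managed-space"
-
-# Check provisioning status using the request ID returned above
-aws organizations describe-create-account-status \
-  --create-account-request-id car-EXAMPLE11111111111111111111
-```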
-
-After the sub-account information gets provided to Upbound, **don't change it
-any further.** Any changes made to the sub-account or the resources created by
-Upbound for the purposes of the Managed Space deployments void the SLA you have
-with Upbound. If you want to make configuration changes, contact your Upbound
-Solutions Architect.
-
-
-
-
-- You should have a preexisting GCP organization with an active Cloud Billing account to complete this guide.
-- You must create a new GCP project. Read the [GCP documentation][gcp-documentation] to learn how to create a new project in an existing organization on GCP.
-
-After the project information gets provided to Upbound, **don't change it any
-further.** Any changes made to the project or the resources created by Upbound
-for the purposes of the Managed Space deployments void the SLA you have with
-Upbound. If you want to make configuration changes, contact your Upbound
-Solutions Architect.
-
-
-
-
-
-## Set up cross-account management
-
-Upbound supports using AWS Key Management Service with cross-account IAM
-permissions. This enables the isolation of keys so the infrastructure operated
-by Upbound has limited access to symmetric keys.
-
-In the KMS key's account, apply the baseline key policy:
-
-```json
-{
-  "Sid": "Allow Upbound to use this key",
-  "Effect": "Allow",
-  "Principal": {
-    "AWS": ["[Managed Space sub-account ID]"]
-  },
-  "Action": ["kms:Encrypt", "kms:Decrypt", "kms:ReEncrypt*", "kms:GenerateDataKey*", "kms:DescribeKey"],
-  "Resource": "*"
-}
-```
-
-You need another key policy to let the sub-account create persistent resources
-with the KMS key:
-
-```json
-{
-  "Sid": "Allow attachment of persistent resources for an Upbound Managed Space",
-  "Effect": "Allow",
-  "Principal": {
-    "AWS": "[Managed Space sub-account ID]"
-  },
-  "Action": ["kms:CreateGrant", "kms:ListGrants", "kms:RevokeGrant"],
-  "Resource": "*",
-  "Condition": {
-    "Bool": {
-      "kms:GrantIsForAWSResource": "true"
-    }
-  }
-}
-```
-
-### Configure PrivateLink
-
-By default, all connections to the Upbound Console are encrypted, but public.
-AWS PrivateLink is a feature that allows VPC peering so that your traffic
-doesn't traverse the public internet. To have this configured, contact your
-Upbound Account Representative.
-
-
-
-
-
-## Enable APIs
-
-Enable the following APIs in the new project:
-
-- Kubernetes Engine API
-- Cloud Resource Manager API
-- Compute Engine API
-- Cloud DNS API
-
-:::tip
-Read how to enable APIs in a GCP project [here][here].
-:::
-
-## Create a service account
-
-Create a service account in the new project named `upbound-sa`. Give the
-service account the following roles (a CLI sketch follows the Private Service
-Connect section below):
-
-- Compute Admin
-- Project IAM Admin
-- Service Account Admin
-- DNS Administrator
-- Editor
-
-Select the service account you just created. Select keys. Add a new key and
-select JSON. The key gets downloaded to your machine. Save this for later.
-
-## Create a DNS Zone
-
-Create a DNS Zone and set the **Zone type** to `Public`.
-
-### Configure Private Service Connect
-
-By default, all connections to the Upbound Console are encrypted, but public.
-GCP Private Service Connect is a feature that allows VPC peering so that your
-traffic doesn't traverse the public internet. To have this configured, contact
-your Upbound Account Representative.
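-
-A sketch of the service account steps above with the gcloud CLI (the project ID and key path are placeholders; repeat the policy binding for each role listed earlier):
-
-```bash
-# Create the service account in the new project
-gcloud iam service-accounts create upbound-sa --project my-upbound-project
-
-# Grant one of the required roles (repeat for the other roles)
-gcloud projects add-iam-policy-binding my-upbound-project \
-  --member "serviceAccount:upbound-sa@my-upbound-project.iam.gserviceaccount.com" \
-  --role roles/compute.admin
-
-# Create and download a JSON key for the account
-gcloud iam service-accounts keys create key.json \
-  --iam-account "upbound-sa@my-upbound-project.iam.gserviceaccount.com"
-```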
-
-## Provide information to Upbound
-
-Once these policies get attached to the key, tell your Upbound Account
-Representative, providing them the following:
-
-
-
-- The full ARN of the KMS key.
-- The name of the organization that you created in Upbound. Use the up CLI command `up org list` to see this information.
-- Confirmation of which region in AWS you want the deployment to target.
-
-
-
-
-
-- The service account JSON key.
-- The NS records associated with the DNS name created in the last step.
-- The name of the organization that you created in Upbound. Use the up CLI command `up org list` to see this information.
-- Confirmation of which region in GCP you want the deployment to target.
-
-
-
-Once Upbound has this information, the request gets processed within a business day.
-
-## Use your Managed Space
-
-Once the Managed Space gets deployed, you can see it in the Space selector when browsing your environment on [`console.upbound.io`][console-upbound-io].
-
-
-
-
-[contact]: https://www.upbound.io/contact-us
-[aws-privatelink]: #configure-privatelink
-[aws-documentation]: https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_create.html#orgs_manage_accounts_create-new
-[gcp-private-service-connect]: #configure-private-service-connect
-[gcp-documentation]: https://cloud.google.com/resource-manager/docs/creating-managing-organization
-[here]: https://cloud.google.com/apis/docs/getting-started#enabling_apis
-[console-upbound-io]: https://console.upbound.io/
diff --git a/spaces_versioned_docs/version-v1.15/howtos/self-hosted/oidc-configuration.md b/spaces_versioned_docs/version-v1.15/howtos/self-hosted/oidc-configuration.md
deleted file mode 100644
index cbef4dc42..000000000
--- a/spaces_versioned_docs/version-v1.15/howtos/self-hosted/oidc-configuration.md
+++ /dev/null
@@ -1,289 +0,0 @@
----
-title: Configure OIDC
-sidebar_position: 20
-description: Configure OIDC in your Space
----
-:::important
-This guide is only applicable for administrators who've deployed self-hosted Spaces. For general RBAC in Upbound, read [Upbound RBAC][upbound-rbac].
-:::
-
-Upbound uses the Kubernetes [Structured Authentication Configuration][structured-auth-config] to validate OIDC tokens sent to the API. Upbound stores this
-configuration as a `ConfigMap` and passes it to the Upbound router
-component during installation with Helm.
-
-This guide walks you through how to create and apply an authentication
-configuration to validate Upbound with an external identity provider. Each
-section focuses on a specific part of the configuration file.
-
-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments.
-
-For related platform authentication features, see the [Platform manual](../../../../platform/).
-:::
-
-## Creating the `AuthenticationConfiguration` file
-
-First, create a file called `config.yaml` with an `AuthenticationConfiguration`
-kind. The `AuthenticationConfiguration` is the initial authentication structure
-necessary for Upbound to communicate with your chosen identity provider.
-
-```yaml
-apiVersion: apiserver.config.k8s.io/v1beta1
-kind: AuthenticationConfiguration
-jwt:
-- issuer:
-    url: oidc-issuer-url
-    audiences:
-    - oidc-client-id
-  claimMappings: # optional
-    username:
-      claim: oidc-username-claim
-      prefix: oidc-username-prefix
-    groups:
-      claim: oidc-groups-claim
-      prefix: oidc-groups-prefix
-```
-
-
-For detailed configuration options, including CEL-based token validation,
-review the feature [documentation][structured-auth-config].
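-
-As an illustration of those CEL-based validation rules, here's a sketch following the upstream Structured Authentication Configuration examples (the issuer URL, audience, and claim values are placeholders):
-
-```yaml
-apiVersion: apiserver.config.k8s.io/v1beta1
-kind: AuthenticationConfiguration
-jwt:
-- issuer:
-    url: https://example.com
-    audiences:
-    - my-app
-  claimValidationRules:
-  # Require a specific claim value
-  - expression: 'claims.hd == "example.com"'
-    message: 'the hd claim must be set to example.com'
-  # Cap total token lifetime at 24 hours
-  - expression: 'claims.exp - claims.nbf <= 86400'
-    message: 'total token lifetime must not exceed 24 hours'
-```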
-
-
-The `AuthenticationConfiguration` allows you to configure multiple JWT
-authenticators as separate issuers.
-
-### Configure an issuer
-
-The `jwt` array requires an `issuer` specification and typically contains:
-
-- A `username` claim mapping
-- A `groups` claim mapping
-
-Optionally, the configuration may also include:
-
-- A set of claim validation rules
-- A set of user validation rules
-
-The `issuer` URL must be unique across all configured authenticators.
-
-```yaml
-issuer:
-  url: https://example.com
-  discoveryUrl: https://discovery.example.com/.well-known/openid-configuration
-  certificateAuthority: |-
-    
-  audiences:
-  - client-id-a
-  - client-id-b
-  audienceMatchPolicy: MatchAny
-```
-
-By default, the authenticator assumes the OIDC Discovery URL is
-`{issuer.url}/.well-known/openid-configuration`. Most identity providers follow
-this structure, and you can omit the `discoveryUrl` field. To use a separate
-discovery service, specify the full path to the discovery endpoint in this
-field.
-
-If the CA for the Issuer isn't public, provide the PEM-encoded CA for the Discovery URL.
-
-At least one of the `audiences` entries must match the `aud` claim in the JWT.
-For OIDC tokens, this is the Client ID of the application attempting to access
-the Upbound API. Having multiple values set allows the same configuration to
-apply to multiple client applications, for example the `kubectl` CLI and an
-Internal Developer Portal.
-
-If you specify multiple `audiences`, `audienceMatchPolicy` must equal `MatchAny`.
-
-### Configure `claimMappings`
-
-#### Username claim mapping
-
-By default, the authenticator uses the `sub` claim as the user name. To override this, either:
-
-- specify *both* `claim` and `prefix`. `prefix` may be explicitly set to the empty string.
-
-or
-
-- specify a CEL `expression` to calculate the user name.
-
-```yaml
-claimMappings:
-  username:
-    claim: "sub"
-    prefix: "keycloak"
-    # or, instead of claim and prefix:
-    # expression: 'claims.username + ":external-user"'
-```
-
-
-#### Groups claim mapping
-
-By default, this configuration doesn't map groups, unless you either:
-
-- specify both `claim` and `prefix`. `prefix` may be explicitly set to the empty string.
-
-or
-
-- specify a CEL `expression` that returns a string or list of strings.
-
-
-```yaml
-claimMappings:
-  groups:
-    claim: "groups"
-    prefix: ""
-    # or, instead of claim and prefix:
-    # expression: 'claims.roles.split(",")'
-```
-
-
-### Validation rules
-
-
-Validation rules are outside the scope of this document. Review the
-[documentation][structured-auth-config] for more information. Examples include
-using CEL expressions to validate authentication such as:
-
-
-- Validating that a token claim has a specific value
-- Validating that a token has a limited lifetime
-- Ensuring usernames and groups don't contain reserved prefixes
-
-## Required claims
-
-To interact with Space and ControlPlane APIs, users must have the `upbound.io/aud` claim set to one of the following:
-
-| Upbound.io Audience                                       | Notes                                                       |
-| --------------------------------------------------------- | ----------------------------------------------------------- |
-| `[]`                                                       | No access to Space-level or ControlPlane APIs               |
-| `['upbound:spaces:api']`                                   | This identity is only for Space-level APIs                  |
-| `['upbound:spaces:controlplanes']`                         | This identity is only for ControlPlane APIs                 |
-| `['upbound:spaces:api', 'upbound:spaces:controlplanes']`   | This identity is for both Space-level and ControlPlane APIs |
-
-
-You can set this claim in two ways:
-
-- In the identity provider, mapped in the ID token.
-- Inject it in the authenticator with the `jwt.claimMappings.extra` array.
-
-For example:
-```yaml
-apiVersion: apiserver.config.k8s.io/v1beta1
-kind: AuthenticationConfiguration
-jwt:
-- issuer:
-    url: https://keycloak:8443/realms/master
-    certificateAuthority: |-
-      
-    audiences:
-    - master-realm
-    audienceMatchPolicy: MatchAny
-  claimMappings:
-    username:
-      claim: "preferred_username"
-      prefix: "keycloak:"
-    groups:
-      claim: "groups"
-      prefix: ""
-    extra:
-    - key: 'upbound.io/aud'
-      valueExpression: "['upbound:spaces:controlplanes', 'upbound:spaces:api']"
-```
-
-## Install the `AuthenticationConfiguration`
-
-Once you create an `AuthenticationConfiguration` file, specify this file as a
-`ConfigMap` in the host cluster for the Upbound Space.
-
-```sh
-kubectl create configmap <configmap-name> -n upbound-system --from-file=config.yaml=./path/to/config.yaml
-```
-
-
-To enable OIDC authentication and disable Upbound IAM when installing the Space,
-reference the configuration and pass an empty value to the Upbound IAM issuer
-parameter:
-
-
-```sh
-up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
-  ... \
-  --set "authentication.structuredConfig=<configmap-name>" \
-  --set "router.controlPlane.extraArgs[0]=--upbound-iam-issuer-url="
-```
-
-## Configure RBAC
-
-
-In this scenario, the external identity provider handles authentication, but
-permissions for Spaces and ControlPlane APIs use standard RBAC objects.
-
-### Spaces APIs
-
-The Spaces APIs include:
-```yaml
-- apiGroups:
-  - spaces.upbound.io
-  resources:
-  - controlplanes
-  - sharedexternalsecrets
-  - sharedsecretstores
-  - backups
-  - backupschedules
-  - sharedbackups
-  - sharedbackupconfigs
-  - sharedbackupschedules
-- apiGroups:
-  - observability.spaces.upbound.io
-  resources:
-  - sharedtelemetryconfigs
-```
-
-### ControlPlane APIs
-
-
-
-Crossplane specifies three [roles][crossplane-managed-clusterroles] for a
-ControlPlane: admin, editor, and viewer. These map to the verbs `admin`, `edit`,
-and `view` on the `controlplanes/k8s` resource in the `spaces.upbound.io` API
-group.
-
-
-### Control access
-
-The `groups` claim in the `AuthenticationConfiguration` allows you to control
-resource access when you create a `ClusterRoleBinding`. A `ClusterRole` defines
-the role's permissions, and a `ClusterRoleBinding` grants them to a subject.
-
-The example below allows `admin` permissions for all ControlPlanes to members of
-the `ctp-admins` group:
-
-```yaml
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
-  name: allow-ctp-admin
-rules:
-- apiGroups:
-  - spaces.upbound.io
-  resources:
-  - controlplanes/k8s
-  verbs:
-  - admin
-```
-
-The matching `ClusterRoleBinding` for the `ctp-admins` group:
-```yaml
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
-  name: allow-ctp-admin
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: allow-ctp-admin
-subjects:
-- apiGroup: rbac.authorization.k8s.io
-  kind: Group
-  name: ctp-admins
-```
-
-[structured-auth-config]: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#using-authentication-configuration
-[crossplane-managed-clusterroles]: https://github.com/crossplane/crossplane/blob/master/design/design-doc-rbac-manager.md#managed-rbac-clusterroles
-[upbound-rbac]: /manuals/platform/concepts/authorization/upbound-rbac
diff --git a/spaces_versioned_docs/version-v1.15/howtos/self-hosted/proxies-config.md b/spaces_versioned_docs/version-v1.15/howtos/self-hosted/proxies-config.md
deleted file mode 100644
index 3802e4cb0..000000000
--- a/spaces_versioned_docs/version-v1.15/howtos/self-hosted/proxies-config.md
+++ /dev/null
@@ -1,31 +0,0 @@
----
-title: Proxied configuration
-sidebar_position: 20
-description: Configure Upbound within a proxied environment
----
-
-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments in proxied environments. Proxy configuration procedures are consistent across versions.
-:::
-
-
-
-When you install Upbound with Helm in a proxied environment, update the specified registry values to point to your internal registry.
-
-
-
-```bash
-helm -n upbound-system upgrade --install spaces \
-  oci://xpkg.upbound.io/spaces-artifacts/spaces \
-  --version "${SPACES_VERSION}" \
-  --set "ingress.host=${SPACES_ROUTER_HOST}" \
-  --set "account=${UPBOUND_ACCOUNT}" \
-  --set "authentication.hubIdentities=true" \
-  --set "authorization.hubRBAC=true" \
-  --set "registry=registry.company.corp/spaces" \
-  --set "controlPlanes.uxp.registryOverride=registry.company.corp/xpkg.upbound.io" \
-  --set "controlPlanes.uxp.repository=registry.company.corp/spaces" \
-  --wait
-```
diff --git a/spaces_versioned_docs/version-v1.15/howtos/self-hosted/query-api.md b/spaces_versioned_docs/version-v1.15/howtos/self-hosted/query-api.md
deleted file mode 100644
index c112e9001..000000000
--- a/spaces_versioned_docs/version-v1.15/howtos/self-hosted/query-api.md
+++ /dev/null
@@ -1,396 +0,0 @@
----
-title: Deploy Query API infrastructure
-weight: 130
-description: Query API
-aliases:
-  - /all-spaces/self-hosted-spaces/query-api
-  - /self-hosted-spaces/query-api
-  - all-spaces/self-hosted-spaces/query-api
----
-
-
-
-
-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+). Query API has evolved across versions:
-
-- **Cloud Spaces**: Available since v1.6 (enabled by default)
-- **Self-Hosted**: Available since v1.8 (requires manual enablement)
-:::
-
-:::important
-
-This feature is in preview. The Query API is available in the Cloud Space offering since `v1.6` and enabled by default.
-
-Since `v1.8.0`, the Query API is also required to connect a Space. It's off by default in self-hosted Spaces; see below to enable it.
-
-:::
-
-Upbound's Query API allows users to inspect objects and resources within their control planes. The read-only `up alpha query` and `up alpha get` CLI commands allow you to gather information on your control planes in a fast and efficient package. These commands follow the [`kubectl` conventions][kubectl-conventions] for filtering, sorting, and retrieving information from your Space.
-
-Query API requires a PostgreSQL database to store the data. You can use the default PostgreSQL instance provided by Upbound or bring your own PostgreSQL instance.
-
-## Managed setup
-
-:::tip
-If you don't have specific requirements for your setup, Upbound recommends following this approach.
-:::
-
-To enable this feature, set `features.alpha.apollo.enabled=true` and `apollo.apollo.storage.postgres.create=true` when installing Spaces.
-
-However, you need to install CloudNativePG (`CNPG`) to provide the PostgreSQL instance. You can let the `up` CLI do this for you, or install it manually.
-
-For more customization, see the [Helm chart reference][helm-chart-reference]. You can modify the number
-of PostgreSQL instances, pooling instances, storage size, and more.
-
-If you have specific requirements not addressed in the Helm chart, see the [PostgreSQL setup][postgresql-setup] section below for more information on how to bring your own instance.
-
-### Using the up CLI
-
-Before you begin, make sure you have the most recent version of the [`up` CLI installed][up-cli-installed].
-
-To enable this feature, set `features.alpha.apollo.enabled=true` and `apollo.apollo.storage.postgres.create=true` when installing Spaces:
-
-```bash
-up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
-  ...
-  --set "features.alpha.apollo.enabled=true" \
-  --set "apollo.apollo.storage.postgres.create=true"
-```
-
-`up space init` and `up space upgrade` install CloudNativePG automatically, if needed.
-
-### Helm chart
-
-If you're installing the Helm chart in some other way, you can manually install CloudNativePG in one of the [supported ways][supported-ways], for example:
-
-```shell
-kubectl apply --server-side -f \
-  https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.24.1/cnpg-1.24.1.yaml
-kubectl rollout status -n cnpg-system deployment cnpg-controller-manager -w --timeout 120s
-```
-
-Next, install the Spaces Helm chart with the necessary values, for example:
-
-```shell
-helm -n upbound-system upgrade --install spaces \
-  oci://xpkg.upbound.io/spaces-artifacts/spaces \
-  --version "${SPACES_VERSION}" \
-  ...
-  --set "features.alpha.apollo.enabled=true" \
-  --set "apollo.apollo.storage.postgres.create=true" \
-  --wait
-```
-
-## Self-hosted PostgreSQL configuration
-
-
-If your workflow requires more customization, you can provide your own
-PostgreSQL instance and configure credentials manually.
-
-Using your own PostgreSQL instance requires careful architecture consideration.
-Review the architecture and requirements guidelines.
-
-### Architecture
-
-Besides a PostgreSQL database, the Query API architecture uses the following components:
-* **Apollo Syncers**: Watch `etcd` for changes and sync them to PostgreSQL. One, or more, per control plane.
-* **Apollo Server**: Serves the Query API out of the data in PostgreSQL. One, or more, per Space.
-
-The default setup also uses the `PgBouncer` connection pooler to manage connections from the syncers.
-```mermaid
-graph LR
-    User[User]
-
-    subgraph Cluster["Cluster (Spaces)"]
-        direction TB
-        Apollo[apollo]
-
-        subgraph ControlPlanes["Control Planes"]
-            APIServer[API Server]
-            Syncer[apollo-syncer]
-        end
-    end
-
-    PostgreSQL[(PostgreSQL)]
-
-    User -->|requests| Apollo
-
-    Apollo -->|connects| PostgreSQL
-    Apollo -->|creates schemas & users| PostgreSQL
-
-    Syncer -->|watches| APIServer
-    Syncer -->|writes| PostgreSQL
-
-    PostgreSQL -->|data| Apollo
-
-    style PostgreSQL fill:#e1f5ff,stroke:#333,stroke-width:2px,color:#000
-    style Apollo fill:#ffe1e1,stroke:#333,stroke-width:2px,color:#000
-    style Cluster fill:#f0f0f0,stroke:#333,stroke-width:2px,color:#000
-    style ControlPlanes fill:#fff,stroke:#666,stroke-width:1px,stroke-dasharray: 5 5,color:#000
-```
-
-
-Each component needs to connect to the PostgreSQL database.
-
-In the event of database issues, you can provide a new database and the syncers
-automatically repopulate the data.
-
-### Requirements
-
-* A PostgreSQL 16 instance or cluster.
-* A database, for example named `upbound`.
-* **Optional**: A dedicated user for the Apollo Syncers, for example named `syncer`; otherwise the Spaces Controller generates a dedicated set of credentials per syncer with the necessary permissions.
-* A dedicated **superuser or admin account** for the Apollo Server.
-* **Optional**: A connection pooler, like PgBouncer, to manage connections from the Apollo Syncers. If you didn't provide the optional users, you might have to configure the pooler to allow users to connect using the same credentials as PostgreSQL.
-* **Optional**: A read replica for the Apollo Syncers to connect to, to reduce load on the primary database. This might cause a slight delay in the data being available through the Query API.
-
-Below you can find examples of setups to get you started. You can mix and match the examples to suit your needs.
-
-### In-cluster setup
-
-:::tip
-
-If you don't have strong opinions on your setup, but still want full control over
-the resources created for customizations the managed setup doesn't support,
-Upbound recommends the in-cluster setup.
-
-:::
-
-For more customization than the managed setup, you can use CloudNativePG for
-PostgreSQL in the same cluster.
-
-For in-cluster setup, manually deploy the operator in one of the [supported ways][supported-ways-1], for example:
-
-```shell
-kubectl apply --server-side -f \
-  https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.24.1/cnpg-1.24.1.yaml
-kubectl rollout status -n cnpg-system deployment cnpg-controller-manager -w --timeout 120s
-```
-
-Then create a `Cluster` and `Pooler` in the `upbound-system` namespace, for example:
-
-```shell
-kubectl create ns upbound-system
-
-# A minimal example Cluster and Pooler; tune instances, storage, and
-# pooling to your needs (see the CloudNativePG documentation).
-kubectl apply -f - <<EOF
-apiVersion: postgresql.cnpg.io/v1
-kind: Cluster
-metadata:
-  name: spaces-apollo-pg
-  namespace: upbound-system
-spec:
-  instances: 3
-  storage:
-    size: 10Gi
----
-apiVersion: postgresql.cnpg.io/v1
-kind: Pooler
-metadata:
-  name: spaces-apollo-pg-pooler
-  namespace: upbound-system
-spec:
-  cluster:
-    name: spaces-apollo-pg
-  instances: 2
-  type: rw
-  pgbouncer:
-    poolMode: transaction
-EOF
-```
-
-### External setup
-
-
-:::tip
-
-If you want to run your PostgreSQL instance outside the cluster, but are fine with credentials being managed by the `apollo` user, this is the suggested way to proceed.
-
-:::
-
-When using this setup, you must manually create the required Secrets in the
-`upbound-system` namespace. The `apollo` user must have permissions to create
-schemas and users.
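-
-If you manage the database yourself, you may need to create that role by hand first. A minimal sketch with `psql`, assuming the database is named `upbound` as in the requirements above (the host, admin user, and password are placeholders):
-
-```shell
-psql "host=your-postgres-host dbname=upbound user=postgres" <<'SQL'
--- Allow apollo to log in and create roles (users) ...
-CREATE ROLE apollo LOGIN PASSWORD 'supersecret' CREATEROLE;
--- ... and to create schemas in the upbound database
-GRANT CREATE ON DATABASE upbound TO apollo;
-SQL
-```
-
-With the role in place, create the Secrets: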
-
-```shell
-
-kubectl create ns upbound-system
-
-# A Secret containing the necessary credentials to connect to the PostgreSQL instance
-kubectl create secret generic spaces-apollo-pg-app -n upbound-system \
-  --from-literal=password=supersecret
-
-# A Secret containing the necessary CA certificate to verify the connection to the PostgreSQL instance
-kubectl create secret generic spaces-apollo-pg-ca -n upbound-system \
-  --from-file=ca.crt=/path/to/ca.crt
-```
-
-Next, install Spaces with the necessary settings:
-
-```shell
-export PG_URL=your-postgres-host:5432
-export PG_POOLED_URL=your-pgbouncer-host:5432 # this could be the same as above
-
-helm upgrade --install ... \
-  --set "features.alpha.apollo.enabled=true" \
-  --set "apollo.apollo.storage.postgres.create=false" \
-  --set "apollo.apollo.storage.postgres.connection.url=$PG_URL" \
-  --set "apollo.apollo.storage.postgres.connection.credentials.secret.name=spaces-apollo-pg-app" \
-  --set "apollo.apollo.storage.postgres.connection.credentials.format=basicauth" \
-  --set "apollo.apollo.storage.postgres.connection.ca.name=spaces-apollo-pg-ca" \
-  --set "apollo.apollo.storage.postgres.connection.syncer.url=$PG_POOLED_URL"
-```
-
-### External setup with all custom credentials
-
-For custom credentials with Apollo Syncers or Server, create a new secret in the
-`upbound-system` namespace:
-
-```shell
-export APOLLO_SYNCER_USER=syncer
-export APOLLO_SERVER_USER=apollo
-
-kubectl create ns upbound-system
-
-# A Secret containing the necessary credentials to connect to the PostgreSQL instance
-kubectl create secret generic spaces-apollo-pg-app -n upbound-system \
-  --from-literal=password=supersecret
-
-# A Secret containing the necessary CA certificate to verify the connection to the PostgreSQL instance
-kubectl create secret generic spaces-apollo-pg-ca -n upbound-system \
-  --from-file=ca.crt=/path/to/ca.crt
-
-# A Secret containing the necessary credentials for the Apollo Syncers to connect to the PostgreSQL instance.
-# These will be used by all Syncers in the Space.
-kubectl create secret generic spaces-apollo-pg-syncer -n upbound-system \
-  --from-literal=username=$APOLLO_SYNCER_USER \
-  --from-literal=password=supersecret
-
-# A Secret containing the necessary credentials for the Apollo Server to connect to the PostgreSQL instance.
-kubectl create secret generic spaces-apollo-pg-apollo -n upbound-system \
-  --from-literal=username=$APOLLO_SERVER_USER \
-  --from-literal=password=supersecret
-```
-
-Next, install Spaces with the necessary settings:
-
-```shell
-export PG_URL=your-postgres-host:5432
-export PG_POOLED_URL=your-pgbouncer-host:5432 # this could be the same as above
-
-# The shared connection settings come first, followed by the credentials
-# for the syncers and then for the server.
-helm ... \
-  --set "features.alpha.apollo.enabled=true" \
-  --set "apollo.apollo.storage.postgres.create=false" \
-  --set "apollo.apollo.storage.postgres.connection.url=$PG_URL" \
-  --set "apollo.apollo.storage.postgres.connection.credentials.secret.name=spaces-apollo-pg-app" \
-  --set "apollo.apollo.storage.postgres.connection.credentials.format=basicauth" \
-  --set "apollo.apollo.storage.postgres.connection.ca.name=spaces-apollo-pg-ca" \
-  --set "apollo.apollo.storage.postgres.connection.syncer.url=$PG_POOLED_URL" \
-  --set "apollo.apollo.storage.postgres.connection.syncer.credentials.format=basicauth" \
-  --set "apollo.apollo.storage.postgres.connection.syncer.credentials.user=$APOLLO_SYNCER_USER" \
-  --set "apollo.apollo.storage.postgres.connection.syncer.credentials.secret.name=spaces-apollo-pg-syncer" \
-  --set "apollo.apollo.storage.postgres.connection.apollo.credentials.format=basicauth" \
-  --set "apollo.apollo.storage.postgres.connection.apollo.credentials.user=$APOLLO_SERVER_USER" \
-  --set "apollo.apollo.storage.postgres.connection.apollo.credentials.secret.name=spaces-apollo-pg-apollo" \
-  --set "apollo.apollo.storage.postgres.connection.apollo.url=$PG_POOLED_URL"
-```
-
-
-## Using the Query API
-
-
-See the [Query API documentation][query-api-documentation] for more information on how to use the Query API.
-
-
-
-
-[postgresql-setup]: #self-hosted-postgresql-configuration
-[up-cli-installed]: /manuals/cli/overview
-[query-api-documentation]: /spaces/howtos/query-api
-
-[helm-chart-reference]: /reference/helm-reference
-[kubectl-conventions]: https://kubernetes.io/docs/reference/kubectl/generated/kubectl_get/
-[supported-ways]: https://cloudnative-pg.io/documentation/current/installation_upgrade/
-[supported-ways-1]: https://cloudnative-pg.io/documentation/current/installation_upgrade/
-[cloudnativepg-documentation]: https://cloudnative-pg.io/documentation/1.24/storage/#configuration-via-a-pvc-template
-[postgresql-cluster]: https://cloudnative-pg.io/documentation/1.24/resource_management/
-[pooler]: https://cloudnative-pg.io/documentation/1.24/connection_pooling/#pod-templates
-[postgresql-cluster-2]: https://cloudnative-pg.io/documentation/1.24/replication/
-[pooler-3]: https://cloudnative-pg.io/documentation/1.24/connection_pooling/#high-availability-ha
-[postgresql-cluster-4]: https://cloudnative-pg.io/documentation/1.24/operator_capability_levels/#override-of-operand-images-through-the-crd
-[pooler-5]: https://cloudnative-pg.io/documentation/1.24/connection_pooling/#pod-templates
-[cloudnativepg-documentation-6]: https://cloudnative-pg.io/documentation/1.24/postgresql_conf/
diff --git a/spaces_versioned_docs/version-v1.15/howtos/self-hosted/scaling-resources.md b/spaces_versioned_docs/version-v1.15/howtos/self-hosted/scaling-resources.md
deleted file mode 100644
index 7bb04d2c2..000000000
--- a/spaces_versioned_docs/version-v1.15/howtos/self-hosted/scaling-resources.md
+++ /dev/null
@@ -1,184 +0,0 @@
----
-title: Scaling vCluster and etcd Resources
-weight: 950
-description: A guide for scaling vCluster and etcd resources in self-hosted Spaces
-aliases:
-  - /all-spaces/self-hosted-spaces/scaling-resources
-  - /spaces/scaling-resources
----
-
-With large workloads or during control plane migration, you may encounter
-performance-impacting resource constraints. This guide explains how to scale
-vCluster and `etcd` resources for optimal performance in your self-hosted Space.
-
-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Scaling procedures are consistent across versions.
-:::
-
-## Signs of resource constraints
-
-You may need to scale your vCluster or `etcd` resources if you observe:
-
-- API server timeout errors such as `http: Handler timeout`
-- Error messages about `too many requests` and requests to `try again later`
-- Operations like provider installation failing with errors like `cannot apply provider package secret`
-- vCluster pods experiencing continuous restarts
-- API performance degrading with high resource volume
-
-
-## Scaling vCluster resources
-
-
-The vCluster component handles Kubernetes API requests for your control planes.
-Deployments with multiple control planes or providers may exceed default resource allocations.
-
-```yaml
-# Default settings
-controlPlanes.vcluster.resources.limits.cpu: "3000m"
-controlPlanes.vcluster.resources.limits.memory: "3960Mi"
-controlPlanes.vcluster.resources.requests.cpu: "170m"
-controlPlanes.vcluster.resources.requests.memory: "1320Mi"
-```
-
-For larger workloads, like migrating from an existing control plane with several
-providers, increase these resource limits in your Spaces `values.yaml` file.
-
-```yaml
-controlPlanes:
-  vcluster:
-    resources:
-      limits:
-        cpu: "4000m"    # Increase to 4 cores
-        memory: "6Gi"   # Increase to 6GB memory
-      requests:
-        cpu: "500m"     # Increase baseline CPU request
-        memory: "2Gi"   # Increase baseline memory request
-```
-
-## Scaling `etcd` storage
-
-Kubernetes relies on `etcd` performance, which can lead to IOPS (input/output
-operations per second) bottlenecks. Upbound allocates `50Gi` volumes for `etcd`
-in cloud environments to ensure adequate IOPS performance.
-
-```yaml
-# Default setting
-controlPlanes.etcd.persistence.size: "5Gi"
-```
-
-For production environments or when migrating large control planes, increase
-`etcd` volume size and specify an appropriate storage class:
-
-```yaml
-controlPlanes:
-  etcd:
-    persistence:
-      size: "50Gi"                  # Recommended for production
-      storageClassName: "fast-ssd"  # Use a high-performance storage class
-```
-
-### Storage class considerations
-
-For AWS:
-- Use GP3 volumes with adequate IOPS
-- For GP3 volumes, IOPS scale with volume size (3000 IOPS baseline)
-- For optimal performance, provision at least 32Gi to support up to 16,000 IOPS
-
-For GCP and Azure:
-- Use SSD-based persistent disk types for optimal performance
-- Consider premium storage options for high-throughput workloads
-
-## Scaling Crossplane resources
-
-Crossplane manages provider resources in your control planes. You may need to increase provider resources for larger deployments:
-
-```yaml
-# Default settings
-controlPlanes.uxp.resourcesCrossplane.requests.cpu: "370m"
-controlPlanes.uxp.resourcesCrossplane.requests.memory: "400Mi"
-```
-
-
-For environments with many providers or managed resources:
-
-
-```yaml
-controlPlanes:
-  uxp:
-    resourcesCrossplane:
-      limits:
-        cpu: "1000m"      # Add CPU limit
-        memory: "1Gi"     # Add memory limit
-      requests:
-        cpu: "500m"       # Increase CPU request
-        memory: "512Mi"   # Increase memory request
-```
-
-## High availability configuration
-
-For production environments, enable High Availability mode to ensure resilience:
-
-```yaml
-controlPlanes:
-  ha:
-    enabled: true
-```
-
-## Best practices for migration scenarios
-
-When migrating from existing control planes into a self-hosted Space:
-
-1. **Pre-scale resources**: Scale up resources before performing the migration
-2. **Monitor resource usage**: Watch resource consumption during and after migration with `kubectl top pods` (see the sketch after this list)
-3. **Scale incrementally**: If issues persist, increase resources incrementally until performance stabilizes
-4. **Consider storage performance**: `etcd` is sensitive to storage I/O performance
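-
-A quick sketch of step 2, assuming the Kubernetes metrics server is installed in the host cluster:
-
-```bash
-# Show the heaviest memory consumers across all namespaces
-kubectl top pods --all-namespaces --sort-by=memory | head -20
-```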
-
-## Helm values configuration
-
-Apply these settings through your Spaces Helm values file:
-
-```yaml
-controlPlanes:
-  vcluster:
-    resources:
-      limits:
-        cpu: "4000m"
-        memory: "6Gi"
-      requests:
-        cpu: "500m"
-        memory: "2Gi"
-  etcd:
-    persistence:
-      size: "50Gi"
-      storageClassName: "gp3" # Use your cloud provider's fast storage class
-  uxp:
-    resourcesCrossplane:
-      limits:
-        cpu: "1000m"
-        memory: "1Gi"
-      requests:
-        cpu: "500m"
-        memory: "512Mi"
-  ha:
-    enabled: true # For production environments
-```
-
-Apply the configuration using Helm:
-
-```bash
-helm upgrade --install spaces oci://xpkg.upbound.io/spaces-artifacts/spaces \
-  -f values.yaml \
-  -n upbound-system
-```
-
-## Considerations
-
-- **Provider count**: Each provider adds resource overhead; consider using provider families to optimize resource usage
-- **Managed resources**: The number of managed resources impacts CPU usage more than memory
-- **Vertical pod autoscaling**: Consider using vertical pod autoscaling in Kubernetes to automatically adjust resources based on usage
-- **Storage performance**: Storage performance is as important as capacity for etcd
-- **Network latency**: Low-latency connections between components improve performance
-
-
diff --git a/spaces_versioned_docs/version-v1.15/howtos/self-hosted/self-hosted-spaces-deployment.md b/spaces_versioned_docs/version-v1.15/howtos/self-hosted/self-hosted-spaces-deployment.md
deleted file mode 100644
index e549e3939..000000000
--- a/spaces_versioned_docs/version-v1.15/howtos/self-hosted/self-hosted-spaces-deployment.md
+++ /dev/null
@@ -1,461 +0,0 @@
----
-title: Deployment Workflow
-sidebar_position: 3
-description: A quickstart guide for Upbound Spaces
-tier: "business"
----
-import GlobalLanguageSelector, { CodeBlock } from '@site/src/components/GlobalLanguageSelector';
-
-
-
-
-
-This guide deploys a self-hosted Upbound cluster in AWS.
-
-
-
-
-
-This guide deploys a self-hosted Upbound cluster in Azure.
-
-
-
-
-
-This guide deploys a self-hosted Upbound cluster in GCP.
-
-
-
-Disconnected Spaces allow you to host control planes in your preferred environment.
-
-## Prerequisites
-
-To get started deploying your own Disconnected Space, you need:
-
-- An Upbound organization account string, provided by your Upbound account representative
-- A `token.json` license, provided by your Upbound account representative
-
-
-
-- An AWS account and the AWS CLI
-
-
-
-
-
-- An Azure account and the Azure CLI
-
-
-
-
-
-- A GCP account and the GCP CLI
-
-
-
-:::important
-Disconnected Spaces are a business critical feature of Upbound and require a license token to successfully complete the installation. [Contact Upbound][contact-upbound] if you want to try out Upbound with Disconnected Spaces.
-:::
-
-## Provision the hosting environment
-
-### Create a cluster
-
-
-
-Configure the name and target region you want the EKS cluster deployed to.
-
-```ini
-export SPACES_CLUSTER_NAME=upbound-space-quickstart
-export SPACES_REGION=us-east-1
-```
-
-Provision a 3-node cluster using eksctl.
-
-```bash
-# A minimal example ClusterConfig; adjust instance type and node count as needed.
-cat <<EOF | eksctl create cluster -f -
-apiVersion: eksctl.io/v1alpha5
-kind: ClusterConfig
-metadata:
-  name: ${SPACES_CLUSTER_NAME}
-  region: ${SPACES_REGION}
-managedNodeGroups:
-  - name: ng-1
-    instanceType: m5.xlarge
-    desiredCapacity: 3
-EOF
-```
-
-
-
-Configure the name and target region you want the AKS cluster deployed to.
-
-```ini
-export SPACES_RESOURCE_GROUP_NAME=upbound-space-quickstart
-export SPACES_CLUSTER_NAME=upbound-space-quickstart
-export SPACES_LOCATION=westus
-```
-
-Provision a new Azure resource group.
-
-```bash
-az group create --name ${SPACES_RESOURCE_GROUP_NAME} --location ${SPACES_LOCATION}
-```
-
-Provision a 3-node cluster.
-
-```bash
-az aks create -g ${SPACES_RESOURCE_GROUP_NAME} -n ${SPACES_CLUSTER_NAME} \
-  --enable-managed-identity \
-  --node-count 3 \
-  --node-vm-size Standard_D4s_v4 \
-  --enable-addons monitoring \
-  --enable-msi-auth-for-monitoring \
-  --generate-ssh-keys \
-  --network-plugin kubenet \
-  --network-policy calico
-```
-
-Get the kubeconfig of your AKS cluster.
-
-```bash
-az aks get-credentials --resource-group ${SPACES_RESOURCE_GROUP_NAME} --name ${SPACES_CLUSTER_NAME}
-```
-
-
-
-
-
-Configure the name and target region you want the GKE cluster deployed to.
-
-```ini
-export SPACES_PROJECT_NAME=upbound-spaces-project
-export SPACES_CLUSTER_NAME=upbound-spaces-quickstart
-export SPACES_LOCATION=us-west1-a
-```
-
-Create a new project and set it as the current project.
-
-```bash
-gcloud projects create ${SPACES_PROJECT_NAME}
-gcloud config set project ${SPACES_PROJECT_NAME}
-```
-
-Provision a 3-node cluster.
-
-```bash
-gcloud container clusters create ${SPACES_CLUSTER_NAME} \
-  --enable-network-policy \
-  --num-nodes=3 \
-  --zone=${SPACES_LOCATION} \
-  --machine-type=e2-standard-4
-```
-
-Get the kubeconfig of your GKE cluster.
-
-```bash
-gcloud container clusters get-credentials ${SPACES_CLUSTER_NAME} --zone=${SPACES_LOCATION}
-```
-
-
-
-## Configure the pre-install
-
-### Set your Upbound organization account details
-
-Set your Upbound organization account string as an environment variable for use in future steps:
-
-```ini
-export UPBOUND_ACCOUNT=
-```
-
-### Set up pre-install configurations
-
-Export the path of the license token JSON file provided by your Upbound account representative.
-
-```ini {copy-lines="2"}
-# Change the path to where you saved the token.
-export SPACES_TOKEN_PATH="/path/to/token.json"
-```
-
-Set the version of Spaces software you want to install.
-
-```ini
-export SPACES_VERSION=
-```
-
-Set the router host. The `SPACES_ROUTER_HOST` is the domain name that's used to access the control plane instances. It's used by the ingress controller to route requests.
-
-```ini
-export SPACES_ROUTER_HOST="proxy.upbound-127.0.0.1.nip.io"
-```
-
-:::important
-Make sure to replace the placeholder text in `SPACES_ROUTER_HOST` and provide a real domain that you own.
-:::
-
-
-## Install the Spaces software
-
-
-### Install cert-manager
-
-Install cert-manager.
-
-```bash
-kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.11.0/cert-manager.yaml
-kubectl wait deployment -n cert-manager cert-manager-webhook --for condition=Available=True --timeout=360s
-```
-
-
-
-### Install the AWS Load Balancer Controller
-
-```bash
-helm install aws-load-balancer-controller aws-load-balancer-controller --namespace kube-system \
-  --repo https://aws.github.io/eks-charts \
-  --set clusterName=${SPACES_CLUSTER_NAME} \
-  --set serviceAccount.create=false \
-  --set serviceAccount.name=aws-load-balancer-controller \
-  --wait
-```
-
-
-
-### Install ingress-nginx
-
-Starting with Spaces v1.10.0, you need to configure the ingress-nginx
-controller to allow SSL-passthrough mode. You can do so by passing the
-`--enable-ssl-passthrough=true` command-line option to the controller.
-The following Helm install command enables this with the `controller.extraArgs` -parameter: - - - -```bash -helm upgrade --install ingress-nginx ingress-nginx \ - --create-namespace --namespace ingress-nginx \ - --repo https://kubernetes.github.io/ingress-nginx \ - --version 4.12.1 \ - --set 'controller.service.type=LoadBalancer' \ - --set 'controller.extraArgs.enable-ssl-passthrough=true' \ - --set 'controller.service.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-type=external' \ - --set 'controller.service.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-scheme=internet-facing' \ - --set 'controller.service.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-nlb-target-type=ip' \ - --set 'controller.service.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-healthcheck-protocol=http' \ - --set 'controller.service.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-healthcheck-path=/healthz' \ - --set 'controller.service.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-healthcheck-port=10254' \ - --wait -``` - - - - - -```bash -helm upgrade --install ingress-nginx ingress-nginx \ - --create-namespace --namespace ingress-nginx \ - --repo https://kubernetes.github.io/ingress-nginx \ - --version 4.12.1 \ - --set 'controller.service.type=LoadBalancer' \ - --set 'controller.extraArgs.enable-ssl-passthrough=true' \ - --set 'controller.service.annotations.service\.beta\.kubernetes\.io/azure-load-balancer-health-probe-request-path=/healthz' \ - --wait -``` - - - - - -```bash -helm upgrade --install ingress-nginx ingress-nginx \ - --create-namespace --namespace ingress-nginx \ - --repo https://kubernetes.github.io/ingress-nginx \ - --version 4.12.1 \ - --set 'controller.service.type=LoadBalancer' \ - --set 'controller.extraArgs.enable-ssl-passthrough=true' \ - --wait -``` - - - -### Install Upbound Spaces software - -Create an image pull secret so that the cluster can pull Upbound Spaces images. - -```bash -kubectl create ns upbound-system -kubectl -n upbound-system create secret docker-registry upbound-pull-secret \ - --docker-server=https://xpkg.upbound.io \ - --docker-username="$(jq -r .accessId $SPACES_TOKEN_PATH)" \ - --docker-password="$(jq -r .token $SPACES_TOKEN_PATH)" -``` - -Log in with Helm to be able to pull chart images for the installation commands. - -```bash -jq -r .token $SPACES_TOKEN_PATH | helm registry login xpkg.upbound.io -u $(jq -r .accessId $SPACES_TOKEN_PATH) --password-stdin -``` - -Install the Spaces software. - -```bash -helm -n upbound-system upgrade --install spaces \ - oci://xpkg.upbound.io/spaces-artifacts/spaces \ - --version "${SPACES_VERSION}" \ - --set "ingress.host=${SPACES_ROUTER_HOST}" \ - --set "account=${UPBOUND_ACCOUNT}" \ - --set "authentication.hubIdentities=true" \ - --set "authorization.hubRBAC=true" \ - --wait -``` - -### Create a DNS record - -:::important -If you chose to create a public ingress, you also need to create a DNS record for the load balancer of the public facing ingress. Do this before you create your first control plane. -::: - -Create a DNS record for the load balancer of the public facing ingress. 
To get the address for the Ingress, run the following: - - - -```bash -kubectl get ingress \ - -n upbound-system mxe-router-ingress \ - -o jsonpath='{.status.loadBalancer.ingress[0].hostname}' -``` - - - - - -```bash -kubectl get ingress \ - -n upbound-system mxe-router-ingress \ - -o jsonpath='{.status.loadBalancer.ingress[0].ip}' -``` - - - - - -```bash -kubectl get ingress \ - -n upbound-system mxe-router-ingress \ - -o jsonpath='{.status.loadBalancer.ingress[0].ip}' -``` - - - -If the preceding command doesn't return a load balancer address then your provider may not have allocated it yet. Once it's available, add a DNS record for the `ROUTER_HOST` to point to the given load balancer address. If it's an IPv4 address, add an A record. If it's a domain name, add a CNAME record. - -## Configure the up CLI - -With your kubeconfig pointed at the Kubernetes cluster where you installed -Upbound Spaces, create a new profile in the `up` CLI. This profile interacts -with your Space: - -```bash -up profile create --use ${SPACES_CLUSTER_NAME} --type=disconnected --organization ${UPBOUND_ACCOUNT} -``` - -Optionally, log in to your Upbound account using the new profile so you can use the Upbound Marketplace with this profile as well: - -```bash -up login -``` - - -## Connect to your Space - - -Use `up ctx` to create a kubeconfig context pointed at your new Space: - -```bash -up ctx disconnected/$(kubectl config current-context) -``` - -## Create your first control plane - -You can now create a control plane with the `up` CLI: - -```bash -up ctp create ctp1 -``` - -You can also create a control plane with kubectl: - -```yaml -cat < -```yaml -observability: - spacesCollector: - env: - - name: API_KEY - valueFrom: - secretKeyRef: - name: my-secret - key: api-key - config: - exporters: - otlphttp: - endpoint: "" - headers: - api-key: ${env:API_KEY} - exportPipeline: - logs: - - otlphttp - metrics: - - otlphttp - traces: - - otlphttp -``` - - -You can export metrics, logs, and traces from your Crossplane installation, Spaces -infrastructure (controller, API, router, etc.), provider-helm, and -provider-kubernetes. - -### Router metrics - -The Spaces router component uses Envoy as a reverse proxy and exposes detailed -metrics about request handling, circuit breakers, and connection pooling. -Upbound collects these metrics in your Space after you enable Space-level -observability. - -Envoy metrics in Upbound include: - -- **Upstream cluster metrics** - Request status codes, timeouts, retries, and latency for traffic to control planes and services -- **Circuit breaker metrics** - Connection and request circuit breaker state for both `DEFAULT` and `HIGH` priority levels -- **Downstream listener metrics** - Client connections and requests received -- **HTTP connection manager metrics** - End-to-end HTTP request processing and latency - -For a complete list of available router metrics and example PromQL queries, see the [Router metrics reference][router-ref]. - -### Router tracing - -The Spaces router generates distributed traces through OpenTelemetry integration, -providing end-to-end visibility into request flow across the system. Use these -traces to debug latency issues, understand request paths, and correlate errors -across services. 
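For example, to correlate a specific client request with its router spans, you can supply your own W3C trace context when calling a control plane through the router. The following is a minimal sketch, assuming a router host of `proxy.example.com` and a valid bearer token in `${TOKEN}`; the exact URL path depends on your Space:

```bash
# Generate a random W3C trace context (32-hex trace ID, 16-hex span ID)
TRACE_ID=$(openssl rand -hex 16)
SPAN_ID=$(openssl rand -hex 8)

# The trailing -01 flag marks the trace as sampled; when a traceparent
# header is present, the router respects the parent's sampling decision.
curl -s -o /dev/null \
  -H "traceparent: 00-${TRACE_ID}-${SPAN_ID}-01" \
  -H "Authorization: Bearer ${TOKEN}" \
  "https://proxy.example.com/v1/controlPlanes"

echo "Search your tracing backend for trace ID: ${TRACE_ID}"
```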
The router uses:

- **Protocol**: OTLP (OpenTelemetry Protocol) over gRPC
- **Service name**: `spaces-router`
- **Transport**: TLS-encrypted connection to telemetry collector

#### Trace configuration

Enable tracing and configure the sampling rate with the following Helm values:

```yaml
observability:
  enabled: true
  tracing:
    enabled: true
    sampling:
      rate: 0.1 # Sample 10% of new traces (0.0-1.0)
```

The sampling behavior depends on whether a parent trace context exists:

- **With parent context**: If a `traceparent` header is present, the parent's
  sampling decision is respected, enabling proper distributed tracing across services.
- **Root spans**: For new traces without a parent, Envoy samples based on
  `x-request-id` hashing. The default sampling rate is 10%.

#### TLS configuration for external collectors

To send traces to an external OTLP collector, configure the endpoint and TLS settings:

```yaml
observability:
  enabled: true
  tracing:
    enabled: true
    endpoint: "otlp-gateway.example.com"
    port: 443
    tls:
      caBundleSecretRef: "custom-ca-secret"
```

If `caBundleSecretRef` is set, the router uses the CA bundle from the referenced
Kubernetes secret. The secret must contain a key named `ca.crt` with the
PEM-encoded CA bundle. If not set, the router uses the Spaces CA for the
in-cluster collector.

#### Custom trace tags

The router adds custom tags to every span to enable filtering and grouping by
control plane:

| Tag | Source | Description |
|-----|--------|-------------|
| `controlplane.id` | `x-upbound-mxp-id` header | Control plane UUID |
| `controlplane.name` | `x-upbound-mxp-host` header | Internal vcluster hostname |
| `hostcluster.id` | `x-upbound-hostcluster-id` header | Host cluster identifier |

These tags enable queries like "show all slow requests to control plane X" or
"find errors for control planes in host cluster Y."

#### Example trace

The following example shows the attributes from a successful GET request:

```text
Span: ingress
├─ Service: spaces-router
├─ Duration: 8.025ms
├─ Attributes:
│  ├─ http.method: GET
│  ├─ http.status_code: 200
│  ├─ upstream_cluster: ctp-b2b37aaa-ee55-492c-ba0c-4d561a6325fa-api-cluster
│  ├─ controlplane.id: b2b37aaa-ee55-492c-ba0c-4d561a6325fa
│  ├─ controlplane.name: vcluster.mxp-b2b37aaa-ee55-492c-ba0c-4d561a6325fa-system
│  └─ response_size: 1827
```

## Available metrics

Space-level observability collects metrics from multiple infrastructure components:

### Infrastructure component metrics

- Crossplane controller metrics
- Spaces controller, API, and router metrics
- Provider metrics (provider-helm, provider-kubernetes)

### Router metrics

The router component exposes Envoy proxy metrics for monitoring traffic flow and
service health.
Key metric categories include:

- `envoy_cluster_upstream_rq_*` - Upstream request metrics (status codes, timeouts, retries, latency)
- `envoy_cluster_circuit_breakers_*` - Circuit breaker state and capacity
- `envoy_listener_downstream_*` - Client connection and request metrics
- `envoy_http_downstream_*` - HTTP request processing metrics

Example query to monitor total request rate:

```promql
sum(rate(envoy_cluster_upstream_rq_total{job="spaces-router-envoy"}[5m]))
```

Example query for P95 latency:

```promql
histogram_quantile(
  0.95,
  sum by (le) (
    rate(envoy_cluster_upstream_rq_time_bucket{job="spaces-router-envoy"}[5m])
  )
)
```

For detailed router metrics documentation and more query examples, see the [Router metrics reference][router-ref].

## OpenTelemetryCollector image

Control plane (`SharedTelemetry`) and Space observability deploy the same custom
OpenTelemetry Collector image. The OpenTelemetry Collector image supports the
`otlphttp`, `datadog`, and `debug` exporters.

For more information on observability configuration, review the [Helm chart reference][helm-chart-reference].

## Observability in control planes

Read the [observability documentation][observability-documentation] to learn
about the features Upbound offers for collecting telemetry from control planes.

## Router metrics reference {#router-ref}

To avoid overwhelming observability tools with hundreds of Envoy metrics, an
allow-list filters metrics to only the following metric families.

### Upstream cluster metrics

Metrics tracking requests sent from Envoy to configured upstream clusters.
Individual control planes, spaces-api, and other services are each considered
an upstream cluster. Use these metrics to monitor service health, identify
upstream errors, and measure backend latency.

| Metric | Description |
|--------|-------------|
| `envoy_cluster_upstream_rq_xx_total` | HTTP status codes (2xx, 3xx, 4xx, 5xx) with label `envoy_response_code_class` |
| `envoy_cluster_upstream_rq_timeout_total` | Requests that timed out waiting for upstream |
| `envoy_cluster_upstream_rq_retry_limit_exceeded_total` | Requests that exhausted retry attempts |
| `envoy_cluster_upstream_rq_total` | Total upstream requests |
| `envoy_cluster_upstream_rq_time_bucket` | Latency histogram (for P50/P95/P99 calculations) |
| `envoy_cluster_upstream_rq_time_sum` | Sum of request durations |
| `envoy_cluster_upstream_rq_time_count` | Count of requests |

### Circuit breaker metrics

Metrics tracking circuit breaker state and remaining capacity. Circuit breakers
prevent cascading failures by limiting connections and concurrent requests to
unhealthy upstreams. Two priority levels exist: `DEFAULT` for watch requests and
`HIGH` for API requests.
- - -| Name | Description | -|--------|-------------| -| `envoy_cluster_circuit_breakers_default_cx_open` | `DEFAULT` priority connection circuit breaker open (gauge) | -| `envoy_cluster_circuit_breakers_default_rq_open` | `DEFAULT` priority request circuit breaker open (gauge) | -| `envoy_cluster_circuit_breakers_default_remaining_cx` | Available `DEFAULT` priority connections (gauge) | -| `envoy_cluster_circuit_breakers_default_remaining_rq` | Available `DEFAULT` priority request slots (gauge) | -| `envoy_cluster_circuit_breakers_high_cx_open` | `HIGH` priority connection circuit breaker open (gauge) | -| `envoy_cluster_circuit_breakers_high_rq_open` | `HIGH` priority request circuit breaker open (gauge) | -| `envoy_cluster_circuit_breakers_high_remaining_cx` | Available `HIGH` priority connections (gauge) | -| `envoy_cluster_circuit_breakers_high_remaining_rq` | Available `HIGH` priority request slots (gauge) | - -### Downstream listener metrics - -Metrics tracking requests received from clients such as kubectl and API consumers. -Use these metrics to monitor client connection patterns, overall request volume, -and responses sent to external users. - -| Name | Description | -|--------|-------------| -| `envoy_listener_downstream_rq_xx_total` | HTTP status codes for responses sent to clients | -| `envoy_listener_downstream_rq_total` | Total requests received from clients | -| `envoy_listener_downstream_cx_total` | Total connections from clients | -| `envoy_listener_downstream_cx_active` | Currently active client connections (gauge) | - - - -### HTTP connection manager metrics - - -Metrics from Envoy's HTTP connection manager tracking end-to-end request -processing. These metrics provide a comprehensive view of the HTTP request -lifecycle including status codes and client-perceived latency. - -| Name | Description | -|--------|-------------| -| `envoy_http_downstream_rq_xx` | HTTP status codes (note: no `_total` suffix for this metric family) | -| `envoy_http_downstream_rq_total` | Total HTTP requests received | -| `envoy_http_downstream_rq_time_bucket` | Downstream request latency histogram | -| `envoy_http_downstream_rq_time_sum` | Sum of downstream request durations | -| `envoy_http_downstream_rq_time_count` | Count of downstream requests | - -[router-ref]: #router-ref -[observability-documentation]: /spaces/howtos/observability -[opentelemetry-collector]: https://opentelemetry.io/docs/collector/ -[opentelemetry-operator]: https://opentelemetry.io/docs/kubernetes/operator/ -[helm-chart-reference]: /reference/helm-reference diff --git a/spaces_versioned_docs/version-v1.15/howtos/self-hosted/spaces-management.md b/spaces_versioned_docs/version-v1.15/howtos/self-hosted/spaces-management.md deleted file mode 100644 index 3df61c306..000000000 --- a/spaces_versioned_docs/version-v1.15/howtos/self-hosted/spaces-management.md +++ /dev/null @@ -1,219 +0,0 @@ ---- -title: Interacting with Disconnected Spaces -sidebar_position: 10 -description: Common operations in Spaces ---- - -:::info API Version Information -This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Spaces management procedures are consistent across versions. - -For version compatibility details, see the . -::: - -## Spaces management - -### Create a Space - -To install an Upbound Space into a cluster, it's recommended you dedicate an entire Kubernetes cluster for the Space. You can use [up space init][up-space-init] to install an Upbound Space. 
Below is an example:

```bash
up space init "v1.9.0"
```

:::tip
For a full guide to get started with Spaces, read the [quickstart][quickstart] guide.
:::

You can also install the helm chart for Spaces directly. In order for a Spaces install to succeed, you must install and configure some prerequisites first. These include:

- UXP
- provider-helm and provider-kubernetes
- cert-manager

Furthermore, the Spaces chart requires a pull secret, which Upbound must provide to you.

```bash
helm -n upbound-system upgrade --install spaces \
  oci://xpkg.upbound.io/spaces-artifacts/spaces \
  --version "v1.9.0" \
  --set "ingress.host=your-host.com" \
  --set "clusterType=eks" \
  --set "account=your-upbound-account" \
  --wait
```

For a complete tutorial of the helm install, read one of the deployment guides for [AWS][aws], [Azure][azure], or [GCP][gcp], which cover the step-by-step process.

### Upgrade a Space

To upgrade a Space from one version to the next, use [up space upgrade][up-space-upgrade]. Spaces supports upgrading from version `vX.N.*` to version `vX.N+1.*`.

```bash
up space upgrade "v1.9.0"
```

You can also upgrade a Space by manually bumping the Helm chart version. Before
upgrading, review the release notes for any breaking changes or
special requirements:

1. Review the release notes for the target version in the [Spaces Release Notes][spaces-release-notes]
2. Upgrade the Space by updating the helm chart version:

```bash
helm -n upbound-system upgrade spaces \
  oci://xpkg.upbound.io/spaces-artifacts/spaces \
  --version "v1.9.0" \
  --reuse-values \
  --wait
```

For major version upgrades or configuration changes, extract your current values
and adjust:

```bash
# Extract current values to a file
helm -n upbound-system get values spaces > spaces-values.yaml

# Upgrade with modified values
helm -n upbound-system upgrade spaces \
  oci://xpkg.upbound.io/spaces-artifacts/spaces \
  --version "v1.9.0" \
  -f spaces-values.yaml \
  --wait
```

### Downgrade a Space

To roll back a Space from one version to the previous one, use [up space upgrade][up-space-upgrade-1]. Spaces supports downgrading from version `vX.N.*` to version `vX.N-1.*`.

```bash
up space upgrade --rollback
```

You can also downgrade a Space manually using Helm by specifying an earlier version:

```bash
helm -n upbound-system upgrade spaces \
  oci://xpkg.upbound.io/spaces-artifacts/spaces \
  --version "v1.8.0" \
  --reuse-values \
  --wait
```

When downgrading, make sure to:

1. Check the [release notes][release-notes] for specific downgrade instructions
2. Verify compatibility between the downgraded Space and any control planes
3. Back up any critical data before proceeding

### Uninstall a Space

To uninstall a Space from a Kubernetes cluster, use [up space destroy][up-space-destroy]. A destroy operation uninstalls core components and orphans control planes and their associated resources.

```bash
up space destroy
```

## Control plane management

You can manage control planes in a Space via the [up CLI][up-cli] or the Spaces-local Kubernetes API. When you install a Space, it defines a new API type, `kind: ControlPlane`, that you can use to create and manage control planes in the Space.
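To confirm the API is registered in your Space cluster, you can use standard Kubernetes discovery commands. This is a quick sketch; output shape varies by Spaces version:

```bash
# List the API types the Space registers under the spaces.upbound.io group
kubectl api-resources --api-group=spaces.upbound.io

# Inspect the ControlPlane spec fields served by the Space's API server
kubectl explain controlplane.spec
```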
- -### Create a control plane - -To create a control plane in a Space using `up`, run the following: - -```bash -up ctp create ctp1 -``` - -You can also declare a new control plane like the example below and apply it to your Spaces cluster: - -```yaml -apiVersion: spaces.upbound.io/v1beta1 -kind: ControlPlane -metadata: - name: ctp1 - namespace: default -spec: - writeConnectionSecretToRef: - name: kubeconfig-ctp1 - namespace: default -``` - -This manifest: - -- Creates a new control plane in the space called `ctp1`. -- Publishes the kubeconfig to connect to the control plane to a secret in the Spaces cluster, called `kubeconfig-ctp1` - -### Connect to a control plane - -To connect to a control plane in a Space using `up`, run the following: - -```bash -up ctp connect new-control-plane -``` - -The command changes your kubeconfig's current context to the control plane you specify. If you want to change your kubeconfig back to a previous context, run: - -```bash -up ctp disconnect -``` - -If you configured your control plane to publish connection details, you can also access it this way. Once the control plane is ready, use the secret (containing connection details) to connect to the API server of your control plane. - -```bash -kubectl get secret -n default -o jsonpath='{.data.kubeconfig}' | base64 -d > /tmp/.yaml -``` - -Reference the kubeconfig whenever you want to interact directly with the API server of the control plane (vs the Space's API server): - -```bash -kubectl get providers --kubeconfig=/tmp/.yaml -``` - -### Configure a control plane - -Spaces offers a built-in feature that allows you to connect a control plane to a Git source. This experience is like when a control plane runs in [Upbound's SaaS environment][upbound-s-saas-environment]. Upbound recommends using the built-in Git integration to drive configuration of your control planes in a Space. - -Learn more in the [Spaces Git integration][spaces-git-integration] documentation. 
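As a quick end-to-end sketch of the workflow above, the following waits for the `ctp1` control plane from the earlier manifest to become ready, then pulls its published kubeconfig. The `Ready` condition name is an assumption; check `kubectl describe` output for your Spaces version:

```bash
# Wait for the control plane to report readiness
kubectl wait controlplane ctp1 --for=condition=Ready --timeout=10m

# Decode the connection secret published via writeConnectionSecretToRef
kubectl get secret kubeconfig-ctp1 -n default \
  -o jsonpath='{.data.kubeconfig}' | base64 -d > /tmp/ctp1.yaml

# Interact with the control plane's API server directly
kubectl get providers --kubeconfig=/tmp/ctp1.yaml
```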
- -### List control planes - -To list all control planes in a Space using `up`, run the following: - -```bash -up ctp list -``` - -Or you can use Kubernetes-style semantics to list the control plane: - -```bash -kubectl get controlplanes -``` - - -### Delete a control plane - -To delete a control plane in a Space using `up`, run the following: - -```bash -up ctp delete ctp1 -``` - -Or you can use Kubernetes-style semantics to delete the control plane: - -```bash -kubectl delete controlplane ctp1 -``` - - -[up-space-init]: /reference/cli-reference -[quickstart]: / -[aws]: /spaces/howtos/self-hosted/self-hosted-spaces-deployment -[azure]:/spaces/howtos/self-hosted/self-hosted-spaces-deployment -[gcp]:/spaces/howtos/self-hosted/self-hosted-spaces-deployment -[up-space-upgrade]: /reference/cli-reference -[spaces-release-notes]: /reference/release-notes/spaces -[up-space-upgrade-1]: /reference/cli-reference -[release-notes]: /reference/release-notes/spaces -[up-space-destroy]: /reference/cli-reference -[up-cli]: /reference/cli-reference -[upbound-s-saas-environment]: /spaces/howtos/self-hosted/spaces-management -[spaces-git-integration]: /spaces/howtos/self-hosted/gitops diff --git a/spaces_versioned_docs/version-v1.15/howtos/self-hosted/troubleshooting.md b/spaces_versioned_docs/version-v1.15/howtos/self-hosted/troubleshooting.md deleted file mode 100644 index 8d1ca6517..000000000 --- a/spaces_versioned_docs/version-v1.15/howtos/self-hosted/troubleshooting.md +++ /dev/null @@ -1,132 +0,0 @@ ---- -title: Troubleshooting -sidebar_position: 100 -description: A guide for troubleshooting an issue that occurs in a Space ---- - -Find guidance below on how to find solutions for issues you encounter when deploying and using an Upbound Space. Use the tips below as a supplement to the observability metrics discussed in the [Observability][observability] page. - -## General tips - -Most issues fall into two general categories: - -1. issues with the Spaces management plane -2. issues on a control plane - -If your control plane doesn't reach a `Ready` state, it's indicative of the former. If your control plane is in a created and running state, but resources aren't reconciling, it's indicative of the latter. - -### Spaces component layout - -Run `kubectl get pods -A` against the cluster hosting a Space. You should see a variety of pods across several namespaces. 
It should look something like this: - -```bash -NAMESPACE NAME READY STATUS RESTARTS AGE -cert-manager cert-manager-6d6769565c-mc5df 1/1 Running 0 25m -cert-manager cert-manager-cainjector-744bb89575-nw4fg 1/1 Running 0 25m -cert-manager cert-manager-webhook-759d6dcbf7-ps4mq 1/1 Running 0 25m -ingress-nginx ingress-nginx-controller-7f8ccfccc6-6szlp 1/1 Running 0 25m -kube-system coredns-5d78c9869d-4p477 1/1 Running 0 26m -kube-system coredns-5d78c9869d-pdxt6 1/1 Running 0 26m -kube-system etcd-kind-control-plane 1/1 Running 0 26m -kube-system kindnet-8s7pq 1/1 Running 0 26m -kube-system kube-apiserver-kind-control-plane 1/1 Running 0 26m -kube-system kube-controller-manager-kind-control-plane 1/1 Running 0 26m -kube-system kube-proxy-l68r8 1/1 Running 0 26m -kube-system kube-scheduler-kind-control-plane 1/1 Running 0 26m -local-path-storage local-path-provisioner-6bc4bddd6b-qsdjt 1/1 Running 0 26m -mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system coredns-5dc69d6447-f56rh-x-kube-system-x-vcluster 1/1 Running 0 21m -mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system crossplane-6b6d67bc66-6b8nx-x-upbound-system-x-vcluster 1/1 Running 0 20m -mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system crossplane-rbac-manager-78f6fc7cb4-pjkhc-x-upbound-s-12253c3c4e 1/1 Running 0 20m -mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system kube-state-metrics-7f8f4dcc5b-8p8c4 1/1 Running 0 22m -mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system mxp-gateway-68f546b9c8-xnz5j-x-upbound-system-x-vcluster 1/1 Running 0 20m -mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system mxp-ksm-config-54655667bb-hv9br 1/1 Running 0 22m -mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system mxp-readyz-5f7f97d967-b98bw 1/1 Running 0 22m -mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system otlp-collector-56d7d46c8d-g5sh5-x-upbound-system-x-vcluster 1/1 Running 0 20m -mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system vcluster-67c9fb8959-ppb2m 1/1 Running 0 22m -mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system vcluster-api-6bfbccc49d-ffgpj 1/1 Running 0 22m -mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system vcluster-controller-7cc6855656-8c46b 1/1 Running 0 22m -mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system vcluster-etcd-0 1/1 Running 0 22m -mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system vector-754b494b84-wljw4 1/1 Running 0 22m -mxp-system mxp-charts-chartmuseum-7587f77558-8tltb 1/1 Running 0 23m -upbound-system crossplane-b4dc7b4c9-6hjh5 1/1 Running 0 25m -upbound-system crossplane-contrib-provider-helm-ce18dd03e6e4-7945d8985-4gcwr 1/1 Running 0 24m -upbound-system crossplane-contrib-provider-kubernetes-1f1e32c1957d-577756gs2x4 1/1 Running 0 24m -upbound-system crossplane-rbac-manager-d8cb49cbc-gbvvf 1/1 Running 0 25m -upbound-system spaces-controller-6647677cf9-5zl5q 1/1 Running 0 24m -upbound-system spaces-router-bc78c96d7-kzts2 2/2 Running 0 24m -``` - -What you are seeing is: - -- Pods in the `upbound-system` namespace are components required to run the management plane of the Space. This includes the `spaces-controller`, `spaces-router`, and install of UXP. -- Pods in the `mxp-{GUID}-system` namespace are components that collectively power a control plane. Notable call outs include pod names that look like `vcluster-api-{GUID}` and `vcluster-controller-{GUID}`, which are integral components of a control plane. -- Pods in other notable namespaces, including `cert-manager` and `ingress-nginx`, are prerequisite components that support a Space's successful operation. 
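When a Space hosts many control planes, that listing gets long. To surface only problem pods, you can filter on phase; note that pods from completed Jobs also match this selector:

```bash
# Show pods in any namespace that aren't currently in the Running phase
kubectl get pods -A --field-selector=status.phase!=Running
```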
### Troubleshooting tips for the Spaces management plane

Start by getting the status of all the pods in a Space:

1. Make sure the current context of your kubeconfig points at the Kubernetes cluster hosting your Space
2. Get the status of all the pods in the Space:
```bash
kubectl get pods -A
```
3. Scan the `Status` column to see if any of the pods report a status besides `Running`.
4. Scan the `Restarts` column to see if any of the pods have restarted.
5. If you notice a status other than `Running` or see pods that restarted, investigate their events by running:
```bash
kubectl describe pod -n 
```

Next, inspect the status of objects and releases:

1. Make sure the current context of your kubeconfig points at the Kubernetes cluster hosting your Space
2. Inspect the objects in your Space. If any are unhealthy, describe those objects to get the events:
```bash
kubectl get objects
```
3. Inspect the releases in your Space. If any are unhealthy, describe those releases to get the events:
```bash
kubectl get releases
```

### Troubleshooting tips for control planes in a Space

General troubleshooting in a control plane starts by fetching the events of the control plane:

1. Make sure the current context of your kubeconfig points at the Kubernetes cluster hosting your Space
2. Run the following to fetch your control planes.
```bash
kubectl get ctp
```
3. Describe the control plane by providing its name, found in the preceding instruction.
```bash
kubectl describe controlplanes.spaces.upbound.io
```

## Issues

### Your control plane is stuck in a 'creating' state

#### Error: unknown field "ports" in io.k8s.api.networking.v1.NetworkPolicySpec

The Spaces software emits this error when it attempts to install a Helm release named `control-plane-host-policies`. The full error is:

_CannotCreateExternalResource failed to install release: unable to build kubernetes objects from release manifest: error validating "": error validating data: ValidationError(NetworkPolicy.spec): unknown field "ports" in io.k8s.api.networking.v1.NetworkPolicySpec_

This error may be caused by running a Space on an earlier version of Kubernetes than is supported (`v1.26` or later). To resolve this issue, upgrade the host Kubernetes cluster to `v1.26` or later.

### Your Spaces install fails

#### Error: You tried to install a Space on a previous Crossplane installation

If you try to install a Space on an existing cluster that previously had Crossplane or UXP on it, you may encounter errors. Due to how the Spaces installer tests for the presence of UXP, it may detect orphaned CRDs that weren't cleaned up by the previous uninstall of Crossplane. You may need to manually [remove old Crossplane CRDs][remove-old-crossplane-crds] for the installer to properly detect the UXP prerequisite.
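A quick way to check for those orphaned CRDs before reinstalling is to list everything served from the `crossplane.io` API groups; a minimal sketch:

```bash
# List CRDs left behind by a previous Crossplane or UXP installation
kubectl get crds -o name | grep 'crossplane\.io'
```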
- - - - -[observability]: /spaces/howtos/observability -[remove-old-crossplane-crds]: https://docs.crossplane.io/latest/guides/uninstall-crossplane/ diff --git a/spaces_versioned_docs/version-v1.15/howtos/self-hosted/workload-id/_category_.json b/spaces_versioned_docs/version-v1.15/howtos/self-hosted/workload-id/_category_.json deleted file mode 100644 index c5ecc93f6..000000000 --- a/spaces_versioned_docs/version-v1.15/howtos/self-hosted/workload-id/_category_.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "label": "Workload Identity Configuration", - "position": 2, - "collapsed": true, - "customProps": { - "plan": "business" - } - -} - - diff --git a/spaces_versioned_docs/version-v1.15/howtos/self-hosted/workload-id/backup-restore-config.md b/spaces_versioned_docs/version-v1.15/howtos/self-hosted/workload-id/backup-restore-config.md deleted file mode 100644 index 935ca69ec..000000000 --- a/spaces_versioned_docs/version-v1.15/howtos/self-hosted/workload-id/backup-restore-config.md +++ /dev/null @@ -1,384 +0,0 @@ ---- -title: Backup and Restore Workload ID -weight: 1 -description: Configure workload identity for Spaces Backup and Restore ---- -import GlobalLanguageSelector, { CodeBlock } from '@site/src/components/GlobalLanguageSelector'; - - - - - - - -Workload-identity authentication lets you use access policies to grant temporary -AWS credentials to your Kubernetes pod with a service account. Assigning IAM roles and service accounts allows the pod to assume the IAM role dynamically and much more securely than static credentials. - -This guide walks you through creating an IAM trust role policy and applying it -to your EKS cluster to handle backup and restore storage. - - - - - -Workload-identity authentication lets you use access policies to grant your -self-hosted Space cluster access to your cloud providers. Workload identity -authentication grants temporary Azure credentials to your Kubernetes pod based on -a service account. Assigning managed identities and service accounts allows the pod to -authenticate with Azure resources dynamically and much more securely than static credentials. - -This guide walks you through creating a managed identity and federated credential for your AKS -cluster to handle backup and restore storage. - - - - - -Workload-identity authentication lets you use access policies to grant your -self-hosted Space cluster access to your cloud providers. Workload identity -authentication grants temporary GCP credentials to your Kubernetes pod based on -a service account. Assigning IAM roles and service accounts allows the pod to -access cloud resources dynamically and much more securely than static credentials. - -This guide walks you through configuring workload identity for your GKE -cluster to handle backup and restore storage. - - - -## Prerequisites - - -To set up a workload-identity, you'll need: - - -- A self-hosted Space cluster -- Administrator access in your cloud provider -- Helm and `kubectl` - -## About the backup and restore component - -The `mxp-controller` component handles backup and restore workloads. It needs to -access your cloud storage to store and retrieve backups. By default, this -component runs in each control plane's host namespace. - -## Configuration - - - -Upbound supports workload-identity configurations in AWS with IAM Roles for -Service Accounts and EKS pod identity association. - -#### IAM Roles for Service Accounts (IRSA) - -With IRSA, you can associate a Kubernetes service account in an EKS cluster with -an AWS IAM role. 
Upbound authenticates workloads with that service account as
the IAM role using temporary credentials instead of static role credentials.
IRSA relies on the AWS STS `AssumeRoleWithWebIdentity` operation to exchange OIDC ID tokens
for the IAM role's temporary credentials. IRSA uses the `eks.amazonaws.com/role-arn`
annotation to link the service account and the IAM role.

First, create an IAM role with appropriate permissions to access your S3 bucket:

```json
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": [
        "s3:GetObject",
        "s3:PutObject",
        "s3:ListBucket",
        "s3:DeleteObject"
      ],
      "Resource": [
        "arn:aws:s3:::${YOUR_BACKUP_BUCKET}",
        "arn:aws:s3:::${YOUR_BACKUP_BUCKET}/*"
      ]
    }
  ]
}
```

Next, ensure your EKS cluster has an OIDC identity provider:

```shell
eksctl utils associate-iam-oidc-provider --cluster ${YOUR_CLUSTER_NAME} --approve
```

Configure the IAM role trust policy with the namespace for each
provisioned control plane.

```json
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Principal": {
        "Federated": "arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:oidc-provider/${YOUR_OIDC_PROVIDER}"
      },
      "Action": "sts:AssumeRoleWithWebIdentity",
      "Condition": {
        "StringEquals": {
          "${YOUR_OIDC_PROVIDER}:aud": "sts.amazonaws.com",
          "${YOUR_OIDC_PROVIDER}:sub": "system:serviceaccount:${YOUR_NAMESPACE}:mxp-controller"
        }
      }
    }
  ]
}
```

In your control plane, pass the `--set` flag with the Spaces Helm chart
parameters for the Backup and Restore component:

```shell
--set controlPlanes.mxpController.serviceAccount.annotations."eks\.amazonaws\.com/role-arn"="${SPACES_BR_IAM_ROLE_ARN}"
```

This command allows the backup and restore component to authenticate with your
dedicated IAM role in your EKS cluster environment.

#### EKS pod identities

Upbound also supports EKS Pod Identity configuration. EKS Pod Identities allow
you to create a pod identity association with your Kubernetes namespace, a
service account, and an IAM role, which allows the EKS control plane to
automatically handle the credential exchange.

First, create an IAM role with appropriate permissions to access your S3 bucket:

```json
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": [
        "s3:GetObject",
        "s3:PutObject",
        "s3:ListBucket",
        "s3:DeleteObject"
      ],
      "Resource": [
        "arn:aws:s3:::${YOUR_BACKUP_BUCKET}",
        "arn:aws:s3:::${YOUR_BACKUP_BUCKET}/*"
      ]
    }
  ]
}
```

When you install or upgrade your Space with Helm, add the backup and restore values:

```shell
helm upgrade spaces spaces-helm-chart \
  --set "billing.enabled=true" \
  --set "backup.enabled=true" \
  --set "backup.storage.provider=aws" \
  --set "backup.storage.aws.region=${YOUR_AWS_REGION}" \
  --set "backup.storage.aws.bucket=${YOUR_BACKUP_BUCKET}"
```

After Upbound provisions your control plane, create a Pod Identity Association
with the `aws` CLI:

```shell
aws eks create-pod-identity-association \
  --cluster-name ${YOUR_CLUSTER_NAME} \
  --namespace ${YOUR_CONTROL_PLANE_NAMESPACE} \
  --service-account mxp-controller \
  --role-arn arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:role/backup-restore-role
```

Upbound supports workload-identity configurations in Azure with Azure's built-in
workload identity feature.
- -#### Prepare your cluster - -First, enable the OIDC issuer and workload identity in your AKS cluster: - -```shell -az aks update --resource-group ${YOUR_RESOURCE_GROUP} --name ${YOUR_AKS_CLUSTER_NAME} --enable-oidc-issuer --enable-workload-identity -``` - -Next, find and store the OIDC issuer URL as an environment variable: - -```shell -export AKS_OIDC_ISSUER="$(az aks show --name ${YOUR_AKS_CLUSTER_NAME} --resource-group ${YOUR_RESOURCE_GROUP} --query "oidcIssuerProfile.issuerUrl" --output tsv)" -``` - -#### Create a User-Assigned Managed Identity - -Create a new managed identity to associate with the backup and restore component: - -```shell -az identity create --name backup-restore-identity --resource-group ${YOUR_RESOURCE_GROUP} --location ${YOUR_LOCATION} -``` - -Retrieve the client ID and store it as an environment variable: - -```shell -export USER_ASSIGNED_CLIENT_ID="$(az identity show --name backup-restore-identity --resource-group ${YOUR_RESOURCE_GROUP} --query clientId -otsv)" -``` - -Grant the managed identity you created to access your Azure Storage account: - -```shell -az role assignment create \ - --role "Storage Blob Data Contributor" \ - --assignee ${USER_ASSIGNED_CLIENT_ID} \ - --scope /subscriptions/${YOUR_SUBSCRIPTION_ID}/resourceGroups/${YOUR_RESOURCE_GROUP}/providers/Microsoft.Storage/storageAccounts/${YOUR_STORAGE_ACCOUNT} -``` - -#### Apply the managed identity role - -In your control plane, pass the `--set` flag with the Spaces Helm chart -parameters for the backup and restore component: - -```shell ---set controlPlanes.mxpController.serviceAccount.annotations."azure\.workload\.identity/client-id"="${YOUR_USER_ASSIGNED_CLIENT_ID}" ---set controlPlanes.mxpController.pod.customLabels."azure\.workload\.identity/use"="true" -``` - -#### Create a Federated Identity credential - -```shell -az identity federated-credential create \ - --name backup-restore-federated-identity \ - --identity-name backup-restore-identity \ - --resource-group ${YOUR_RESOURCE_GROUP} \ - --issuer ${AKS_OIDC_ISSUER} \ - --subject system:serviceaccount:${YOUR_CONTROL_PLANE_NAMESPACE}:mxp-controller -``` - - - - - -Upbound supports workload-identity configurations in GCP with IAM principal -identifiers and service account impersonation. 
- -#### Prepare your cluster - -First, enable Workload Identity Federation on your GKE cluster: - -```shell -gcloud container clusters update ${YOUR_CLUSTER_NAME} \ - --workload-pool=${YOUR_PROJECT_ID}.svc.id.goog \ - --region=${YOUR_REGION} -``` - -#### Create a Google Service Account - -Create a service account for the backup and restore component: - -```shell -gcloud iam service-accounts create backup-restore-sa \ - --display-name "Backup Restore Service Account" \ - --project ${YOUR_PROJECT_ID} -``` - -Grant the service account access to your Google Cloud Storage bucket: - -```shell -gcloud projects add-iam-policy-binding ${YOUR_PROJECT_ID} \ - --member "serviceAccount:backup-restore-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com" \ - --role "roles/storage.objectAdmin" -``` - -#### Configure Workload Identity - -Create an IAM binding to grant the Kubernetes service account access to the Google service account: - -```shell -gcloud iam service-accounts add-iam-policy-binding \ - backup-restore-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com \ - --role roles/iam.workloadIdentityUser \ - --member "serviceAccount:${YOUR_PROJECT_ID}.svc.id.goog[${YOUR_CONTROL_PLANE_NAMESPACE}/mxp-controller]" -``` - -#### Apply the service account configuration - -In your control plane, pass the `--set` flag with the Spaces Helm chart -parameters for the backup and restore component: - -```shell ---set controlPlanes.mxpController.serviceAccount.annotations."iam\.gke\.io/gcp-service-account"="backup-restore-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com" -``` - - - -## Verify your configuration - -After you apply the configuration use `kubectl` to verify the service account -has the correct annotation: - -```shell -kubectl get serviceaccount mxp-controller -n ${YOUR_CONTROL_PLANE_NAMESPACE} -o yaml -``` - -Verify the `mxp-controller` pod is running: - -```shell -kubectl get pods -n ${YOUR_CONTROL_PLANE_NAMESPACE} | grep mxp-controller -``` - -## Restart workload - -You must manually restart a workload's pod when you add the workload identity annotations to the running pod's service account. - - - -This restart enables the EKS pod identity webhook to inject the necessary -environment for using IRSA. - - - - - -This restart enables the workload identity webhook to inject the necessary -environment for using Azure workload identity. - - - - - -This restart enables the workload identity webhook to inject the necessary -environment for using GCP workload identity. - - - -```shell -kubectl rollout restart deployment mxp-controller -n ${YOUR_CONTROL_PLANE_NAMESPACE} -``` - -## Use cases - - -Configuring backup and restore with workload identity eliminates the need for -static credentials in your cluster and the overhead of credential rotation. -These benefits are helpful in: - -* Disaster recovery scenarios -* Control plane migration -* Compliance requirements -* Rollbacks after unsuccessful upgrades - -## Next steps - -Now that you have a workload identity configured for the backup and restore -component, visit the [Backup Configuration][backup-restore-guide] documentation. 
Other workload identity guides are:

* [Billing][billing]
* [Shared Secrets][secrets]

[backup-restore-guide]: /spaces/howtos/backup-and-restore
[billing]: /spaces/howtos/self-hosted/workload-id/billing-config
[secrets]: /spaces/howtos/self-hosted/workload-id/eso-config
diff --git a/spaces_versioned_docs/version-v1.15/howtos/self-hosted/workload-id/billing-config.md b/spaces_versioned_docs/version-v1.15/howtos/self-hosted/workload-id/billing-config.md
deleted file mode 100644
index 323a6122f..000000000
--- a/spaces_versioned_docs/version-v1.15/howtos/self-hosted/workload-id/billing-config.md
+++ /dev/null
@@ -1,454 +0,0 @@
---
title: Billing Workload ID
weight: 1
description: Configure workload identity for Spaces Billing
---
import GlobalLanguageSelector, { CodeBlock } from '@site/src/components/GlobalLanguageSelector';

Workload-identity authentication lets you use access policies to grant your
self-hosted Space cluster access to your cloud providers. Workload identity
authentication grants temporary AWS credentials to your Kubernetes pod based on
a service account. Assigning IAM roles and service accounts allows the pod to
assume the IAM role dynamically and much more securely than static credentials.

This guide walks you through creating an IAM trust role policy and applying it to your EKS
cluster for billing in your Space cluster.

Workload-identity authentication lets you use access policies to grant your
self-hosted Space cluster access to your cloud providers. Workload identity
authentication grants temporary Azure credentials to your Kubernetes pod based on
a service account. Assigning managed identities and service accounts allows the pod to
authenticate with Azure resources dynamically and much more securely than static credentials.

This guide walks you through creating a managed identity and federated credential for your AKS
cluster for billing in your Space cluster.

Workload-identity authentication lets you use access policies to grant your
self-hosted Space cluster access to your cloud providers. Workload identity
authentication grants temporary GCP credentials to your Kubernetes pod based on
a service account. Assigning IAM roles and service accounts allows the pod to
access cloud resources dynamically and much more securely than static
credentials.

This guide walks you through configuring workload identity for your GKE
cluster's billing component.

## Prerequisites

To set up a workload-identity, you'll need:

- A self-hosted Space cluster
- Administrator access in your cloud provider
- Helm and `kubectl`

## About the billing component

The `vector.dev` component handles billing metrics collection in Spaces. It
stores account data in your cloud storage. By default, this component runs in
each control plane's host namespace.

## Configuration

Upbound supports workload-identity configurations in AWS with IAM Roles for
Service Accounts and EKS pod identity association.

#### IAM Roles for Service Accounts (IRSA)

With IRSA, you can associate a Kubernetes service account in an EKS cluster with
an AWS IAM role. Upbound authenticates workloads with that service account as
the IAM role using temporary credentials instead of static role credentials.
IRSA relies on the AWS STS `AssumeRoleWithWebIdentity` operation to exchange OIDC ID tokens
for the IAM role's temporary credentials. IRSA uses the `eks.amazonaws.com/role-arn`
annotation to link the service account and the IAM role.
**Create an IAM role and trust policy**

First, create an IAM role with appropriate permissions to access your S3 bucket:

```json
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": [
        "s3:GetObject",
        "s3:PutObject",
        "s3:ListBucket",
        "s3:DeleteObject"
      ],
      "Resource": [
        "arn:aws:s3:::${YOUR_BILLING_BUCKET}",
        "arn:aws:s3:::${YOUR_BILLING_BUCKET}/*"
      ]
    }
  ]
}
```

You must configure the IAM role trust policy with the exact match for each
provisioned control plane. An example of a trust policy for a single control
plane is below:

```json
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Principal": {
        "Federated": "arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:oidc-provider/${YOUR_OIDC_PROVIDER}"
      },
      "Action": "sts:AssumeRoleWithWebIdentity",
      "Condition": {
        "StringEquals": {
          "${YOUR_OIDC_PROVIDER}:aud": "sts.amazonaws.com",
          "${YOUR_OIDC_PROVIDER}:sub": "system:serviceaccount:${YOUR_NAMESPACE}:vector"
        }
      }
    }
  ]
}
```

**Configure the EKS OIDC provider**

Next, ensure your EKS cluster has an OIDC identity provider:

```shell
eksctl utils associate-iam-oidc-provider --cluster ${YOUR_CLUSTER_NAME} --approve
```

**Apply the IAM role**

In your control plane, pass the `--set` flag with the Spaces Helm chart
parameters for the Billing component:

```shell
--set "billing.enabled=true"
--set "billing.storage.provider=aws"
--set "billing.storage.aws.region=${YOUR_AWS_REGION}"
--set "billing.storage.aws.bucket=${YOUR_BILLING_BUCKET}"
--set "billing.storage.secretRef.name="
--set controlPlanes.vector.serviceAccount.customAnnotations."eks\.amazonaws\.com/role-arn"="arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:role/${YOUR_BILLING_ROLE_NAME}"
```

:::important
You **must** set the `billing.storage.secretRef.name` to an empty string to
enable workload identity for the billing component.
:::

#### EKS pod identities

Upbound also supports EKS Pod Identity configuration. EKS Pod Identities allow
you to create a pod identity association with your Kubernetes namespace, a
service account, and an IAM role, which allows the EKS control plane to
automatically handle the credential exchange.

**Create an IAM role**

First, create an IAM role with appropriate permissions to access your S3 bucket:

```json
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": [
        "s3:GetObject",
        "s3:PutObject",
        "s3:ListBucket"
      ],
      "Resource": [
        "arn:aws:s3:::${YOUR_BILLING_BUCKET}",
        "arn:aws:s3:::${YOUR_BILLING_BUCKET}/*"
      ]
    }
  ]
}
```

**Configure your Space with Helm**

When you install or upgrade your Space with Helm, add the billing values:

```shell
helm upgrade spaces spaces-helm-chart \
  --set "billing.enabled=true" \
  --set "billing.storage.provider=aws" \
  --set "billing.storage.aws.region=${YOUR_AWS_REGION}" \
  --set "billing.storage.aws.bucket=${YOUR_BILLING_BUCKET}" \
  --set "billing.storage.secretRef.name="
```

**Create a Pod Identity Association**

After Upbound provisions your control plane, create a Pod Identity Association
with the `aws` CLI:

```shell
aws eks create-pod-identity-association \
  --cluster-name ${YOUR_CLUSTER_NAME} \
  --namespace ${YOUR_CONTROL_PLANE_NAMESPACE} \
  --service-account vector \
  --role-arn arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:role/${YOUR_BILLING_ROLE_NAME}
```

Upbound supports workload-identity configurations in Azure with Azure's built-in
workload identity feature.
- -First, enable the OIDC issuer and workload identity in your AKS cluster: - -```shell -az aks update --resource-group ${YOUR_RESOURCE_GROUP} --name ${YOUR_AKS_CLUSTER_NAME} --enable-oidc-issuer --enable-workload-identity -``` - -Next, find and store the OIDC issuer URL as an environment variable: - -```shell -export AKS_OIDC_ISSUER="$(az aks show --name ${YOUR_AKS_CLUSTER_NAME} --resource-group ${YOUR_RESOURCE_GROUP} --query "oidcIssuerProfile.issuerUrl" --output tsv)" -``` - -Create a new managed identity to associate with the billing component: - -```shell -az identity create --name billing-identity --resource-group ${YOUR_RESOURCE_GROUP} --location ${YOUR_LOCATION} -``` - -Retrieve the client ID and store it as an environment variable: - -```shell -export USER_ASSIGNED_CLIENT_ID="$(az identity show --name billing-identity --resource-group ${YOUR_RESOURCE_GROUP} --query clientId -otsv)" -``` - -Grant the managed identity you created to access your Azure Storage account: - -```shell -az role assignment create --role "Storage Blob Data Contributor" --assignee $USER_ASSIGNED_CLIENT_ID --scope /subscriptions/${YOUR_SUBSCRIPTION_ID}/resourceGroups/${YOUR_RESOURCE_GROUP}/providers/Microsoft.Storage/storageAccounts/${YOUR_STORAGE_ACCOUNT} -``` - -In your control plane, pass the `--set` flag with the Spaces Helm chart -parameters for the billing component: - -```shell ---set "billing.enabled=true" ---set "billing.storage.provider=azure" ---set "billing.storage.azure.storageAccount=${SPACES_BILLING_STORAGE_ACCOUNT}" ---set "billing.storage.azure.container=${SPACES_BILLING_STORAGE_CONTAINER}" ---set "billing.storage.secretRef.name=" ---set controlPlanes.vector.serviceAccount.customAnnotations."azure\.workload\.identity/client-id"="${SPACES_BILLING_APP_ID}" ---set controlPlanes.vector.pod.customLabels."azure\.workload\.identity/use"="true" -``` - -Create a federated credential to establish trust between the managed identity -and your AKS OIDC provider: - -```shell -az identity federated-credential create \ - --name billing-federated-identity \ - --identity-name billing-identity \ - --resource-group ${YOUR_RESOURCE_GROUP} \ - --issuer ${AKS_OIDC_ISSUER} \ - --subject system:serviceaccount:${YOUR_CONTROL_PLANE_NAMESPACE}:vector -``` - - - - - -Upbound supports workload-identity configurations in GCP with IAM principal -identifiers or service account impersonation. - -#### IAM principal identifiers - -IAM principal identifiers allow you to grant permissions directly to -Kubernetes service accounts without additional annotation. Upbound recommends -this approach for ease-of-use and flexibility. - -First, enable Workload Identity Federation on your GKE cluster: - -```shell -gcloud container clusters update ${YOUR_CLUSTER_NAME} \ - --workload-pool=${YOUR_PROJECT_ID}.svc.id.goog \ - --region=${YOUR_REGION} -``` - -Next, configure your Spaces installation with the Spaces Helm chart parameters: - -```shell ---set "billing.enabled=true" ---set "billing.storage.provider=gcp" ---set "billing.storage.gcp.bucket=${YOUR_BILLING_BUCKET}" ---set "billing.storage.secretRef.name=" -``` - -:::important -You **must** set the `billing.storage.secretRef.name` to an empty string to -enable workload identity for the billing component. 
-::: - -Grant the necessary permissions to your Kubernetes service account: - -```shell -gcloud projects add-iam-policy-binding ${YOUR_PROJECT_ID} \ - --member="principalSet://iam.googleapis.com/projects/${YOUR_PROJECT_NUMBER}/locations/global/workloadIdentityPools/${YOUR_PROJECT_ID}.svc.id.goog/attribute.kubernetes_namespace/${YOUR_CONTROL_PLANE_NAMESPACE}/attribute.kubernetes_service_account/vector" \ - --role="roles/storage.objectAdmin" -``` - -Enable uniform bucket-level access on your storage bucket: - -```shell -gcloud storage buckets update gs://${YOUR_BILLING_BUCKET} --uniform-bucket-level-access -``` - -#### Service account impersonation - -Service account impersonation allows you to link a Kubernetes service account to -a GCP service account. The Kubernetes service account assumes the permissions of -the GCP service account you specify. - -Enable workload id federation on your GKE cluster: - -```shell -gcloud container clusters update ${YOUR_CLUSTER_NAME} \ - --workload-pool=${YOUR_PROJECT_ID}.svc.id.goog \ - --region=${YOUR_REGION} -``` - -Next, create a dedicated service account for your billing operations: - -```shell -gcloud iam service-accounts create billing-sa \ - --project=${YOUR_PROJECT_ID} -``` - -Grant storage permissions to the service account you created: - -```shell -gcloud projects add-iam-policy-binding ${YOUR_PROJECT_ID} \ - --member="serviceAccount:billing-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com" \ - --role="roles/storage.objectAdmin" -``` - -Link the Kubernetes service account to the GCP service account: - -```shell -gcloud iam service-accounts add-iam-policy-binding \ - billing-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com \ - --role="roles/iam.workloadIdentityUser" \ - --member="serviceAccount:${YOUR_PROJECT_ID}.svc.id.goog[${YOUR_CONTROL_PLANE_NAMESPACE}/vector]" -``` - -In your control plane, pass the `--set` flag with the Spaces Helm chart -parameters for the billing component: - -```shell ---set "billing.enabled=true" ---set "billing.storage.provider=gcp" ---set "billing.storage.gcp.bucket=${YOUR_BILLING_BUCKET}" ---set "billing.storage.secretRef.name=" ---set controlPlanes.vector.serviceAccount.customAnnotations."iam\.gke\.io/gcp-service-account"="billing-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com" -``` - - - -## Verify your configuration - -After you apply the configuration use `kubectl` to verify the service account -has the correct annotation: - -```shell -kubectl get serviceaccount vector -n ${YOUR_CONTROL_PLANE_NAMESPACE} -o yaml -``` - -Verify the `vector` pod is running: - -```shell -kubectl get pods -n ${YOUR_CONTROL_PLANE_NAMESPACE} | grep vector -``` - -## Restart workload - - - -You must manually restart a workload's pod when you add the -`eks.amazonaws.com/role-arn key` annotation to the running pod's service -account. - -This restart enables the EKS pod identity webhook to inject the necessary -environment for using IRSA. - - - - - -You must manually restart a workload's pod when you add the workload identity annotations to the running pod's service account. - -This restart enables the workload identity webhook to inject the necessary -environment for using Azure workload identity. - - - - - -GCP workload identity doesn't require pod restarts after configuration changes. 
-If you do need to restart the workload, use the `kubectl` command to force the -component restart: - - - -```shell -kubectl rollout restart deployment vector -``` - - -## Use cases - - -Using workload identity authentication for billing eliminates the need for static -credentials in your cluster as well as the overhead of credential rotation. -These benefits are helpful in: - -* Resource usage tracking across teams/projects -* Cost allocation for multi-tenant environments -* Financial auditing requirements -* Capacity billing and resource optimization -* Automated billing workflows - -## Next steps - -Now that you have workload identity configured for the billing component, visit -the [Billing guide][billing-guide] for more information. - -Other workload identity guides are: -* [Backup and restore][backuprestore] -* [Shared Secrets][secrets] - -[billing-guide]: /spaces/howtos/self-hosted/billing -[backuprestore]: /spaces/howtos/self-hosted/workload-id/backup-restore-config -[secrets]: /spaces/howtos/self-hosted/workload-id/eso-config diff --git a/spaces_versioned_docs/version-v1.15/howtos/self-hosted/workload-id/eso-config.md b/spaces_versioned_docs/version-v1.15/howtos/self-hosted/workload-id/eso-config.md deleted file mode 100644 index c1418c171..000000000 --- a/spaces_versioned_docs/version-v1.15/howtos/self-hosted/workload-id/eso-config.md +++ /dev/null @@ -1,503 +0,0 @@ ---- -title: Shared Secrets Workload ID -weight: 1 -description: Configure workload identity for Spaces Shared Secrets ---- -import GlobalLanguageSelector, { CodeBlock } from '@site/src/components/GlobalLanguageSelector'; - - - - - - - -Workload-identity authentication lets you use access policies to grant your -self-hosted Space cluster access to your cloud providers. Workload identity -authentication grants temporary AWS credentials to your Kubernetes pod based on -a service account. Assigning IAM roles and service accounts allows the pod to -assume the IAM role dynamically and much more securely than static credentials. - -This guide walks you through creating an IAM trust role policy and applying it to your EKS -cluster for secret sharing with Kubernetes. - - - - - -Workload-identity authentication lets you use access policies to grant your -self-hosted Space cluster access to your cloud providers. Workload identity -authentication grants temporary Azure credentials to your Kubernetes pod based on -a service account. Assigning managed identities and service accounts allows the pod to -authenticate with Azure resources dynamically and much more securely than static credentials. - -This guide walks you through creating a managed identity and federated credential for your AKS -cluster for shared secrets in your Space cluster. - - - - - -Workload-identity authentication lets you use access policies to grant your -self-hosted Space cluster access to your cloud providers. Workload identity -authentication grants temporary GCP credentials to your Kubernetes pod based on -a service account. Assigning IAM roles and service accounts allows the pod to -access cloud resources dynamically and much more securely than static -credentials. - -This guide walks you through configuring workload identity for your GKE -cluster's Shared Secrets component. 
- - - -## Prerequisites - - -To set up a workload-identity, you'll need: - - -- A self-hosted Space cluster -- Administrator access in your cloud provider -- Helm and `kubectl` - - -## About the Shared Secrets component - - - - -The External Secrets Operator (ESO) runs in each control plane's host namespace as `external-secrets-controller`. It needs to access -your external secrets management service like AWS Secrets Manager. - -To configure your shared secrets workflow controller, you must: - -* Annotate the Kubernetes service account to associate it with a cloud-side - principal (such as an IAM role, service account, or enterprise application). The workload must then - use this service account. -* Label the workload (pod) to allow the injection of a temporary credential set, - enabling authentication. - - - - - -The External Secrets Operator (ESO) component runs in each control plane's host -namespace as `external-secrets-controller`. It synchronizes secrets from -external APIs into Kubernetes secrets. Shared secrets allow you to manage -credentials outside your Kubernetes cluster while making them available to your -application - - - - - -The External Secrets Operator (ESO) component runs in each control plane's host -namespace as `external-secrets-controller`. It synchronizes secrets from -external APIs into Kubernetes secrets. Shared secrets allow you to manage -credentials outside your Kubernetes cluster while making them available to your -application - - - -## Configuration - - - -Upbound supports workload-identity configurations in AWS with IAM Roles for -Service Accounts or EKS pod identity association. - -#### IAM Roles for Service Accounts (IRSA) - -With IRSA, you can associate a Kubernetes service account in an EKS cluster with -an AWS IAM role. Upbound authenticates workloads with that service account as -the IAM role using temporary credentials instead of static role credentials. -IRSA relies on AWS `AssumeRoleWithWebIdentity` `STS` to exchange OIDC ID tokens with -the IAM role's temporary credentials. IRSA uses the `eks.amazon.aws/role-arn` -annotation to link the service account and the IAM role. - -**Create an IAM role and trust policy** - -First, create an IAM role with appropriate permissions to access AWS Secrets Manager: - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "secretsmanager:GetSecretValue", - "secretsmanager:DescribeSecret", - "ssm:GetParameter" - ], - "Resource": [ - "arn:aws:secretsmanager:${YOUR_REGION}:${YOUR_AWS_ACCOUNT_ID}:secret:${YOUR_SECRET_PREFIX}*", - "arn:aws:ssm:${YOUR_REGION}:${YOUR_AWS_ACCOUNT_ID}:parameter/${YOUR_PARAMETER_PREFIX}*" - ] - } - ] -} -``` - -You must configure the IAM role trust policy with the exact match for each -provisioned control plane. 
An example of a trust policy for a single control
-plane is below:
-
-```json
-{
-  "Version": "2012-10-17",
-  "Statement": [
-    {
-      "Effect": "Allow",
-      "Principal": {
-        "Federated": "arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:oidc-provider/${YOUR_OIDC_PROVIDER}"
-      },
-      "Action": "sts:AssumeRoleWithWebIdentity",
-      "Condition": {
-        "StringEquals": {
-          "${YOUR_OIDC_PROVIDER}:aud": "sts.amazonaws.com"
-        },
-        "StringLike": {
-          "${YOUR_OIDC_PROVIDER}:sub": "system:serviceaccount:*:external-secrets-controller"
-        }
-      }
-    }
-  ]
-}
-```
-
-**Configure the EKS OIDC provider**
-
-Next, ensure your EKS cluster has an OIDC identity provider:
-
-```shell
-eksctl utils associate-iam-oidc-provider --cluster ${YOUR_CLUSTER_NAME} --approve
-```
-
-**Apply the IAM role**
-
-In your control plane, pass the `--set` flag with the Spaces Helm chart
-parameters for the shared secrets component:
-
-```shell
---set controlPlanes.sharedSecrets.serviceAccount.customAnnotations."eks\.amazonaws\.com/role-arn"="arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:role/${YOUR_ESO_ROLE_NAME}"
-```
-
-This command allows the shared secrets component to authenticate with your
-dedicated IAM role in your EKS cluster environment.
-
-#### EKS pod identities
-
-Upbound also supports EKS Pod Identity configuration. EKS Pod Identities allow
-you to create a pod identity association with your Kubernetes namespace, a
-service account, and an IAM role, which allows the EKS control plane to
-automatically handle the credential exchange.
-
-**Create an IAM role**
-
-First, create an IAM role with appropriate permissions to access AWS Secrets Manager:
-
-```json
-{
-  "Version": "2012-10-17",
-  "Statement": [
-    {
-      "Effect": "Allow",
-      "Action": [
-        "secretsmanager:GetSecretValue",
-        "secretsmanager:DescribeSecret",
-        "ssm:GetParameter"
-      ],
-      "Resource": [
-        "arn:aws:secretsmanager:${YOUR_AWS_REGION}:${YOUR_AWS_ACCOUNT_ID}:secret:${YOUR_SECRET_PREFIX}*",
-        "arn:aws:ssm:${YOUR_AWS_REGION}:${YOUR_AWS_ACCOUNT_ID}:parameter/${YOUR_PARAMETER_PREFIX}*"
-      ]
-    }
-  ]
-}
-```
-
-**Configure your Space with Helm**
-
-When you install or upgrade your Space with Helm, add the shared secrets value:
-
-```shell
-helm upgrade spaces spaces-helm-chart \
-  --set "sharedSecrets.enabled=true"
-```
-
-**Create a Pod Identity Association**
-
-After Upbound provisions your control plane, create a Pod Identity Association
-with the `aws` CLI:
-
-```shell
-aws eks create-pod-identity-association \
-  --cluster-name ${YOUR_CLUSTER_NAME} \
-  --namespace ${YOUR_CONTROL_PLANE_NAMESPACE} \
-  --service-account external-secrets-controller \
-  --role-arn arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:role/${YOUR_ROLE_NAME}
-```
-
-
-
-
-
-Upbound supports workload-identity configurations in Azure with Azure's built-in
-workload identity feature.
-
-First, enable the OIDC issuer and workload identity in your AKS cluster:
-
-```shell
-az aks update --resource-group ${YOUR_RESOURCE_GROUP} --name ${YOUR_AKS_CLUSTER_NAME} --enable-oidc-issuer --enable-workload-identity
-```
-
-Next, find and store the OIDC issuer URL as an environment variable:
-
-```shell
-export AKS_OIDC_ISSUER="$(az aks show --name ${YOUR_AKS_CLUSTER_NAME} --resource-group ${YOUR_RESOURCE_GROUP} --query "oidcIssuerProfile.issuerUrl" --output tsv)"
-```
-
-Create a new managed identity to associate with the shared secrets component:
-
-```shell
-az identity create --name secrets-identity --resource-group ${YOUR_RESOURCE_GROUP} --location ${YOUR_LOCATION}
-```
-
-Retrieve the client ID and store it as an environment variable:
-
-```shell
-export USER_ASSIGNED_CLIENT_ID="$(az identity show --name secrets-identity --resource-group ${YOUR_RESOURCE_GROUP} --query clientId -otsv)"
-```
-
-Grant the managed identity you created access to secrets in your Azure Key Vault:
-
-```shell
-az keyvault set-policy --name ${YOUR_KEY_VAULT_NAME} \
-  --resource-group ${YOUR_RESOURCE_GROUP} \
-  --object-id $(az identity show --name secrets-identity --resource-group ${YOUR_RESOURCE_GROUP} --query principalId -otsv) \
-  --secret-permissions get list
-```
-
-In your control plane, pass the `--set` flag with the Spaces Helm chart
-parameters for the shared secrets component:
-
-```shell
---set controlPlanes.sharedSecrets.serviceAccount.customAnnotations."azure\.workload\.identity/client-id"="${USER_ASSIGNED_CLIENT_ID}"
---set controlPlanes.sharedSecrets.pod.customLabels."azure\.workload\.identity/use"="true"
-```
-
-Next, create a federated credential to establish trust between the managed identity
-and your AKS OIDC provider:
-
-```shell
-az identity federated-credential create \
-  --name secrets-federated-identity \
-  --identity-name secrets-identity \
-  --resource-group ${YOUR_RESOURCE_GROUP} \
-  --issuer ${AKS_OIDC_ISSUER} \
-  --subject system:serviceaccount:${YOUR_CONTROL_PLANE_NAMESPACE}:external-secrets-controller
-```
-
-
-
-
-
-Upbound supports workload-identity configurations in GCP with IAM principal
-identifiers or service account impersonation.
-
-#### IAM principal identifiers
-
-IAM principal identifiers allow you to grant permissions directly to
-Kubernetes service accounts without additional annotation. Upbound recommends
-this approach for ease-of-use and flexibility.
-
-First, enable Workload Identity Federation on your GKE cluster:
-
-```shell
-gcloud container clusters update ${YOUR_CLUSTER_NAME} \
-  --workload-pool=${YOUR_PROJECT_ID}.svc.id.goog \
-  --region=${YOUR_REGION}
-```
-
-Next, grant the necessary permissions to your Kubernetes service account:
-
-```shell
-gcloud projects add-iam-policy-binding ${YOUR_PROJECT_ID} \
-  --member="principalSet://iam.googleapis.com/projects/${YOUR_PROJECT_NUMBER}/locations/global/workloadIdentityPools/${YOUR_PROJECT_ID}.svc.id.goog/attribute.kubernetes_namespace/${YOUR_CONTROL_PLANE_NAMESPACE}/attribute.kubernetes_service_account/external-secrets-controller" \
-  --role="roles/secretmanager.secretAccessor"
-```
-
-#### Service account impersonation
-
-Service account impersonation allows you to link a Kubernetes service account to
-a GCP service account. The Kubernetes service account assumes the permissions of
-the GCP service account you specify.
-
-Enable workload identity federation on your GKE cluster:
-
-```shell
-gcloud container clusters update ${YOUR_CLUSTER_NAME} \
-  --workload-pool=${YOUR_PROJECT_ID}.svc.id.goog \
-  --region=${YOUR_REGION}
-```
-
-Next, create a dedicated service account for your secrets operations:
-
-```shell
-gcloud iam service-accounts create secrets-sa \
-  --project=${YOUR_PROJECT_ID}
-```
-
-Grant secret access permissions to the service account you created:
-
-```shell
-gcloud projects add-iam-policy-binding ${YOUR_PROJECT_ID} \
-  --member="serviceAccount:secrets-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com" \
-  --role="roles/secretmanager.secretAccessor"
-```
-
-Link the Kubernetes service account to the GCP service account:
-
-```shell
-gcloud iam service-accounts add-iam-policy-binding \
-  secrets-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com \
-  --role="roles/iam.workloadIdentityUser" \
-  --member="serviceAccount:${YOUR_PROJECT_ID}.svc.id.goog[${YOUR_CONTROL_PLANE_NAMESPACE}/external-secrets-controller]"
-```
-
-In your control plane, pass the `--set` flag with the Spaces Helm chart
-parameters for the shared secrets component:
-
-```shell
---set controlPlanes.sharedSecrets.serviceAccount.customAnnotations."iam\.gke\.io/gcp-service-account"="secrets-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com"
-```
-
-
-
-## Verify your configuration
-
-After you apply the configuration, use `kubectl` to verify the service account
-has the correct annotation:
-
-```shell
-kubectl get serviceaccount external-secrets-controller -n ${YOUR_CONTROL_PLANE_NAMESPACE} -o yaml
-```
-
-
-
-Verify the `external-secrets` pod is running correctly:
-
-```shell
-kubectl get pods -n ${YOUR_CONTROL_PLANE_NAMESPACE} | grep external-secrets
-```
-
-
-
-
-
-Verify the External Secrets Operator pod is running correctly:
-
-```shell
-kubectl get pods -n ${YOUR_CONTROL_PLANE_NAMESPACE} | grep external-secrets
-```
-
-
-
-
-
-Verify the `external-secrets` pod is running correctly:
-
-```shell
-kubectl get pods -n ${YOUR_CONTROL_PLANE_NAMESPACE} | grep external-secrets
-```
-
-
-
-## Restart workload
-
-
-
-You must manually restart a workload's pod when you add the
-`eks.amazonaws.com/role-arn` annotation key to the running pod's service
-account.
-
-This restart enables the EKS pod identity webhook to inject the necessary
-environment for using IRSA.
-
-
-
-
-
-You must manually restart a workload's pod when you add the workload identity annotations to the running pod's service account.
-
-This restart enables the workload identity webhook to inject the necessary
-environment for using Azure workload identity.
-
-
-
-
-
-GCP workload identity doesn't require pod restarts after configuration changes.
-If you do need to restart the workload, use the `kubectl` command to force the
-component restart:
-
-
-
-```shell
-kubectl rollout restart deployment external-secrets
-```
-
-## Use cases
-
-
-
-
-Using shared secrets with workload identity eliminates the need for static credentials
-in your cluster. These benefits are particularly helpful in:
-
-* Secure application credentials management
-* Database connection string storage
-* API token management
-* Compliance with secret rotation security standards
-* Multi-environment configuration with centralized secret management
-
-
-
-
-
-Using workload identity authentication for shared secrets eliminates the need for static
-credentials in your cluster as well as the overhead of credential rotation.
-These benefits are particularly helpful in:
-
-* Secure application credentials management
-* Database connection string storage
-* API token management
-* Compliance with secret rotation security standards
-
-
-
-
-
-Configuring the external secrets operator with workload identity eliminates the need for
-static credentials in your cluster and the overhead of credential rotation.
-These benefits are particularly helpful in:
-
-* Secure application credentials management
-* Database connection string storage
-* API token management
-* Compliance with secret rotation security standards
-
-
-
-## Next steps
-
-Now that you have workload identity configured for the shared secrets component, visit
-the [Shared Secrets][eso-guide] guide for more information.
-
-Other workload identity guides are:
-* [Backup and restore][backuprestore]
-* [Billing][billing]
-
-[eso-guide]: /spaces/howtos/secrets-management
-[backuprestore]: /spaces/howtos/self-hosted/workload-id/backup-restore-config
-[billing]: /spaces/howtos/self-hosted/workload-id/billing-config
diff --git a/spaces_versioned_docs/version-v1.15/howtos/simulations.md b/spaces_versioned_docs/version-v1.15/howtos/simulations.md
deleted file mode 100644
index 26cb0e657..000000000
--- a/spaces_versioned_docs/version-v1.15/howtos/simulations.md
+++ /dev/null
@@ -1,110 +0,0 @@
----
-title: Simulate changes to your Control Plane Projects
-sidebar_position: 100
-description: Use the Up CLI to mock operations before deploying to your environments.
----
-
-:::info API Version Information
-This guide covers Simulations, available in v1.10+ (GA since v1.13).
-
-For API specifications on the Simulations CRD, see the [Spaces API Reference](../../../reference/apis/spaces-api/).
-:::
-
-:::important
-The Simulations feature is in private preview. For more information, [reach out to Upbound][reach-out-to-upbound].
-:::
-
-Control plane simulations allow you to preview changes to your resources before
-applying them to your control planes. Like a plan or dry-run operation,
-simulations expose the impact of updates to compositions or claims without
-changing your actual resources.
-
-A control plane simulation creates a temporary copy of your control plane and
-returns a preview of the desired changes. The simulation change plan helps you
-reduce the risk of unexpected behavior based on your changes.
-
-## Simulation benefits
-
-Control planes are dynamic systems that automatically reconcile resources to
-match your desired state. Simulations provide visibility into this
-reconciliation process by showing:
-
-
-* New resources to create
-* Existing resources to change
-* Existing resources to delete
-* How configuration changes propagate through the system
-
-These insights are crucial when planning complex changes or upgrading Crossplane
-packages.
-
-## Requirements
-
-Simulations are available to select customers on Upbound Cloud with Team
-Tier or higher. For more information, [reach out to Upbound][reach-out-to-upbound-1].
-
-## How to simulate your control planes
-
-Before you start a simulation, build your project and use the `up
-project run` command to run your control plane.
-
-Use the `up project simulate` command with your control plane name to start the
-simulation:
-
-```ini {copy-lines="all"}
-up project simulate --complete-after=60s --terminate-on-finish
-```
-
-The `complete-after` flag determines how long to run the simulation before it completes and calculates the results. 
Depending on the change, a simulation may not complete within your defined interval leaving unaffected resources as `unchanged`. - -The `terminate-on-finish` flag terminates the simulation after the time -you set - deleting the control plane that ran the simulation. - -At the end of your simulation, your CLI returns: -* A summary of the resources created, modified, or deleted -* Diffs for each resource affected - -## View your simulation in the Upbound Console -You can also view your simulation results in the Upbound Console: - -1. Navigate to your base control plane in the Upbound Console -2. Select the "Simulations" tab in the menu -3. Select a simulation object for a change list of all - resources affected. - -The Console provides visual indications of changes: - -- Created Resources: Marked with green -- Modified Resources: Marked with yellow -- Deleted Resources: Marked with red -- Unchanged Resources: Displayed in gray - -![Upbound Console Simulation](/img/simulations.png) - -## Considerations - -Simulations is a **private preview** feature. - -Be aware of the following limitations: - -- Simulations can't predict the exact behavior of external systems due to the - complexity and non-deterministic reconciliation pattern in Crossplane. - -- The only completion criteria for a simulation is time. Your simulation may not - receive a conclusive result within that interval. Upbound recommends the - default `60s` value. - -- Providers don't run in simulations. Simulations can't compose resources that - rely on the status of Managed Resources. - - -The Upbound team is working to improve these limitations. Your feedback is always appreciated. - -## Next steps - -For more information, follow the [tutorial][tutorial] on Simulations. - - -[tutorial]: /manuals/cli/howtos/simulations -[reach-out-to-upbound]: https://www.upbound.io/contact-us -[reach-out-to-upbound-1]: https://www.upbound.io/contact-us diff --git a/spaces_versioned_docs/version-v1.15/overview/_category_.json b/spaces_versioned_docs/version-v1.15/overview/_category_.json deleted file mode 100644 index 54bb16430..000000000 --- a/spaces_versioned_docs/version-v1.15/overview/_category_.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "label": "Overview", - "position": 0 -} diff --git a/spaces_versioned_docs/version-v1.15/reference/_category_.json b/spaces_versioned_docs/version-v1.15/reference/_category_.json deleted file mode 100644 index 4a6a139c4..000000000 --- a/spaces_versioned_docs/version-v1.15/reference/_category_.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "label": "Spaces API", - "position": 1, - "collapsed": true -} diff --git a/spaces_versioned_docs/version-v1.15/reference/index.md b/spaces_versioned_docs/version-v1.15/reference/index.md deleted file mode 100644 index 5e68b0768..000000000 --- a/spaces_versioned_docs/version-v1.15/reference/index.md +++ /dev/null @@ -1,72 +0,0 @@ ---- -title: Spaces API Reference -description: Documentation for the Spaces API resources (v1.15 - Latest) -sidebar_position: 1 ---- -import CrdDocViewer from '@site/src/components/CrdViewer'; - - -This page documents the Custom Resource Definitions (CRDs) for the Spaces API. 
- - -## Control Planes -### Control Planes - - -## Observability -### Shared Telemetry Configs - - -## `pkg` -### Controller Revisions - - -### Controller Runtime Configs - - -### Controllers - - -### Remote Configuration Revisions - - -### Remote Configurations - - -## Policy -### Shared Upbound Policies - - -## References -### Referenced Objects - - -## Scheduling -### Environments - - -## Secrets -### Shared External Secrets - - -### Shared Secret Stores - - -## Simulations - - -## Spaces Backups -### Backups - - -### Backup Schedules - - -### Shared Backup Configs - - -### Shared Backups - - -### Shared Backup Schedules - diff --git a/spaces_versioned_docs/version-v1.9/concepts/_category_.json b/spaces_versioned_docs/version-v1.9/concepts/_category_.json deleted file mode 100644 index 4b8667e29..000000000 --- a/spaces_versioned_docs/version-v1.9/concepts/_category_.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "label": "Concepts", - "position": 2, - "collapsed": true -} - - diff --git a/spaces_versioned_docs/version-v1.9/concepts/control-planes.md b/spaces_versioned_docs/version-v1.9/concepts/control-planes.md deleted file mode 100644 index 7066343de..000000000 --- a/spaces_versioned_docs/version-v1.9/concepts/control-planes.md +++ /dev/null @@ -1,227 +0,0 @@ ---- -title: Control Planes -weight: 1 -description: An overview of control planes in Upbound ---- - - -Control planes in Upbound are fully isolated Crossplane control plane instances that Upbound manages for you. This means: - -- the underlying lifecycle of infrastructure (compute, memory, and storage) required to power your instance. -- scaling of the infrastructure. -- the maintenance of the core Crossplane components that make up a control plane. - -This lets users focus on building their APIs and operating their control planes, while Upbound handles the rest. Each control plane has its own dedicated API server connecting users to their control plane. - -:::info API Version Information -This guide applies to all supported versions (v1.9-v1.14+). - -For detailed information about Control Plane API specifications and CRD fields, see the [Spaces API Reference](../../../reference/apis/spaces-api/). version compatibility details, see the . -::: - -## Control plane architecture - -![Managed Control Plane Architecture](/img/mcp.png) - -Along with underlying infrastructure, Upbound manages the Crossplane system components. You don't need to manage the Crossplane API server or core resource controllers because Upbound manages your control plane lifecycle from creation to deletion. - -### Crossplane API - -Each control plane offers a unified endpoint. You interact with your control plane through Kubernetes and Crossplane API calls. Each control plane runs a Kubernetes API server to handle API requests. You can make API calls in the following ways: - -- Direct calls: HTTP/gRPC -- Indirect calls: the up CLI, Kubernetes clients such as kubectl, or the Upbound Console. - -Like in Kubernetes, the API server is the hub for all communication for the control plane. All internal components such as system processes and provider controllers act as clients of the API server. - -Your API requests tell Crossplane your desired state for the resources your control plane manages. Crossplane attempts to constantly maintain that state. Crossplane lets you configure objects in the API either imperatively or declaratively. 
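-
-As a minimal illustration of the two styles (the object names and file below are hypothetical), the same change can flow through the control plane's API server either way:
-
-```shell
-# Imperative: issue a direct command against the API server
-kubectl create namespace team-a
-
-# Declarative: record the desired state in a manifest and apply it;
-# the API server and Crossplane then reconcile toward that recorded state
-kubectl apply -f team-a-namespace.yaml
-```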
- -### Crossplane versions and features - -Upbound automatically upgrades Crossplane system components on control planes to new Crossplane versions for updated features and improvements in the open source project. With [automatic upgrades][automatic-upgrades], you choose the cadence that Upbound automatically upgrades the system components in your control plane. You can also choose to manually upgrade your control plane to a different Crossplane version. - -For detailed information on versions and upgrades, refer to the [release notes][release-notes] and the automatic upgrade documentation. If you don't enroll a control plane in a release channel, Upbound doesn't apply automatic upgrades. - -Features considered "alpha" in Crossplane are by default not supported in a control plane unless otherwise specified. - -### Hosting environments - -Every control plane in Upbound belongs to a [control plane group][control-plane-group]. Control plane groups are a logical grouping of one or more control planes with shared objects (such as secrets or backup configuration). Every group resides in a [Space][space] in Upbound, which are hosting environments for control planes. - -Think of a Space as being conceptually the same as an AWS, Azure, or GCP region. Regardless of the Space type you run a control plane in, the core experience is identical. - -## Management - -### Create a control plane - -You can create a new control plane from the Upbound Console, [up CLI][up-cli], or with Kubernetes clients such as `kubectl`. - - - - - -To use the CLI, run the following: - -```shell -up ctp create -``` - -To learn more about control plane-related commands in `up`, go to the [CLI reference][cli-reference] documentation. - - - -You can create and manage control planes declaratively in Upbound. Before you -begin, ensure you're logged into Upbound and set the correct context: - -```bash -up login -# Example: acmeco/upbound-gcp-us-west-1/default -up ctx ${yourOrganization}/${yourSpace}/${yourGroup} -```` - -```yaml -#controlplane-a.yaml -apiVersion: spaces.upbound.io/v1beta1 -kind: ControlPlane -metadata: - name: controlplane-a -spec: - crossplane: - autoUpgrade: - channel: Rapid -``` - -```bash -kubectl apply -f controlplane-a.yaml -``` - - - - - -### Connect directly to your control plane - -Each control plane offers a unified endpoint. You interact with your control plane through Kubernetes and Crossplane API calls. Each control plane runs a Kubernetes API server to handle API requests. - -You can connect to a control plane's API server directly via the up CLI. Use the [`up ctx`][up-ctx] command to set your kubeconfig's current context to a control plane: - -```shell -# Example: acmeco/upbound-gcp-us-west-1/default/ctp1 -up ctx ${yourOrganization}/${yourSpace}/${yourGroup}/${yourControlPlane} -``` - -To disconnect from your control plane and revert your kubeconfig's current context to the previous entry, run the following: - -```shell -up ctx .. -``` - -You can also generate a `kubeconfig` file for a control plane with [`up ctx -f`][up-ctx-f]. - -```shell -up ctx ${yourOrganization}/${yourSpace}/${yourGroup}/${yourControlPlane} -f - > ctp-kubeconfig.yaml -``` - -:::tip -To learn more about how to use `up ctx` to navigate different contexts in Upbound, read the [CLI documentation][cli-documentation]. -::: - -## Configuration - -When you create a new control plane, Upbound provides you with a fully isolated instance of Crossplane. 
Configure your control plane by installing packages that extend its capabilities, such as creating and managing the lifecycle of new types of infrastructure resources.
-
-You're encouraged to install any available Crossplane package type (Providers, Configurations, Functions) available in the [Upbound Marketplace][upbound-marketplace] on your control planes.
-
-### Install packages
-
-Below are a couple of ways to install Crossplane packages on your control plane.
-
-
-
-
-
-
-Use the `up` CLI to install Crossplane packages from the [Upbound Marketplace][upbound-marketplace-1] on your control planes. Connect directly to your control plane via `up ctx`. Then, to install a provider:
-
-```shell
-up ctp provider install xpkg.upbound.io/upbound/provider-family-aws
-```
-
-To install a Configuration:
-
-```shell
-up ctp configuration install xpkg.upbound.io/upbound/platform-ref-aws
-```
-
-To install a Function:
-
-```shell
-up ctp function install xpkg.upbound.io/crossplane-contrib/function-kcl
-```
-
-
-You can use kubectl to directly apply any Crossplane manifest. Below is an example for installing a Crossplane provider:
-
-```yaml
-cat <<EOF | kubectl apply -f -
-apiVersion: pkg.crossplane.io/v1
-kind: Provider
-metadata:
-  name: provider-family-aws
-spec:
-  package: xpkg.upbound.io/upbound/provider-family-aws
-EOF
-```
-
-
-
-For production-grade scenarios, it's recommended you configure your control plane declaratively via Git plus a Continuous Delivery (CD) Engine such as Argo. For guidance on this topic, read [GitOps with control planes][gitops-with-control-planes].
-
-
-
-
-
-
-### Configure Crossplane ProviderConfigs
-
-#### ProviderConfigs with OpenID Connect
-
-Use OpenID Connect (`OIDC`) to authenticate to Upbound control planes without credentials. OIDC lets your control plane exchange short-lived tokens directly with your cloud provider. Read how to [connect control planes to external services][connect-control-planes-to-external-services] to learn more.
-
-#### Generic ProviderConfigs
-
-The Upbound Console doesn't allow direct editing of ProviderConfigs that don't support `Upbound` authentication. To edit these ProviderConfigs on your control plane, connect to the control plane directly by following the instructions in the previous section and using `kubectl`.
-
-### Configure secrets
-
-Upbound gives users the ability to configure the synchronization of secrets from external stores into control planes. Configure this capability at the group-level, explained in the [Spaces documentation][spaces-documentation].
-
-### Configure backups
-
-Upbound gives users the ability to configure backup schedules, take impromptu backups, and conduct self-service restore operations. Configure this capability at the group-level, explained in the [Spaces documentation][spaces-documentation-1].
-
-### Configure telemetry
-
-
-Upbound gives users the ability to configure the collection of telemetry (logs, metrics, and traces) in their control planes. Using Upbound's built-in [OTEL][otel] support, you can stream this data out to your preferred observability solution. Configure this capability at the group-level, explained in the [Spaces documentation][spaces-documentation-2].
-
-
-[automatic-upgrades]: /spaces/howtos/auto-upgrade
-[release-notes]: https://github.com/upbound/universal-crossplane/releases
-[control-plane-group]: /spaces/concepts/groups
-[space]: /spaces/overview
-[up-cli]: /reference/cli-reference
-[cli-reference]: /reference/cli-reference
-[up-ctx]: /reference/cli-reference
-[up-ctx-f]: /reference/cli-reference
-[cli-documentation]: /manuals/cli/concepts/contexts
-[upbound-marketplace]: https://marketplace.upbound.io
-[upbound-marketplace-1]: https://marketplace.upbound.io
-[gitops-with-control-planes]: /spaces/howtos/cloud-spaces/gitops
-[connect-control-planes-to-external-services]: /manuals/platform/howtos/oidc
-[spaces-documentation]: /spaces/howtos/secrets-management
-[spaces-documentation-1]: /spaces/howtos/backup-and-restore
-[otel]: https://opentelemetry.io
-[spaces-documentation-2]: /spaces/howtos/observability
diff --git a/spaces_versioned_docs/version-v1.9/concepts/deployment-modes.md b/spaces_versioned_docs/version-v1.9/concepts/deployment-modes.md
deleted file mode 100644
index f5e718f88..000000000
--- a/spaces_versioned_docs/version-v1.9/concepts/deployment-modes.md
+++ /dev/null
@@ -1,53 +0,0 @@
----
-title: Deployment Modes
-sidebar_position: 10
-description: An overview of deployment modes for Spaces
----
-
-Upbound Spaces can be deployed and used in a variety of modes:
-
-- **Cloud Spaces:** Multi-tenant Upbound-hosted, Upbound-managed Space environment. Cloud Spaces provide a typical SaaS experience.
-- **[Dedicated Spaces][dedicated-spaces]:** Single-tenant Upbound-hosted, Upbound-managed Space environment. Dedicated Spaces provide a SaaS experience, with additional isolation guarantees that your workloads run in a fully isolated context.
-- **[Managed Spaces][managed-spaces]:** Single-tenant customer-hosted, Upbound-managed Space environment. Managed Spaces provide a SaaS-like experience, with additional guarantees of all hosting infrastructure being served from your own cloud account.
-- **[Self-Hosted Spaces][self-hosted-spaces]:** Single-tenant customer-hosted, customer-managed Space environment. This is a fully self-hosted, self-managed software experience for using Spaces. Upbound delivers the Spaces software and you run it yourself.
-
-The Upbound platform uses a federated model to connect each Space back to a
-central service called the [Upbound Console][console], which is deployed and
-managed by Upbound.
-
-By default, customers have access to a set of Cloud Spaces.
-
-## Supported clouds
-
-You can host Upbound Spaces on Amazon Web Services (AWS), Microsoft Azure,
-and Google Cloud Platform (GCP). Regardless of the hosting platform, you can use
-Spaces to deploy control planes that manage the lifecycle of your resources.
-
-## Supported regions
-
-The tables below list the cloud service provider regions supported by Upbound.
- -### GCP - -| Region | Location | -| --- | --- | -| `us-west-1` | Western US (Oregon) -| `us-central-1` | Central US (Iowa) -| `eu-west-3` | Eastern Europe (Frankfurt) - -### AWS - -| Region | Location | -| --- | --- | -| `us-east-1` | Eastern US (Northern Virginia) - -### Azure - -| Region | Location | -| --- | --- | -| `us-east-1` | Eastern US (Iowa) - -[dedicated-spaces]: /spaces/howtos/cloud-spaces/dedicated-spaces-deployment -[managed-spaces]: /spaces/howtos/self-hosted/managed-spaces-deployment -[self-hosted-spaces]: /spaces/howtos/self-hosted/self-hosted-spaces-deployment -[console]: /manuals/console/upbound-console/ diff --git a/spaces_versioned_docs/version-v1.9/concepts/groups.md b/spaces_versioned_docs/version-v1.9/concepts/groups.md deleted file mode 100644 index d2ccacdb3..000000000 --- a/spaces_versioned_docs/version-v1.9/concepts/groups.md +++ /dev/null @@ -1,115 +0,0 @@ ---- -title: Control Plane Groups -sidebar_position: 2 -description: An introduction to the Control Plane Groups in Upbound -plan: "enterprise" ---- - - - -In Upbound, Control Plane Groups (or just, 'groups') are a logical grouping of one or more control planes with shared resources like [secrets][secrets] or [backups][backups]. It's a mechanism for isolating these groups of resources within a single [Space][space]. All role-based access control in Upbound happens at the control plane group-level. - -## When to use multiple groups - -You should use groups in environments where there's a need to have Crossplane manage infrastructure across multiple cloud accounts or projects. For users who only need to deploy and manage resources in a couple cloud accounts, you shouldn't need to think about groups at all. - -Groups are a way to divide access in Upbound between multiple teams. Think of a group as being analogous to a Kubernetes _namespace_. - -## The 'default' group - -Every Cloud Space in Upbound has a group named _default_ available. - -## Working with groups - -### View groups - -You can list groups in a Space using: - -```shell -up group list -``` - -If you're operating in a single-tenant Space and have access to the underlying cluster, you can list namespaces that have the group label: - -```shell -kubectl get namespaces -l spaces.upbound.io/group=true -``` - -### Set the group for a request - -Several commands in _up_ have a group context. To set the group for a request, use the `--group` flag: - -```shell -up ctp list --group=team1 -``` -```shell -up ctp create new-ctp --group=team2 -``` - -### Set the group preference - -The _up_ CLI operates upon a single [Upbound context][upbound-context]. Whatever context gets set is then used as the preference for other commands. An Upbound context is capable of pointing at a variety of altitudes: - -1. A Space in Upbound -2. A group within a Space -3. a control plane within a group - -To set the group preference, use `up ctx` to choose a group as your preferred Upbound context. 
For example: - -```shell -# This sets the context for the up CLI to the default group in an Upbound-managed Cloud Space (gcp-us-west-1) for an organization called 'acmeco' -up ctx acmeco/upbound-gcp-us-west-1/default/ -``` - -### Create a group - -To create a group, login to Upbound and set your context to your desired Space: - -```shell -up login -up ctx '/' -# Example: up ctx acmeco/upbound-gcp-us-west-1 -``` - - -Create a group: - -```shell -up group create my-new-group -``` - -### Delete a group - -To delete a group, login to Upbound and set your context to your desired Space: - -```shell -up login -up ctx '/' -# Example: up ctx acmeco/upbound-gcp-us-west-1 -``` - -Delete a group: - -```shell -up group delete my-new-group -``` - -### Protected groups - -Once a control plane gets created in a group, Upbound enforces a protection policy on the group. Upbound prevents accidental deletion of the group. To delete a group that has control planes in it, you should first delete all control planes in the group. - -## Groups in the context of single-tenant Spaces - -Upbound offers a variety of deployment models to use the product. If you deploy your own single-tenant Upbound Space (whether connected or disconnected), you're self-hosting Upbound software in a Kubernetes cluster. In these environments, a control plane group maps to a corresponding namespace in the cluster which hosts the Space. - -Most Kubernetes clusters come with some set of predefined namespaces. Because a group maps to a corresponding Kubernetes namespace, whenever a group gets created, there too must be a Kubernetes namespace accordingly. When the Spaces software is newly installed, no groups exist. You _can_ elevate a Kubernetes namespace to become a group by doing the following: - -1. Creating a group with the same name as a preexisting Kubernetes namespace -2. Creating a control plane in a preexisting Kubernetes namespace -3. Labeling a Kubernetes namespace with the label `spaces.upbound.io/group=true` - - -[secrets]: /spaces/howtos/secrets-management -[backups]: /spaces/howtos/self-hosted/workload-id/backup-restore-config/ -[space]: /spaces/overview -[upbound-context]: /manuals/cli/concepts/contexts diff --git a/spaces_versioned_docs/version-v1.9/howtos/_category_.json b/spaces_versioned_docs/version-v1.9/howtos/_category_.json deleted file mode 100644 index d3a8547aa..000000000 --- a/spaces_versioned_docs/version-v1.9/howtos/_category_.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "label": "How-tos", - "position": 3, - "collapsed": true -} - - diff --git a/spaces_versioned_docs/version-v1.9/howtos/api-connector.md b/spaces_versioned_docs/version-v1.9/howtos/api-connector.md deleted file mode 100644 index a14468f52..000000000 --- a/spaces_versioned_docs/version-v1.9/howtos/api-connector.md +++ /dev/null @@ -1,413 +0,0 @@ ---- -title: API Connector -weight: 90 -description: Connect Kubernetes clusters to remote Crossplane control planes for resource synchronization -aliases: - - /api-connector - - /concepts/api-connector ---- -:::info API Version Information -This guide covers API Connector, a preview feature available in all supported versions (v1.9-v1.14+). - -For related API specifications and available resources, see the [Spaces API Reference](../../../reference/apis/spaces-api/). version compatibility details, see the . -::: - -:::warning -API Connector is currently in **Preview**. The feature is under active -development and subject to breaking changes. Use for testing and evaluation -purposes only. 
-:::
-
-API Connector enables seamless integration between Kubernetes application
-clusters consuming APIs and remote Crossplane control planes providing and
-reconciling APIs.
-
-You can use the API Connector to decouple where Crossplane is running (for
-example in an Upbound control plane), and where APIs are consumed
-(for example in an existing Kubernetes cluster). This gives you flexibility and
-consistency in your control plane operations.
-
-
-
-Unlike the [Control Plane Connector](ctp-connector.md) which offers only
-coarse-grained connectivity between app clusters and a control plane, API
-connector offers fine-grained configuration of which APIs get offered along with
-multi-cluster connectivity.
-
-## Architecture overview
-
-![API Connector Architecture](/img/api-connector.png)
-
-API Connector uses a **provider-consumer** model:
-
-- **Provider control plane**: The Upbound control plane that provides APIs and manages infrastructure.
-- **Consumer cluster**: Any Kubernetes cluster where its users want to use APIs provided by the provider control plane, without having to run Crossplane. API connector gets installed in the consumer cluster, and bidirectionally syncs API objects to the provider.
-
-### Key components
-
-**Custom Resource Definitions (CRDs)**:
-
-
-- `ClusterConnection`: Establishes a connection from the consumer to the provider cluster. Pulls bindable CRD APIs from the provider into the consumer cluster for use.
-
-- `ClusterAPIBinding`: Instructs API connector to sync all API objects cluster-wide with a given API group to a given provider cluster.
-- `APIBinding`: Namespaced version of `ClusterAPIBinding`. Instructs API connector to sync API objects within a given namespace and with a given API group to a given provider cluster.
-
-
-## Prerequisites
-
-Before using API Connector, ensure:
-
-1. **Consumer cluster** has network access to the provider control plane
-1. You have a license to use API connector. If you are unsure, [contact Upbound][contact] or your sales representative.
-
-This guide walks through how to automate connecting your cluster to an Upbound
-control plane. You can also manually configure the API Connector.
-
-## Publishing APIs in the provider cluster
-
-
-
-
-First, log in to your provider control plane, and choose which CRD APIs you want
-to make accessible to the consumer cluster's users. API connector only syncs
-these "bindable" CRDs.
-
-
-
-
-
-
-
-Use the `up` CLI to log in:
-
-```bash
-up login
-```
-
-Connect to your control plane:
-
-```bash
-up ctx
-```
-
-Check what CRDs are available:
-
-```bash
-kubectl get crds
-```
-
-
-Label all CRDs you want to publish with the bindable label:
-
-
-```bash
-kubectl label crd <crd-name> 'connect.upbound.io/bindable'='true' --overwrite
-```
-
-
-
-
-Change context to the provider cluster:
-```bash
-kubectl config set-context
-```
-
-Check what CRDs are available:
-```bash
-kubectl get crds
-```
-
-
-Label all CRDs you want to publish with the bindable label:
-
-```bash
-kubectl label crd <crd-name> 'connect.upbound.io/bindable'='true' --overwrite
-```
-
-
-
-## Installation
-
-
-
-
-The up CLI provides the simplest installation method with automatic
-configuration:
-
-Make sure the current Kubeconfig context is set to the **provider control plane**:
-```bash
-up ctx
-
-up controlplane api-connector install --consumer-kubeconfig [OPTIONS]
-```
-
-The command:
-1. Creates a Robot account (named ``) in the Upbound Cloud organization ``, 
Gives the created robot account `admin` permissions to the provider control plane `` -1. Generates a JWT token for the robot account, and stores it in a Kubernetes Secret in the consumer cluster. -1. Installs the API connector Helm chart in the consumer cluster. -1. Creates a `ClusterConnection` object in the consumer cluster, referring to the newly generated Secret, so that API connector can authenticate successfully to the provider control plane. -1. API connector pulls all published CRDs from the previous step into the consumer cluster. - -**Example**: -```bash -up controlplane api-connector install \ - --consumer-kubeconfig ~/.kube/config \ - --consumer-context my-cluster \ - --upbound-token -``` - -This command uses provided token to authenticate with the **Provider control plane** -and create a `ClusterConnection` resource in the **Consumer cluster** to connect to the -**Provider control plane**. - -**Key Options**: -- `--consumer-kubeconfig`: Path to consumer cluster kubeconfig (required) -- `--consumer-context`: Context name for consumer cluster (required) -- `--name`: Custom name for connection resources (optional) -- `--upbound-token`: API token for authentication (optional) -- `--upgrade`: Upgrade existing installation (optional) -- `--version`: Specific version to install (optional) - - - - -For manual installation or custom configurations: - -```bash -helm upgrade --install api-connector oci://xpkg.upbound.io/spaces-artifacts/api-connector \ - --namespace upbound-system \ - --create-namespace \ - --version \ - --set consumerClusterDisplayName= -``` - -### Authentication methods - -API Connector supports two authentication methods: - - - - -For Upbound Spaces integration: - -```yaml -apiVersion: v1 -kind: Secret -metadata: - name: spaces-secret - namespace: upbound-system -type: Opaque -stringData: - token: - organization: - spacesBaseURL: - controlPlaneGroupName: - controlPlaneName: -``` - - - -For direct cluster access: - -```yaml -apiVersion: v1 -kind: Secret -metadata: - name: provider-kubeconfig - namespace: upbound-system -type: Opaque -data: - kubeconfig: -``` - - - - -### Connection setup - -Create a `ClusterConnection` to establish connectivity: - - - - -```yaml -apiVersion: connect.upbound.io/v1alpha1 -kind: ClusterConnection -metadata: - name: spaces-connection - namespace: upbound-system -spec: - secretRef: - kind: UpboundRobotToken - name: spaces-secret - namespace: upbound-system - crdManagement: - pullBehavior: Pull -``` - - - - -```yaml -apiVersion: connect.upbound.io/v1alpha1 -kind: ClusterConnection -metadata: - name: provider-connection - namespace: upbound-system -spec: - secretRef: - kind: KubeConfig - name: provider-kubeconfig - namespace: upbound-system - crdManagement: - pullBehavior: Pull -``` - - - - - - - -### Configuration - -Bind APIs to make them available in your consumer cluster: - -```yaml -apiVersion: connect.upbound.io/v1alpha1 -kind: ClusterAPIBinding -metadata: - name: -spec: - connectionRef: - kind: ClusterConnection - name: # Or --name value -``` - - - - -The `ClusterAPIBinding` name must match the **Resource.Group** (name of the CustomResourceDefinition) of the CRD you want to bind. 
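-
-For example, to bind the `nop.example.org` API group used in the usage example below through the `spaces-connection` from the earlier setup (a sketch; substitute your own connection name):
-
-```yaml
-apiVersion: connect.upbound.io/v1alpha1
-kind: ClusterAPIBinding
-metadata:
-  # The name must be the API group of the CRD to bind
-  name: nop.example.org
-spec:
-  connectionRef:
-    kind: ClusterConnection
-    name: spaces-connection
-```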
-
-
-
-## Usage example
-
-After configuration, you can create API objects (in the consumer cluster) that
-will be synchronized to the provider cluster:
-
-```yaml
-apiVersion: nop.example.org/v1alpha1
-kind: NopResource
-metadata:
-  name: my-resource
-  namespace: default
-spec:
-  coolField: "Synchronized resource"
-  compositeDeletePolicy: Foreground
-```
-
-Verify the resource status:
-
-```bash
-kubectl get nopresource my-resource -o yaml
-
-```
-When the `APIBound=True` condition is present, it means that the API object has
-been synced to the provider cluster, and is being reconciled there. Whenever the
-API object in the provider cluster gets status updates (for example
-`Ready=True`), that status is synced back to the consumer cluster.
-
-Switch contexts to the provider cluster to see the API object being created:
-
-```bash
-up ctx
-# or kubectl config set-context
-```
-
-```bash
-kubectl get nopresource my-resource -o yaml
-```
-
-Note that in the provider cluster, the API object is labeled with information on
-where the API object originates from, and `connect.upbound.io/managed=true`.
-
-## Monitoring and troubleshooting
-
-### Check connection status
-
-```bash
-kubectl get clusterconnection
-```
-
-Expected output:
-```
-NAME                STATUS   MESSAGE
-spaces-connection   Ready    Provider controlplane is available
-```
-
-### View available APIs
-
-```bash
-kubectl get clusterconnection spaces-connection -o jsonpath='{.status.offeredAPIs[*].name}'
-```
-
-### Check API binding status
-
-```bash
-kubectl get clusterapibinding
-```
-
-### Debug resource synchronization
-
-```bash
-kubectl describe <resource-kind> <resource-name>
-```
-
-## Removal
-
-### Using the up CLI
-
-```bash
-up controlplane api-connector uninstall \
-  --consumer-kubeconfig ~/.kube/config \
-  --all
-```
-
-The `--all` flag removes all resources including connections and secrets.
-Without the flag, only runtime-related resources are removed; connections and
-secrets remain.
-
-:::note
-Uninstall doesn't remove any API objects in the provider control plane. If you
-want to clean up all API objects there, delete all API objects from the consumer
-cluster before API connector uninstallation, and wait for the objects to get
-deleted.
-:::
-
-
-### Using Helm
-
-```bash
-helm uninstall api-connector -n upbound-system
-```
-
-## Limitations
-
-- **Preview feature**: Subject to breaking changes. Not yet production grade.
-- **CRD updates**: CRDs are pulled once but not automatically updated. If multiple Crossplane clusters offer the same CRD API, API changes must be synchronized out of band, for example using a [Crossplane Configuration](https://docs.crossplane.io/latest/packages/).
-- **Network requirements**: Consumer cluster must have direct network access to provider cluster.
-- **Wide permissions needed in consumer cluster**: Because the API connector doesn't know up front the names of the APIs it needs to reconcile, it currently runs with full "root" privileges in the consumer cluster.
-
-- **Connector polling**: API Connector checks for drift between the consumer and provider cluster
-  periodically through polling. The poll interval can be changed with the `pollInterval` Helm value.
-
-
-## Advanced configuration
-
-### Multiple connections
-
-You can connect to multiple provider clusters simultaneously by creating multiple `ClusterConnection` resources with different names and configurations.
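-
-A sketch of that pattern, assuming two kubeconfig Secrets named `prod-kubeconfig` and `staging-kubeconfig` (both names are hypothetical):
-
-```yaml
-apiVersion: connect.upbound.io/v1alpha1
-kind: ClusterConnection
-metadata:
-  name: prod-connection
-  namespace: upbound-system
-spec:
-  secretRef:
-    kind: KubeConfig
-    name: prod-kubeconfig
-    namespace: upbound-system
-  crdManagement:
-    pullBehavior: Pull
----
-apiVersion: connect.upbound.io/v1alpha1
-kind: ClusterConnection
-metadata:
-  name: staging-connection
-  namespace: upbound-system
-spec:
-  secretRef:
-    kind: KubeConfig
-    name: staging-kubeconfig
-    namespace: upbound-system
-  crdManagement:
-    pullBehavior: Pull
-```
-
-Each `APIBinding` or `ClusterAPIBinding` then selects which provider cluster it syncs through via its `spec.connectionRef`.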
-
-[contact]: https://www.upbound.io/contact-us
diff --git a/spaces_versioned_docs/version-v1.9/howtos/auto-upgrade.md b/spaces_versioned_docs/version-v1.9/howtos/auto-upgrade.md
deleted file mode 100644
index 249056fb4..000000000
--- a/spaces_versioned_docs/version-v1.9/howtos/auto-upgrade.md
+++ /dev/null
@@ -1,131 +0,0 @@
----
-title: Automatically upgrade control planes
-sidebar_position: 50
-description: How to configure automatic upgrades of Crossplane in a control plane
-plan: "standard"
----
-
-
-
-Upbound Spaces can automatically upgrade the version of Upbound Crossplane in managed control plane instances. You can edit the `spec.crossplane.autoUpgrade` field in your `ControlPlane` specification with the available release channels below.
-
-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+). The `spec.crossplane.autoUpgrade` field has been available since v1.9.
-
-For ControlPlane API specifications, see the [Spaces API Reference](../../../reference/apis/spaces-api/).
-:::
-
-| Channel | Description | Example |
-|------------|-----------------------------------|---------------------------------------------------------------------------------------------------|
-| **None** | Disables auto upgrades. | _Uses version specified in `spec.crossplane.version`._ |
-| **Patch** | Upgrades to the latest supported patch release. | _Control plane version 1.12.2-up.2 auto upgrades to 1.12.3-up.1 upon release._ |
-| **Stable** | Default setting. Upgrades to the latest supported patch release on minor version _N-1_, where N is the latest supported minor version. | _If the latest supported minor version is 1.14, auto upgrades to the latest patch of 1.13, such as 1.13.2-up.3._ |
-| **Rapid** | Upgrades to the latest supported patch release on the latest supported minor version. | _If the latest supported minor version is 1.14, auto upgrades to the latest patch of 1.14, such as 1.14.5-up.1._ |
-
-
-:::warning
-
-The `Rapid` channel is only recommended for users willing to accept the risk of new features and potentially breaking changes.
-
-:::
-
-## Examples
-
-The specs below are examples of how to edit the `autoUpgrade` channel in your `ControlPlane` specification.
-
-To run a control plane with the `Rapid` auto upgrade channel, your spec should look like this:
-
-```yaml
-apiVersion: spaces.upbound.io/v1beta1
-kind: ControlPlane
-metadata:
-  name: example-ctp
-spec:
-  crossplane:
-    autoUpgrade:
-      channel: Rapid
-  writeConnectionSecretToRef:
-    name: kubeconfig-example-ctp
-```
-
-To run a control plane with a pinned version of Crossplane, specify it in the `version` field:
-
-```yaml
-apiVersion: spaces.upbound.io/v1beta1
-kind: ControlPlane
-metadata:
-  name: example-ctp
-spec:
-  crossplane:
-    version: 1.14.3-up.1
-    autoUpgrade:
-      channel: None
-  writeConnectionSecretToRef:
-    name: kubeconfig-example-ctp
-```
-
-## Supported Crossplane versions
-
- -Current Crossplane version support by Spaces version: - -| Spaces Version | Crossplane Version Min | Crossplane Version Max | -|:--------------:|:----------------------:|:----------------------:| -| 1.2 | 1.13 | 1.15 | -| 1.3 | 1.13 | 1.15 | -| 1.4 | 1.14 | 1.16 | -| 1.5 | 1.14 | 1.16 | -| 1.6 | 1.14 | 1.16 | -| 1.7 | 1.14 | 1.16 | -| 1.8 | 1.15 | 1.17 | -| 1.9 | 1.16 | 1.18 | -| 1.10 | 1.16 | 1.18 | -| 1.11 | 1.16 | 1.18 | -| 1.12 | 1.17 | 1.19 | - - -Upbound offers extended support for all installed Crossplane versions released within a 12 month window since the last Spaces release. Contact your Upbound sales representative for more information on version support. - - -:::warning - -If the auto upgrade channel is `Stable` or `Rapid`, the Crossplane version will always stay within the support window after auto upgrade. If set to `Patch` or `None`, the minor version may be outside the support window. You are responsible for upgrading to a supported version - -::: - -To view the support status of a control plane instance, use `kubectl get ctp`. - -```bash -kubectl get ctp -NAME CROSSPLANE VERSION SUPPORTED READY MESSAGE AGE -example-ctp 1.13.2-up.3 True True 31m - -``` - -Unsupported versions return `SUPPORTED: False`. - -```bash -kubectl get ctp -NAME CROSSPLANE VERSION SUPPORTED READY MESSAGE AGE -example-ctp 1.11.5-up.1 False True 31m - -``` - -For more information, use the `-o yaml` flag to return more information. - -```bash -kubectl get controlplanes.spaces.upbound.io example-ctp -o yaml -status: -conditions: -... -- lastTransitionTime: "2024-01-23T06:36:10Z" - message: Crossplane version 1.11.5-up.1 is outside of the support window. - Oldest supported minor version is 1.12. - reason: UnsupportedCrossplaneVersion - status: "False" - type: Supported -``` - - -[preceding-minor-versions]: /reference/usage/lifecycle/#maintenance-and-updates diff --git a/spaces_versioned_docs/version-v1.9/howtos/automation-and-gitops/_category_.json b/spaces_versioned_docs/version-v1.9/howtos/automation-and-gitops/_category_.json deleted file mode 100644 index b65481af6..000000000 --- a/spaces_versioned_docs/version-v1.9/howtos/automation-and-gitops/_category_.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "label": "Automation & GitOps", - "position": 11, - "collapsed": true, - "customProps": { - "plan": "business" - } -} diff --git a/spaces_versioned_docs/version-v1.9/howtos/automation-and-gitops/overview.md b/spaces_versioned_docs/version-v1.9/howtos/automation-and-gitops/overview.md deleted file mode 100644 index 57eeb15fc..000000000 --- a/spaces_versioned_docs/version-v1.9/howtos/automation-and-gitops/overview.md +++ /dev/null @@ -1,138 +0,0 @@ ---- -title: Automation and GitOps Overview -sidebar_label: Overview -sidebar_position: 1 -description: Guide to automating control plane deployments with GitOps and Argo CD -plan: "business" ---- - -Automating control plane deployments with GitOps enables declarative, version-controlled infrastructure management. This section covers integrating GitOps workflows with Upbound control planes using Argo CD and related tools. - -:::info API Version Information -This guide applies to all supported versions (v1.9-v1.14+). GitOps patterns are compatible across all versions of Upbound Spaces. - -For details on deployment models and their differences, see the [Deployment Modes](../../concepts/deployment-modes.md) concept guide. version-specific features, see the . -::: - -## What is GitOps? 
-
-Current Crossplane version support by Spaces version:
-
-| Spaces Version | Crossplane Version Min | Crossplane Version Max |
-|:--------------:|:----------------------:|:----------------------:|
-| 1.2 | 1.13 | 1.15 |
-| 1.3 | 1.13 | 1.15 |
-| 1.4 | 1.14 | 1.16 |
-| 1.5 | 1.14 | 1.16 |
-| 1.6 | 1.14 | 1.16 |
-| 1.7 | 1.14 | 1.16 |
-| 1.8 | 1.15 | 1.17 |
-| 1.9 | 1.16 | 1.18 |
-| 1.10 | 1.16 | 1.18 |
-| 1.11 | 1.16 | 1.18 |
-| 1.12 | 1.17 | 1.19 |
-
-
-Upbound offers extended support for all installed Crossplane versions released within a 12-month window since the last Spaces release. Contact your Upbound sales representative for more information on version support.
-
-
-:::warning
-
-If the auto upgrade channel is `Stable` or `Rapid`, the Crossplane version will always stay within the support window after auto upgrade. If set to `Patch` or `None`, the minor version may be outside the support window. You are responsible for upgrading to a supported version.
-
-:::
-
-To view the support status of a control plane instance, use `kubectl get ctp`.
-
-```bash
-kubectl get ctp
-NAME          CROSSPLANE VERSION   SUPPORTED   READY   MESSAGE   AGE
-example-ctp   1.13.2-up.3          True        True              31m
-
-```
-
-Unsupported versions return `SUPPORTED: False`.
-
-```bash
-kubectl get ctp
-NAME          CROSSPLANE VERSION   SUPPORTED   READY   MESSAGE   AGE
-example-ctp   1.11.5-up.1          False       True              31m
-
-```
-
-Use the `-o yaml` flag to return more information.
-
-```bash
-kubectl get controlplanes.spaces.upbound.io example-ctp -o yaml
-status:
-  conditions:
-  ...
-  - lastTransitionTime: "2024-01-23T06:36:10Z"
-    message: Crossplane version 1.11.5-up.1 is outside of the support window.
-      Oldest supported minor version is 1.12.
-    reason: UnsupportedCrossplaneVersion
-    status: "False"
-    type: Supported
-```
-
-
-[preceding-minor-versions]: /reference/usage/lifecycle/#maintenance-and-updates
diff --git a/spaces_versioned_docs/version-v1.9/howtos/automation-and-gitops/_category_.json b/spaces_versioned_docs/version-v1.9/howtos/automation-and-gitops/_category_.json
deleted file mode 100644
index b65481af6..000000000
--- a/spaces_versioned_docs/version-v1.9/howtos/automation-and-gitops/_category_.json
+++ /dev/null
@@ -1,8 +0,0 @@
-{
-  "label": "Automation & GitOps",
-  "position": 11,
-  "collapsed": true,
-  "customProps": {
-    "plan": "business"
-  }
-}
diff --git a/spaces_versioned_docs/version-v1.9/howtos/automation-and-gitops/overview.md b/spaces_versioned_docs/version-v1.9/howtos/automation-and-gitops/overview.md
deleted file mode 100644
index 57eeb15fc..000000000
--- a/spaces_versioned_docs/version-v1.9/howtos/automation-and-gitops/overview.md
+++ /dev/null
@@ -1,138 +0,0 @@
----
-title: Automation and GitOps Overview
-sidebar_label: Overview
-sidebar_position: 1
-description: Guide to automating control plane deployments with GitOps and Argo CD
-plan: "business"
----
-
-Automating control plane deployments with GitOps enables declarative, version-controlled infrastructure management. This section covers integrating GitOps workflows with Upbound control planes using Argo CD and related tools.
-
-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+). GitOps patterns are compatible across all versions of Upbound Spaces.
-
-For details on deployment models and their differences, see the [Deployment Modes](../../concepts/deployment-modes.md) concept guide.
-:::
-
-## What is GitOps?
## Prerequisites

Before implementing GitOps with control planes, ensure you have:

**For Cloud Spaces:**
- Access to Upbound Cloud Spaces
- `up` CLI installed and configured
- API token with appropriate permissions
- Argo CD or similar GitOps controller running
- Familiarity with Kubernetes RBAC

**For Self-Hosted Spaces:**
- Self-hosted Spaces deployed and running
- Argo CD deployed in your infrastructure
- Kubectl access to the cluster hosting Spaces
- Understanding of control plane architecture

## Next Steps

1. **Choose your deployment model** above
2. **Review the relevant getting started guide**
3. **Set up your GitOps controller** (Argo CD)
4. **Deploy your first automated control plane**
5. **Explore advanced topics** as needed

:::tip
Start with simple deployments to test your GitOps workflow before moving to production. Use [simulations](../simulations.md) to preview changes before applying them.
:::

diff --git a/spaces_versioned_docs/version-v1.9/howtos/backup-and-restore.md b/spaces_versioned_docs/version-v1.9/howtos/backup-and-restore.md
deleted file mode 100644
index 3b8d026cb..000000000
--- a/spaces_versioned_docs/version-v1.9/howtos/backup-and-restore.md
+++ /dev/null
@@ -1,530 +0,0 @@
---
title: Backup and restore
sidebar_position: 13
description: Configure and manage backups in your Upbound Space.
plan: "enterprise"
---

Upbound's _Shared Backups_ is a built-in backup and restore feature. Shared Backups lets you configure automatic schedules for taking snapshots of your control planes. You can restore data from these backups by making new control planes. This guide explains how to use Shared Backups for disaster recovery or upgrade scenarios.

:::info API Version Information & Available Versions
This guide applies to **all supported versions** (v1.9-v1.15+).

**Select your API version**:
- [Spaces API v1.15 (Latest)](../../../next/reference/apis/spaces-api/)
- [Spaces API v1.14](../../../1.14/reference/apis/spaces-api/)
- [Spaces API v1.13](../../../1.13/reference/apis/spaces-api/)
- [Spaces API v1.12](../../../1.12/reference/apis/spaces-api/)
- [Spaces API v1.11](../../../1.11/reference/apis/spaces-api/)
- [Spaces API v1.10](../../../1.10/reference/apis/spaces-api/)
- [Spaces API v1.9](../../../1.9/reference/apis/spaces-api/)
:::

## Benefits

The Shared Backups feature provides the following benefits:

* Automatic backups for control planes without any operational overhead
* Backup schedules for multiple control planes in a group
* Shared Backups are available across all hosting environments of Upbound (Disconnected, Connected, or Cloud Spaces)

## Configure a Shared Backup Config

[SharedBackupConfig][sharedbackupconfig] is a [group-scoped][group-scoped] resource. You should create them in a group containing one or more control planes. This resource configures the storage details and provider. Whenever a backup executes (whether scheduled or manually initiated), it references a SharedBackupConfig to tell it where to store the snapshot.

### Backup config provider

The `spec.objectStorage.provider` and `spec.objectStorage.config` fields configure:

* The object storage provider
* The path to the provider
* The credentials needed to communicate with the provider

You can only set one provider. Upbound currently supports AWS, Azure, and GCP as providers.
`spec.objectStorage.config` is a freeform map of configuration options for the object storage provider. See [Thanos object storage][thanos-object-storage] for more information on the formats for each supported cloud provider. The `spec.objectStorage.bucket` and `spec.objectStorage.provider` fields override the corresponding values in the config.

#### AWS as a storage provider

:::important
For Cloud Spaces, static credentials are currently the only supported auth method.
:::

This example demonstrates how to use AWS as a storage provider for your backups:

```yaml
apiVersion: spaces.upbound.io/v1alpha1
kind: SharedBackupConfig
metadata:
  name: default
  namespace: default
spec:
  objectStorage:
    provider: AWS
    bucket: spaces-backup-bucket
    config:
      endpoint: s3.eu-west-2.amazonaws.com
      region: eu-west-2
    credentials:
      source: Secret
      secretRef:
        name: bucket-creds
        key: creds
```

This example assumes you've already created an S3 bucket called "spaces-backup-bucket" in the AWS `eu-west-2` region. The account credentials to access the bucket should exist in a secret in the same namespace as the Shared Backup Config.

#### Azure as a storage provider

:::important
For Cloud Spaces, static credentials are currently the only supported auth method.
:::

This example demonstrates how to use Azure as a storage provider for your backups:

```yaml
apiVersion: spaces.upbound.io/v1alpha1
kind: SharedBackupConfig
metadata:
  name: default
  namespace: default
spec:
  objectStorage:
    provider: Azure
    bucket: upbound-backups
    config:
      storage_account: upbackupstore
      container: upbound-backups
      endpoint: blob.core.windows.net
    credentials:
      source: Secret
      secretRef:
        name: bucket-creds
        key: creds
```

This example assumes you've already created an Azure storage account called `upbackupstore` and blob container `upbound-backups`. The storage account key to access the blob should exist in a secret in the same namespace as the Shared Backup Config.

#### GCP as a storage provider

:::important
For Cloud Spaces, static credentials are currently the only supported auth method.
:::

This example demonstrates how to use Google Cloud Storage as a storage provider for your backups:

```yaml
apiVersion: spaces.upbound.io/v1alpha1
kind: SharedBackupConfig
metadata:
  name: default
  namespace: default
spec:
  objectStorage:
    provider: GCP
    bucket: spaces-backup-bucket
    credentials:
      source: Secret
      secretRef:
        name: bucket-creds
        key: creds
```

This example assumes you've already created a Cloud Storage bucket called "spaces-backup-bucket" and a service account with access to this bucket. The key file should exist in a secret in the same namespace as the Shared Backup Config.

## Configure a Shared Backup Schedule

[SharedBackupSchedule][sharedbackupschedule] is a [group-scoped][group-scoped-1] resource. You should create them in a group containing one or more control planes. This resource defines a backup schedule for control planes within its corresponding group.
Below is an example of a Shared Backup Schedule that takes daily backups of all control planes having the `environment: production` label:

```yaml
apiVersion: spaces.upbound.io/v1alpha1
kind: SharedBackupSchedule
metadata:
  name: daily-schedule
  namespace: default
spec:
  schedule: "@daily"
  configRef:
    kind: SharedBackupConfig
    name: default
  controlPlaneSelector:
    labelSelectors:
      - matchLabels:
          environment: production
```

### Define a schedule

The `spec.schedule` field is a [Cron-formatted][cron-formatted] string. Some common examples are below:

| Entry | Description |
| ----------------- | ------------------------------------------------------------------------------------------------- |
| `@hourly` | Run once an hour. |
| `@daily` | Run once a day. |
| `@weekly` | Run once a week. |
| `0 0/4 * * *` | Run every 4 hours. |
| `0/15 * * * 1-5` | Run every fifteenth minute on Monday through Friday. |
| `@every 1h30m10s` | Run every 1 hour, 30 minutes, and 10 seconds. Hour is the largest measurement of time for @every. |

### Exclude resources from the backup

The `spec.excludedResources` field is an array of resource names to exclude from each backup.

```yaml
apiVersion: spaces.upbound.io/v1alpha1
kind: SharedBackupSchedule
metadata:
  name: daily-schedule
spec:
  excludedResources:
    - "xclusters.aws.platformref.upbound.io"
    - "xdatabase.aws.platformref.upbound.io"
    - "xrolepolicyattachment.iam.aws.crossplane.io"
```

:::warning
You must specify resource names in lowercase "resource.group" format (for example, `xclusters.aws.platformref.upbound.io`). Using only the resource kind (for example, `XCluster`) isn't supported.
:::

### Suspend a schedule

Use the `spec.suspend` field to suspend the schedule. A suspended schedule creates no new backups but allows running backups to complete.

```yaml
apiVersion: spaces.upbound.io/v1alpha1
kind: SharedBackupSchedule
metadata:
  name: daily-schedule
spec:
  suspend: true
```

### Set the time to live

Set the `spec.ttl` field to define the time to live for the backup. After this time, the backup is eligible for garbage collection. If this field isn't set, the backup isn't garbage collected. The time to live is a duration, for example, `168h` for 7 days.

```yaml
apiVersion: spaces.upbound.io/v1alpha1
kind: SharedBackupSchedule
metadata:
  name: daily-schedule
spec:
  ttl: 168h # Backup is garbage collected after 7 days
```

:::tip
By default, this setting doesn't delete uploaded files. Review the next section to define the deletion policy.
:::

### Define the deletion policy

Set the `spec.deletionPolicy` to define backup deletion actions, including the deletion of the backup file from the bucket. The deletion policy value defaults to `Orphan`. Set it to `Delete` to remove uploaded files in the bucket. For more information on the backup and restore process, review the [Spaces API documentation][spaces-api-documentation].

```yaml
apiVersion: spaces.upbound.io/v1alpha1
kind: SharedBackupSchedule
metadata:
  name: daily-schedule
spec:
  ttl: 168h # Backup is garbage collected after 7 days
  deletionPolicy: Delete # Defaults to Orphan
```

### Garbage collect backups when the schedule gets deleted

Set `spec.useOwnerReferencesInBackup` to `true` to garbage collect a schedule's associated backups when the schedule itself gets deleted, as the sketch below shows.
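A minimal sketch (the field sits at the top level of `spec`, alongside the other options shown above):

```yaml
apiVersion: spaces.upbound.io/v1alpha1
kind: SharedBackupSchedule
metadata:
  name: daily-schedule
spec:
  # When true, backups created by this schedule are garbage
  # collected together with the schedule itself.
  useOwnerReferencesInBackup: true
```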
### Control plane selection

To configure which control planes in a group you want to create a backup schedule for, use the `spec.controlPlaneSelector` field. You can either use `labelSelectors` or the `names` of a control plane directly. A control plane matches if any of the label selectors match.

This example matches all control planes in the group that have `environment: production` as a label:

```yaml
apiVersion: spaces.upbound.io/v1alpha1
kind: SharedBackupSchedule
metadata:
  name: my-backup-schedule
spec:
  controlPlaneSelector:
    labelSelectors:
      - matchLabels:
          environment: production
```

You can use the more complex `matchExpressions` to match labels based on an expression. This example matches control planes that have the label `environment: production` or `environment: staging`:

```yaml
apiVersion: spaces.upbound.io/v1alpha1
kind: SharedBackupSchedule
metadata:
  name: my-backup-schedule
spec:
  controlPlaneSelector:
    labelSelectors:
      - matchExpressions:
          - { key: environment, operator: In, values: [production,staging] }
```

You can also specify the names of control planes directly:

```yaml
apiVersion: spaces.upbound.io/v1alpha1
kind: SharedBackupSchedule
metadata:
  name: my-backup-schedule
spec:
  controlPlaneSelector:
    names:
      - controlplane-dev
      - controlplane-staging
      - controlplane-prod
```

## Configure a Shared Backup

[SharedBackup][sharedbackup] is a [group-scoped][group-scoped-2] resource. You should create them in a group containing one or more control planes. This resource causes backups to occur for control planes within its corresponding group.

Below is an example of a Shared Backup that takes a backup of all control planes having the `environment: production` label:

```yaml
apiVersion: spaces.upbound.io/v1alpha1
kind: SharedBackup
metadata:
  name: my-backup
  namespace: default
spec:
  configRef:
    kind: SharedBackupConfig
    name: default
  controlPlaneSelector:
    labelSelectors:
      - matchLabels:
          environment: production
```

### Exclude resources from the backup

The `spec.excludedResources` field is an array of resource names to exclude from each backup.

```yaml
apiVersion: spaces.upbound.io/v1alpha1
kind: SharedBackup
metadata:
  name: my-backup
spec:
  excludedResources:
    - "xclusters.aws.platformref.upbound.io"
    - "xdatabase.aws.platformref.upbound.io"
    - "xrolepolicyattachment.iam.aws.crossplane.io"
```

:::warning
You must specify resource names in lowercase "resource.group" format (for example, `xclusters.aws.platformref.upbound.io`). Using only the resource kind (for example, `XCluster`) isn't supported.
:::

### Set the time to live

Set the `spec.ttl` field to define the time to live for the backup. After this time, the backup is eligible for garbage collection. If this field isn't set, the backup isn't garbage collected. The time to live is a duration, for example, `168h` for 7 days.

```yaml
apiVersion: spaces.upbound.io/v1alpha1
kind: SharedBackup
metadata:
  name: my-backup
spec:
  ttl: 168h # Backup is garbage collected after 7 days
```

### Garbage collect backups on Shared Backup deletion

Set `spec.useOwnerReferencesInBackup` to `true` to garbage collect associated backups when the shared backup itself gets deleted, as the sketch below shows.
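A minimal sketch, mirroring the schedule example earlier:

```yaml
apiVersion: spaces.upbound.io/v1alpha1
kind: SharedBackup
metadata:
  name: my-backup
spec:
  # When true, the backups this shared backup creates are garbage
  # collected when the shared backup itself is deleted.
  useOwnerReferencesInBackup: true
```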
### Control plane selection

To configure which control planes in a group you want to create a backup for, use the `spec.controlPlaneSelector` field. You can either use `labelSelectors` or the `names` of a control plane directly. A control plane matches if any of the label selectors match.

This example matches all control planes in the group that have `environment: production` as a label:

```yaml
apiVersion: spaces.upbound.io/v1alpha1
kind: SharedBackup
metadata:
  name: my-backup
spec:
  controlPlaneSelector:
    labelSelectors:
      - matchLabels:
          environment: production
```

You can use the more complex `matchExpressions` to match labels based on an expression. This example matches control planes that have the label `environment: production` or `environment: staging`:

```yaml
apiVersion: spaces.upbound.io/v1alpha1
kind: SharedBackup
metadata:
  name: my-backup
spec:
  controlPlaneSelector:
    labelSelectors:
      - matchExpressions:
          - { key: environment, operator: In, values: [production,staging] }
```

You can also specify the names of control planes directly:

```yaml
apiVersion: spaces.upbound.io/v1alpha1
kind: SharedBackup
metadata:
  name: my-backup
spec:
  controlPlaneSelector:
    names:
      - controlplane-dev
      - controlplane-staging
      - controlplane-prod
```

## Create a manual backup

[Backup][backup] is a [group-scoped][group-scoped-3] resource that causes a single backup to occur for a control plane in its corresponding group.

Below is an example of a manual Backup of a control plane:

```yaml
apiVersion: spaces.upbound.io/v1alpha1
kind: Backup
metadata:
  name: my-backup
  namespace: default
spec:
  configRef:
    kind: SharedBackupConfig
    name: default
  controlPlane: my-awesome-ctp
  deletionPolicy: Delete
```

The backup's `spec.deletionPolicy` defines backup deletion actions, including the deletion of the backup file from the bucket. The deletion policy value defaults to `Orphan`. Set it to `Delete` to remove uploaded files in the bucket. For more information on the backup and restore process, review the [Spaces API documentation][spaces-api-documentation-1].

### Choose a control plane to back up

The `spec.controlPlane` field defines which control plane to execute a backup against.

```yaml
apiVersion: spaces.upbound.io/v1alpha1
kind: Backup
metadata:
  name: my-backup
  namespace: default
spec:
  controlPlane: my-awesome-ctp
```

If the control plane doesn't exist, the backup fails after multiple failed retry attempts.

### Exclude resources from the backup

The `spec.excludedResources` field is an array of resource names to exclude from the manual backup.

```yaml
apiVersion: spaces.upbound.io/v1alpha1
kind: Backup
metadata:
  name: my-backup
spec:
  excludedResources:
    - "xclusters.aws.platformref.upbound.io"
    - "xdatabase.aws.platformref.upbound.io"
    - "xrolepolicyattachment.iam.aws.crossplane.io"
```

:::warning
You must specify resource names in lowercase "resource.group" format (for example, `xclusters.aws.platformref.upbound.io`). Using only the resource kind (for example, `XCluster`) isn't supported.
:::

### Set the time to live

Set the `spec.ttl` field to define the time to live for the backup. After this time, the backup is eligible for garbage collection. If this field isn't set, the backup isn't garbage collected. The time to live is a duration, for example, `168h` for 7 days.
```yaml
apiVersion: spaces.upbound.io/v1alpha1
kind: Backup
metadata:
  name: my-backup
spec:
  ttl: 168h # Backup is garbage collected after 7 days
```

## Restore a control plane from a backup

You can restore a control plane's state from a backup. Below is an example of creating a new control plane from a previous backup called `restore-me`:

```yaml
apiVersion: spaces.upbound.io/v1beta1
kind: ControlPlane
metadata:
  name: my-awesome-restored-ctp
  namespace: default
spec:
  restore:
    source:
      kind: Backup
      name: restore-me
```

[group-scoped]: /spaces/concepts/groups
[group-scoped-1]: /spaces/concepts/groups
[group-scoped-2]: /spaces/concepts/groups
[group-scoped-3]: /spaces/concepts/groups
[sharedbackupconfig]: /reference/apis/spaces-api/latest
[thanos-object-storage]: https://thanos.io/tip/thanos/storage.md/
[sharedbackupschedule]: /reference/apis/spaces-api/latest
[cron-formatted]: https://en.wikipedia.org/wiki/Cron
[spaces-api-documentation]: /reference/apis/spaces-api/v1_9
[sharedbackup]: /reference/apis/spaces-api/latest
[backup]: /reference/apis/spaces-api/latest
[spaces-api-documentation-1]: /reference/apis/spaces-api/v1_9

diff --git a/spaces_versioned_docs/version-v1.9/howtos/cloud-spaces/_category_.json b/spaces_versioned_docs/version-v1.9/howtos/cloud-spaces/_category_.json
deleted file mode 100644
index 1e1869a38..000000000
--- a/spaces_versioned_docs/version-v1.9/howtos/cloud-spaces/_category_.json
+++ /dev/null
@@ -1,10 +0,0 @@
{
  "label": "Cloud Spaces",
  "position": 1,
  "collapsed": true,
  "customProps": {
    "plan": "standard"
  }
}

diff --git a/spaces_versioned_docs/version-v1.9/howtos/cloud-spaces/dedicated-spaces-deployment.md b/spaces_versioned_docs/version-v1.9/howtos/cloud-spaces/dedicated-spaces-deployment.md
deleted file mode 100644
index ebad9493e..000000000
--- a/spaces_versioned_docs/version-v1.9/howtos/cloud-spaces/dedicated-spaces-deployment.md
+++ /dev/null
@@ -1,33 +0,0 @@
---
title: Dedicated Spaces
sidebar_position: 4
description: A guide to Upbound Dedicated Spaces
plan: business
---

## Benefits

Dedicated Spaces offer the following benefits:

- **Single-tenancy.** A control plane space where Upbound guarantees you're the only tenant operating in the environment.
- **Connectivity to your private network.** Establish secure network connections between your Dedicated Cloud Space running in Upbound and your own resources behind your private network.
- **Reduced overhead.** Offload day-to-day operational burdens to Upbound while focusing on your job of building your platform.

## Architecture

A Dedicated Space is a deployment of the Upbound Spaces software inside an Upbound-controlled cloud account and network. The control planes you run in it are dedicated to your organization.

The diagram below illustrates the high-level architecture of Upbound Dedicated Spaces:

![Upbound Managed Spaces arch](/img/managed-arch-gcp.png)

## How to get access to Dedicated Spaces

If you have an interest in Upbound Dedicated Spaces, contact [Upbound][contact-us]. We can chat more about your requirements and see if Dedicated Spaces are a good fit for you.
[contact-us]: https://www.upbound.io/contact-us
[managed-space]: /spaces/howtos/self-hosted/managed-spaces-deployment

diff --git a/spaces_versioned_docs/version-v1.9/howtos/cloud-spaces/gitops-on-upbound.md b/spaces_versioned_docs/version-v1.9/howtos/cloud-spaces/gitops-on-upbound.md
deleted file mode 100644
index fa59a8dce..000000000
--- a/spaces_versioned_docs/version-v1.9/howtos/cloud-spaces/gitops-on-upbound.md
+++ /dev/null
@@ -1,318 +0,0 @@
---
title: GitOps with Upbound Control Planes
sidebar_position: 80
description: An introduction to doing GitOps with control planes on Upbound Cloud Spaces
plan: "business"
---

:::info Deployment Model
This guide applies to **Upbound Cloud Spaces** (Dedicated and Managed Spaces). For self-hosted Spaces deployments, see [GitOps with ArgoCD in Self-Hosted Spaces](/spaces/howtos/self-hosted/gitops-with-argocd/).
:::

GitOps is an approach for managing a system by declaratively describing desired resources' configurations in Git and using controllers to realize the desired state. Upbound control planes are compatible with this pattern, and it's strongly recommended you integrate GitOps in the platforms you build on Upbound.

## Integrate with Argo CD

[Argo CD][argo-cd] is a project in the Kubernetes ecosystem commonly used for GitOps. You can use it in tandem with Upbound control planes to achieve GitOps flows. The sections below explain how to integrate these tools with Upbound.

### Generate a kubeconfig for your control plane

Use the up CLI to [generate a kubeconfig][generate-a-kubeconfig] for your control plane, where the context path has the form `<org>/<space>/<group>/<control-plane>`:

```bash
up ctx <org>/<space>/<group>/<control-plane> -f - > context.yaml
```

### Create an API token

You need a personal access token (PAT). You create PATs on a per-user basis in the Upbound Console. Go to [My Account - API tokens][my-account-api-tokens] and select Create New Token. Give the token a name and save the secret value somewhere safe.

### Add the up CLI init container to Argo

Create a new file called `up-plugin-values.yaml` and paste the following YAML:

```yaml
controller:
  volumes:
    - name: up-plugin
      emptyDir: {}
    - name: up-home
      emptyDir: {}

  volumeMounts:
    - name: up-plugin
      mountPath: /usr/local/bin/up
      subPath: up
    - name: up-home
      mountPath: /home/argocd/.up

  initContainers:
    - name: up-plugin
      image: xpkg.upbound.io/upbound/up-cli:v0.39.0
      command: ["cp"]
      args:
        - /usr/local/bin/up
        - /plugin/up
      volumeMounts:
        - name: up-plugin
          mountPath: /plugin

server:
  volumes:
    - name: up-plugin
      emptyDir: {}
    - name: up-home
      emptyDir: {}

  volumeMounts:
    - name: up-plugin
      mountPath: /usr/local/bin/up
      subPath: up
    - name: up-home
      mountPath: /home/argocd/.up

  initContainers:
    - name: up-plugin
      image: xpkg.upbound.io/upbound/up-cli:v0.39.0
      command: ["cp"]
      args:
        - /usr/local/bin/up
        - /plugin/up
      volumeMounts:
        - name: up-plugin
          mountPath: /plugin
```

### Install or upgrade Argo using the values file

Install or upgrade Argo via Helm, including the values from the `up-plugin-values.yaml` file:

```bash
helm upgrade --install -n argocd -f up-plugin-values.yaml --reuse-values argocd argo/argo-cd
```

### Configure Argo CD

To configure Argo CD for annotation resource tracking, edit the Argo CD ConfigMap in the Argo CD namespace. Add `application.resourceTrackingMethod: annotation` to the data section as below. This configuration turns off Argo CD auto pruning, preventing the deletion of Crossplane resources.
Next, configure the [auto respect RBAC for the Argo CD controller][auto-respect-rbac-for-the-argo-cd-controller]. By default, Argo CD attempts to discover some Kubernetes resource types that don't exist in a control plane. You must configure Argo CD to respect the cluster's RBAC rules so that Argo CD can sync. Add `resource.respectRBAC: normal` to the data section as below.

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: argocd-cm
data:
  ...
  application.resourceTrackingMethod: annotation
  resource.respectRBAC: normal
```

:::tip
The `resource.respectRBAC` configuration above tells Argo to respect RBAC for _all_ cluster contexts. If you're using an Argo CD instance to manage more than only control planes, you should consider changing the `clusters` string match for the configuration to apply only to control planes. For example, if every control plane context name followed the convention of being named `controlplane-<name>`, you could set the string match to be `controlplane-*`.
:::

### Create a cluster context definition

Replace the variables and apply the following manifest to configure a new Argo cluster context definition.

To configure Argo for a control plane in a Connected Space, replace `stringData.server` with the ingress URL of the control plane. This URL is what's outputted when using `up ctx`.

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: my-control-plane
  namespace: argocd
  labels:
    argocd.argoproj.io/secret-type: cluster
type: Opaque
stringData:
  name: my-control-plane-context
  server: https://<space-host>.spaces.upbound.io/apis/spaces.upbound.io/v1beta1/namespaces/<group>/controlplanes/<control-plane>/k8s
  config: |
    {
      "execProviderConfig": {
        "apiVersion": "client.authentication.k8s.io/v1",
        "command": "up",
        "args": [ "org", "token" ],
        "env": {
          "ORGANIZATION": "<org>",
          "UP_TOKEN": "<token>"
        }
      },
      "tlsClientConfig": {
        "insecure": false,
        "caData": "<base64-encoded-ca-data>"
      }
    }
```

## GitOps for Upbound resources

Like any other cloud service, you can drive the lifecycle of Upbound Cloud resources with Crossplane. This lets you establish GitOps flows to declaratively create and manage:

- [control plane groups][control-plane-groups]
- [control planes][control-planes]
- [Upbound IAM resources][upbound-iam-resources]

Use a control plane installed with [provider-upbound][provider-upbound] and [provider-kubernetes][provider-kubernetes] to achieve this.

### Provider-upbound

[Provider-upbound][provider-upbound-2] is a Crossplane provider built by Upbound to interact with Upbound resources. Use _provider-upbound_ to declaratively create and manage the lifecycle of IAM resources and repositories:

- [Robots][robots] and their membership to teams
- [Teams][teams]
- [Repositories][repositories] and [permissions][permissions] on those repositories.

:::tip
This provider defines managed resources for control planes, their auth, and permissions. These resources are only applicable for customers who run in Upbound's **Legacy Spaces** control plane hosting environments. Customers should use provider-kubernetes, explained below, to manage the lifecycle of control planes with Crossplane.
:::

### Provider-kubernetes

[Provider-kubernetes][provider-kubernetes-3] is a Crossplane provider that defines an [Object][object] resource. Use _Objects_ as general-purpose resources to wrap _any_ Kubernetes resource for Crossplane to manage.

Upbound [Space APIs][space-apis] are Kube-like APIs and have implemented support for most Kubernetes-style API concepts.
You can use kubectl or any other Kubernetes-compatible tooling to interact with the API. This means you can use _provider-kubernetes_ to drive interactions with Space APIs.

:::warning
When interacting with a Cloud Space's API, the Kubernetes [watch][watch] feature **isn't implemented.** Argo CD requires _watch_ support to function as expected, meaning you can't point Argo directly at a Cloud Space until it's implemented.
:::

Use _provider-kubernetes_ to declaratively drive interactions with all [Space APIs][space-apis-1]. Wrap the desired API resource in an _Object_. See the example below for a control plane:

```yaml
apiVersion: kubernetes.crossplane.io/v1alpha2
kind: Object
metadata:
  name: my-controlplane
spec:
  forProvider:
    manifest:
      apiVersion: spaces.upbound.io/v1beta1
      kind: ControlPlane
      metadata:
        name: my-controlplane
        namespace: default
      spec:
        crossplane:
          autoUpgrade:
            channel: Rapid
```

[Control plane groups][control-plane-groups-2] are a special case because they technically map to an underlying Kubernetes namespace. You should create a `kind: Namespace` with the `spaces.upbound.io/group` label to create a control plane group in a Space. See the example below:

```yaml
apiVersion: kubernetes.crossplane.io/v1alpha2
kind: Object
metadata:
  name: group1
spec:
  forProvider:
    manifest:
      apiVersion: v1
      kind: Namespace
      metadata:
        name: group1
        labels:
          spaces.upbound.io/group: "true"
      spec: {}
```

### Configure auth for provider-kubernetes

Like any other Crossplane provider, _provider-kubernetes_ requires a valid [ProviderConfig][providerconfig] to authenticate with Upbound before interacting with its APIs. Follow the steps below to configure auth for a ProviderConfig on a control plane that you want to use to interact with Upbound resources.

1. Define an environment variable for the name of your Upbound org account. Use `up org list` to retrieve this value.
```shell
export UPBOUND_ACCOUNT="<your-org>"
```

2. Create a [personal access token][personal-access-token] and store it as an environment variable.
```shell
export UPBOUND_TOKEN="<your-token>"
```

3. Log on to Upbound.
```shell
up login
```

4. Create a kubeconfig for the desired Cloud Space instance you want to interact with.
```shell
export CONTROLPLANE_CONFIG=/tmp/controlplane-kubeconfig
KUBECONFIG=$CONTROLPLANE_CONFIG up ctx $UPBOUND_ACCOUNT/upbound-gcp-us-west-1 # Replace this path with whichever Cloud Space you want to communicate with.
```

5. On the control plane you want to use to interact with Upbound resources, create a secret containing the credentials:
```shell
kubectl -n crossplane-system create secret generic cluster-config --from-file=kubeconfig=$CONTROLPLANE_CONFIG
kubectl -n crossplane-system create secret generic upbound-credentials --from-literal=token=$UPBOUND_TOKEN
```

6. Create a ProviderConfig that references the credentials created in the prior step. Create this resource in your control plane:
```yaml
apiVersion: kubernetes.crossplane.io/v1alpha1
kind: ProviderConfig
metadata:
  name: default
spec:
  credentials:
    source: Secret
    secretRef:
      namespace: crossplane-system
      name: cluster-config
      key: kubeconfig
  identity:
    type: UpboundTokens
    source: Secret
    secretRef:
      name: upbound-credentials
      namespace: crossplane-system
      key: token
```

You can now create _Objects_ in the control plane that wrap Space APIs.
[generate-a-kubeconfig]: /manuals/cli/concepts/contexts
[control-plane-groups]: /spaces/concepts/groups
[control-planes]: /spaces/concepts/control-planes
[upbound-iam-resources]: /manuals/platform/concepts/identity-management
[space-apis]: /reference/apis/spaces-api/v1_9
[space-apis-1]: /reference/apis/spaces-api/v1_9
[control-plane-groups-2]: /spaces/concepts/groups

[argo-cd]: https://argo-cd.readthedocs.io/en/stable/
[my-account-api-tokens]: https://accounts.upbound.io/settings/tokens
[auto-respect-rbac-for-the-argo-cd-controller]: https://argo-cd.readthedocs.io/en/stable/operator-manual/declarative-setup/#auto-respect-rbac-for-controller
[spec-writeconnectionsecrettoref]: /reference/apis/spaces-api/latest
[auto-respect-rbac-for-the-argo-cd-controller-1]: https://argo-cd.readthedocs.io/en/stable/operator-manual/declarative-setup/#auto-respect-rbac-for-controller
[provider-upbound]: https://marketplace.upbound.io/providers/upbound/provider-upbound
[provider-kubernetes]: https://marketplace.upbound.io/providers/upbound/provider-kubernetes
[provider-upbound-2]: https://marketplace.upbound.io/providers/upbound/provider-upbound
[robots]: https://marketplace.upbound.io/providers/upbound/provider-upbound/v0.8.0/resources/iam.upbound.io/Robot/v1alpha1
[teams]: https://marketplace.upbound.io/providers/upbound/provider-upbound/v0.8.0/resources/iam.upbound.io/Team/v1alpha1
[repositories]: https://marketplace.upbound.io/providers/upbound/provider-upbound/v0.8.0/resources/repository.upbound.io/Repository/v1alpha1
[permissions]: https://marketplace.upbound.io/providers/upbound/provider-upbound/v0.8.0/resources/repository.upbound.io/Permission/v1alpha1
[provider-kubernetes-3]: https://marketplace.upbound.io/providers/upbound/provider-kubernetes
[object]: https://marketplace.upbound.io/providers/upbound/provider-kubernetes/v0.17.0/resources/kubernetes.crossplane.io/Object/v1alpha2
[watch]: https://kubernetes.io/docs/reference/using-api/api-concepts/#watch-bookmarks
[providerconfig]: https://marketplace.upbound.io/providers/upbound/provider-kubernetes/v0.17.0/resources/kubernetes.crossplane.io/ProviderConfig/v1alpha1
[personal-access-token]: https://accounts.upbound.io/settings/tokens

diff --git a/spaces_versioned_docs/version-v1.9/howtos/control-plane-topologies.md b/spaces_versioned_docs/version-v1.9/howtos/control-plane-topologies.md
deleted file mode 100644
index 9020e5a41..000000000
--- a/spaces_versioned_docs/version-v1.9/howtos/control-plane-topologies.md
+++ /dev/null
@@ -1,566 +0,0 @@
---
title: Control Plane Topologies
sidebar_position: 15
description: Configure scheduling of composites to remote control planes
---

:::info API Version Information
This guide is for the Control Plane Topology feature, which is in **private preview**. For interested customers with access to this feature, it applies to v1.12+.

For related API specifications, see the [Spaces API Reference](../../../reference/apis/spaces-api/).
:::

:::important
This feature is in private preview for select customers in Upbound Spaces. If you're interested in this deployment mode, please [contact us](https://www.upbound.io/support/contact).
:::

Upbound's _Control Plane Topology_ feature lets you build and deploy a platform of multiple control planes. These control planes work together for a unified platform experience.
With the _Topology_ feature, you can install resource APIs that are reconciled by other control planes and configure the routing that occurs between control planes. You can also build compositions that reference other resources running on your control plane or elsewhere in Upbound.

This guide explains how to use Control Plane Topology APIs to install and configure remote APIs, and to build powerful compositions that reference other resources.

## Benefits

The Control Plane Topology feature provides the following benefits:

* Decouple your platform architecture into independent offerings to improve your platform's software development lifecycle.
* Install composite APIs from Configurations as CRDs which are fulfilled and reconciled by other control planes.
* Route APIs to other control planes by configuring an _Environment_ resource, which defines a set of routable dimensions.

## How it works

Imagine the scenario where you want to let a user reference a subnet when creating a database instance. To your control plane, the `kind: Database` and `kind: Subnet` are independent resources. To you as the composition author, these resources have an important relationship. It may be that:

- you don't want your user to ever be able to create a database without specifying a subnet.
- you want to let them create a subnet when they create the database, if it doesn't exist.
- you want to allow them to reuse a subnet that got created elsewhere or is shared by another user.

In each of these scenarios, you must resort to writing complex composition logic to handle each case. The problem is compounded when the resource exists in a context separate from the current control plane's context. Imagine a scenario where one control plane manages Database resources and a second control plane manages networking resources. With the _Topology_ feature, you can offload these concerns to Upbound machinery.

![Control Plane Topology feature arch](/img/topology-arch.png)

## Prerequisites

Enable the Control Plane Topology feature in the Space you plan to run your control plane in:

- Cloud Spaces: Not available yet
- Connected Spaces: Space administrator must enable this feature
- Disconnected Spaces: Space administrator must enable this feature

## Compose resources with _ReferencedObjects_

_ReferencedObject_ is a resource type available in an Upbound control plane that lets you reference other Kubernetes resources in Upbound.

:::tip
This feature is useful for composing resources that exist in a remote context, like another control plane. You can also use _ReferencedObjects_ to resolve references to any other Kubernetes object in the current control plane context. This could be a secret, another Crossplane resource, or more.
:::

### Declare the resource reference in your XRD

To compose a _ReferencedObject_, you should start by adding a resource reference in your Composite Resource Definition (XRD). The convention for the resource reference follows the shape shown below:

```yaml
<kind>Ref:
  type: object
  properties:
    apiVersion:
      type: string
      default: "<apiVersion>"
      enum: [ "<apiVersion>" ]
    kind:
      type: string
      default: "<kind>"
      enum: [ "<kind>" ]
    grants:
      type: array
      default: [ "Observe" ]
      items:
        type: string
        enum: [ "Observe", "Create", "Update", "Delete", "*" ]
    name:
      type: string
    namespace:
      type: string
  required:
    - name
```

The `<kind>` in `<kind>Ref` should be the kind of resource you want to reference.
The `apiVersion` and `kind` should be the associated API version and kind of the resource you want to reference. - -The `name` and `namespace` strings are inputs that let your users specify the resource instance. - -#### Grants - -The `grants` field is a special array that lets you give users the power to influence the behavior of the referenced resource. You can configure which of the available grants you let your user select and which it defaults to. Similar in behavior as [Crossplane management policies][crossplane-management-policies], each grant value does the following: - -- **Observe:** The composite may observe the state of the referenced resource. -- **Create:** The composite may create the referenced resource if it doesn't exist. -- **Update:** The composite may update the referenced resource. -- **Delete:** The composite may delete the referenced resource. -- **\*:** The composite has full control over the referenced resource. - -Here are some examples that show how it looks in practice: - -
- -Show example for defining the reference to another composite resource - -```yaml -apiVersion: apiextensions.crossplane.io/v1 -kind: CompositeResourceDefinition -metadata: - name: xsqlinstances.database.platform.upbound.io -spec: - type: object - properties: - parameters: - type: object - properties: - networkRef: - type: object - properties: - apiVersion: - type: string - default: "networking.platform.upbound.io" - enum: [ "networking.platform.upbound.io" ] - grants: - type: array - default: [ "Observe" ] - items: - type: string - enum: [ "Observe" ] - kind: - type: string - default: "Network" - enum: [ "Network" ] - name: - type: string - namespace: - type: string - required: - - name -``` - -
- - -
-Show example for defining the reference to a secret -```yaml -apiVersion: apiextensions.crossplane.io/v1 -kind: CompositeResourceDefinition -metadata: - name: xsqlinstances.database.platform.upbound.io -spec: - type: object - properties: - parameters: - type: object - properties: - secretRef: - type: object - properties: - apiVersion: - type: string - default: "v1" - enum: [ "v1" ] - grants: - type: array - default: [ "Observe" ] - items: - type: string - enum: [ "Observe", "Create", "Update", "Delete", "*" ] - kind: - type: string - default: "Secret" - enum: [ "Secret" ] - name: - type: string - namespace: - type: string - required: - - name -``` -
- -### Manually add the jsonPath - -:::important -This step is a known limitation of the preview. We're working on tooling that -removes the need for authors to do this step. -::: - -During the preview timeframe of this feature, you must add an annotation by hand -to the XRD. In your XRD's `metadata.annotations`, set the -`references.upbound.io/schema` annotation. It should be a JSON string in the -following format: - -```json -{ - "apiVersion": "references.upbound.io/v1alpha1", - "kind": "ReferenceSchema", - "references": [ - { - "jsonPath": ".spec.parameters.secretRef", - "kinds": [ - { - "apiVersion": "v1", - "kind": "Secret" - } - ] - } - ] -} -``` - -Flatten this JSON into a string and set the annotation on your XRD. View the -example below for an illustration: - -
-Show example setting the references.upbound.io/schema annotation -```yaml -apiVersion: apiextensions.crossplane.io/v1 -kind: CompositeResourceDefinition -metadata: - name: xthings.networking.acme.com - annotations: - references.upbound.io/schema: '{"apiVersion":"references.upbound.io/v1alpha1","kind":"ReferenceSchema","references":[{"jsonPath":".spec.secretRef","kinds":[{"apiVersion":"v1","kind":"Secret"}]},{"jsonPath":".spec.configMapRef","kinds":[{"apiVersion":"v1","kind":"ConfigMap"}]}]}' -``` -
- -
-Show example for setting multiples references in the references.upbound.io/schema annotation -```yaml -apiVersion: apiextensions.crossplane.io/v1 -kind: CompositeResourceDefinition -metadata: - name: xthings.networking.acme.com - annotations: - references.upbound.io/schema: '{"apiVersion":"references.upbound.io/v1alpha1","kind":"ReferenceSchema","references":[{"jsonPath":".spec.parameters.secretRef","kinds":[{"apiVersion":"v1","kind":"Secret"}]},{"jsonPath":".spec.parameters.configMapRef","kinds":[{"apiVersion":"v1","kind":"ConfigMap"}]}]}' -``` -
You can use a VSCode extension like [vscode-pretty-json][vscode-pretty-json] to make this task easier.

### Compose a _ReferencedObject_

To pair with the resource reference declared in your XRD, you must compose the referenced resource. Use the _ReferencedObject_ resource type to bring the resource into your composition. _ReferencedObject_ has the following schema:

```yaml
apiVersion: references.upbound.io/v1alpha1
kind: ReferencedObject
spec:
  managementPolicies:
    - Observe
  deletionPolicy: Orphan
  composite:
    apiVersion: <composite-apiVersion>
    kind: <composite-kind>
    name: <composite-name>
    jsonPath: .spec.parameters.secretRef
```

The `spec.composite.apiVersion` and `spec.composite.kind` should match the API version and kind of the `compositeTypeRef` declared in your composition. The `spec.composite.name` should be the name of the composite resource instance.

The `spec.composite.jsonPath` should be the path to the root of the resource ref you declared in your XRD.

<details>
-Show example for composing a resource reference to a secret - -```yaml -apiVersion: apiextensions.crossplane.io/v1 -kind: Composition -metadata: - name: demo-composition -spec: - compositeTypeRef: - apiVersion: networking.acme.com/v1alpha1 - kind: XThing - mode: Pipeline - pipeline: - - step: patch-and-transform - functionRef: - name: crossplane-contrib-function-patch-and-transform - input: - apiVersion: pt.fn.crossplane.io/v1beta1 - kind: Resources - resources: - - name: secret-ref-object - base: - apiVersion: references.upbound.io/v1alpha1 - kind: ReferencedObject - spec: - managementPolicies: - - Observe - deletionPolicy: Orphan - composite: - apiVersion: networking.acme.com/v1alpha1 - kind: XThing - name: TO_BE_PATCHED - jsonPath: .spec.parameters.secretRef - patches: - - type: FromCompositeFieldPath - fromFieldPath: metadata.name - toFieldPath: spec.composite.name -``` -
When you declare a resource reference in your XRD, Upbound handles resolution of the desired resource.

## Deploy APIs

To configure routing resource requests between control planes, you need to deploy APIs in at least two control planes.

### Deploy into a service-level control plane

Package the APIs you build into a Configuration package and deploy it on a control plane in an Upbound Space. In Upbound, it's common to refer to the control plane where the Configuration package is deployed as a **service-level control plane**. This control plane runs the controllers that process the API requests and provision underlying resources. In a later section, you learn how you can use _Topology_ features to [configure routing][configure-routing].

### Deploy as Remote APIs on a platform control plane

You should use the same package source as deployed in the **service-level control planes**, but this time deploy the Configuration in a separate control plane as a _RemoteConfiguration_. The _RemoteConfiguration_ installs Kubernetes CustomResourceDefinitions for the APIs defined in the Configuration package, but no controllers get deployed.

### Install a _RemoteConfiguration_

_RemoteConfiguration_ is a resource type available in Upbound managed control planes that acts like a Crossplane [Configuration][configuration] package. Unlike standard Crossplane Configurations, which install XRDs, compositions, and functions into a desired control plane, _RemoteConfigurations_ install only the CRDs for claimable composite resource types.

#### Install directly

Install a _RemoteConfiguration_ by defining the following and applying it to your control plane:

```yaml
apiVersion: pkg.upbound.io/v1alpha1
kind: RemoteConfiguration
metadata:
  name: <name>
spec:
  package: <package-source>
```

#### Declare as a project dependency

You can declare _RemoteConfigurations_ as dependencies in your control plane's [project file][project-file]. Use the up CLI to add the dependency, providing the `--remote` flag:

```bash
up dep add <package> --remote
```

This command adds a declaration in the `spec.apiDependencies` stanza of your project's `upbound.yaml` as demonstrated below:

```yaml
apiVersion: meta.dev.upbound.io/v1alpha1
kind: Project
metadata:
  name: service-controlplane
spec:
  apiDependencies:
    - configuration: xpkg.upbound.io/upbound/remote-configuration
      version: '>=v0.0.0'
  dependsOn:
    - provider: xpkg.upbound.io/upbound/provider-kubernetes
      version: '>=v0.0.0'
```

Like a Configuration, a _RemoteConfigurationRevision_ gets created when the package gets installed on a control plane. Unlike Configurations, XRDs and compositions **don't** get installed by a _RemoteConfiguration_. Only the CRDs for claimable composite types get installed, and Crossplane thereafter manages their lifecycle. You can tell when a CRD gets installed by a _RemoteConfiguration_ because it has the `internal.scheduling.upbound.io/remote: "true"` label:

```yaml
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: things.networking.acme.com
  labels:
    internal.scheduling.upbound.io/remote: "true"
```

## Use an _Environment_ to route resources

_Environment_ is a resource type available in Upbound control planes that works in tandem with resources installed by _RemoteConfigurations_. _Environment_ is a namespace-scoped resource that lets you configure how to route remote resources to other control planes by a set of user-defined dimensions.
### Define a routing dimension

To establish a routing dimension between two control planes, you must do two things:

1. Label the service control plane with the name and value of a dimension.
2. Configure an environment on another control plane with a dimension matching the field and value of the service control plane.

The example below demonstrates the creation of a service control plane with a `region` dimension:

```yaml
apiVersion: spaces.upbound.io/v1beta1
kind: ControlPlane
metadata:
  labels:
    dimension.scheduling.upbound.io/region: "us-east-1"
  name: prod-1
  namespace: default
spec: {}
```

Upbound's Spaces controller keeps an inventory of all declared dimensions and listens for control planes to route to them.

### Create an _Environment_

Next, create an _Environment_ on a separate control plane, referencing the dimension from before. The example below demonstrates routing all remote resource requests in the `default` namespace of the control plane based on a single `region` dimension:

```yaml
apiVersion: scheduling.upbound.io/v1alpha1
kind: Environment
metadata:
  name: default
  namespace: default
spec:
  dimensions:
    region: us-east-1
```

You can specify whichever dimensions you want. The example below demonstrates multiple dimensions:

```yaml
apiVersion: scheduling.upbound.io/v1alpha1
kind: Environment
metadata:
  name: default
  namespace: default
spec:
  dimensions:
    region: us-east-1
    env: prod
    offering: databases
```

For the routing controller to match, _all_ dimensions must match for a given service control plane.

You can specify dimension overrides on a per-resource-group basis. This lets you configure default routing rules for a given _Environment_ and override routing on a per-offering basis.

```yaml
apiVersion: scheduling.upbound.io/v1alpha1
kind: Environment
metadata:
  name: default
  namespace: default
spec:
  dimensions:
    region: us-east-1
  resourceGroups:
    - name: database.platform.upbound.io # database
      dimensions:
        region: "us-east-1"
        env: "prod"
        offering: "databases"
    - name: networking.platform.upbound.io # networks
      dimensions:
        region: "us-east-1"
        env: "prod"
        offering: "networks"
```

### Confirm the configured route

After you create an _Environment_ on a control plane, the selected routes get reported in the _Environment's_ `.status.resourceGroups`. This is illustrated below:

```yaml
apiVersion: scheduling.upbound.io/v1alpha1
kind: Environment
metadata:
  name: default
...
status:
  resourceGroups:
    - name: database.platform.upbound.io # database
      proposed:
        controlPlane: ctp-1
        group: default
        space: upbound-gcp-us-central1
        dimensions:
          region: "us-east-1"
          env: "prod"
          offering: "databases"
```

If you don't see a response in the `.status.resourceGroups`, this indicates a match wasn't found or an error occurred while establishing routing.

:::tip
There's no limit to the number of control planes you can route to. You can also stack routing and form your own topology of control planes, with multiple layers of routing.
:::

### Limitations

Routing from one control plane to another is currently scoped to control planes that exist in a single Space. You can't route resource requests to control planes that exist across a Space boundary.
[project-file]: /manuals/cli/howtos/project
[contact-us]: https://www.upbound.io/usage/support/contact
[crossplane-management-policies]: https://docs.crossplane.io/latest/managed-resources/managed-resources/#managementpolicies
[vscode-pretty-json]: https://marketplace.visualstudio.com/items?itemName=chrismeyers.vscode-pretty-json
[configure-routing]: #use-an-environment-to-route-resources
[configuration]: https://docs.crossplane.io/latest/packages/providers

diff --git a/spaces_versioned_docs/version-v1.9/howtos/ctp-connector.md b/spaces_versioned_docs/version-v1.9/howtos/ctp-connector.md
deleted file mode 100644
index b2cc48c49..000000000
--- a/spaces_versioned_docs/version-v1.9/howtos/ctp-connector.md
+++ /dev/null
@@ -1,508 +0,0 @@
---
title: Control Plane Connector
weight: 80
description: A guide for how to connect a Kubernetes app cluster to a control plane in Upbound using the Control Plane connector feature
plan: "standard"
---

:::info API Version Information
This guide applies to all supported versions (v1.9-v1.14+). Control Plane Connector functionality is consistent across versions.

For related API specifications and resources, see the [Spaces API Reference](../../../reference/apis/spaces-api/).
:::

Control Plane Connector connects arbitrary Kubernetes application clusters outside Upbound Spaces to your control planes running in Upbound Spaces. This lets you interact with your control plane's API from the app cluster. The claim APIs and the namespaced XR APIs you define via CompositeResourceDefinitions (XRDs) in the control plane are available in your app cluster alongside Kubernetes workload APIs like Pod. Control Plane Connector enables the same experience as a locally installed Crossplane.

![control plane connector operations flow](/img/ConnectorFlow.png)

### Control Plane Connector operations

Control Plane Connector leverages the [Kubernetes API AggregationLayer][kubernetes-api-aggregationlayer] to create an extension API server and serve the claim APIs and the namespaced XR APIs in the control plane. It discovers the claim APIs and the namespaced XR APIs available in the control plane and registers corresponding APIService resources on the app cluster. Those APIService resources refer to the extension API server of Control Plane Connector.

The claim APIs and the namespaced XR APIs are available in your Kubernetes cluster, just like all native Kubernetes APIs.

The Control Plane Connector processes every request targeting the claim APIs and the namespaced XR APIs and makes the relevant requests to the connected control plane.

Only the connected control plane stores and processes all claims and namespaced XRs created in the app cluster, eliminating any storage use at the application cluster. The Control Plane Connector provisions a target namespace at the control plane for the app cluster and stores all claims and namespaced XRs in this target namespace.

For managing the claims and namespaced XRs, the Control Plane Connector creates a unique identifier for a resource by combining input parameters from claims, including:
- `metadata.name`
- `metadata.namespace`
- your cluster name

It employs SHA-256 hashing to generate a hash value and then extracts the first 16 characters of that hash. This ensures the resulting identifier remains within the 64-character limit in Kubernetes.
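As a rough sketch, you could reproduce such an identifier with standard shell tools; the exact input string format appears in the concrete example that follows this sketch:

```bash
# Sketch only: derive the 16-character identifier from the concatenated
# claim name, namespace, and cluster name, then apply the claim- prefix.
CLAIM_ID=$(printf '%s' "my-bucket-x-test-x-00000000-0000-0000-0000-000000000000" \
  | sha256sum | cut -c1-16)
echo "claim-${CLAIM_ID}"
```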
For instance, if a claim named `my-bucket` exists in the test namespace in `cluster-dev`, the system calculates the SHA-256 hash from `my-bucket-x-test-x-00000000-0000-0000-0000-000000000000` and takes the first 16 characters. The control plane side then names the claim `claim-c603e518969b413e`.

For namespaced XRs, the process is similar; only the prefix is different. The name becomes `nxr-c603e518969b413e`.

### Installation

Log in with the up CLI:

```bash
up login
```

Connect your app cluster to a namespace in an Upbound control plane with `up controlplane connector install <control-plane-name> <namespace>`. This command creates a user token and installs the Control Plane Connector to your cluster. It's recommended you create a values file called `connector-values.yaml` and provide the values below. Select the tab according to which environment your control plane is running in.

```yaml
upbound:
  # This is your org account in Upbound, e.g. the name displayed after executing `up org list`
  account: <org-account>
  # This is a personal access token generated in the Upbound Console
  token: <api-token>

spaces:
  # Use the value below if your control plane is running in Upbound's GCP Cloud Space;
  # else use upbound-aws-us-east-1.spaces.upbound.io
  host: "upbound-gcp-us-west-1.spaces.upbound.io"
  insecureSkipTLSVerify: true
  controlPlane:
    # The name of the control plane you want the Connector to attach to
    name: <control-plane-name>
    # The control plane group the control plane resides in
    group: <group>
    # The namespace within the control plane to sync claims from the app cluster to. NOTE: This must be created before you install the connector.
    claimNamespace: <claim-namespace>
```

1. Create a [kubeconfig][kubeconfig] for the control plane. Update your Upbound context to the path for your desired control plane.
```shell
up login
up ctx <org>/upbound-gcp-us-central-1/default/your-control-plane
up ctx . -f - > context.yaml
```

2. Write it to a secret in the cluster where you plan to install the Control Plane Connector.
```shell
kubectl create secret generic my-controlplane-kubeconfig --from-file=context.yaml
```

3. Reference this secret in the `spaces.controlPlane.kubeconfigSecret` field below.

```yaml
spaces:
  controlPlane:
    # The namespace within the control plane to sync claims from the app cluster to. NOTE: This must be created before you install the connector.
    claimNamespace: <claim-namespace>
    kubeconfigSecret:
      name: my-controlplane-kubeconfig
      key: kubeconfig
```

Provide the values file above when you run the CLI command:

```bash
up controlplane connector install my-control-plane my-app-ns-1 --file=connector-values.yaml
```

The claim APIs and the namespaced XR APIs from your control plane are now visible in the cluster. You can verify this with `kubectl api-resources`.

```bash
kubectl api-resources
```

### Uninstall

Disconnect an app cluster that you previously installed the Control Plane Connector on by running the following:

```bash
up ctp connector uninstall <control-plane-name> <namespace>
```

This command uninstalls the helm chart for the Control Plane Connector from an app cluster. It moves any claims in the app cluster into the control plane at the specified namespace.

:::tip
Make sure your kubeconfig's current context is pointed at the app cluster where you want to uninstall Control Plane Connector from.
:::

It's recommended you create a values file called `connector-values.yaml` and provide the values below.
Select the tab according to which environment your
-control plane is running in.
-
-
-
-
-
-
-```yaml
-upbound:
-  # This is your org account in Upbound e.g. the name displayed after executing `up org list`
-  account:
-  # This is a personal access token generated in the Upbound Console
-  token:
-
-spaces:
-  # Upbound GCP US-West-1     upbound-gcp-us-west-1.spaces.upbound.io
-  # Upbound AWS US-East-1     upbound-aws-us-east-1.spaces.upbound.io
-  # Upbound GCP US-Central-1  upbound-gcp-us-central-1.spaces.upbound.io
-  host: ""
-  insecureSkipTLSVerify: true
-  controlPlane:
-    # The name of the control plane you want the Connector to attach to
-    name:
-    # The control plane group the control plane resides in
-    group:
-    # The namespace within the control plane to sync claims from the app cluster to.
-    # NOTE: This must be created before you install the connector.
-    claimNamespace:
-```
-
-
-
-
-Create a [kubeconfig][kubeconfig-1] for the
-control plane. Write it to a secret in the cluster where you plan to
-install the Control Plane Connector. Reference this secret in the
-`spaces.controlPlane.kubeconfigSecret` field below.
-
-```yaml
-spaces:
-  controlPlane:
-    # The namespace within the control plane to sync claims from the app cluster to. NOTE: This must be created before you install the connector.
-    claimNamespace:
-    kubeconfigSecret:
-      name: my-controlplane-kubeconfig
-      key: kubeconfig
-```
-
-
-
-
-
-
-Provide the values file above when you `helm install` the Control Plane Connector:
-
-
-```bash
-helm install --wait mcp-connector oci://xpkg.upbound.io/spaces-artifacts/mcp-connector -n kube-system -f connector-values.yaml
-```
-:::tip
-Create an API token from the Upbound user account settings page in the console by following [these instructions][these-instructions].
-:::
-
-### Uninstall
-
-You can uninstall Control Plane Connector with Helm by running the following:
-
-```bash
-helm uninstall mcp-connector
-```
-
-
-
-
-
-### Example usage
-
-This example creates a control plane using [Configuration
-EKS][configuration-eks]. `KubernetesCluster` is
-available as a claim API in your control plane. The following is [an
-example][an-example]
-object you can create in your control plane.
-
-```yaml
-apiVersion: k8s.starter.org/v1alpha1
-kind: KubernetesCluster
-metadata:
-  name: my-cluster
-  namespace: default
-spec:
-  id: my-cluster
-  parameters:
-    nodes:
-      count: 3
-      size: small
-    services:
-      operators:
-        prometheus:
-          version: "34.5.1"
-  writeConnectionSecretToRef:
-    name: my-cluster-kubeconfig
-```
-
-After connecting your Kubernetes app cluster to the control plane, you
-can create the `KubernetesCluster` object in your app cluster. Although your
-local cluster has an object, the actual resource lives in your managed control
-plane inside Upbound.
-
-```bash {copy-lines="3"}
-# Applying the claim YAML above.
-# kubectl is set up to talk with your Kubernetes cluster.
-kubectl apply -f claim.yaml
-
-
-kubectl get claim -A
-NAME         SYNCED   READY   CONNECTION-SECRET       AGE
-my-cluster   True     True    my-cluster-kubeconfig   2m
-```
-
-Once Kubernetes creates the object, open the console to see your object.
-
-![Claim by connector in console](/img/ClaimInConsole.png)
-
-You can interact with the object through your cluster just as if it
-lived in your cluster.
-
-### Migration to control planes
-
-This guide details the migration of a Crossplane installation to Upbound-managed
-control planes using the Control Plane Connector to manage claims on an application
-cluster.
-
-![migration flow application cluster to control plane](/img/ConnectorMigration.png)
-
-#### Export all resources
-
-Before proceeding, ensure that you have set the correct kubecontext for your application
-cluster.
-
-```bash
-up controlplane migration export --pause-before-export --output=my-export.tar.gz --yes
-```
-
-This command performs the following:
-- Pauses all claim, composite, and managed resources before export.
-- Scans the control plane for resource types.
-- Exports Crossplane and native resources.
-- Archives the exported state into `my-export.tar.gz`.
-
-Example output:
-```bash
-Exporting control plane state...
-  ✓ Pausing all claim resources before export... 1 resources paused! ⏸️
-  ✓ Pausing all composite resources before export... 7 resources paused! ⏸️
-  ✓ Pausing all managed resources before export... 34 resources paused! ⏸️
-  ✓ Scanning control plane for types to export... 231 types found! 👀
-  ✓ Exporting 231 Crossplane resources...125 resources exported! 📤
-  ✓ Exporting 3 native resources...19 resources exported! 📤
-  ✓ Archiving exported state... archived to "my-export.tar.gz"! 📦
-
-Successfully exported control plane state!
-```
-
-#### Import all resources
-
-The exported resources restore into the target control plane, which serves
-as the destination for the Control Plane Connector.
-
-
-Log into Upbound and select the correct context:
-
-```bash
-up login
-up ctx
-up ctp create ctp-a
-```
-
-Output:
-```bash
-ctp-a created
-```
-
-Verify that the core Crossplane version matches on both the application cluster and the new managed
-control plane.
-
-Use the following command to import the resources:
-```bash
-up controlplane migration import -i my-export.tar.gz \
-    --unpause-after-import \
-    --mcp-connector-cluster-id=my-appcluster \
-    --mcp-connector-claim-namespace=my-appcluster
-```
-
-This command:
-- Restores base resources
-- Waits for XRDs and packages to establish
-- Imports claim and XR resources
-- Finalizes the import and resumes managed resources
-
-Note that `--mcp-connector-cluster-id` needs to be unique per application cluster,
-and `--mcp-connector-claim-namespace` is the namespace the system creates
-during the import.
-
-Example output:
-```bash
-Importing control plane state...
-  ✓ Reading state from the archive... Done! 👀
-  ✓ Importing base resources... 56 resources imported!📥
-  ✓ Waiting for XRDs... Established! ⏳
-  ✓ Waiting for Packages... Installed and Healthy! ⏳
-  ✓ Importing remaining resources... 88 resources imported! 📥
-  ✓ Finalizing import... Done! 🎉
-  ✓ Unpausing managed resources ... Done! ▶️
-
-Successfully imported control plane state!
-```
-
-#### Verify imported claims
-
-
-The Control Plane Connector renames all claims and adds additional labels to them.
-
-```bash
-kubectl get claim -A
-```
-
-Example output:
-```bash
-NAMESPACE       NAME                                                         SYNCED   READY   CONNECTION-SECRET             AGE
-my-appcluster   cluster.aws.platformref.upbound.io/claim-e708ff592b974f51   True     True    platform-ref-aws-kubeconfig   3m17s
-```
-
-Inspect the labels:
-```bash
-kubectl get -n my-appcluster cluster.aws.platformref.upbound.io/claim-e708ff592b974f51 -o yaml | yq .metadata.labels
-```
-
-Example output:
-```bash
-mcp-connector.upbound.io/app-cluster: my-appcluster
-mcp-connector.upbound.io/app-namespace: default
-mcp-connector.upbound.io/app-resource-name: example
-```
-
-#### Clean up the app cluster
-
-Remove all Crossplane-related resources from the application cluster, including:
-
-- Managed Resources
-- Claims
-- Compositions
-- XRDs
-- Packages (Functions, Configurations, Providers)
-- Crossplane and all associated CRDs
-
-
-#### Install Control Plane Connector
-
-
-Follow the preceding installation guide and configure the `connector-values.yaml`:
-
-```yaml
-# NOTE: clusterID needs to match --mcp-connector-cluster-id used in the import on the managed control plane
-clusterID: my-appcluster
-upbound:
-  account:
-  token:
-
-spaces:
-  host: ""
-  insecureSkipTLSVerify: true
-  controlPlane:
-    name:
-    group:
-    # NOTE: This is the --mcp-connector-claim-namespace used during the import to the control plane
-    claimNamespace:
-```
-Once the Control Plane Connector installs, verify that resources exist in the application
-cluster:
-
-```bash
-kubectl api-resources | grep platform
-```
-
-Example output:
-```bash
-awslbcontrollers   aws.platform.upbound.io/v1alpha1       true   AWSLBController
-podidentities      aws.platform.upbound.io/v1alpha1       true   PodIdentity
-sqlinstances       aws.platform.upbound.io/v1alpha1       true   SQLInstance
-clusters           aws.platformref.upbound.io/v1alpha1    true   Cluster
-osss               observe.platform.upbound.io/v1alpha1   true   Oss
-apps               platform.upbound.io/v1alpha1           true   App
-```
-
-Verify the claims from the control plane now appear in the application cluster:
-
-```bash
-kubectl get claim -A
-```
-
-Example output:
-```bash
-NAMESPACE   NAME                                         SYNCED   READY   CONNECTION-SECRET             AGE
-default     cluster.aws.platformref.upbound.io/example   True     True    platform-ref-aws-kubeconfig   127m
-```
-
-With this guide, you migrated your Crossplane installation to
-Upbound-managed control planes. This ensures seamless integration with your
-application cluster using the Control Plane Connector.
-
-### Connect multiple app clusters to a control plane
-
-Claims are stored in a unique namespace in the Upbound control plane.
-Every cluster creates a new control plane namespace.
-
-![Multi-cluster architecture with control plane connector](/img/ConnectorMulticlusterArch.png)
-
-There's no limit on the number of clusters connected to a single control plane.
-Control plane operators can see all their infrastructure in a central control
-plane.
-
-Without using control planes and Control Plane Connector, users have to install
-Crossplane and providers for each cluster. Each cluster requires configuration for
-providers with necessary credentials. With a single control plane where multiple
-clusters connect through Upbound tokens, you don't need to give out any cloud
-credentials to the clusters.
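-
-To see that isolation in practice, you can list claims per app cluster namespace from the control plane. A minimal sketch, assuming two connected clusters whose target namespaces are `app-cluster-1` and `app-cluster-2`:
-
-```bash
-# Each connected app cluster syncs its claims into its own control plane
-# namespace, so per-cluster inventories stay cleanly separated.
-kubectl get claim -n app-cluster-1
-kubectl get claim -n app-cluster-2
-```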
-
-
-[kubeconfig]: /manuals/cli/howtos/context-config/#generate-a-kubeconfig-for-a-control-plane-in-a-group
-[kubeconfig-1]:/spaces/concepts/control-planes/#connect-directly-to-your-control-plane
-[these-instructions]:/manuals/console/#create-a-personal-access-token
-[kubernetes-api-aggregationlayer]: https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/apiserver-aggregation/
-[configuration-eks]: https://github.com/upbound/configuration-eks
-[an-example]: https://github.com/upbound/configuration-eks/blob/9f86b6d/.up/examples/cluster.yaml
diff --git a/spaces_versioned_docs/version-v1.9/howtos/debugging-a-ctp.md b/spaces_versioned_docs/version-v1.9/howtos/debugging-a-ctp.md
deleted file mode 100644
index 521271e40..000000000
--- a/spaces_versioned_docs/version-v1.9/howtos/debugging-a-ctp.md
+++ /dev/null
@@ -1,128 +0,0 @@
----
-title: Debugging issues on a control plane
-sidebar_position: 70
-description: A guide for how to debug resources on a control plane running in Upbound.
----
-
-This guide explains how to identify and fix issues on a control plane.
-
-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+). Debugging methods are consistent across versions.
-
-For related control plane specifications and version-specific features, see the [Spaces API Reference](../../../reference/apis/spaces-api/).
-:::
-
-## Start from Upbound Console
-
-
-The Upbound [Console][console] has a built-in control plane explorer experience
-that surfaces status and events for the resources on your control plane. The
-explorer is claim-based. Resources in this view exist only if they exist in the
-reference chain originating from a claim. This view is a helpful starting point
-if you are attempting to debug an issue originating from a claim.
-
-:::tip
-If you directly create Crossplane Managed Resources (`MR`s) or Composite
-Resources (`XR`s), they won't render in the explorer.
-:::
-
-### Example
-
-The example below uses the control plane explorer view to inspect why a claim for an EKS Cluster isn't healthy.
-
-#### Check the health status of claims
-
-From the API type card, two claims branch from it: one shows a healthy green icon, while the other shows an unhealthy red icon.
-
-![Use control plane explorer view to see status of claims](/img/debug-overview.png)
-
-Select `More details` on the unhealthy claim card and Upbound shows details for the claim.
-
-![Use control plane explorer view to see details of claims](/img/debug-claim-more-details.png)
-
-Looking at the three events for this claim:
-
-- **ConfigureCompositeResource**: this event indicates Upbound created the claimed Composite Resource (`XR`).
-
-- **BindCompositeResource**: this indicates the Composite Resource (`XR`) that's being "claimed" isn't ready yet. A claim doesn't show `HEALTHY` until the XR it references is ready.
-
-- **ConfigureCompositeResource**: the error saying, `cannot apply composite resource...the object has been modified; please apply your changes to the latest version and try again` is a generic event from Crossplane resources. It's safe to ignore this error.
-
-Next, look at the `status` field of the rendered YAML for the resource.
-
-![Use control plane explorer view to see status details of claims](/img/debug-claim-status.png)
-
-The status reports a message similar to the event stream: this claim is waiting for a Composite Resource to be ready.
Based on this, investigate the Composite Resource referenced by this claim next.
-
-#### Check the health status of the Composite Resource
-
-
-The control plane explorer only shows the claim cards by default. Selecting the claim card renders the rest of the Crossplane resource tree associated with the selected claim.
-
-
-The previous claim expands into this screenshot:
-
-![Use control plane explorer view to expand tree of claim](/img/debug-claim-expansion.png)
-
-This renders the XR referenced by the claim (along with all its references). You can see the XR is showing the same unhealthy status icon in its card. Notice the XR itself has two nested XRs. One of the nested XRs shows a healthy green icon on its card, while the other shows an unhealthy red icon. Like the claim, a Composite Resource doesn't show healthy until all referenced resources also show healthy.
-
-#### Inspecting Managed Resources
-
-Selecting `more details` on one of the unhealthy Managed Resources shows the following:
-
-![Use control plane explorer view to view events for an MR](/img/debug-mr-event.png)
-
-This event reveals it's unhealthy because it's waiting on a reference to another Managed Resource. Searching the rendered YAML of the MR for this resource shows the following:
-
-![Use control plane explorer view to view status for an MR](/img/debug-mr-status.png)
-
-The rendered YAML shows this MR is referencing a sibling MR that shares the same controller. The same parent XR created both of these managed resources. Inspect the sibling MR to see what its status is.
-
-![Use control plane explorer view to view status for a sibling MR](/img/debug-mr-dependency-status.png)
-
-The sibling MR event stream shows the Provider processed the resource create request. Ignore the `CannotInitializeManagedResource` event. EKS clusters can take 15 minutes or more to provision in AWS. The root cause is that everything is fine: all the resources are still provisioning. Waiting longer and then looking at the control plane explorer again shows all resources as healthy. For reference, below is an example status field for a resource that's healthy and provisioned.
-
-```yaml
-...
-status:
-  atProvider:
-    id: team-b-app-cluster-bhwfb-hwtgs-20230403135452772300000008
-  conditions:
-    - lastTransitionTime: '2023-04-03T13:56:35Z'
-      reason: Available
-      status: 'True'
-      type: Ready
-    - lastTransitionTime: '2023-04-03T13:54:02Z'
-      reason: ReconcileSuccess
-      status: 'True'
-      type: Synced
-    - lastTransitionTime: '2023-04-03T13:54:53Z'
-      reason: Success
-      status: 'True'
-      type: LastAsyncOperation
-    - lastTransitionTime: '2023-04-03T13:54:53Z'
-      reason: Finished
-      status: 'True'
-      type: AsyncOperation
-```
-
-### Control plane explorer limitations
-
-The control plane explorer view is currently designed around claims (`XC`s). The control plane explorer doesn't inspect other Crossplane resources. To inspect other Crossplane resources, use the `up` CLI.
-
-Some examples of Crossplane resources that require the `up` CLI:
-
-- Managed Resources that aren't associated with a claim
-- Composite Resources that aren't associated with a claim
-- The status of _deleting_ resources
-- ProviderConfigs
-- Provider events
-
-## Use direct CLI access
-
-If your preference is to use a terminal instead of a GUI, Upbound supports direct access to the API server of the control plane. Use [`up ctx`][up-ctx] to connect directly to your control plane.
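-
-As a sketch of that workflow (the context path and the group-qualified resource name below are illustrative), point your kubeconfig at the control plane and inspect the resources the explorer doesn't surface:
-
-```bash
-# Switch the kubeconfig context to the control plane.
-up ctx "///my-control-plane"
-
-# Inspect resources outside the claim reference chain.
-kubectl get managed        # all managed resources, claim-backed or not
-kubectl get composite      # composite resources, including claimless XRs
-kubectl get providerconfigs.aws.upbound.io   # example for provider-aws
-kubectl get events --field-selector involvedObject.kind=Provider
-```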
-
-
-[console]: /manuals/console/upbound-console
-[up-ctx]: /reference/cli-reference
diff --git a/spaces_versioned_docs/version-v1.9/howtos/managed-service.md b/spaces_versioned_docs/version-v1.9/howtos/managed-service.md
deleted file mode 100644
index 40b983a76..000000000
--- a/spaces_versioned_docs/version-v1.9/howtos/managed-service.md
+++ /dev/null
@@ -1,23 +0,0 @@
----
-title: Managed Upbound control planes
-description: "Learn about the managed service capabilities of a Space"
-sidebar_position: 10
----
-
-Control planes in Upbound are fully isolated [Upbound Crossplane][uxp] instances
-that Upbound manages for you. This means Upbound handles:
-
-- the lifecycle of the underlying infrastructure (compute, memory, and storage) required to power your instance.
-- the scaling of that infrastructure.
-- the maintenance of the core Upbound Crossplane components that make up a control plane.
-
-This lets users focus on building their APIs and operating their control planes,
-while Upbound handles the rest. Each control plane has its own dedicated API
-server connecting users to their control plane.
-
-## Learn about Upbound control planes
-
-Read the [concept][ctp-concept] documentation to learn about Upbound control planes.
-
-[uxp]: /manuals/uxp/overview
-[ctp-concept]: /spaces/concepts/control-planes
\ No newline at end of file
diff --git a/spaces_versioned_docs/version-v1.9/howtos/mcp-connector-guide.md b/spaces_versioned_docs/version-v1.9/howtos/mcp-connector-guide.md
deleted file mode 100644
index 8a3866d07..000000000
--- a/spaces_versioned_docs/version-v1.9/howtos/mcp-connector-guide.md
+++ /dev/null
@@ -1,169 +0,0 @@
----
-title: Consume control plane APIs in an app cluster with control plane connector
-sidebar_position: 99
-description: A tutorial to configure a Kubernetes app cluster to consume control plane
-  APIs with the control plane connector
----
-
-In this tutorial, you learn how to configure a Kubernetes app cluster to communicate with a control plane in an Upbound self-hosted Space.
-
-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Control plane connector functionality is consistent across versions.
-
-For related API specifications, resources, and version compatibility details, see the [Spaces API Reference](../../../../../reference/apis/spaces-api/).
-:::
-
-The [control plane connector][control-plane-connector] bridges your Kubernetes application clusters, running outside of Upbound, to your control planes running in Upbound. This allows you to interact with your control plane's API right from the app cluster. The claim APIs you define via `CompositeResourceDefinitions` are available alongside Kubernetes workload APIs like `Pod`. In effect, control plane connector provides the same experience as a locally installed Crossplane.
-
-## Prerequisites
-
-To complete this tutorial, you need the following:
-
-- Have already deployed an Upbound Space.
-- Have already deployed a Kubernetes cluster (referred to as `app cluster`).
-
-## Create a control plane
-
-Create a new control plane in your self-hosted Space. Run the following command in a terminal:
-
-```bash
-up ctp create my-control-plane
-```
-
-Once the control plane is ready, connect to it.
-
-```bash
-up ctp connect my-control-plane
-```
-
-For convenience, install an Upbound [platform reference Configuration][platform-reference-configuration] from the marketplace. For production scenarios, replace this with your own Crossplane Configurations or compositions.
- -```bash -up ctp configuration install xpkg.upbound.io/upbound/platform-ref-aws:v1.4.0 -``` - -## Fetch the control plane's connection details - -Run the following command in a terminal: - -```shell -kubectl get secret kubeconfig-my-control-plane -n default -o jsonpath='{.data.kubeconfig}' | base64 -d > kubeconfig-my-control-plane.yaml -``` - -This command saves the kubeconfig for the control plane to a file in your working directory. - -## Install control plane connector in your app cluster - -Switch contexts to your Kubernetes app cluster. To install the control plane connector in your app cluster, you must first provide a secret containing your control plane's kubeconfig at install-time. Run the following command in a terminal: - -:::important -Make sure the following commands are executed against your **app cluster**, not your control plane. -::: - -```bash -kubectl create secret generic kubeconfig-my-control-plane -n kube-system --from-file=kubeconfig=./kubeconfig-my-control-plane.yaml -``` - -Set the environment variable below to configure which namespace _in your control plane_ you wish to sync the app cluster's claims to. - -```shell -export CONNECTOR_CTP_NAMESPACE=app-cluster-1 -``` - -Install the Control Plane Connector in the app cluster and point it to your control plane. - -```bash -up ctp connector install my-control-plane $CONNECTOR_CTP_NAMESPACE --control-plane-secret=kubeconfig-my-control-plane -``` - -## Inspect your app cluster - -After you install Control Plane Connector in the app cluster, you can now see APIs which live on the control plane. You can confirm this is the case by running the following command on your app cluster: - -```bash {copy-lines="1"} -kubectl api-resources | grep upbound - -# The output should look like this: -sqlinstances aws.platform.upbound.io/v1alpha1 true SQLInstance -clusters aws.platformref.upbound.io/v1alpha1 true Cluster -osss observe.platform.upbound.io/v1alpha1 true Oss -apps platform.upbound.io/v1alpha1 true App -``` - -## Claim a database instance on your app cluster - -Create a database claim against the `SQLInstance` API and observe resources get created by your control plane. Apply the following resources to your app cluster: - -```yaml -cat < --output - ``` - - The command exports your existing Crossplane control plane configuration/state into an archive file. - -::: note -By default, the export command doesn't make any changes to your existing Crossplane control plane state, leaving it intact. Use the `--pause-before-export` flag to pause the reconciliation on managed resources before exporting the archive file. - -This safety mechanism ensures the control plane you migrate state to doesn't assume ownership of resources before you're ready. -::: - -2. Use the control plane [create command][create-command] to create a managed -control plane in Upbound: - - ```bash - up controlplane create my-controlplane - ``` - -3. Use [`up ctx`][up-ctx] to connect to the control plane created in the previous step: - - ```bash - up ctx "///my-controlplane" - ``` - - The command configures your local `kubeconfig` to connect to the control plane. - -4. Run the following command to import the archive file into the control plane: - - ```bash - up controlplane migration import --input - ``` - -:::note -By default, the import command leaves the control plane in an inactive state by pausing the reconciliation on managed -resources. This pause gives you an opportunity to review the imported configuration/state before activating the control plane. 
-Use the `--unpause-after-import` flag to change the default behavior and activate the control plane immediately after
-importing the archive file.
-:::
-
-
-
-5. Review and validate the imported configuration/state. When you are ready, activate your managed
-   control plane by running the following command:
-
-   ```bash
-   kubectl annotate managed --all crossplane.io/paused-
-   ```
-
-   At this point, you can delete the source Crossplane control plane.
-
-## CLI options
-
-### Filtering
-
-The migration tool captures the state of a Control Plane. The only filtering
-supported is Kubernetes namespace and Kubernetes resource type filtering.
-
-You can exclude namespaces using the `--exclude-namespaces` CLI option. This can prevent the CLI from including unwanted resources in the export.
-
-```bash
---exclude-namespaces=kube-system,kube-public,kube-node-lease,local-path-storage,...
-
-# A list of specific namespaces to exclude from the export. Defaults to 'kube-system', 'kube-public','kube-node-lease', and 'local-path-storage'.
-```
-
-You can exclude Kubernetes resource types by using the `--exclude-resources` CLI option:
-
-```bash
---exclude-resources=EXCLUDE-RESOURCES,...
-
-# A list of resource types to exclude from the export in "resource.group" format. No resources are excluded by default.
-```
-
-For example, to exclude the CRDs installed by Crossplane functions (since they're not needed):
-
-```bash
-up controlplane migration export \
-  --exclude-resources=gotemplates.gotemplating.fn.crossplane.io,kclinputs.template.fn.crossplane.io
-```
-
-:::warning
-You must specify resource names in lowercase "resource.group" format (for example, `gotemplates.gotemplating.fn.crossplane.io`). Using only the resource kind (for example, `GoTemplate`) isn't supported.
-:::
-
-
-:::tip Function Input CRDs
-
-Exclude function input CRDs (`inputs.template.fn.crossplane.io`, `resources.pt.fn.crossplane.io`, `gotemplates.gotemplating.fn.crossplane.io`, `kclinputs.template.fn.crossplane.io`) from migration exports. Upbound automatically recreates these resources during import. Function input CRDs typically have owner references to function packages and may have restricted RBAC access. Upbound installs these CRDs during the import when function packages are restored.
-
-:::
-
-
-After export, users can also change the archive file to only include necessary resources.
-
-### Export non-Crossplane resources
-
-Use the `--include-extra-resources=` CLI option to select other CRD types to include in the export.
-
-### Set the kubecontext
-
-Currently `--context` isn't supported in the migration CLI. You should be able to use the `--kubeconfig` CLI option to use a file that's set to the correct context. For example:
-
-```bash
-up controlplane migration export --kubeconfig
-```
-
-Use this in tandem with `up ctx` to export a control plane's kubeconfig:
-
-```bash
-up ctx --kubeconfig ~/.kube/config
-
-# To list the current context
-up ctx . --kubeconfig ~/.kube/config
-```
-
-## Export archive
-
-The migration CLI exports an archive upon successful completion. Below is an example export of a control plane that excludes several CRD types and skips the confirmation prompt. A file gets written to the working directory, unless you select another output file:
-
-
- -View the example export - -```bash -$ up controlplane migration export --exclude-resources=gotemplates.gotemplating.fn.crossplane.io,kclinputs.template.fn.crossplane.io --yes -Exporting control plane state... -✓ Scanning control plane for types to export... 121 types found! 👀 -✓ Exporting 121 Crossplane resources...60 resources exported! 📤 -✓ Exporting 3 native resources...8 resources exported! 📤 -✓ Archiving exported state... archived to "xp-state.tar.gz"! 📦 -``` - -
-
-
-When an export occurs, a file named `xp-state.tar.gz` by default gets created in the working directory. You can unzip the file, and the contents of the export are all plain-text YAML files.
-
-- Each CRD (for example `vpcs.ec2.aws.upbound.io`) gets its own directory, which contains:
-  - A `metadata.yaml` file that contains the Kubernetes object metadata and a list of the Kubernetes categories the resource belongs to
-  - A `cluster` directory that contains YAML manifests for all resources provisioned using the CRD
-
-Sample contents for a Cluster with a single `XNetwork` Composite from
-[configuration-aws-network][configuration-aws-network] is shown below:
-
-
-
- -View the example cluster content - -```bash -├── compositionrevisions.apiextensions.crossplane.io -│ ├── cluster -│ │ ├── kcl.xnetworks.aws.platform.upbound.io-4ca6a8a.yaml -│ │ └── xnetworks.aws.platform.upbound.io-9859a34.yaml -│ └── metadata.yaml -├── configurations.pkg.crossplane.io -│ ├── cluster -│ │ └── configuration-aws-network.yaml -│ └── metadata.yaml -├── deploymentruntimeconfigs.pkg.crossplane.io -│ ├── cluster -│ │ └── default.yaml -│ └── metadata.yaml -├── export.yaml -├── functions.pkg.crossplane.io -│ ├── cluster -│ │ ├── crossplane-contrib-function-auto-ready.yaml -│ │ ├── crossplane-contrib-function-go-templating.yaml -│ │ └── crossplane-contrib-function-kcl.yaml -│ └── metadata.yaml -├── internetgateways.ec2.aws.upbound.io -│ ├── cluster -│ │ └── borrelli-backup-test-xgl4q.yaml -│ └── metadata.yaml -├── mainroutetableassociations.ec2.aws.upbound.io -│ ├── cluster -│ │ └── borrelli-backup-test-t2qh7.yaml -│ └── metadata.yaml -├── namespaces -│ └── cluster -│ ├── crossplane-system.yaml -│ ├── default.yaml -│ └── upbound-system.yaml -├── providerconfigs.aws.upbound.io -│ ├── cluster -│ │ └── default.yaml -│ └── metadata.yaml -├── providerconfigusages.aws.upbound.io -│ ├── cluster -│ │ ├── 0a2a3ec6-ef13-45f9-9cf0-63af7f4a6b6b.yaml -...redacted -│ │ └── f7092b0f-3a78-4bfe-82c8-57e5085a9b11.yaml -│ └── metadata.yaml -├── providers.pkg.crossplane.io -│ ├── cluster -│ │ ├── upbound-provider-aws-ec2.yaml -│ │ └── upbound-provider-family-aws.yaml -│ └── metadata.yaml -├── routes.ec2.aws.upbound.io -│ ├── cluster -│ │ └── borrelli-backup-test-dt9cj.yaml -│ └── metadata.yaml -├── routetableassociations.ec2.aws.upbound.io -│ ├── cluster -│ │ ├── borrelli-backup-test-mr2sd.yaml -│ │ ├── borrelli-backup-test-ngq5h.yaml -│ │ ├── borrelli-backup-test-nrkgg.yaml -│ │ └── borrelli-backup-test-wq752.yaml -│ └── metadata.yaml -├── routetables.ec2.aws.upbound.io -│ ├── cluster -│ │ └── borrelli-backup-test-dv4mb.yaml -│ └── metadata.yaml -├── secrets -│ └── namespaces -│ ├── crossplane-system -│ │ ├── cert-token-signing-gateway-pub.yaml -│ │ ├── mxp-hostcluster-certs.yaml -│ │ ├── package-pull-secret.yaml -│ │ └── xgql-tls.yaml -│ └── upbound-system -│ └── aws-creds.yaml -├── securitygrouprules.ec2.aws.upbound.io -│ ├── cluster -│ │ ├── borrelli-backup-test-472f4.yaml -│ │ └── borrelli-backup-test-qftmw.yaml -│ └── metadata.yaml -├── securitygroups.ec2.aws.upbound.io -│ ├── cluster -│ │ └── borrelli-backup-test-w5jch.yaml -│ └── metadata.yaml -├── storeconfigs.secrets.crossplane.io -│ ├── cluster -│ │ └── default.yaml -│ └── metadata.yaml -├── subnets.ec2.aws.upbound.io -│ ├── cluster -│ │ ├── borrelli-backup-test-8btj6.yaml -│ │ ├── borrelli-backup-test-gbmrm.yaml -│ │ ├── borrelli-backup-test-m7kh7.yaml -│ │ └── borrelli-backup-test-nttt5.yaml -│ └── metadata.yaml -├── vpcs.ec2.aws.upbound.io -│ ├── cluster -│ │ └── borrelli-backup-test-7hwgh.yaml -│ └── metadata.yaml -└── xnetworks.aws.platform.upbound.io -├── cluster -│ └── borrelli-backup-test.yaml -└── metadata.yaml -43 directories, 87 files -``` - -
- - -The `export.yaml` file contains metadata about the export, including the configuration of the export, Crossplane information, and what's included in the export bundle. - -
- -View the export - -```yaml -version: v1alpha1 -exportedAt: 2025-01-06T17:39:53.173222Z -options: - excludedNamespaces: - - kube-system - - kube-public - - kube-node-lease - - local-path-storage - includedResources: - - namespaces - - configmaps - - secrets - excludedResources: - - gotemplates.gotemplating.fn.crossplane.io - - kclinputs.template.fn.crossplane.io -crossplane: - distribution: universal-crossplane - namespace: crossplane-system - version: 1.17.3-up.1 - featureFlags: - - --enable-provider-identity - - --enable-environment-configs - - --enable-composition-functions - - --enable-usages -stats: - total: 68 - nativeResources: - configmaps: 0 - namespaces: 3 - secrets: 5 - customResources: - amicopies.ec2.aws.upbound.io: 0 - amilaunchpermissions.ec2.aws.upbound.io: 0 - amis.ec2.aws.upbound.io: 0 - availabilityzonegroups.ec2.aws.upbound.io: 0 - capacityreservations.ec2.aws.upbound.io: 0 - carriergateways.ec2.aws.upbound.io: 0 - compositeresourcedefinitions.apiextensions.crossplane.io: 0 - compositionrevisions.apiextensions.crossplane.io: 2 - compositions.apiextensions.crossplane.io: 0 - configurationrevisions.pkg.crossplane.io: 0 - configurations.pkg.crossplane.io: 1 -...redacted -``` - -
-
-### Skipped resources
-
-Along with the resources excluded via CLI options, the following resources aren't
-included in the backup:
-
-- The `kube-root-ca.crt` ConfigMap, since this is cluster-specific
-- Resources directly managed via Helm (resources from ArgoCD's Helm implementation, which templates
-Helm manifests and then applies them, do get included in the backup). The migration creates the exclusion list by looking for:
-  - Any Resource with the label `"app.kubernetes.io/managed-by" == "Helm"`
-  - Kubernetes Secrets with the label prefix `helm.sh/release`. For example, `helm.sh/release.v1`
-- Resources installed via a Crossplane package. These have an `ownerReference` with
-a prefix `pkg.crossplane.io`. The expectation is that during import, the Crossplane Package Manager bears responsibility for installing the resources.
-- Crossplane Locks: Any `Lock.pkg.crossplane.io` resource isn't included in the
-export.
-
-## Restore
-
-The following is an example of a successful import run. At the end of the import, all Managed Resources are in a paused state.
-
-
- -View the migration import - -```bash -$ up controlplane migration import -Importing control plane state... -✓ Reading state from the archive... Done! 👀 -✓ Importing base resources... 18 resources imported! 📥 -✓ Waiting for XRDs... Established! ⏳ -✓ Waiting for Packages... Installed and Healthy! ⏳ -✓ Importing remaining resources... 50 resources imported! 📥 -✓ Finalizing import... Done! 🎉 -``` - -
-
-Your scenario may involve migrating resources which already exist through other automation on the platform. When executing an import in these circumstances, the importer applies the new manifests to the cluster. If the resource already exists, the restore sets fields to what's in the backup.
-
-The importer restores all resources in the export archive. Managed Resources get imported with the `crossplane.io/paused: "true"` annotation set. Use the `--unpause-after-import` CLI argument to automatically un-pause resources that got
-paused during backup, or remove the annotation manually.
-
-### Restore order
-
-The importer restores resources based on Kubernetes types. The restore order doesn't include parent/child relationships.
-
-Because Crossplane Composites create new Managed Resources if not present on the cluster, all
-Claims, Composites and Managed Resources get imported in a paused state. You can un-pause them after the restore completes.
-
-The first step of import is installing Base Resources into the cluster. These resources (such as
-packages and XRDs) must be ready before proceeding with the import.
-Base Resources are:
-
-- Kubernetes Resources
-  - ConfigMaps
-  - Namespaces
-  - Secrets
-- Crossplane Resources
-  - ControllerConfigs: `controllerconfigs.pkg.crossplane.io`
-  - DeploymentRuntimeConfigs: `deploymentruntimeconfigs.pkg.crossplane.io`
-  - StoreConfigs: `storeconfigs.secrets.crossplane.io`
-- Crossplane Packages
-  - Providers: `providers.pkg.crossplane.io`
-  - Functions: `functions.pkg.crossplane.io`
-  - Configurations: `configurations.pkg.crossplane.io`
-
-Restore waits for the base resources to be `Ready` before moving on to the next step. Next, restore walks through the archive and restores all the manifests present.
-
-During import, the `crossplane.io/paused` annotation gets added to Managed Resources, Claims
-and Composites.
-
-To manually un-pause managed resources after an import, remove the annotation by running:
-
-```bash
-kubectl annotate managed --all crossplane.io/paused-
-```
-
-You can also run import again with the `--unpause-after-import` flag to remove the annotations.
-
-```bash
-up controlplane migration import --unpause-after-import
-```
-
-### Restoring resource status
-
-The importer applies the status of all resources during import. The importer determines if the CRD version has a status field defined based on the stored CRD version.
-
-
-[cli-command]: /reference/cli-reference
-[up-cli]: /reference/cli-reference
-[up-cli-1]: /manuals/cli/overview
-[create-command]: /reference/cli-reference
-[up-ctx]: /reference/cli-reference
-[configuration-aws-network]: https://marketplace.upbound.io/configurations/upbound/configuration-aws-network
diff --git a/spaces_versioned_docs/version-v1.9/howtos/observability.md b/spaces_versioned_docs/version-v1.9/howtos/observability.md
deleted file mode 100644
index 8fc5c3278..000000000
--- a/spaces_versioned_docs/version-v1.9/howtos/observability.md
+++ /dev/null
@@ -1,395 +0,0 @@
----
-title: Observability
-sidebar_position: 50
-description: A guide for how to use the integrated observability pipeline feature
-  in a Space.
-plan: "enterprise"
----
-
-
-
-This guide explains how to configure observability in Upbound Spaces. Upbound
-provides integrated observability features built on
-[OpenTelemetry][opentelemetry] to collect, process, and export logs, metrics,
-and traces.
-
-Upbound Spaces offers two levels of observability:
-
-1. 
**Space-level observability** - Observes the cluster infrastructure where Spaces software is installed (Self-Hosted only)
-2. **Control plane observability** - Observes workloads running within individual control planes
-
-
-
-
-
-:::info API Version Information & Version Selector
-This guide applies to all supported versions (v1.9-v1.15+). Observability features evolved:
-
-- **v1.9-v1.12**: SharedTelemetryConfig available (alpha until v1.11)
-- **v1.11+**: Observability promoted to stable with logs export support
-- **v1.14+**: Both space-level and control-plane observability GA
-
-**View API Reference for Your Version**:
-| Version | Status | Link |
-|---------|--------|------|
-| v1.15 | Latest | [Spaces API v1.15](../../../next/reference/apis/spaces-api/) |
-| v1.14 | Supported | [Spaces API v1.14](../../../1.14/reference/apis/spaces-api/) |
-| v1.13 | Supported | [Spaces API v1.13](../../../1.13/reference/apis/spaces-api/) |
-| v1.12 | Supported | [Spaces API v1.12](../../../1.12/reference/apis/spaces-api/) |
-| v1.11 | Supported | [Spaces API v1.11](../../../1.11/reference/apis/spaces-api/) |
-| v1.10 | Limited | [Spaces API v1.10](../../../1.10/reference/apis/spaces-api/) |
-| v1.9 | EOL | [Spaces API v1.9](../../../1.9/reference/apis/spaces-api/) |
-
-For version support policy and feature availability, see the table above.
-:::
-
-:::important
-**Space-level observability** (available since v1.6.0, GA in v1.14.0):
-- Disabled by default
-- Requires manual enablement and configuration
-- Self-Hosted Spaces only
-
-**Control plane observability** (available since v1.13.0, GA in v1.14.0):
-- Enabled by default
-- No additional configuration required
-:::
-
-
-
-
-## Prerequisites
-
-
-**Control plane observability** is enabled by default. No additional setup is
-required.
-
-
-
-### Self-hosted Spaces
-
-1. **Enable the observability feature** when installing Spaces:
-   ```bash
-   up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
-     ...
-     --set "observability.enabled=true"
-   ```
-
-Set `features.alpha.observability.enabled=true` instead if using a Spaces version
-before `v1.14.0`.
-
-2. **Install OpenTelemetry Operator** (required for Space-level observability):
-   ```bash
-   kubectl apply -f https://github.com/open-telemetry/opentelemetry-operator/releases/download/v0.116.0/opentelemetry-operator.yaml
-   ```
-
-   :::important
-   If running Spaces `v1.11` or later, use OpenTelemetry Operator `v0.110.0` or later due to breaking changes.
-   :::
-
-
-## Space-level observability
-
-Space-level observability is only available for self-hosted Spaces and allows
-administrators to observe the cluster infrastructure.
-
-### Configuration
-
-Configure Space-level observability using the `spacesCollector` value in your
-Spaces Helm chart:
-
-```yaml
-observability:
-  spacesCollector:
-    config:
-      exporters:
-        otlphttp:
-          endpoint: ""
-          headers:
-            api-key: YOUR_API_KEY
-      exportPipeline:
-        logs:
-          - otlphttp
-        metrics:
-          - otlphttp
-```
-
-This configuration exports metrics and logs from:
-
-- Crossplane installation
-- Spaces infrastructure (controller, API, router, etc.)
-
-### Router metrics
-
-The Spaces router uses Envoy as a reverse proxy and automatically exposes
-metrics when you enable Space-level observability.
These metrics provide
-visibility into:
-
-- Traffic routing to control planes and services
-- Request status codes, timeouts, and retries
-- Circuit breaker state preventing cascading failures
-- Client connection patterns and request volume
-- Request latency (P50, P95, P99)
-
-For more information about available metrics, example queries, and how to enable
-this feature, see the [Space-level observability guide][space-level-o11y].
-
-## Control plane observability
-
-Control plane observability collects telemetry data from workloads running
-within individual control planes using `SharedTelemetryConfig` resources.
-
-The pipeline deploys [OpenTelemetry Collectors][opentelemetry-collectors] per
-control plane, defined by a `SharedTelemetryConfig` at the group level.
-Collectors pass data to external observability backends.
-
-:::important
-From Spaces `v1.13` and beyond, telemetry only includes user-facing control
-plane workloads (Crossplane, providers, functions).
-
-Self-hosted users can include system workloads (`api-server`, `etcd`) by setting
-`observability.collectors.includeSystemTelemetry=true` in Helm.
-:::
-
-:::important
-Spaces validates `SharedTelemetryConfig` resources before applying them by
-sending telemetry to configured exporters. For self-hosted Spaces, ensure that
-`spaces-controller` can reach the exporter endpoints.
-:::
-
-### `SharedTelemetryConfig`
-
-`SharedTelemetryConfig` is a group-scoped custom resource that defines telemetry
-configuration for control planes.
-
-#### New Relic example
-
-```yaml
-apiVersion: observability.spaces.upbound.io/v1alpha1
-kind: SharedTelemetryConfig
-metadata:
-  name: newrelic
-  namespace: default
-spec:
-  controlPlaneSelector:
-    labelSelectors:
-      - matchLabels:
-          org: foo
-  exporters:
-    otlphttp:
-      endpoint: https://otlp.nr-data.net
-      headers:
-        api-key: YOUR_API_KEY
-  exportPipeline:
-    metrics: [otlphttp]
-    traces: [otlphttp]
-    logs: [otlphttp]
-```
-
-#### Datadog example
-
-```yaml
-apiVersion: observability.spaces.upbound.io/v1alpha1
-kind: SharedTelemetryConfig
-metadata:
-  name: datadog
-  namespace: default
-spec:
-  controlPlaneSelector:
-    labelSelectors:
-      - matchLabels:
-          org: foo
-  exporters:
-    datadog:
-      api:
-        site: ${DATADOG_SITE}
-        key: ${DATADOG_API_KEY}
-  exportPipeline:
-    metrics: [datadog]
-    traces: [datadog]
-    logs: [datadog]
-```
-
-### Control plane selection
-
-Use `spec.controlPlaneSelector` to specify which control planes should use the
-telemetry configuration.
-
-#### Label-based selection
-
-```yaml
-spec:
-  controlPlaneSelector:
-    labelSelectors:
-      - matchLabels:
-          environment: production
-```
-
-#### Expression-based selection
-
-```yaml
-spec:
-  controlPlaneSelector:
-    labelSelectors:
-      - matchExpressions:
-          - { key: environment, operator: In, values: [production,staging] }
-```
-
-#### Name-based selection
-
-```yaml
-spec:
-  controlPlaneSelector:
-    names:
-      - controlplane-dev
-      - controlplane-staging
-      - controlplane-prod
-```
-
-### Manage sensitive data
-
-:::important
-Available from Spaces `v1.10`
-:::
-
-Store sensitive data in Kubernetes secrets and reference them in your
-`SharedTelemetryConfig`:
-
-1. **Create the secret:**
-   ```bash
-   kubectl create secret generic sensitive -n  \
-     --from-literal=apiKey='YOUR_API_KEY'
-   ```
-
-2. 
**Reference in SharedTelemetryConfig:**
-   ```yaml
-   apiVersion: observability.spaces.upbound.io/v1alpha1
-   kind: SharedTelemetryConfig
-   metadata:
-     name: newrelic
-   spec:
-     configPatchSecretRefs:
-       - name: sensitive
-         key: apiKey
-         path: exporters.otlphttp.headers.api-key
-     controlPlaneSelector:
-       labelSelectors:
-         - matchLabels:
-             org: foo
-     exporters:
-       otlphttp:
-         endpoint: https://otlp.nr-data.net
-         headers:
-           api-key: dummy # Replaced by secret value
-     exportPipeline:
-       metrics: [otlphttp]
-       traces: [otlphttp]
-       logs: [otlphttp]
-   ```
-
-### Telemetry processing
-
-:::important
-Available from Spaces `v1.11`
-:::
-
-Configure processing pipelines to transform telemetry data using the [transform
-processor][transform-processor].
-
-#### Add labels to metrics
-
-```yaml
-spec:
-  processors:
-    transform:
-      error_mode: ignore
-      metric_statements:
-        - context: datapoint
-          statements:
-            - set(attributes["newLabel"], "someLabel")
-  processorPipeline:
-    metrics: [transform]
-```
-
-#### Remove labels
-
-From metrics:
-```yaml
-processors:
-  transform:
-    metric_statements:
-      - context: datapoint
-        statements:
-          - delete_key(attributes, "kubernetes_namespace")
-```
-
-From logs:
-```yaml
-processors:
-  transform:
-    log_statements:
-      - context: log
-        statements:
-          - delete_key(attributes, "log.file.name")
-```
-
-#### Modify log messages
-
-```yaml
-processors:
-  transform:
-    log_statements:
-      - context: log
-        statements:
-          - set(attributes["original"], body)
-          - set(body, Concat(["log message:", body], " "))
-```
-
-### Monitor status
-
-Check the status of your `SharedTelemetryConfig`:
-
-```bash
-kubectl get stc
-NAME      SELECTED   FAILED   PROVISIONED   AGE
-datadog   1          0        1             63s
-```
-
-- `SELECTED`: Number of control planes selected
-- `FAILED`: Number of control planes that failed provisioning
-- `PROVISIONED`: Number of successfully running collectors
-
-For detailed status information:
-
-```bash
-kubectl describe stc
-```
-
-## Supported exporters
-
-Both Space-level and control plane observability support:
-- `datadog` - Datadog integration
-- `otlphttp` - General-purpose exporter (used by New Relic, among others)
-- `debug` - Debug exporter for troubleshooting
-
-## Considerations
-
-- **Control plane conflicts**: Each control plane can only use one `SharedTelemetryConfig`. Multiple configs selecting the same control plane conflict.
-- **Custom collector image**: Both Space-level and control plane observability use the same custom OpenTelemetry Collector image with supported exporters.
-- **Resource scope**: `SharedTelemetryConfig` resources are group-scoped, allowing different telemetry configurations per group.
-
-For more advanced configuration options, review the [Helm chart
-reference][helm-chart-reference] and [OpenTelemetry Transformation Language
-documentation][opentelemetry-transformation-language].
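-
-When wiring a new pipeline, a quick way to validate selection and export plumbing is the built-in `debug` exporter before pointing at a real backend. A minimal sketch (the resource name and selected control plane are assumptions):
-
-```yaml
-apiVersion: observability.spaces.upbound.io/v1alpha1
-kind: SharedTelemetryConfig
-metadata:
-  name: debug-pipeline
-  namespace: default
-spec:
-  # Select a single non-production control plane while validating.
-  controlPlaneSelector:
-    names:
-      - controlplane-dev
-  exporters:
-    debug: {}
-  exportPipeline:
-    metrics: [debug]
-    logs: [debug]
-```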
-
-
-[opentelemetry]: https://opentelemetry.io/
-[opentelemetry-collectors]: https://opentelemetry.io/docs/collector/
-[opentelemetry-collector-configuration]: https://opentelemetry.io/docs/collector/configuration/#exporters
-[opentelemetry-operator]: https://opentelemetry.io/docs/kubernetes/operator/
-[transform-processor]: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/transformprocessor/README.md
-[opentelemetry-transformation-language]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl
-[space-level-o11y]: /spaces/howtos/self-hosted/space-observability
-[helm-chart-reference]: /reference/helm-reference
-[opentelemetry-transformation-language-functions]: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/ottl/ottlfuncs/README.md
-[opentelemetry-transformation-language-contexts]: https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl/contexts
-[guide-on-ottl]: https://betterstack.com/community/guides/observability/ottl/#a-brief-overview-of-the-ottl-grammar
diff --git a/spaces_versioned_docs/version-v1.9/howtos/query-api.md b/spaces_versioned_docs/version-v1.9/howtos/query-api.md
deleted file mode 100644
index 78163de2f..000000000
--- a/spaces_versioned_docs/version-v1.9/howtos/query-api.md
+++ /dev/null
@@ -1,320 +0,0 @@
----
-title: Query API
-sidebar_position: 40
-description: Use the `up` CLI to query objects and resources
----
-
-
-
-
-Upbound's Query API allows users to inspect objects and resources within their control planes. The read-only `up alpha query` and `up alpha get` CLI commands let you gather information on your control planes quickly and efficiently. These commands follow the [`kubectl` conventions][kubectl-conventions] for filtering, sorting, and retrieving information from your Space.
-
-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+). Query API is available in Cloud Spaces since v1.6 and Self-Hosted since v1.8.
-
-For detailed deployment procedures and version compatibility details, see [Deploy Query API infrastructure - Self-Hosted](./self-hosted/query-api.md).
-:::
-
-
-
-## Using the Query API
-
-
-The Query API allows you to retrieve control plane information faster than traditional `kubectl` commands. This feature lets you debug your Crossplane resources with the CLI or within the Upbound Console's enhanced management views.
-
-### Query within a single control plane
-
-Use the `up alpha get` command to retrieve information about objects within the current control plane context. This command uses the **Query** endpoint and targets the current control plane.
-
-To switch between control plane groups, use the [`up ctx`][up-ctx] command and change to your desired context with an interactive prompt, or specify your control plane path:
-
-```shell
-up ctx ///
-```
-
-You can query within a single control plane with the [`up alpha get` command][up-alpha-get-command] to return more information about a given object within the current kubeconfig context.
-
-The `up alpha get` command can query resource types and aliases to return objects in your control plane.
-
-```shell
-up alpha get managed
-NAME                             READY   SYNCED   AGE
-custom-account1-5bv5j-sa         True    True     15m
-custom-cluster1-bq6dk-net        True    True     15m
-custom-account1-5bv5j-subnet     True    True     15m
-custom-cluster1-bq6dk-nodepool   True    True     15m
-custom-cluster1-bq6dk-cluster    True    True     15m
-custom-account1-5bv5j-net        True    True     15m
-custom-cluster1-bq6dk-subnet     True    True     15m
-custom-cluster1-bq6dk-sa         True    True     15m
-```
-
-The [`-A` flag][a-flag] queries for objects across all namespaces.
-
-```shell
-up alpha get configmaps -A
-NAMESPACE           NAME                                                   AGE
-crossplane-system   uxp-versions-config                                    18m
-crossplane-system   universal-crossplane-config                            18m
-crossplane-system   kube-root-ca.crt                                       18m
-upbound-system      kube-root-ca.crt                                       18m
-kube-system         kube-root-ca.crt                                       18m
-kube-system         coredns                                                18m
-default             kube-root-ca.crt                                       18m
-kube-node-lease     kube-root-ca.crt                                       18m
-kube-public         kube-root-ca.crt                                       18m
-kube-system         kube-apiserver-legacy-service-account-token-tracking   18m
-kube-system         extension-apiserver-authentication                     18m
-```
-
-To query for [multiple resource types][multiple-resource-types], you can add the name or alias for the resource as a comma separated string.
-
-```shell
-up alpha get providers,providerrevisions
-
-NAME                                                                              HEALTHY   REVISION   IMAGE                                                     STATE    DEP-FOUND   DEP-INSTALLED   AGE
-providerrevision.pkg.crossplane.io/crossplane-contrib-provider-nop-ecc25c121431   True      1          xpkg.upbound.io/crossplane-contrib/provider-nop:v0.2.1   Active                               18m
-NAME                                                          INSTALLED   HEALTHY   PACKAGE                                                   AGE
-provider.pkg.crossplane.io/crossplane-contrib-provider-nop    True        True      xpkg.upbound.io/crossplane-contrib/provider-nop:v0.2.1   18m
-```
-
-### Query multiple control planes
-
-The [`up alpha query` command][up-alpha-query-command] returns a list of objects of any kind within all the control planes in your Space. This command uses either the **SpaceQuery** or **GroupQuery** endpoints depending on your query scope. The `-A` flag switches the query context from the group level to the entire Space.
-
-The `up alpha query` command accepts resources and aliases to return objects across your group or Space.
-
-```shell
-up alpha query crossplane
-
-NAME                                                                                          ESTABLISHED   OFFERED   AGE
-compositeresourcedefinition.apiextensions.crossplane.io/xnetworks.platform.acme.co           True          True      20m
-compositeresourcedefinition.apiextensions.crossplane.io/xaccountscaffolds.platform.acme.co   True          True      20m
-
-
-NAME                                                                          XR-KIND            XR-APIVERSION               AGE
-composition.apiextensions.crossplane.io/xaccountscaffolds.platform.acme.co   XAccountScaffold   platform.acme.co/v1alpha1   20m
-composition.apiextensions.crossplane.io/xnetworks.platform.acme.co           XNetwork           platform.acme.co/v1alpha1   20m
-
-
-NAME                                                                                          REVISION   XR-KIND            XR-APIVERSION               AGE
-compositionrevision.apiextensions.crossplane.io/xaccountscaffolds.platform.acme.co-5ae9da5   1          XAccountScaffold   platform.acme.co/v1alpha1   20m
-compositionrevision.apiextensions.crossplane.io/xnetworks.platform.acme.co-414ce80           1          XNetwork           platform.acme.co/v1alpha1   20m
-
-NAME                                                         READY   SYNCED   AGE
-nopresource.nop.crossplane.io/custom-cluster1-bq6dk-subnet   True    True     19m
-nopresource.nop.crossplane.io/custom-account1-5bv5j-net      True    True     19m
-
-## Output truncated...
-
-```
-
-
-The [`--sort-by` flag][sort-by-flag] lets you control the order of returned results. You can construct your sort order as a JSONPath expression string or integer.
- - -```shell -up alpha query crossplane -A --sort-by="{.metadata.name}" - -CONTROLPLANE NAME AGE -default/test deploymentruntimeconfig.pkg.crossplane.io/default 10m - -CONTROLPLANE NAME AGE TYPE DEFAULT-SCOPE -default/test storeconfig.secrets.crossplane.io/default 10m Kubernetes crossplane-system -``` - -To query for multiple resource types, you can add the name or alias for the resource as a comma separated string. - -```shell -up alpha query namespaces,configmaps -A - -CONTROLPLANE NAME AGE -default/test namespace/upbound-system 15m -default/test namespace/crossplane-system 15m -default/test namespace/kube-system 16m -default/test namespace/default 16m - -CONTROLPLANE NAMESPACE NAME AGE -default/test crossplane-system configmap/uxp-versions-config 15m -default/test crossplane-system configmap/universal-crossplane-config 15m -default/test crossplane-system configmap/kube-root-ca.crt 15m -default/test upbound-system configmap/kube-root-ca.crt 15m -default/test kube-system configmap/coredns 16m -default/test default configmap/kube-root-ca.crt 16m - -## Output truncated... - -``` - -The Query API also allows you to return resource types with specific [label columns][label-columns]. - -```shell -up alpha query composite -A --label-columns=crossplane.io/claim-namespace - -CONTROLPLANE NAME SYNCED READY COMPOSITION AGE CLAIM-NAMESPACE -query-api-test/test xeks.argo.discover.upbound.io/test-k7xbk False xeks.argo.discover.upbound.io 51d default - -CONTROLPLANE NAME EXTERNALDNS SYNCED READY COMPOSITION AGE CLAIM-NAMESPACE -spaces-clusters/controlplane-query-api-test-spaces-playground xexternaldns.externaldns.platform.upbound.io/spaces-cluster-0-xd8v2-lhnl7 6.34.2 True True xexternaldns.externaldns.platform.upbound.io 19d default -default/query-api-test xexternaldns.externaldns.platform.upbound.io/space-awg-kine-f7dxq-nkk2q 6.34.2 True True xexternaldns.externaldns.platform.upbound.io 55d default - -## Output truncated... - -``` - -### Query API request format - -The CLI can also return a version of your query request with the [`--debug` flag][debug-flag]. This flag returns the API spec request for your query. - -```shell -up alpha query composite -A -d - -apiVersion: query.spaces.upbound.io/v1alpha1 -kind: SpaceQuery -metadata: - creationTimestamp: null -spec: - cursor: true - filter: - categories: - - composite - controlPlane: {} - limit: 500 - objects: - controlPlane: true - table: {} - page: {} -``` - -For more complex queries, you can interact with the Query API like a Kubernetes-style API by creating a query and applying it with `kubectl`. - -The example below is a query for `claim` resources in every control plane from oldest to newest and returns specific information about those claims. - - -```yaml -apiVersion: query.spaces.upbound.io/v1alpha1 -kind: SpaceQuery -spec: - filter: - categories: - - claim - order: - - creationTimestamp: Asc - cursor: true - count: true - objects: - id: true - controlPlane: true - object: - kind: true - apiVersion: true - metadata: - name: true - uid: true - spec: - containers: - image: true -``` - - -The Query API is served by the Spaces API endpoint. You can use `up ctx` to -switch the kubectl context to the Spaces API ingress. After that, you can use -`kubectl create` and receive the `response` for your query parameters. 
- - -```shell -kubectl create -f spaces-query.yaml -o yaml -``` - -Your `response` should look similar to this example: - -```yaml {copy-lines="none"} -apiVersion: query.spaces.upbound.io/v1alpha1 -kind: SpaceQuery -metadata: - creationTimestamp: "2024-08-08T14:41:46Z" - name: default -response: - count: 3 - cursor: - next: "" - page: 0 - pageSize: 100 - position: 0 - objects: - - controlPlane: - name: query-api-test - namespace: default - id: default/query-api-test/823b2781-7e70-4d91-a6f0-ee8f455d67dc - object: - apiVersion: spaces.platform.upbound.io/v1alpha1 - kind: Space - metadata: - name: space-awg-kine - resourceVersion: "803868" - uid: 823b2781-7e70-4d91-a6f0-ee8f455d67dc - spec: {} - - controlPlane: - name: test-1 - namespace: test - id: test/test-1/08a573dd-851a-42cc-a600-b6f6ed37ee8d - object: - apiVersion: argo.discover.upbound.io/v1alpha1 - kind: EKS - metadata: - name: test-1 - resourceVersion: "4270320" - uid: 08a573dd-851a-42cc-a600-b6f6ed37ee8d - spec: {} - - controlPlane: - name: controlplane-query-api-test-spaces-playground - namespace: spaces-clusters - id: spaces-clusters/controlplane-query-api-test-spaces-playground/b5a6770f-1f85-4d09-8990-997c84bd4159 - object: - apiVersion: spaces.platform.upbound.io/v1alpha1 - kind: Space - metadata: - name: spaces-cluster-0 - resourceVersion: "1408337" - uid: b5a6770f-1f85-4d09-8990-997c84bd4159 - spec: {} -``` - - -## Query API Explorer - - - -import CrdDocViewer from '@site/src/components/CrdViewer'; - -### Query - -The Query resource allows you to query objects in a single control plane. - - - -### GroupQuery - -The GroupQuery resource allows you to query objects across a group of control planes. - - - -### SpaceQuery - -The SpaceQuery resource allows you to query objects across all control planes in a space. - - - - - - -[documentation]: /spaces/howtos/self-hosted/query-api -[up-ctx]: /reference/cli-reference -[up-alpha-get-command]: /reference/cli-reference -[a-flag]: /reference/cli-reference -[multiple-resource-types]: /reference/cli-reference -[up-alpha-query-command]: /reference/cli-reference -[sort-by-flag]: /reference/cli-reference -[label-columns]: /reference/cli-reference -[debug-flag]: /reference/cli-reference -[kubectl-conventions]: https://kubernetes.io/docs/reference/kubectl/generated/kubectl_get/ diff --git a/spaces_versioned_docs/version-v1.9/howtos/secrets-management.md b/spaces_versioned_docs/version-v1.9/howtos/secrets-management.md deleted file mode 100644 index 88e730ae5..000000000 --- a/spaces_versioned_docs/version-v1.9/howtos/secrets-management.md +++ /dev/null @@ -1,719 +0,0 @@ ---- -title: Secrets Management -sidebar_position: 20 -description: A guide for how to configure synchronizing external secrets into control - planes in a Space. ---- - -Upbound's _Shared Secrets_ is a built in secrets management feature that -provides an integrated way to manage secrets across your platform. It allows you -to store sensitive data like passwords and certificates for your managed control -planes as secrets in an external secret store. - -This feature is a wrapper around the [External Secrets Operator (ESO)][external-secrets-operator-eso] that pulls secrets from external vaults and distributes them across your platform. - -:::info API Version Information -This guide applies to all supported versions (v1.9-v1.14+). The SharedSecretStore and SharedExternalSecret CRDs have been stable since v1.9. - -For API specifications and configuration details, see the [Spaces API Reference](../../../reference/apis/spaces-api/). 
-version compatibility details, see the [Spaces API Reference](../../../reference/apis/spaces-api/).
-:::
-
-## Benefits
-
-The Shared Secrets feature allows you to:
-
-* Access secrets from a variety of external secret stores without operational overhead
-* Configure synchronization for multiple control planes in a group
-* Store and manage all your secrets centrally
-* Use Shared Secrets across all Upbound environments (Cloud and Disconnected Spaces)
-* Synchronize secrets across groups of control planes while maintaining clear security boundaries
-* Manage secrets at scale programmatically while ensuring proper isolation and access control
-
-## Understanding the Architecture
-
-The Shared Secrets feature uses a hierarchical approach to centrally manage
-secrets and effectively control their distribution.
-
-![Shared Secrets workflow diagram](/img/shared-secrets-workflow.png)
-
-1. The flow begins at the group level, where you define your secret sources and distribution rules
-2. These rules automatically create corresponding resources in your control planes
-3. In each control plane, specific namespaces receive the secrets
-4. Changes at the group level automatically propagate through this chain
-
-## Component configuration
-
-Upbound Shared Secrets consists of two components:
-
-1. **SharedSecretStore**: Defines connections to external secret providers
-2. **SharedExternalSecret**: Specifies which secrets to synchronize and where
-
-### Connect to an External Vault
-
-The `SharedSecretStore` component is the connection point to your external
-secret vaults. It provisions ClusterSecretStore resources into control planes
-within the group.
-
-#### AWS Secrets Manager
-
-In this example, you create a `SharedSecretStore` that connects to AWS
-Secrets Manager in `us-west-2`, applies to all control planes labeled with
-`environment: production`, and makes these secrets available in the `default` and
-`crossplane-system` namespaces.
-
-You can configure access to AWS Secrets Manager using static credentials or
-workload identity.
-
-:::important
-While the underlying ESO API supports more auth methods, static credentials are currently the only supported auth method in Cloud Spaces.
-:::
-
-##### Static credentials
-
-1. Use the AWS CLI to create access credentials.
-
-2. Store the access credentials in a secret in the namespace you want to have access to the `SharedSecretStore`. The key names must match the ones the `SharedSecretStore` references:
-```shell
-kubectl create secret \
-  generic aws-credentials \
-  -n default \
-  --from-literal=access-key-id=<your-access-key-id> \
-  --from-literal=secret-access-key=<your-secret-access-key>
-```
-
-3. Create a `SharedSecretStore` custom resource file called `secretstore.yaml`.
-   Paste the following configuration:
-```yaml
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedSecretStore
-metadata:
-  name: aws-secrets
-spec:
-  # Define which control planes should receive this configuration
-  controlPlaneSelector:
-    labelSelectors:
-      - matchLabels:
-          environment: production
-
-  # Define which namespaces within those control planes can access secrets
-  namespaceSelector:
-    names:
-      - default
-      - crossplane-system
-
-  # Configure the connection to AWS Secrets Manager
-  provider:
-    aws:
-      service: SecretsManager
-      region: us-west-2
-      auth:
-        secretRef:
-          accessKeyIDSecretRef:
-            name: aws-credentials
-            key: access-key-id
-          secretAccessKeySecretRef:
-            name: aws-credentials
-            key: secret-access-key
-```
-
-##### Workload Identity with IRSA
-
-You can also use AWS IAM Roles for Service Accounts (IRSA), depending on your
-organization's needs:
-
-1. Ensure you have deployed the Spaces software into an IRSA-enabled EKS cluster.
-2. Follow the AWS instructions to create an IAM OIDC provider with your EKS OIDC
-   provider URL.
-3. Determine the Spaces-generated `controlPlaneID` of your control plane:
-```shell
-kubectl get controlplane <controlplane-name> -o jsonpath='{.status.controlPlaneID}'
-```
-
-4. Create an IAM trust policy in your AWS account to match the control plane:
-```json
-{
-  "Version": "2012-10-17",
-  "Statement": [
-    {
-      "Effect": "Allow",
-      "Principal": {
-        "Federated": "arn:aws:iam::<account-id>:oidc-provider/<oidc-provider>"
-      },
-      "Action": "sts:AssumeRoleWithWebIdentity",
-      "Condition": {
-        "StringEquals": {
-          "<oidc-provider>:aud": "sts.amazonaws.com",
-          "<oidc-provider>:sub": [
-            "system:serviceaccount:mxp-<controlplane-id>-system:external-secrets-controller"]
-        }
-      }
-    }
-  ]
-}
-```
-
-5. Update your Spaces deployment to annotate the SharedSecrets service account
-   with the role ARN:
-```shell
-up space upgrade ... \
-  --set controlPlanes.sharedSecrets.serviceAccount.customAnnotations."eks\.amazonaws\.com/role-arn"="<role-arn>"
-```
-
-6. Create a SharedSecretStore and reference the SharedSecrets service account:
-```yaml {copy-lines="all"}
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedSecretStore
-metadata:
-  name: aws-sm
-  namespace: default
-spec:
-  provider:
-    aws:
-      service: SecretsManager
-      region: <region>
-      auth:
-        jwt:
-          serviceAccountRef:
-            name: external-secrets-controller
-  controlPlaneSelector:
-    names:
-      - <controlplane-name>
-  namespaceSelector:
-    names:
-      - default
-```
-
-When you create a `SharedSecretStore`, the underlying mechanism:
-
-1. Applies at the group level
-2. Determines which control planes should receive this configuration by the `controlPlaneSelector`
-3. Automatically creates a ClusterSecretStore inside each identified control plane
-4. Maintains a connection in each control plane with the ClusterSecretStore
-   credentials and configuration from the parent SharedSecretStore
-
-Upbound automatically generates a ClusterSecretStore in each matching control
-plane when you create a SharedSecretStore.
-
-```yaml {copy-lines="none"}
-# Automatically created in each matching control plane
-apiVersion: external-secrets.io/v1beta1
-kind: ClusterSecretStore
-metadata:
-  name: aws-secrets # Name matches the parent SharedSecretStore
-spec:
-  provider:
-    upboundspaces:
-      storeRef:
-        name: aws-secrets
-```
-
-When you create a SharedSecretStore, the controller replaces the provider with
-a special provider called `upboundspaces`. This provider references the
-SharedSecretStore object in the Spaces API. This avoids copying the actual cloud
-credentials from Spaces to each control plane.
-
-This workflow lets you configure the store connection once at the group level
-and automatically propagate it to each control plane. Individual control
-planes can use the store without exposure to the group-level configuration,
-and updates to the parent SharedSecretStore propagate to all child
-ClusterSecretStores.
-
-#### Azure Key Vault
-
-:::important
-While the underlying ESO API supports more auth methods, static credentials are currently the only supported auth method in Cloud Spaces.
-:::
-
-##### Static credentials
-
-1. Use the Azure CLI to create a service principal. The output looks similar to this:
-```json
-{
-  "appId": "myAppId",
-  "displayName": "myServicePrincipalName",
-  "password": "myServicePrincipalPassword",
-  "tenant": "myTenantId"
-}
-```
-
-2. Store the `appId` and `password` values as a Kubernetes secret, using the key names the `SharedSecretStore` references:
-```shell
-kubectl create secret \
-  generic azure-secret-sp \
-  -n default \
-  --from-literal=ClientID=<app-id> \
-  --from-literal=ClientSecret=<service-principal-password>
-```
-
-3. Create a SharedSecretStore referencing these credentials:
-```yaml
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedSecretStore
-metadata:
-  name: azure-kv
-spec:
-  provider:
-    azurekv:
-      tenantId: "<tenant-id>"
-      vaultUrl: "<vault-url>"
-      authSecretRef:
-        clientId:
-          name: azure-secret-sp
-          key: ClientID
-        clientSecret:
-          name: azure-secret-sp
-          key: ClientSecret
-  controlPlaneSelector:
-    names:
-      - <controlplane-name>
-  namespaceSelector:
-    names:
-      - default
-```
-
-##### Workload Identity
-
-You can also use Entra Workload Identity Federation to access Azure Key Vault
-without needing to manage secrets.
-
-To use Entra Workload ID with AKS:
-
-1. Deploy the Spaces software into a [workload identity-enabled AKS cluster][workload-identity-enabled-aks-cluster].
-2. Retrieve the OIDC issuer URL of the AKS cluster:
-```shell
-az aks show --name "<cluster-name>" \
-  --resource-group "<resource-group>" \
-  --query "oidcIssuerProfile.issuerUrl" \
-  --output tsv
-```
-
-3. Use the Azure CLI to make a managed identity:
-```shell
-az identity create \
-  --name "<identity-name>" \
-  --resource-group "<resource-group>" \
-  --location "<location>" \
-  --subscription "<subscription-id>"
-```
-
-4. Look up the managed identity's client ID:
-```shell
-az identity show \
-  --resource-group "<resource-group>" \
-  --name "<identity-name>" \
-  --query 'clientId' \
-  --output tsv
-```
-
-5. Update your Spaces deployment to annotate the SharedSecrets service account with the associated Entra application client ID from the previous step:
-```shell
-up space upgrade ... \
-  --set controlPlanes.sharedSecrets.serviceAccount.customAnnotations."azure\.workload\.identity/client-id"="<client-id>" \
-  --set-string controlPlanes.sharedSecrets.pod.customLabels."azure\.workload\.identity/use"="true"
-```
-
-6. Determine the Spaces-generated `controlPlaneID` of your control plane. When you deploy a `kind: controlplane` in a Space, the Spaces software deploys a set of pods in a new namespace following the format `mxp-<controlplane-id>-system`.
-```shell
-kubectl get controlplane <controlplane-name> -o jsonpath='{.status.controlPlaneID}'
-```
-
-7. Create a federated identity credential:
-```shell
-FEDERATED_IDENTITY_CREDENTIAL_NAME=<federated-credential-name>
-USER_ASSIGNED_IDENTITY_NAME=<identity-name>
-RESOURCE_GROUP=<resource-group>
-AKS_OIDC_ISSUER=<oidc-issuer-url>
-CONTROLPLANE_ID=<controlplane-id>
-az identity federated-credential create --name ${FEDERATED_IDENTITY_CREDENTIAL_NAME} --identity-name "${USER_ASSIGNED_IDENTITY_NAME}" --resource-group "${RESOURCE_GROUP}" --issuer "${AKS_OIDC_ISSUER}" --subject system:serviceaccount:"mxp-${CONTROLPLANE_ID}-system:external-secrets-controller" --audience api://AzureADTokenExchange
-```
-
-8. Assign the `Key Vault Secrets User` role to the user-assigned managed identity that you created earlier.
-   This step gives the managed identity permission to read secrets from the key vault:
-```shell
-az role assignment create \
-  --assignee-object-id "${IDENTITY_PRINCIPAL_ID}" \
-  --role "Key Vault Secrets User" \
-  --scope "${KEYVAULT_RESOURCE_ID}" \
-  --assignee-principal-type ServicePrincipal
-```
-
-:::important
-You must manually restart a workload's pod when you add the annotation to the running pod's service account. The Entra workload identity mutating admission webhook requires a restart to inject the necessary environment.
-:::
-
-9. Create a `SharedSecretStore`. Replace `vaultUrl` with the URL of your Azure Key Vault instance. Replace `identityId` with the client ID of the managed identity created earlier:
-```yaml {copy-lines="all"}
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedSecretStore
-metadata:
-  name: azure-kv
-spec:
-  provider:
-    azurekv:
-      authType: WorkloadIdentity
-      vaultUrl: "<vault-url>"
-      identityId: "<client-id>"
-  controlPlaneSelector:
-    names:
-      - <controlplane-name>
-  namespaceSelector:
-    names:
-      - default
-```
-
-#### Google Cloud Secret Manager
-
-You can configure access to Google Cloud Secret Manager using static credentials or workload identity. Below are instructions for configuring either. See the [ESO provider API][eso-provider-api] for more information.
-
-:::important
-While the underlying ESO API supports more auth methods, static credentials are currently the only supported auth method in Cloud Spaces.
-:::
-
-##### Static credentials
-
-1. Use the [GCP CLI][gcp-cli] to create access credentials.
-2. Save the output in a file called `gcp-credentials.json`.
-3. Store the access credentials in a secret in the same namespace as the `SharedSecretStore`:
-   ```shell {label="kube-create-secret",copy-lines="all"}
-   kubectl create secret \
-     generic gcpsm-secret \
-     -n default \
-     --from-file=creds=./gcp-credentials.json
-   ```
-
-4. Create a `SharedSecretStore`, referencing the secret created earlier. Replace `projectID` with your GCP Project ID:
-```yaml
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedSecretStore
-metadata:
-  name: gcp-sm
-spec:
-  provider:
-    gcpsm:
-      auth:
-        secretRef:
-          secretAccessKeySecretRef:
-            name: gcpsm-secret
-            key: creds
-      projectID: <project-id>
-  controlPlaneSelector:
-    names:
-      - <controlplane-name>
-  namespaceSelector:
-    names:
-      - default
-```
-
-:::tip
-The example above maps a Shared Secret Store into a single namespace of a single control plane. Read [control plane selection][control-plane-selection] and [namespace selection][namespace-selection] to learn how to map into one or more namespaces of one or more control planes.
-:::
-
-##### Workload identity with service accounts mapped to IAM roles
-
-To configure workload identity, grant the `roles/iam.workloadIdentityUser` role
-to the Kubernetes service account in the control plane namespace so it can
-impersonate the IAM service account.
-
-1. Ensure you've deployed Spaces on a [Workload Identity Federation-enabled][workload-identity-federation-enabled] GKE cluster.
-2. Determine the Spaces-generated `controlPlaneID` of your control plane. When you deploy a `kind: controlplane` in a Space, the Spaces software deploys a set of pods in a new namespace following the format `mxp-<controlplane-id>-system`.
-```shell
-kubectl get controlplane <controlplane-name> -o jsonpath='{.status.controlPlaneID}'
-```
-
-3. Create a GCP IAM service account with the [GCP CLI][gcp-cli-1]:
-```shell
-gcloud iam service-accounts create <sa-name> \
-  --project=<project-id>
-```
-4. Grant the IAM service account the role to access GCP Secret Manager:
-```shell
-SA_NAME=<sa-name>
-IAM_SA_PROJECT_ID=<project-id>
-gcloud projects add-iam-policy-binding ${IAM_SA_PROJECT_ID} \
-  --member "serviceAccount:${SA_NAME}@${IAM_SA_PROJECT_ID}.iam.gserviceaccount.com" \
-  --role roles/secretmanager.secretAccessor
-```
-
-5. When you enable the Shared Secrets feature, a service account gets created in each control plane for the External Secrets Operator. Apply a [GCP IAM policy binding][gcp-iam-policy-binding] to associate this service account with the desired GCP IAM role:
-```shell
-PROJECT_ID=<project-id>
-PROJECT_NUMBER=<project-number>
-CONTROLPLANE_ID=<controlplane-id>
-gcloud projects add-iam-policy-binding projects/${PROJECT_ID} \
-  --role "roles/iam.workloadIdentityUser" \
-  --member=principal://iam.googleapis.com/projects/${PROJECT_NUMBER}/locations/global/workloadIdentityPools/${PROJECT_ID}.svc.id.goog/subject/ns/mxp-${CONTROLPLANE_ID}-system/sa/external-secrets-controller
-```
-
-6. Update your Spaces deployment to annotate the SharedSecrets service account with the GCP IAM service account's identifier:
-```shell
-up space upgrade ... \
-  --set controlPlanes.sharedSecrets.serviceAccount.customAnnotations."iam\.gke\.io/gcp-service-account"="<sa-name>@<project-id>.iam.gserviceaccount.com"
-```
-
-7. Create a `SharedSecretStore`. Replace `projectID` with your GCP Project ID:
-```yaml
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedSecretStore
-metadata:
-  name: gcp-sm
-spec:
-  provider:
-    gcpsm:
-      projectID: <project-id>
-  controlPlaneSelector:
-    names:
-      - <controlplane-name>
-  namespaceSelector:
-    names:
-      - default
-```
-
-:::tip
-The example above maps a Shared Secret Store into a single namespace of a single control plane. Read [control plane selection][control-plane-selection-1] and [namespace selection][namespace-selection-2] to learn how to map into one or more namespaces of one or more control planes.
-:::
-
-### Manage your secret distribution
-
-After you create your SharedSecretStore, you can define which secrets to
-distribute using SharedExternalSecret:
-
-```yaml
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedExternalSecret
-metadata:
-  name: database-credentials
-  namespace: default
-spec:
-  # Select the same control planes as your SharedSecretStore
-  controlPlaneSelector:
-    labelSelectors:
-      - matchLabels:
-          environment: production
-
-  externalSecretSpec:
-    refreshInterval: 1h
-    secretStoreRef:
-      name: aws-secrets # References the SharedSecretStore name
-      kind: ClusterSecretStore
-    target:
-      name: db-credentials
-    data:
-      - secretKey: username
-        remoteRef:
-          key: prod/database/credentials
-          property: username
-      - secretKey: password
-        remoteRef:
-          key: prod/database/credentials
-          property: password
-```
-
-This configuration:
-
-* Pulls database credentials from your external secret provider
-* Creates secrets in all production control planes
-* Refreshes the secrets every hour
-* Creates a secret called `db-credentials` in each control plane
-
-When you create a SharedExternalSecret at the group level, Upbound's system
-creates a template for the corresponding ClusterExternalSecrets in each selected
-control plane.
-
-The example below simulates the ClusterExternalSecret that Upbound creates:
-
-```yaml
-# Inside each matching control plane:
-apiVersion: external-secrets.io/v1beta1
-kind: ClusterExternalSecret
-metadata:
-  name: database-credentials
-spec:
-  refreshInterval: 1h
-  secretStoreRef:
-    name: aws-secrets
-    kind: ClusterSecretStore
-  data:
-    - secretKey: username
-      remoteRef:
-        key: prod/database/credentials
-        property: username
-```
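-
-The end result in each selected namespace is an ordinary Kubernetes Secret. The sketch below illustrates the shape of that Secret for the `database-credentials` example above; the encoded values come from your external provider and are shown as placeholders:
-
-```yaml
-# Illustrative only: the Secret that ESO materializes in each selected
-# namespace of each matching control plane for the example above.
-apiVersion: v1
-kind: Secret
-metadata:
-  name: db-credentials # matches externalSecretSpec.target.name
-  namespace: default
-type: Opaque
-data:
-  username: <base64-encoded-username> # pulled from prod/database/credentials
-  password: <base64-encoded-password>
-```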
-
-The hierarchy in this configuration is:
-
-1. SharedExternalSecret (group level) defines what secrets to distribute
-2. ClusterExternalSecret (control plane level) manages the distribution within
-   each control plane
-3. Kubernetes Secrets (namespace level) are created in specified namespaces
-
-#### Control plane selection
-
-To configure which control planes in a group you want to project a SecretStore into, use the `spec.controlPlaneSelector` field. You can either use `labelSelectors` or the `names` of a control plane directly. A control plane matches if any of the label selectors match.
-
-This example matches all control planes in the group that have `environment: production` as a label:
-
-```yaml
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedSecretStore
-metadata:
-  name: my-secret-store
-spec:
-  controlPlaneSelector:
-    labelSelectors:
-      - matchLabels:
-          environment: production
-```
-
-You can use the more complex `matchExpressions` to match labels based on an expression. This example matches control planes that have label `environment: production` or `environment: staging`:
-
-```yaml
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedSecretStore
-metadata:
-  name: my-secret-store
-spec:
-  controlPlaneSelector:
-    labelSelectors:
-      - matchExpressions:
-          - { key: environment, operator: In, values: [production,staging] }
-```
-
-You can also specify the names of control planes directly:
-
-```yaml
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedSecretStore
-metadata:
-  name: my-secret-store
-spec:
-  controlPlaneSelector:
-    names:
-      - controlplane-dev
-      - controlplane-staging
-      - controlplane-prod
-```
-
-#### Namespace selection
-
-To configure which namespaces **within each matched control plane** to project the secret store into, use the `spec.namespaceSelector` field. The projected secret store only appears in the namespaces matching the provided selector. You can either use `labelSelectors` or the `names` of namespaces directly. A namespace matches if any of the label selectors match.
-
-**For all control planes matched by** `spec.controlPlaneSelector`, this example matches all namespaces in each selected control plane that have `team: team1` as a label:
-
-```yaml
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedSecretStore
-metadata:
-  name: my-secret-store
-spec:
-  namespaceSelector:
-    labelSelectors:
-      - matchLabels:
-          team: team1
-```
-
-You can use the more complex `matchExpressions` to match labels based on an expression. This example matches namespaces that have label `team: team1` or `team: team2`:
-
-```yaml
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedSecretStore
-metadata:
-  name: my-secret-store
-spec:
-  namespaceSelector:
-    labelSelectors:
-      - matchExpressions:
-          - { key: team, operator: In, values: [team1,team2] }
-```
-
-You can also specify the names of namespaces directly:
-
-```yaml
-apiVersion: spaces.upbound.io/v1alpha1
-kind: SharedSecretStore
-metadata:
-  name: my-secret-store
-spec:
-  namespaceSelector:
-    names:
-      - team1-namespace
-      - team2-namespace
-```
-
-## Configure secrets directly in a control plane
-
-The sections above explain how to use group-scoped resources to project secrets into multiple control planes. You can also use the ESO API types directly in a control plane, as you would in standalone Crossplane or Kubernetes.
-
-See the [ESO documentation][eso-documentation] for a full guide on using the API types.
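-
-As a minimal sketch of that direct approach, the `ExternalSecret` below could be applied inside a single control plane. It assumes the projected ClusterSecretStore `aws-secrets` from the earlier example; the secret name and remote key are illustrative:
-
-```yaml
-# A plain ESO ExternalSecret used directly in one control plane.
-apiVersion: external-secrets.io/v1beta1
-kind: ExternalSecret
-metadata:
-  name: app-credentials # hypothetical name
-  namespace: default
-spec:
-  refreshInterval: 1h
-  secretStoreRef:
-    name: aws-secrets # ClusterSecretStore projected by the SharedSecretStore
-    kind: ClusterSecretStore
-  target:
-    name: app-credentials # Kubernetes Secret created in this namespace
-  data:
-    - secretKey: api-token
-      remoteRef:
-        key: prod/app/credentials # illustrative remote path
-        property: token
-```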
-
-## Best practices
-
-When you configure secrets management in your Upbound environment, keep the
-following best practices in mind:
-
-**Use consistent labeling schemes** across your control planes for predictable
-and manageable secret distribution.
-
-**Organize your secrets** in your external provider using a hierarchical
-structure that mirrors your control plane organization.
-
-**Set appropriate refresh intervals** based on your security requirements and the
-nature of the secrets.
-
-**Use namespace selection sparingly** to limit secret distribution to only the
-namespaces that need them.
-
-**Use separate tokens for each environment.** Keep them in distinct
-SharedSecretStores. Users could bypass SharedExternalSecret selectors by
-creating ClusterExternalSecrets directly in control planes. This grants access to all
-secrets available to that token.
-
-**Document your secret management architecture**, including which control planes
-should receive which secrets.
-
-[control-plane-selection]: #control-plane-selection
-[namespace-selection]: #namespace-selection
-[control-plane-selection-1]: #control-plane-selection
-[namespace-selection-2]: #namespace-selection
-
-[external-secrets-operator-eso]: https://external-secrets.io
-[workload-identity-enabled-aks-cluster]: https://learn.microsoft.com/en-us/azure/aks/workload-identity-deploy-cluster
-[eso-provider-api]: https://external-secrets.io/latest/provider/google-secrets-manager/
-[gcp-cli]: https://cloud.google.com/iam/docs/creating-managing-service-account-keys
-[workload-identity-federation-enabled]: https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity#enable_on_clusters_and_node_pools
-[gcp-cli-1]: https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity#kubernetes-sa-to-iam
-[gcp-iam-policy-binding]: https://cloud.google.com/sdk/gcloud/reference/iam/service-accounts/add-iam-policy-binding
-[eso-documentation]: https://external-secrets.io/latest/introduction/getting-started/
diff --git a/spaces_versioned_docs/version-v1.9/howtos/self-hosted/_category_.json b/spaces_versioned_docs/version-v1.9/howtos/self-hosted/_category_.json
deleted file mode 100644
index 5bf23bb0a..000000000
--- a/spaces_versioned_docs/version-v1.9/howtos/self-hosted/_category_.json
+++ /dev/null
@@ -1,11 +0,0 @@
-{
-  "label": "Self-Hosted Spaces",
-  "position": 2,
-  "collapsed": true,
-  "customProps": {
-    "plan": "business"
-  }
-}
-
diff --git a/spaces_versioned_docs/version-v1.9/howtos/self-hosted/administer-features.md b/spaces_versioned_docs/version-v1.9/howtos/self-hosted/administer-features.md
deleted file mode 100644
index ce878014e..000000000
--- a/spaces_versioned_docs/version-v1.9/howtos/self-hosted/administer-features.md
+++ /dev/null
@@ -1,121 +0,0 @@
----
-title: Administer features
-sidebar_position: 12
-description: Enable and disable features in Spaces
----
-
-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Feature flags and their availability may vary by version.
-
-For detailed feature availability across versions, see the documentation for your Spaces version.
-:::
-
-This guide shows how to enable or disable features in your self-hosted Space.
-
-## Shared secrets
-
-**Status:** Preview
-
-This feature is enabled by default in Cloud Spaces.
-
-To enable this feature in a self-hosted Space, set
-`features.alpha.sharedSecrets.enabled=true` when installing the Space:
-
-```bash
-up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
-  ...
- --set "features.alpha.sharedSecrets.enabled=true" \ -``` - - -## Observability - -**Status:** GA -**Available from:** Spaces v1.13+ - -This feature is enabled by default in Cloud Spaces. - - - -To enable this feature in a self-hosted Space, set -`observability.enabled=true` (`features.alpha.observability.enabled=true` before `v1.14.0`) when installing the Space: - -```bash -up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \ - ... - --set "observability.enabled=true" \ -``` - -The observability feature collects telemetry data from user-facing control -plane workloads like: - -* Crossplane -* Providers -* Functions - -Self-hosted Spaces users can add control plane system workloads such as the -`api-server`, `etcd` by setting the -`observability.collectors.includeSystemTelemetry` Helm flag to true. - -### Sensitive data - -To avoid exposing sensitive data in the `SharedTelemetryConfig` resource, use -Kubernetes secrets to store the sensitive data and reference the secret in the -`SharedTelemetryConfig` resource. - -Create the secret in the same namespace/group as the `SharedTelemetryConfig` -resource. The example below uses `kubectl create secret` to create a new secret: - -```bash -kubectl create secret generic sensitive -n \ - --from-literal=apiKey='YOUR_API_KEY' -``` - -Next, reference the secret in the `SharedTelemetryConfig` resource: - -```yaml -apiVersion: observability.spaces.upbound.io/v1alpha1 -kind: SharedTelemetryConfig -metadata: - name: newrelic -spec: - configPatchSecretRefs: - - name: sensitive - key: apiKey - path: exporters.otlphttp.headers.api-key - controlPlaneSelector: - labelSelectors: - - matchLabels: - org: foo - exporters: - otlphttp: - endpoint: https://otlp.nr-data.net - headers: - api-key: dummy # This value is replaced by the secret value, can be omitted - exportPipeline: - metrics: [otlphttp] - traces: [otlphttp] - logs: [otlphttp] -``` - -The `configPatchSecretRefs` field in the `spec` specifies the secret `name`, -`key`, and `path` values to inject the secret value in the -`SharedTelemetryConfig` resource. - -## Shared backups - -As of Spaces `v.12.0`, this feature is enabled by default. - -To disable in a self-hosted Space, pass the `features.alpha.sharedBackup.enabled=false` as a Helm chart value. -`--set "features.alpha.sharedBackup.enabled=false"` - -## Query API - -**Status:** Preview -The Query API is available in the Cloud Space offering and enabled by default. - -Query API is required for self-hosted deployments with connected Spaces. See the -related [documentation][documentation] -to enable this feature. - -[documentation]: /spaces/howtos/query-api/ diff --git a/spaces_versioned_docs/version-v1.9/howtos/self-hosted/attach-detach.md b/spaces_versioned_docs/version-v1.9/howtos/self-hosted/attach-detach.md deleted file mode 100644 index 1465921cf..000000000 --- a/spaces_versioned_docs/version-v1.9/howtos/self-hosted/attach-detach.md +++ /dev/null @@ -1,198 +0,0 @@ ---- -title: Connect or disconnect a Space -sidebar_position: 12 -description: Enable and connect self-hosted Spaces to the Upbound console ---- -:::info API Version Information -This guide applies to **Spaces v1.8.0 and later** (Self-Hosted only). Connection to Upbound console requires Query API and RBAC to be enabled. - -For version-specific features and requirements, see the . Query API setup details, see [Deploy Query API infrastructure](./query-api.md). -::: - -:::important -This feature is in preview. 
-In Spaces `v1.8.0` and later, you must
-deploy and [enable the Query API][enable-the-query-api] and [enable Upbound
-RBAC][enable-upbound-rbac] to connect a Space to Upbound.
-:::
-
-[Upbound][upbound] allows you to connect self-hosted Spaces and enables a streamlined operations and debugging experience in your Console.
-
-## Usage
-
-### Connect
-
-Before you begin, make sure you have:
-
-- An existing Upbound [organization][organization] in Upbound SaaS.
-- The `up` CLI installed and logged into your organization.
-- `kubectl` installed, with the kubecontext of your self-hosted Space cluster.
-- A `token.json` license, provided by your Upbound account representative.
-- The [Query API][query-api] enabled in the self-hosted Space.
-
-Create a name for your Space and export it as `UPBOUND_SPACE_NAME`. If you don't create a name, `up` automatically generates one for you:
-
-```shell
-export UPBOUND_SPACE_NAME=your-self-hosted-space
-```
-
-#### With up CLI
-
-:::tip
-The command tries to connect the Space to the org account context pointed at by your `up` CLI profile. Make sure you've logged into Upbound SaaS with `up login -a <org-name>` before trying to connect the Space.
-:::
-
-Connect the Space to the Console:
-
-```bash
-up space connect "${UPBOUND_SPACE_NAME}"
-```
-
-This command installs a Connect agent in the `upbound-system` namespace of your Space, creates a service account, and configures permissions in your Upbound cloud organization.
-
-#### With Helm
-
-Export your Upbound org account name to an environment variable called `UPBOUND_ORG_NAME`. You can see this value by running `up org list` after logging on to Upbound.
-
-```shell
-export UPBOUND_ORG_NAME=your-org-name
-```
-
-Create a new robot token and export it to an environment variable called `UPBOUND_TOKEN`:
-
-```bash
-up robot create "${UPBOUND_SPACE_NAME}" --description="Robot used for authenticating Space '${UPBOUND_SPACE_NAME}' with Upbound Connect"
-export UPBOUND_TOKEN=$(up robot token create "$UPBOUND_SPACE_NAME" "$UPBOUND_SPACE_NAME" --file - | jq -r '.token')
-```
-
-:::note
-Follow the [`jq` installation guide][jq-install] if your machine doesn't include
-it by default.
-:::
-
-Create a secret containing the robot token:
-
-```bash
-kubectl create secret -n upbound-system generic connect-token --from-literal=token=${UPBOUND_TOKEN}
-```
-
-Specify your username and password for the Helm OCI registry:
-
-```bash
-jq -r .token $SPACES_TOKEN_PATH | helm registry login xpkg.upbound.io -u $(jq -r .accessId $SPACES_TOKEN_PATH) --password-stdin
-```
-
-In the same cluster where you installed the Spaces software, install the Upbound Connect agent with your token secret:
-
-```bash
-helm -n upbound-system upgrade --install agent \
-  oci://xpkg.upbound.io/spaces-artifacts/agent \
-  --version "0.0.0-441.g68777b9" \
-  --set "image.repository=xpkg.upbound.io/spaces-artifacts/agent" \
-  --set "registration.image.repository=xpkg.upbound.io/spaces-artifacts/register-init" \
-  --set "imagePullSecrets[0].name=upbound-pull-secret" \
-  --set "registration.enabled=true" \
-  --set "space=${UPBOUND_SPACE_NAME}" \
-  --set "organization=${UPBOUND_ORG_NAME}" \
-  --set "tokenSecret=connect-token" \
-  --wait
-```
-
-#### View your Space in the Console
-
-Go to the [Upbound Console][upbound-console], log in, and choose the newly connected Space from the Space selector dropdown.
-
-![A screenshot of the Upbound Console space selector dropdown](/img/attached-space.png)
-
-:::note
-You can only connect a self-hosted Space to a single organization at a time.
-::: - -### Disconnect - -#### With up CLI - -To disconnect a self-hosted Space or a deleted self-hosted Space, run the following command: - -```bash -up space disconnect "${UPBOUND_SPACE_NAME}" -``` - -If the Space still exists, this command uninstalls the Connect agent and deletes the associated service account and permissions. - -#### With Helm - -To disconnect a self-hosted Space or a deleted self-hosted Space, run the following command: - -```bash -helm delete -n upbound-system agent -``` - -Clean up the robot token you created for this self-hosted Space: - -```bash -up robot delete "${UPBOUND_SPACE_NAME}" --force -``` - -## Security model - -### Architecture - -![An architectural diagram of a self-hosted Space attached to Upbound](/img/console-attach-architecture.jpg) - -:::note -This diagram illustrates a self-hosted Space running in AWS connected to the global Upbound Console. The same model applies to a Space running in AKS, GKE, or other Kubernetes environments. -::: - -### Data path - -Upbound uses a Pub/Sub model over TLS to communicate between Upbound's global -console and your self-hosted Space. Self-hosted Spaces establishes a secure -connection with `connect.upbound.io`. `api.upbound.io`, and `auth.upbound.io` and subscribes to an -endpoint. - -:::important -Add `connect.upbound.io`, `api.upbound.io`, and `auth.upbound.io` to your organization's list of -allowed endpoints. -::: - -The -Upbound Console communicates to the Space through that endpoint. The data flow -is: - -1. Users sign in to the Upbound Console, redirecting to authenticate with an organization's configured Identity Provider via SSO. -2. Once authenticated, actions in the Console, like listing control planes or specific resource types from a control plane. These requests post as messages to the Upbound Connect service. -3. A user's self-hosted Space polls the Upbound Connect service periodically for new messages, verifies the authenticity of the message, and fulfills the request contained. -4. A user's self-hosted Space returns the results of the request to the Upbound Connect service and the Console renders the results in the user's browser session. - -**Upbound never stores data originated from a self-hosted Space.** The data is transient and only exposed in the user's browser session. The Console needs this data to render your resources and control planes in the UI. - -### Data transmitted - -Users interact with the Upbound Console to generate request queries to the Upbound Connect Service while exploring, managing, or debugging a self-hosted Space. These requests send data back to the user's browser session in the Console, including: - -* Metadata for the Space -* Metadata for control planes in the state -* Configuration manifests for various resource types within your Space: Crossplane managed resources, composite resources, composite resource claims, Upbound shared secrets, Upbound shared backups, Crossplane providers, ProviderConfigs, Configurations, and Crossplane Composite Functions. - -:::important -This data only concerns resource configuration. The data _inside_ the managed -resource in your Space isn't visible at any point. -::: - -**Upbound can't see your data.** Upbound doesn't have access to session-based data rendered for your users in the Upbound Console. Upbound has no information about your self-hosted Space, other than that you've connected a self-hosted Space. 
-
-### Threat vectors
-
-Only users with editor or administrative permissions can make changes through the Console, such as creating or deleting control planes or groups.
-
-[enable-the-query-api]: /spaces/howtos/self-hosted/query-api
-[enable-upbound-rbac]: /manuals/platform/concepts/authorization/upbound-rbac
-[upbound]: /manuals/console/upbound-console
-[organization]: /manuals/platform/concepts/identity-management/organizations
-[query-api]: /spaces/howtos/self-hosted/query-api
-[jq-install]: https://jqlang.org/download/
-
-[upbound-console]: https://console.upbound.io
diff --git a/spaces_versioned_docs/version-v1.9/howtos/self-hosted/billing.md b/spaces_versioned_docs/version-v1.9/howtos/self-hosted/billing.md
deleted file mode 100644
index 145ff9f03..000000000
--- a/spaces_versioned_docs/version-v1.9/howtos/self-hosted/billing.md
+++ /dev/null
@@ -1,307 +0,0 @@
----
-title: Self-Hosted Space Billing
-sidebar_position: 50
-description: A guide for how billing works in an Upbound Space
----
-
-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Billing models may evolve between versions; see Capacity Licensing for alternative models.
-
-For version-specific features and capacity-based licensing reference specifications, see [Capacity Licensing](/spaces/howtos/self-hosted/capacity-licensing).
-:::
-
-Spaces are a self-hosting feature of Upbound's [flagship product][flagship-product] for platform teams to deploy control planes in their self-managed environments. You can install Spaces into any Kubernetes cluster in your own cloud account, on-premises data center, or on the edge. The pricing is usage-based and requires an Upbound account and subscription. The billing unit is a `Loop`.
-
-:::info
-This guide describes the traditional usage-based billing model using object storage. For disconnected or air-gapped environments, consider [Capacity Licensing](/spaces/howtos/self-hosted/capacity-licensing), which provides a simpler fixed-capacity model with local usage tracking.
-:::
-
-## Billing details
-
-Spaces **aren't connected** to Upbound's global service. To enable proper billing, the Spaces software ships a controller whose responsibility is to collect billing data from your Spaces deployment. The collection and storage of your billing data happens entirely within your environment; no data is automatically emitted back to Upbound's global service. This data gets written to object storage of your choice. AWS, Azure, and GCP are currently supported. The Spaces software exports billing usage data every ~15 seconds.
-
-Spaces customers must periodically provide the billing data to Upbound. Contact your Upbound sales representative to learn more.
-
-## AWS S3
-
-Configure billing to write to an S3 bucket by providing the following values at install-time. Create an S3 bucket if you don't already have one.
-
-### IAM policy
-
-You must create an IAM policy and attach it to the IAM user (for static credentials) or IAM role (for assumed
-roles).
-
-The policy example below enables the necessary S3 permissions:
-
-```json
-{
-  "Version": "2012-10-17",
-  "Statement": [
-    {
-      "Sid": "EnableS3Permissions",
-      "Effect": "Allow",
-      "Action": [
-        "s3:PutObject",
-        "s3:GetObject",
-        "s3:ListBucket",
-        "s3:DeleteObject"
-      ],
-      "Resource": [
-        "arn:aws:s3:::your-bucket-name/*",
-        "arn:aws:s3:::your-bucket-name"
-      ]
-    },
-    {
-      "Sid": "ListBuckets",
-      "Effect": "Allow",
-      "Action": "s3:ListAllMyBuckets",
-      "Resource": "*"
-    }
-  ]
-}
-```
-
-### Authentication with static credentials
-
-In your Spaces install cluster, create a secret in the `upbound-system`
-namespace. This secret must contain keys `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`.
-
-```bash
-kubectl create secret generic billing-credentials -n upbound-system \
-  --from-literal=AWS_ACCESS_KEY_ID=<access-key-id> \
-  --from-literal=AWS_SECRET_ACCESS_KEY=<secret-access-key>
-```
-
-Install the Space software, providing the billing details along with the other required values.
-
-Using Helm:
-
-```bash {hl_lines="2-6"}
-helm -n upbound-system upgrade --install spaces ... \
-  --set "billing.enabled=true" \
-  --set "billing.storage.provider=aws" \
-  --set "billing.storage.aws.region=<region>" \
-  --set "billing.storage.aws.bucket=<bucket-name>" \
-  --set "billing.storage.secretRef.name=billing-credentials"
-  ...
-```
-
-Using the `up` CLI:
-
-```bash {hl_lines="2-6"}
-up space init ... \
-  --set "billing.enabled=true" \
-  --set "billing.storage.provider=aws" \
-  --set "billing.storage.aws.region=<region>" \
-  --set "billing.storage.aws.bucket=<bucket-name>" \
-  --set "billing.storage.secretRef.name=billing-credentials"
-  ...
```
-
-### Authentication with an IAM role
-
-To use short-lived credentials with an assumed IAM role, create an IAM role with
-established trust to the `vector` service account in all `mxp-*-system`
-namespaces.
-
-```json
-{
-  "Version": "2012-10-17",
-  "Statement": [
-    {
-      "Effect": "Allow",
-      "Principal": {
-        "Federated": "arn:aws:iam::12345678912:oidc-provider/oidc.eks.eu-west-2.amazonaws.com/id/YOUROIDCPROVIDERID"
-      },
-      "Action": "sts:AssumeRoleWithWebIdentity",
-      "Condition": {
-        "StringLike": {
-          "oidc.eks.eu-west-2.amazonaws.com/id/YOUROIDCPROVIDERID:sub": "system:serviceaccount:mxp-*-system:vector"
-        }
-      }
-    }
-  ]
-}
-```
-
-For more information about workload identities, review the [Workload-identity
-Configuration documentation][workload-identity-configuration-documentation].
-
-Using Helm:
-
-```bash {hl_lines="2-7"}
-helm -n upbound-system upgrade --install spaces ... \
-  --set "billing.enabled=true" \
-  --set "billing.storage.provider=aws" \
-  --set "billing.storage.aws.region=<region>" \
-  --set "billing.storage.aws.bucket=<bucket-name>" \
-  --set "billing.storage.secretRef.name=" \
-  --set "controlPlanes.vector.serviceAccount.customAnnotations[eks.amazonaws.com/role-arn]=<role-arn>"
-  ...
-```
-
-Using the `up` CLI:
-
-```bash {hl_lines="2-7"}
-up space init ... \
-  --set "billing.enabled=true" \
-  --set "billing.storage.provider=aws" \
-  --set "billing.storage.aws.region=<region>" \
-  --set "billing.storage.aws.bucket=<bucket-name>" \
-  --set "billing.storage.secretRef.name=" \
-  --set "controlPlanes.vector.serviceAccount.customAnnotations[eks.amazonaws.com/role-arn]=<role-arn>"
-  ...
-```
-
-*Note*: You must set `billing.storage.secretRef.name` to an empty string when using an assumed role.
-
-## Azure blob storage
-
-Configure billing to write to a blob in Azure by providing the following values at install-time. Create a storage account and container if you don't already have one.
-
-Then, on the cluster where you installed the Spaces software, create a secret in `upbound-system`.
-This secret must contain keys `AZURE_TENANT_ID`, `AZURE_CLIENT_ID`, and `AZURE_CLIENT_SECRET`. Make sure to replace the values with details generated from your Azure account.
-
-```bash
-kubectl create secret generic billing-credentials -n upbound-system \
-  --from-literal=AZURE_TENANT_ID=<tenant-id> \
-  --from-literal=AZURE_CLIENT_ID=<client-id> \
-  --from-literal=AZURE_CLIENT_SECRET=<client-secret>
-```
-
-Install the Space software, providing the billing details along with the other required values.
-
-Using Helm:
-
-```bash {hl_lines="2-6"}
-helm -n upbound-system upgrade --install spaces ... \
-  --set "billing.enabled=true" \
-  --set "billing.storage.provider=azure" \
-  --set "billing.storage.azure.storageAccount=<storage-account>" \
-  --set "billing.storage.azure.container=<container>" \
-  --set "billing.storage.secretRef.name=billing-credentials"
-  ...
-```
-
-Using the `up` CLI:
-
-```bash {hl_lines="2-6"}
-up space init ... \
-  --set "billing.enabled=true" \
-  --set "billing.storage.provider=azure" \
-  --set "billing.storage.azure.storageAccount=<storage-account>" \
-  --set "billing.storage.azure.container=<container>" \
-  --set "billing.storage.secretRef.name=billing-credentials"
-  ...
-```
-
-## GCP Cloud Storage Buckets
-
-Configure billing to write to a Cloud Storage bucket in GCP by providing the following values at install-time. Create a bucket if you don't already have one.
-
-Then, on the cluster where you installed the Spaces software, create a secret in `upbound-system`. This secret must contain the key `google_application_credentials`. Make sure to replace the value with a GCP service account key JSON generated from your GCP account.
-
-```bash
-kubectl create secret generic billing-credentials -n upbound-system \
-  --from-literal=google_application_credentials=<service-account-key-json>
-```
-
-Install the Space software, providing the billing details along with the other required values.
-
-Using Helm:
-
-```bash {hl_lines="2-5"}
-helm -n upbound-system upgrade --install spaces ... \
-  --set "billing.enabled=true" \
-  --set "billing.storage.provider=gcp" \
-  --set "billing.storage.gcp.bucket=<bucket-name>" \
-  --set "billing.storage.secretRef.name=billing-credentials"
-  ...
-```
-
-Using the `up` CLI:
-
-```bash {hl_lines="2-5"}
-up space init ... \
-  --set "billing.enabled=true" \
-  --set "billing.storage.provider=gcp" \
-  --set "billing.storage.gcp.bucket=<bucket-name>" \
-  --set "billing.storage.secretRef.name=billing-credentials"
-  ...
-```
-
-## Export billing data to send to Upbound
-
-To prepare the billing data to send to Upbound, do the following:
-
-Ensure the current context of your kubeconfig points at the Spaces cluster. Then, run the [export][export] command.
-
-:::important
-Your current CLI must have read access to the bucket to run this command.
-:::
-
-The example below exports billing data stored in AWS:
-
-```bash
-up space billing export --provider=aws \
-  --bucket=spaces-billing-bucket \
-  --account=your-upbound-org \
-  --billing-month=2024-07 \
-  --force-incomplete
-```
-
-The command creates a billing report that's zipped up in your current working directory. Send the output to your Upbound sales representative.
-
-You can find full instructions and command options in the up [CLI reference][cli-reference] docs.
- - -[export]: /reference/cli-reference -[cli-reference]: /reference/cli-reference -[flagship-product]: https://www.upbound.io/platform -[workload-identity-configuration-documentation]: https://docs.upbound.io/operate/accounts/authentication/oidc-configuration diff --git a/spaces_versioned_docs/version-v1.9/howtos/self-hosted/capacity-licensing.md b/spaces_versioned_docs/version-v1.9/howtos/self-hosted/capacity-licensing.md deleted file mode 100644 index a1dc6c101..000000000 --- a/spaces_versioned_docs/version-v1.9/howtos/self-hosted/capacity-licensing.md +++ /dev/null @@ -1,591 +0,0 @@ ---- -title: Capacity Licensing -sidebar_position: 60 -description: A guide for capacity-based licensing in self-hosted Spaces -plan: "enterprise" ---- - - - - - -This guide explains how to configure and monitor capacity-based licensing in -self-hosted Upbound Spaces. Capacity licensing provides a simplified billing -model for disconnected or air-gapped environments where automated usage -reporting isn't possible. - -:::info -Spaces `v1.15` and later support Capacity Licensing as an -alternative to the traditional usage-based billing model described in the -[Self-Hosted Space Billing][space-billing] guide. -::: - -## Overview - -Capacity licensing allows organizations to purchase a fixed capacity of -resources upfront. The Spaces software tracks usage locally and provides -visibility into consumption against your purchased capacity, all without -requiring external connectivity to Upbound's services. - -### Key concepts - -- **Resource Hours**: The primary billing unit representing all resources - managed by Crossplane over time. This includes managed resources, - composites (XRs), claims (XRCs), and all composed resources - essentially - everything Crossplane manages. The system aggregates resource counts over each - hour using trapezoidal integration to accurately account for changes in - resource count throughout the hour. -- **Operations**: The number of Operations invoked by Crossplane. -- **License Capacity**: The total amount of resource hours and operations included in your license. -- **Usage Tracking**: Continuous monitoring of consumption with real-time utilization percentages. - -### How it works - -1. Upbound provides you with a license file containing your purchased capacity -2. You configure a `SpaceLicense` in your Spaces cluster -3. The metering system automatically: - - Collects measurements from all control planes every minute - - Aggregates usage data into hourly intervals - - Stores usage data in a local PostgreSQL database - - Updates the `SpaceLicense` status with current consumption - -## Prerequisites - -### PostgreSQL database - -Capacity licensing requires a PostgreSQL database to store usage measurements. You can use: - -- An existing PostgreSQL instance -- A managed PostgreSQL service (AWS RDS, Azure Database, Google Cloud SQL) -- A PostgreSQL instance deployed in your cluster - -The database must be: - -- Accessible from the Spaces cluster -- Configured with a dedicated database and credentials - -#### Example: Deploy PostgreSQL with CloudNativePG - -If you don't have an existing PostgreSQL instance, you can deploy one in your -cluster using [CloudNativePG] (CNPG). CNPG is a Kubernetes operator that -manages PostgreSQL clusters. - -1. Install the CloudNativePG operator: - -```bash -kubectl apply -f https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/release-1.24/releases/cnpg-1.24.1.yaml -``` - -2. 
Create a PostgreSQL cluster for metering: - -```yaml -apiVersion: postgresql.cnpg.io/v1 -kind: Cluster -metadata: - name: metering-postgres - namespace: upbound-system -spec: - instances: 1 - imageName: ghcr.io/cloudnative-pg/postgresql:16 - bootstrap: - initdb: - database: metering - owner: metering - postInitApplicationSQL: - - ALTER ROLE "metering" CREATEROLE; - storage: - size: 5Gi - # Optional: Configure resources for production use - # resources: - # requests: - # memory: "512Mi" - # cpu: "500m" - # limits: - # memory: "1Gi" - # cpu: "1000m" ---- -apiVersion: v1 -kind: Secret -metadata: - name: metering-postgres-app - namespace: upbound-system - labels: - cnpg.io/reload: "true" -stringData: - username: metering - password: "your-secure-password-here" -type: kubernetes.io/basic-auth -``` - -```bash -kubectl apply -f metering-postgres.yaml -``` - -3. Wait for the cluster to be ready: - -```bash -kubectl wait --for=condition=ready cluster/metering-postgres -n upbound-system --timeout=5m -``` - -4. You can access the PostgreSQL cluster at `metering-postgres-rw.upbound-system.svc.cluster.local:5432`. - -:::tip -For production deployments, consider: -- Increasing `instances` to 3 for high availability -- Configuring [backups] to object storage -- Setting appropriate resource requests and limits -- Using a dedicated storage class with good I/O performance -::: - -### License file - -Contact your Upbound sales representative to obtain a license file for your organization. The license file contains: -- Your unique license ID -- Purchased capacity (resource hours and operations) -- License validity period -- Any usage restrictions (such as cluster UUID pinning) - -## Configuration - -### Step 1: Create database credentials secret - -Create a Kubernetes secret containing your PostgreSQL password using the pgpass format: - -```bash -# Create a pgpass file with format: hostname:port:database:username:password -# Note: The database name and username must be 'metering' -# For CNPG clusters, use the read-write service endpoint: -rw..svc.cluster.local -echo "metering-postgres-rw.upbound-system.svc.cluster.local:5432:metering:metering:your-secure-password-here" > pgpass - -# Create the secret -kubectl create secret generic metering-postgres-credentials \ - -n upbound-system \ - --from-file=pgpass=pgpass - -# Clean up the pgpass file -rm pgpass -``` - -The secret must contain a single key: -- **`pgpass`**: PostgreSQL password file in the format `hostname:port:metering:metering:password` - -:::note -The database name and username are fixed as `metering`. Ensure your PostgreSQL instance has a database named `metering` with a user `metering` that has appropriate permissions. - -If you deployed PostgreSQL using CNPG as shown in the example above, the password should match what you set in the `metering-postgres-app` secret. -::: - -:::tip -For production environments, consider using external secret management solutions: -- [External Secrets Operator][eso] -- Cloud-specific secret managers (AWS Secrets Manager, Azure Key Vault, GCP Secret Manager) -::: - -### Step 2: Enable metering in Spaces - -Enable the metering feature when installing or upgrading Spaces: - - - - - -```bash {hl_lines="2-7"} -helm -n upbound-system upgrade --install spaces ... 
\ - --set "metering.enabled=true" \ - --set "metering.storage.postgres.connection.url=metering-postgres-rw.upbound-system.svc.cluster.local:5432" \ - --set "metering.storage.postgres.connection.credentials.secret.name=metering-postgres-credentials" \ - --set "metering.interval=1m" \ - --set "metering.workerCount=10" \ - --set "metering.aggregationInterval=1h" \ - --set "metering.measurementRetentionDays=30" - ... -``` - - - - - -```bash {hl_lines="2-7"} -up space init ... \ - --set "metering.enabled=true" \ - --set "metering.storage.postgres.connection.url=metering-postgres-rw.upbound-system.svc.cluster.local:5432" \ - --set "metering.storage.postgres.connection.credentials.secret.name=metering-postgres-credentials" \ - --set "metering.interval=1m" \ - --set "metering.workerCount=10" \ - --set "metering.aggregationInterval=1h" \ - --set "metering.measurementRetentionDays=30" - ... -``` - - - - - -#### Configuration options - -| Option | Default | Description | -|--------|---------|-------------| -| `metering.enabled` | `false` | Enable the metering feature | -| `metering.storage.postgres.connection.url` | - | PostgreSQL host and port (format: `host:port`, required) | -| `metering.storage.postgres.connection.credentials.secret.name` | - | Name of the secret containing PostgreSQL credentials (required) | -| `metering.storage.postgres.connection.sslmode` | `require` | SSL mode for PostgreSQL connection (`disable`, `allow`, `prefer`, `require`, `verify-ca`, `verify-full`) | -| `metering.storage.postgres.connection.ca.name` | - | Name of the secret containing CA certificate for TLS connections (optional) | -| `metering.interval` | `1m` | How often to collect measurements from control planes | -| `metering.workerCount` | `10` | Number of parallel workers for measurement collection | -| `metering.aggregationInterval` | `1h` | How often to aggregate measurements into hourly usage data | -| `metering.measurementRetentionDays` | `30` | Days to retain raw measurements (0 = indefinite) | - - -#### Database sizing and retention - -The metering system uses two PostgreSQL tables to track usage: - -**Raw measurements table** (`measurements`): -- Stores point-in-time snapshots collected every measurement interval (default: 1 minute) -- One row per control plane per interval -- Affected by the `measurementRetentionDays` setting -- Used for detailed auditing and troubleshooting - -**Aggregated usage table** (`hourly_usage`): -- Stores hourly aggregated resource hours and operations per license -- One row per hour per license -- Never deleted (required for accurate license tracking) -- Grows much slower than raw measurements - -##### Storage sizing guidelines - -Estimate your PostgreSQL storage needs based on these factors: - - -| Deployment Size | Control Planes | Measurement Interval | Retention Days | Raw Measurements | Indexes & Overhead | Total Storage | -|----------------|----------------|---------------------|----------------|------------------|-------------------|---------------| -| Small | 10 | 1m | 30 | ~85 MB | ~40 MB | **~125 MB** | -| Medium | 50 | 1m | 30 | ~430 MB | ~215 MB | **~645 MB** | -| Large | 200 | 1m | 30 | ~1.7 GB | ~850 MB | **~2.5 GB** | -| Large (90-day retention) | 200 | 1m | 90 | ~5.2 GB | ~2.6 GB | **~7.8 GB** | - -The aggregated hourly usage table adds minimal overhead (~50 KB per year per license). 
-
-**Formula for custom calculations**:
-```
-Daily measurements per control plane = (24 * 60) / interval_minutes
-Total rows = control_planes × daily_measurements × retention_days
-Storage (MB) ≈ (total_rows × 200 bytes) / 1,048,576 × 1.5 (with indexes)
-```
-
-For example, 50 control planes at a 1-minute interval retained for 30 days yield 50 × 1,440 × 30 = 2.16 million rows, or roughly 620 MB with indexes, which lines up with the medium deployment row in the table above.
-
-##### Retention behavior
-
-The `measurementRetentionDays` setting controls retention of raw measurement data:
-
-- **Default: 30 days** - Balances audit capabilities with storage efficiency
-- **Set to 0**: Disables cleanup, retains all raw measurements indefinitely
-- **Cleanup runs**: Every aggregation interval (default: hourly)
-- **What's kept forever**: Aggregated hourly usage data (needed for license tracking)
-- **What's cleaned up**: Raw point-in-time measurements older than retention period
-
-**Recommendations**:
-- **30 days**: For most troubleshooting and short-term auditing
-- **60 to 90 days**: For environments requiring extended audit trails
-- **Unlimited (0)**: Only for environments with ample storage or specific compliance requirements
-
-:::note
-Increasing the retention period linearly increases storage requirements for raw measurements. The aggregated hourly data is always retained regardless of this setting.
-:::
-
-### Step 3: Apply your license
-
-Use the `up` CLI to apply your license file:
-
-```bash
-up space license apply /path/to/license.json
-```
-
-This command automatically:
-
-- Creates a secret containing your license file in the `upbound-system` namespace
-- Creates the `SpaceLicense` resource configured to use that secret
-
-:::tip
-You can specify a different namespace for the license secret using the `--namespace` flag:
-```bash
-up space license apply /path/to/license.json --namespace my-namespace
-```
-:::
-Alternative: Manual kubectl approach - -If you prefer not to use the `up` CLI, you can manually create the resources: - -1. Create the license secret: - -```bash -kubectl create secret generic space-license \ - -n upbound-system \ - --from-file=license.json=/path/to/license.json -``` - -2. Create the SpaceLicense resource: - -```yaml -apiVersion: admin.spaces.upbound.io/v1alpha1 -kind: SpaceLicense -metadata: - name: space -spec: - secretRef: - name: space-license - namespace: upbound-system - key: license.json -``` - -```bash -kubectl apply -f spacelicense.yaml -``` - -:::important -You **must** name the `SpaceLicense` resource `space`. This resource is a singleton and only one can exist in the cluster. -::: - -
- -## Monitoring usage - -### Check license status - -Use the `up` CLI to view your license details and current usage: - -```bash -up space license show -``` - -Example output: - -``` -Spaces License Status: Valid (License is valid) - -Created: 2024-01-01T00:00:00Z -Expires: 2025-01-01T00:00:00Z - -Plan: enterprise - -Resource Hour Limit: 1000000 -Operation Limit: 500000 - -Enabled Features: -- spaces -- query-api -- backup-restore -``` - -The output shows: -- License validity status and any validation messages -- Creation and expiration dates -- Your commercial plan tier -- Capacity limits for resource hours and operations -- Enabled features in your license -- Any restrictions (such as cluster UUID pinning) - -
-<details>
-<summary>Alternative: View detailed status with kubectl</summary>
-
-For detailed information including usage statistics, use kubectl:
-
-```bash
-kubectl get spacelicense space -o yaml
-```
-
-Example output showing usage data:
-
-```yaml
-apiVersion: admin.spaces.upbound.io/v1alpha1
-kind: SpaceLicense
-metadata:
-  name: space
-spec:
-  secretRef:
-    name: space-license
-    namespace: upbound-system
-status:
-  conditions:
-  - type: LicenseValid
-    status: "True"
-    reason: Valid
-    message: "License is valid"
-  id: "lic_abc123xyz"
-  plan: "enterprise"
-  capacity:
-    resourceHours: 1000000
-    operations: 500000
-  usage:
-    resourceHours: 245680
-    operations: 12543
-    resourceHoursUtilization: "24.57%"
-    operationsUtilization: "2.51%"
-    firstMeasurement: "2024-01-15T10:00:00Z"
-    lastMeasurement: "2024-02-10T14:30:00Z"
-  createdAt: "2024-01-01T00:00:00Z"
-  expiresAt: "2025-01-01T00:00:00Z"
-  enabledFeatures:
-  - "spaces"
-  - "query-api"
-  - "backup-restore"
-```
-
-</details>
-
-
-### Understanding the status fields
-
-| Field | Description |
-|-------|-------------|
-| `status.id` | Unique license identifier |
-| `status.plan` | Your commercial plan (community, standard, enterprise) |
-| `status.capacity` | Total capacity included in your license |
-| `status.usage.resourceHours` | Total resource hours consumed |
-| `status.usage.operations` | Total operations performed |
-| `status.usage.resourceHoursUtilization` | Percentage of resource hours capacity used |
-| `status.usage.operationsUtilization` | Percentage of operations capacity used |
-| `status.usage.firstMeasurement` | When usage tracking began |
-| `status.usage.lastMeasurement` | Most recent usage update |
-| `status.expiresAt` | License expiration date |
-
-### Monitor with kubectl
-
-Watch your license utilization in real time:
-
-```bash
-kubectl get spacelicense space -w
-```
-
-Short output format:
-
-```
-NAME    PLAN         VALID   REASON   AGE
-space   enterprise   True    Valid    45d
-```
-
-## Managing licenses
-
-### Updating your license
-
-To update your license with a new license file (for example, when renewing or upgrading capacity), apply the new license:
-
-```bash
-up space license apply /path/to/new-license.json
-```
-
-This command replaces the existing license secret and updates the SpaceLicense resource.
-
-### Removing a license
-
-To remove a license:
-
-```bash
-up space license remove
-```
-
-This command:
-- Prompts for confirmation before proceeding
-- Removes the license secret
-
-To skip the confirmation prompt, use the `--force` flag:
-
-```bash
-up space license remove --force
-```
-
-## Troubleshooting
-
-### License not updating
-
-If the license status doesn't update with usage data:
-
-1. **Check metering controller logs**:
-   ```bash
-   kubectl logs -n upbound-system deployment/spaces-controller -c metering
-   ```
-
-2. **Check if the system captures your measurements**:
-
-   ```bash
-   # Connect to PostgreSQL and query the measurements table
-   kubectl exec -it <postgres-pod> -- psql -U <user> -d <database> \
-     -c "SELECT COUNT(*) FROM measurements WHERE timestamp > NOW() - INTERVAL '1 hour';"
-   ```
-
-### High utilization warnings
-
-If you're approaching your capacity limits:
-
-1. **Review resource usage** by control plane to identify high consumers
-2. **Contact your Upbound sales representative** to discuss capacity expansion
-3. **Optimize managed resources** by cleaning up unused resources
-
-### License validation failures
-
-If your license shows as invalid:
-
-1. **Check expiration date**: `kubectl get spacelicense space -o jsonpath='{.status.expiresAt}'`
-2. **Verify license file integrity**: Ensure the secret contains valid JSON
-3. **Check for cluster UUID restrictions**: Upbound pins some licenses to
-   specific clusters
-4. **Review controller logs** for detailed error messages
-
-## Differences from traditional billing
-
-### Capacity licensing
-
-- ✅ Works in disconnected environments
-- ✅ Provides real-time usage visibility
-- ✅ No manual data export required
-- ✅ Requires PostgreSQL database
-- ✅ Fixed capacity model
-
-### Traditional billing (object storage)
-
-- ❌ Requires periodic manual export
-- ❌ Delayed visibility into usage
-- ✅ Works with S3/Azure Blob/GCS
-- ❌ Requires cloud storage access
-- ✅ Pay-as-you-go model
-
-## Best practices
-
-### Database management
-
-1. **Regular backups**: Back up your metering database regularly to preserve usage history
-2. **Monitor database size**: Set appropriate retention periods to manage storage growth
-3.
**Use managed databases**: Consider managed PostgreSQL services for production
-4. **Connection pooling**: Use connection pooling for better performance at scale
-
-### License management
-
-1. **Monitor utilization**: Set up alerts before reaching 80% capacity
-2. **Plan renewals early**: Start renewal discussions 60 days before expiration
-3. **Track grace periods**: Note the `gracePeriodEndsAt` date for planning
-4. **Secure license files**: Treat license files as sensitive credentials
-
-### Operational monitoring
-
-1. **Set up dashboards**: Create Grafana dashboards for usage trends
-2. **Enable alerting**: Configure alerts for high utilization and expiration
-3. **Regular audits**: Periodically review usage patterns across control planes
-4. **Capacity planning**: Use historical data to predict future capacity needs
-
-## Next steps
-
-- Learn about [Observability] to monitor your Spaces deployment
-- Explore [Backup and Restore][backup-restore] to protect your control plane data
-- Review [Self-Hosted Space Billing][space-billing] for the traditional billing model
-- Contact [Upbound Sales][sales] to discuss capacity licensing options
-
-
-[space-billing]: /spaces/howtos/self-hosted/billing
-[CloudNativePG]: https://cloudnative-pg.io/
-[backups]: https://cloudnative-pg.io/documentation/current/backup_recovery/
-[backup-restore]: /spaces/howtos/backup-and-restore
-[sales]: https://www.upbound.io/contact
-[eso]: https://external-secrets.io/
-[Observability]: /spaces/howtos/observability
-
diff --git a/spaces_versioned_docs/version-v1.9/howtos/self-hosted/certs.md b/spaces_versioned_docs/version-v1.9/howtos/self-hosted/certs.md
deleted file mode 100644
index e517c250e..000000000
--- a/spaces_versioned_docs/version-v1.9/howtos/self-hosted/certs.md
+++ /dev/null
@@ -1,274 +0,0 @@
----
-title: Istio Ingress Gateway With Custom Certificates
-sidebar_position: 20
-description: Install self hosted spaces using istio ingress gateway in a Kind cluster
----
-
-:::important
-Prerequisites
-
-- Spaces Token available in a file
-- `docker login xpkg.upbound.io -u <access-id> -p <token>`
-- [`istioctl`][istioctl] installation
-- `jq` installation
-:::
-
-This document describes the installation of a self-hosted Space on an example `kind`
-cluster along with Istio Ingress Gateway and certificates. The service mesh and certificates
-installation is transferable to self-hosted Spaces in arbitrary clouds.
-
-## Create a kind cluster
-
-```shell
-cat <
-```
-
-## Install Istio
-
-:::important
-This is an example and not recommended for use in production.
-:::
-
-1. Create the `istio-values.yaml` file
-
-```shell
-cat > istio-values.yaml << 'EOF'
-apiVersion: install.istio.io/v1alpha1
-kind: IstioOperator
-spec:
-  hub: gcr.io/istio-release
-  components:
-    ingressGateways:
-      - enabled: true
-        name: istio-ingressgateway
-        k8s:
-          nodeSelector:
-            ingress-ready: "true"
-          overlays:
-            - apiVersion: apps/v1
-              kind: Deployment
-              name: istio-ingressgateway
-              patches:
-                - path: spec.template.spec.containers.[name:istio-proxy].ports
-                  value:
-                    - containerPort: 8080
-                      hostPort: 80
-                    - containerPort: 8443
-                      hostPort: 443
-EOF
-```
-
-2. Install istio via `istioctl`
-
-```shell
-istioctl install -f istio-values.yaml
-```
-
-## Create a self-signed Certificate via cert-manager
-
-:::important
-This Certificate manifest creates a self-signed certificate for a proof of concept
-environment and isn't recommended for production use cases.
-:::
-
-1.
Create the upbound-system namespace
-
-```shell
-kubectl create namespace upbound-system
-```
-
-2. Create a self-signed certificate
-
-```shell
-cat <
-```
-
-## Create an Istio Gateway and VirtualService
-
-Configure an Istio Gateway and VirtualService to use TLS passthrough.
-
-```shell
-cat <
-```
-
-## Install Upbound Spaces
-
-1. Create a `spaces-values.yaml` file
-
-```shell
-cat > spaces-values.yaml << 'EOF'
-# Configure spaces-router to use the TLS secret created by cert-manager.
-externalTLS:
-  tlsSecret:
-    name: example-tls-secret
-  caBundleSecret:
-    name: example-tls-secret
-    key: ca.crt
-ingress:
-  provision: false
-  # Allow Istio Ingress Gateway to communicate to the spaces-router
-  namespaceLabels:
-    kubernetes.io/metadata.name: istio-system
-  podLabels:
-    app: istio-ingressgateway
-    istio: ingressgateway
-EOF
-```
-
-2. Set the required environment variables
-
-```shell
-# Update these according to your account/token file
-export SPACES_TOKEN_PATH=<path-to-token-file>
-export UPBOUND_ACCOUNT=<account>
-# Replace SPACES_ROUTER_HOST with your Spaces ingress hostname
-export SPACES_ROUTER_HOST="proxy.upbound-127.0.0.1.nip.io"
-export SPACES_VERSION="1.14.1"
-```
-
-3. Create an image pull secret for Spaces
-
-```shell
-kubectl -n upbound-system create secret docker-registry upbound-pull-secret \
-  --docker-server=https://xpkg.upbound.io \
-  --docker-username="$(jq -r .accessId $SPACES_TOKEN_PATH)" \
-  --docker-password="$(jq -r .token $SPACES_TOKEN_PATH)"
-```
-
-4. Install the Spaces helm chart
-
-```shell
-# Login to xpkg.upbound.io
-jq -r .token $SPACES_TOKEN_PATH | helm registry login xpkg.upbound.io -u $(jq -r .accessId $SPACES_TOKEN_PATH) --password-stdin
-
-# Install spaces helm chart
-helm -n upbound-system upgrade --install spaces \
-  oci://xpkg.upbound.io/spaces-artifacts/spaces \
-  --version "${SPACES_VERSION}" \
-  --set "ingress.host=${SPACES_ROUTER_HOST}" \
-  --set "account=${UPBOUND_ACCOUNT}" \
-  --set "authentication.hubIdentities=true" \
-  --set "authorization.hubRBAC=true" \
-  --wait -f spaces-values.yaml
-```
-
-## Validate the installation
-
-Successfully using the `up` CLI to interact with your self-hosted Space validates the
-certificate installation:
-
-- `up ctx .`
-
-You can also issue control plane creation, list, and deletion commands:
-
-- `up ctp create cert-test`
-- `up ctp list`
-- `up ctx disconnected/kind-kind/default/cert-test && kubectl get namespace`
-- `up ctp delete cert-test`
-
-:::note
-If `up` can't connect to your control plane, follow [this guide to create a new profile][up-profile].
-:::
-
-## Troubleshooting
-
-Examine your certificate with `openssl`:
-
-```shell
-openssl s_client -connect proxy.upbound-127.0.0.1.nip.io:443 -showcerts
-```
-
-[istioctl]: https://istio.io/latest/docs/ops/diagnostic-tools/istioctl/
-[up-profile]: /manuals/cli/howtos/profile-config/
diff --git a/spaces_versioned_docs/version-v1.9/howtos/self-hosted/configure-ha.md b/spaces_versioned_docs/version-v1.9/howtos/self-hosted/configure-ha.md
deleted file mode 100644
index ddf36c55e..000000000
--- a/spaces_versioned_docs/version-v1.9/howtos/self-hosted/configure-ha.md
+++ /dev/null
@@ -1,450 +0,0 @@
----
-title: Production Scaling and High Availability
-description: Configure your Self-Hosted Space for production
-sidebar_position: 5
----
-
-This guide explains how to configure an existing Upbound Space deployment for
-production operation at scale.
-
-Use this guide when you're ready to deploy production scaling, high availability,
-and monitoring in your Space.
-
-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments.
-
-For API specifications on ControlPlane resources and configurations, see the [Spaces API Reference](../../../../../reference/apis/spaces-api/). For version compatibility details, see the .
-:::
-
-## Prerequisites
-
-Before you begin scaling your Spaces deployment, make sure you have:
-
-* A working Space deployment
-* Cluster administrator access
-* An understanding of load patterns and growth in your organization
-* Familiarity with node affinity, tainting, and Horizontal Pod Autoscaling
-  (HPA)
-
-## Production scaling strategy
-
-In this guide, you will:
-
-* Create dedicated node pools for different component types
-* Configure high availability to ensure there are no single points of failure
-* Set dynamic scaling for variable workloads
-* Optimize your storage and component operations
-* Monitor your deployment health and performance
-
-## Spaces architecture
-
-The basic Spaces workflow follows the pattern below:
-
-![Spaces workflow][spaces-workflow]
-
-## Node architecture
-
-You can mitigate resource contention and improve reliability by separating system
-components into dedicated node pools.
-
-### `etcd` dedicated nodes
-
-`etcd` performance directly impacts your entire Space, so isolate it for
-consistent performance.
-
-1. Create a dedicated `etcd` node pool
-
-   **Requirements:**
-   - **Minimum**: 3 nodes for HA
-   - **Instance type**: General purpose with high network throughput/low latency
-   - **Storage**: High performance storage (`etcd` is I/O sensitive)
-
-2. Taint `etcd` nodes to reserve them
-
-   ```bash
-   kubectl taint nodes <node-name> target=etcd:NoSchedule
-   ```
-
-3. Configure `etcd` storage
-
-   `etcd` is sensitive to storage I/O performance. Review the [`etcd` scaling
-   documentation][scaling]
-   for specific storage guidance.
-
-### API server dedicated nodes
-
-API servers handle all control plane requests and should run on dedicated
-infrastructure.
-
-1. Create dedicated API server nodes
-
-   **Requirements:**
-   - **Minimum**: 2 nodes for HA
-   - **Instance type**: Compute-optimized, memory-optimized, or general-purpose
-   - **Scaling**: Scale vertically based on API server load patterns
-
-2. Taint API server nodes
-
-   ```bash
-   kubectl taint nodes <node-name> target=apiserver:NoSchedule
-   ```
-
-### Configure cluster autoscaling
-
-Enable cluster autoscaling for all node pools.
-
-For AWS EKS clusters, Upbound recommends using [`Karpenter`][karpenter] for
-improved bin-packing and instance type selection.
-
-For GCP GKE clusters, follow the [GKE autoscaling][gke-autoscaling] guide.
-
-For Azure AKS clusters, follow the [AKS autoscaling][aks-autoscaling] guide.
-
-## Configure high availability
-
-Ensure control plane components can survive node and zone failures.
-
-### Enable high availability mode
-
-1. Configure control planes for high availability
-
-   ```yaml
-   controlPlanes:
-     ha:
-       enabled: true
-   ```
-
-   This configures control plane pods to run with multiple replicas and
-   associated pod disruption budgets.
-
-### Configure component distribution
-
-1.
Set up API server pod distribution - - ```yaml - controlPlanes: - vcluster: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: target - operator: In - values: - - apiserver - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - key: app - operator: In - values: - - vcluster - topologyKey: "kubernetes.io/hostname" - preferredDuringSchedulingIgnoredDuringExecution: - - podAffinityTerm: - labelSelector: - matchExpressions: - - key: app - operator: In - values: - - vcluster - topologyKey: topology.kubernetes.io/zone - weight: 100 - ``` - -2. Configure `etcd` pod distribution - - ```yaml - controlPlanes: - etcd: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: target - operator: In - values: - - etcd - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - key: app - operator: In - values: - - vcluster-etcd - topologyKey: "kubernetes.io/hostname" - preferredDuringSchedulingIgnoredDuringExecution: - - podAffinityTerm: - labelSelector: - matchExpressions: - - key: app - operator: In - values: - - vcluster-etcd - topologyKey: topology.kubernetes.io/zone - weight: 100 - ``` - -### Configure tolerations - -Allow control plane pods to schedule on the tainted dedicated nodes (available -in Spaces v1.14+). - -1. Add tolerations for `etcd` pods - - ```yaml - controlPlanes: - etcd: - tolerations: - - key: "target" - operator: "Equal" - value: "etcd" - effect: "NoSchedule" - ``` - -2. Add tolerations for API server pods - - ```yaml - controlPlanes: - vcluster: - tolerations: - - key: "target" - operator: "Equal" - value: "apiserver" - effect: "NoSchedule" - ``` - - -## Configure autoscaling for Spaces components - - -Set up the Spaces system components to handle variable load automatically. - -### Scale API and `apollo` services - -1. Configure minimum replicas for availability - - ```yaml - api: - replicaCount: 2 - - features: - alpha: - apollo: - enabled: true - replicaCount: 2 - ``` - - Both services support horizontal and vertical scaling based on load patterns. - -### Configure router autoscaling - -The `spaces-router` is the entry point for all traffic and needs intelligent -scaling. - - -1. Enable Horizontal Pod Autoscaler - - ```yaml - router: - hpa: - enabled: true - minReplicas: 2 - maxReplicas: 8 - targetCPUUtilizationPercentage: 80 - targetMemoryUtilizationPercentage: 80 - ``` - -2. Monitor scaling factors - - **Router scaling behavior:** - - **Vertical scaling**: Scales based on number of control planes - - **Horizontal scaling**: Scales based on request volume - - **Resource monitoring**: Monitor CPU and memory usage - - - -### Configure controller scaling - -The `spaces-controller` manages Space-level resources and requires vertical -scaling. - -1. Configure adequate resources with headroom - - ```yaml - controller: - resources: - requests: - cpu: "500m" - memory: "1Gi" - limits: - cpu: "2000m" - memory: "4Gi" - ``` - - **Important**: The controller can spike when reconciling large numbers of - control planes, so provide adequate headroom for resource spikes. - -## Set up production storage - - -### Configure Query API database - - -1. 
Use a managed PostgreSQL database
-
-   **Recommended services:**
-   - [AWS RDS][rds]
-   - [Google Cloud SQL][gke-sql]
-   - [Azure Database for PostgreSQL][aks-sql]
-
-   **Requirements:**
-   - Minimum 400 IOPS performance
-
-## Monitoring
-
-Monitor key metrics to ensure healthy scaling and identify issues quickly.
-
-### Control plane health
-
-Track these `spaces-controller` metrics:
-
-1. **Total control planes**
-
-   ```
-   spaces_control_plane_exists
-   ```
-
-   Tracks the total number of control planes in the system.
-
-2. **Degraded control planes**
-
-   ```
-   spaces_control_plane_degraded
-   ```
-
-   Tracks control planes that don't report a `Synced`, `Ready`, and
-   `Healthy` state.
-
-3. **Stuck control planes**
-
-   ```
-   spaces_control_plane_stuck
-   ```
-
-   Tracks control planes stuck in a provisioning state.
-
-4. **Deletion issues**
-
-   ```
-   spaces_control_plane_deletion_stuck
-   ```
-
-   Tracks control planes stuck during deletion.
-
-### Alerting
-
-Configure alerts for critical scaling and health metrics:
-
-- **High error rates**: Alert when 4xx/5xx response rates exceed thresholds
-- **Control plane health**: Alert when degraded or stuck control planes exceed acceptable counts
-
-## Architecture overview
-
-**Spaces System Components:**
-
-- **`spaces-router`**: Entry point for all endpoints, dynamically builds routes to control plane API servers
-- **`spaces-controller`**: Reconciles Space-level resources, serves webhooks, works with `mxp-controller` for provisioning
-- **`spaces-api`**: API for managing groups, control planes, shared secrets, and telemetry objects (accessed only through spaces-router)
-- **`spaces-apollo`**: Hosts the Query API, connects to PostgreSQL database populated by `apollo-syncer` pods
-
-**Control Plane Components (per control plane):**
-
-- **`mxp-controller`**: Handles provisioning tasks, serves webhooks, installs UXP and `XGQL`
-- **`XGQL`**: GraphQL API powering console views
-- **`kube-state-metrics`**: Collects usage metrics for billing (updated by `mxp-controller` when CRDs change)
-- **`vector`**: Works with `kube-state-metrics` to send usage data to external storage for billing
-- **`apollo syncer`**: Syncs `etcd` data into PostgreSQL for the Query API
-
-### `up ctx` workflow
-
-up ctx workflow diagram
-
-### Access a control plane API server via kubectl
-
-kubectl workflow diagram
-
-### Query API/Apollo
-
-query API workflow diagram
-
-## See also
-
-* [Upbound Spaces deployment requirements][deployment]
-* [Upbound `etcd` scaling resources][scaling]
-
-[up-ctx-workflow]: /img/up-ctx-workflow.png
-[kubectl]: /img/kubectl-workflow.png
-[query-api]: /img/query-api-workflow.png
-[spaces-workflow]: /img/up-basic-flow.png
-[rds]: https://aws.amazon.com/rds/postgresql/
-[gke-sql]: https://cloud.google.com/kubernetes-engine/docs/tutorials/stateful-workloads/postgresql
-[aks-sql]: https://learn.microsoft.com/en-us/azure/aks/deploy-postgresql-ha?tabs=azuredisk
-[deployment]: https://docs.upbound.io/spaces/howtos/self-hosted/deployment-reqs/
-[karpenter]: https://docs.aws.amazon.com/eks/latest/best-practices/karpenter.html
-[gke-autoscaling]: https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-autoscaler
-[aks-autoscaling]: https://learn.microsoft.com/en-us/azure/aks/cluster-autoscaler-overview
-[scaling]: https://docs.upbound.io/deploy/self-hosted-spaces/scaling-resources#scaling-etcd-storage
diff --git a/spaces_versioned_docs/version-v1.9/howtos/self-hosted/controllers.md
b/spaces_versioned_docs/version-v1.9/howtos/self-hosted/controllers.md
deleted file mode 100644
index 692740638..000000000
--- a/spaces_versioned_docs/version-v1.9/howtos/self-hosted/controllers.md
+++ /dev/null
@@ -1,389 +0,0 @@
----
-title: Controllers
-weight: 250
-description: A guide to how to wrap and deploy an Upbound controller into control planes on Upbound.
----
-
-:::important
-This feature is in private preview for select customers in Upbound Spaces. If you're interested in this feature, please [contact us](https://www.upbound.io/contact-us).
-:::
-
-Upbound's _Controllers_ feature lets you build and deploy control plane software from the Kubernetes ecosystem. With the _Controllers_ feature, you're not limited to just managing resource types defined by Crossplane. Now you can create resources from _CustomResourceDefinitions_ defined by other Kubernetes ecosystem tooling.
-
-This guide explains how to bundle and deploy control plane software from the Kubernetes ecosystem on a control plane in Upbound.
-
-## Benefits
-
-The Controllers feature provides the following benefits:
-
-* Deploy control plane software from the Kubernetes ecosystem.
-* Use your control plane's package manager to handle the lifecycle of the control plane software and define dependencies between packages.
-* Build powerful compositions that combine both Crossplane and Kubernetes _CustomResources_.
-
-## How it works
-
-A _Controller_ is a package type that bundles control plane software from the Kubernetes ecosystem. Examples of such software include:
-
-- Kubernetes policy engines
-- CI/CD tooling
-- Your own private custom controllers defined by your organization
-
-You build a _Controller_ package by wrapping a Helm chart along with its requisite _CustomResourceDefinitions_. Your _Controller_ package gets pushed to an OCI registry, and from there you can apply it to a control plane like you would any other Crossplane package. Your control plane's package manager is responsible for managing the lifecycle of the software once applied.
-
-## Prerequisites
-
-Enable the Controllers feature in the Space you plan to run your control plane in:
-
-- Cloud Spaces: Not available yet
-- Connected Spaces: Space administrator must enable this feature
-- Disconnected Spaces: Space administrator must enable this feature
-
-Packaging a _Controller_ requires [up CLI][cli] `v0.39.0` or later.
-
-## Build a _Controller_ package
-
-_Controllers_ are a package type that your control plane's package manager administers.
-
-### Prepare the package
-
-To define a _Controller_, you need a Helm chart. This guide assumes the control plane software you want to build into a _Controller_ already has a Helm chart available.
-
-Start by making a working directory to assemble the necessary parts:
-
-```ini
-mkdir controller-package
-cd controller-package
-```
-
-Inside the working directory, pull the Helm chart:
-
-```shell
-export CHART_REPOSITORY=<chart-repository>
-export CHART_NAME=<chart-name>
-export CHART_VERSION=<chart-version>
-
-helm pull $CHART_NAME --repo $CHART_REPOSITORY --version $CHART_VERSION
-```
-
-Be sure to update the Helm chart repository, name, and version with your own.
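-
-For example, wrapping the Kyverno policy engine (a hypothetical choice for
-illustration; substitute your own chart details) looks like this:
-
-```shell
-export CHART_REPOSITORY=https://kyverno.github.io/kyverno
-export CHART_NAME=kyverno
-export CHART_VERSION=3.2.6
-
-# Downloads kyverno-3.2.6.tgz into the working directory
-helm pull $CHART_NAME --repo $CHART_REPOSITORY --version $CHART_VERSION
-```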
-
-Move the Helm chart into its own folder:
-
-```ini
-mkdir helm
-mv $CHART_NAME-$CHART_VERSION.tgz helm/chart.tgz
-```
-
-Unpack the CRDs from the Helm chart into their own directory:
-
-```shell
-export RELEASE_NAME=<release-name>
-export RELEASE_NAMESPACE=<release-namespace>
-
-mkdir crds
-helm template $RELEASE_NAME helm/chart.tgz -n $RELEASE_NAMESPACE --include-crds | \
-  yq e 'select(.kind == "CustomResourceDefinition")' - | \
-  yq -s '("crds/" + .metadata.name + ".yaml")' -
-```
-
-Be sure to update the Helm release name and namespace with your own.
-
-:::info
-The instructions above assume your CRDs get deployed as part of your Helm chart. If they're deployed another way, you need to manually copy your CRDs instead.
-:::
-
-Create a `crossplane.yaml` with your controller metadata:
-
-```yaml
-cat <<EOF > crossplane.yaml
-apiVersion: meta.pkg.upbound.io/v1alpha1
-kind: Controller
-metadata:
-  annotations:
-    friendly-name.meta.crossplane.io: Controller <name>
-    meta.crossplane.io/description: |
-      A brief description of what the controller does.
-    meta.crossplane.io/license: Apache-2.0
-    meta.crossplane.io/maintainer: <maintainer>
-    meta.crossplane.io/readme: |
-      An explanation of your controller.
-    meta.crossplane.io/source: <source-repository>
-  name: <controller-name>
-spec:
-  packagingType: Helm
-  helm:
-    releaseName: <release-name>
-    releaseNamespace: <release-namespace>
-    # Value overrides for the helm release can be provided below.
-    # values:
-    #   foo: bar
-EOF
-```
-
-Your controller's file structure should look like this:
-
-```ini
-.
-├── crds
-│   ├── your-crd.yaml
-│   ├── second-crd.yaml
-│   └── another-crd.yaml
-├── crossplane.yaml
-└── helm
-    └── chart.tgz
-```
-
-### Package and push the _Controller_
-
-At the root of your controller's working directory, build the contents into an xpkg:
-
-```ini
-up xpkg build
-```
-
-This causes an xpkg to get saved to your current directory with a name like `controller-f7091386b4c0.xpkg`.
-
-Push the package to your desired OCI registry:
-
-```shell
-export UPBOUND_ACCOUNT=<account>
-export CONTROLLER_NAME=<controller-name>
-export CONTROLLER_VERSION=<controller-version>
-export XPKG_FILENAME=<xpkg-filename>
-
-up xpkg push xpkg.upbound.io/$UPBOUND_ACCOUNT/$CONTROLLER_NAME:$CONTROLLER_VERSION -f $XPKG_FILENAME
-```
-
-## Deploy a _Controller_ package
-
-:::important
-_Controllers_ are only installable on control planes running Crossplane `v1.19.0` or later.
-:::
-
-Set your kubecontext to the desired control plane in Upbound. Change the package path to the OCI registry you pushed it to. Then, deploy the _Controller_ directly:
-
-```shell
-export CONTROLLER_NAME=<controller-name>
-export CONTROLLER_VERSION=<controller-version>
-```
-
-## Example: Package and deploy Argo CD as a _Controller_
-
-Create a `crossplane.yaml` with the controller metadata:
-
-```yaml
-cat <<EOF > crossplane.yaml
-apiVersion: meta.pkg.upbound.io/v1alpha1
-kind: Controller
-metadata:
-  annotations:
-    friendly-name.meta.crossplane.io: Controller ArgoCD
-    meta.crossplane.io/description: |
-      The ArgoCD Controller enables continuous delivery and declarative configuration
-      management for Kubernetes applications using GitOps principles.
-    meta.crossplane.io/license: Apache-2.0
-    meta.crossplane.io/maintainer: Upbound Maintainers
-    meta.crossplane.io/readme: |
-      ArgoCD is a declarative GitOps continuous delivery tool for Kubernetes that
-      follows the GitOps methodology to manage infrastructure and application
-      configurations.
-    meta.crossplane.io/source: https://github.com/argoproj/argo-cd
-  name: argocd
-spec:
-  packagingType: Helm
-  helm:
-    releaseName: argo-cd
-    releaseNamespace: argo-system
-    # values:
-    #   foo: bar
-EOF
-```
-
-Your controller's file structure should look like this:
-
-```ini
-.
-├── crds
-│   ├── applications.argoproj.io.yaml
-│   ├── applicationsets.argoproj.io.yaml
-│   └── appprojects.argoproj.io.yaml
-├── crossplane.yaml
-└── helm
-    └── chart.tgz
-```
-
-### Package and push controller-argocd
-
-At the root of your controller's working directory, build the contents into an xpkg:
-
-```ini
-up xpkg build
-```
-
-This causes an xpkg to get saved to your current directory with a name like `argocd-f7091386b4c0.xpkg`.
-
-Push the package to your desired OCI registry:
-
-```shell
-export UPBOUND_ACCOUNT=<account>
-export CONTROLLER_NAME=controller-argocd
-export CONTROLLER_VERSION=v7.8.8
-export XPKG_FILENAME=<xpkg-filename>
-
-up xpkg push --create xpkg.upbound.io/$UPBOUND_ACCOUNT/$CONTROLLER_NAME:$CONTROLLER_VERSION -f $XPKG_FILENAME
-```
-
-### Deploy controller-argocd to a control plane
-
-Set your kubecontext to the desired control plane in Upbound. Change the package path to the OCI registry you pushed it to. Then, deploy the _Controller_ directly:
-
-```ini
-cat <
-```
-
-## Frequently asked questions
-
-<details>
-<summary>Can I package any software or are there any prerequisites to be a Controller?</summary>
-
-We define a *Controller* as software that has at least one Custom Resource Definition (CRD) and a Kubernetes controller for that CRD. This is the minimum requirement to be a *Controller*. We have some checks to enforce this at packaging time.
-
-</details>
-
-<details>
-<summary>How can I package my software as a Controller?</summary>
-
-Currently, we support Helm charts as the underlying package format for *Controllers*. As long as you have a Helm chart, you can package it as a *Controller*.
-
-If you don't have a Helm chart, you can't package your software as a *Controller* today. We may extend this to support other packaging formats, like Kustomize, in the future.
-
-</details>
-
-<details>
-<summary>Can I package Crossplane XRDs/Compositions as a Helm chart to deploy as a Controller?</summary>
-
-This is not recommended. For packaging Crossplane XRDs and Compositions, we recommend using the `Configuration` package format. A Helm chart containing only Crossplane XRDs and Compositions doesn't qualify as a *Controller*.
-
-</details>
-
-<details>
-<summary>How can I override the Helm values when deploying a Controller?</summary>
-
-Overriding the Helm values is possible at two levels:
-
-- During packaging time, in the package manifest file (as sketched below).
-- At runtime, using a `ControllerRuntimeConfig` resource (similar to Crossplane `DeploymentRuntimeConfig`).
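-
-At packaging time, that means populating the commented `values` stanza in
-`crossplane.yaml`. A small sketch; the keys follow the wrapped chart's own
-values schema, and the `controller.replicas` override here is purely
-illustrative:
-
-```yaml
-spec:
-  packagingType: Helm
-  helm:
-    releaseName: argo-cd
-    releaseNamespace: argo-system
-    values:
-      controller:
-        replicas: 2
-```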
-
-</details>
-
-<details>
-<summary>How can I configure the helm release name and namespace for the controller?</summary>
-
-Right now, it's not possible to configure these at runtime. The package author configures the release name and namespace during packaging, so they're hardcoded inside the package. Unlike a regular application deployed from a Helm chart, a *Controller* can only be deployed once in a given control plane, so relying on predefined release names and namespaces should be acceptable. We may consider exposing these in `ControllerRuntimeConfig` later, but we'd like to keep this opinionated unless there are strong reasons to do so.
-
-</details>
-
-<details>
-<summary>Can I deploy more than one instance of a Controller package?</summary>
-
-No, this is not possible. Remember, a *Controller* package introduces CRDs, which are cluster-scoped objects. Just like you can't deploy more than one instance of the same Crossplane Provider package today, it's not possible to deploy more than one instance of a *Controller*.
-
-</details>
-
-<details>
-<summary>Do I need a specific Crossplane version to run Controllers?</summary>
-
-Yes, you need to use Crossplane v1.19.0 or later to use *Controllers*. This is because of the changes in the Crossplane codebase to support third-party package formats in dependencies.
-
-Spaces `v1.12.0` supports Crossplane `v1.19` in the *Rapid* release channel.
-
-</details>
-
-<details>
-<summary>Can I deploy Controllers outside of an Upbound control plane? With UXP?</summary>
-
-No, *Controllers* are a proprietary package format and are only available for control planes running in Spaces hosting environments in Upbound.
-
-</details>
-
-[cli]: /manuals/uxp/overview
-
diff --git a/spaces_versioned_docs/version-v1.9/howtos/self-hosted/ctp-audit-logs.md b/spaces_versioned_docs/version-v1.9/howtos/self-hosted/ctp-audit-logs.md
deleted file mode 100644
index 52f52c776..000000000
--- a/spaces_versioned_docs/version-v1.9/howtos/self-hosted/ctp-audit-logs.md
+++ /dev/null
@@ -1,549 +0,0 @@
----
-title: Control plane audit logging
----
-
-This guide explains how to enable and configure audit logging for control planes
-in Self-Hosted Upbound Spaces.
-
-Starting in Spaces `v1.14.0`, each control plane contains an API server that
-supports audit log collection. You can use audit logging to track creation,
-updates, and deletions of Crossplane resources. Control plane audit logs
-use observability features to collect audit logs with `SharedTelemetryConfig` and
-send logs to an OpenTelemetry (`OTEL`) collector.
-
-:::info API Version Information
-This guide applies to **Spaces v1.14.0 and later** (Self-Hosted only). Audit logging is not available in earlier versions.
-
-For API specifications on observability resources, see the [Spaces API Reference - v1.14](../../../../../1.14/reference/apis/spaces-api/). For details on observability evolution across versions, see the .
-:::
-
-## Prerequisites
-
-Before you begin, make sure you have:
-
-* Spaces `v1.14.0` or greater
-* Admin access to your Spaces host cluster
-* `kubectl` configured to access the host cluster
-* `helm` installed
-* `yq` installed
-* `up` CLI installed and logged in to your organization
-
-## Enable observability
-
-Observability graduated to General Availability in `v1.14.0` but is disabled by
-default.
-
-### Before `v1.14`
-
-To enable the GA Observability feature, upgrade your Spaces installation to `v1.14.0`
-or later and update your installation setting to the new flag:
-
-```diff
-helm upgrade spaces upbound/spaces -n upbound-system \
--  --set "features.alpha.observability.enabled=true"
-+  --set "observability.enabled=true"
-```
-
-### After `v1.14`
-
-To enable the GA Observability feature for `v1.14.0` and later, pass the feature
-flag:
-
-```sh
-helm upgrade spaces upbound/spaces -n upbound-system \
-  --set "observability.enabled=true"
-```
-
-To confirm Observability is enabled, run the `helm get values` command:
-
-```shell
-helm get values --namespace upbound-system spaces | yq .observability
-```
-
-Your output should return:
-
-```shell-noCopy
- enabled: true
-```
-
-## Install an observability backend
-
-:::note
-If you already have an observability backend in your environment, skip to the
-next section.
-:::
-
-For this guide, you'll use Grafana's `docker-otel-lgtm` bundle to validate audit log
-generation. For production environments, configure a dedicated observability
-backend like Datadog, Splunk, or an enterprise-grade Grafana stack.
-
-First, make sure your `kubectl` context points to your Spaces host cluster:
-
-```shell
-kubectl config current-context
-```
-
-The output should return your cluster name.
-
-Next, install `docker-otel-lgtm` as a deployment using port-forwarding to
-connect to Grafana.
Create a manifest file and paste the -following configuration: - -```yaml title="otel-lgtm.yaml" -apiVersion: v1 -kind: Namespace -metadata: - name: observability ---- -apiVersion: v1 -kind: Service -metadata: - labels: - app: otel-lgtm - name: otel-lgtm - namespace: observability -spec: - ports: - - name: grpc - port: 4317 - protocol: TCP - targetPort: 4317 - - name: http - port: 4318 - protocol: TCP - targetPort: 4318 - - name: grafana - port: 3000 - protocol: TCP - targetPort: 3000 - selector: - app: otel-lgtm ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: otel-lgtm - labels: - app: otel-lgtm - namespace: observability -spec: - replicas: 1 - selector: - matchLabels: - app: otel-lgtm - template: - metadata: - labels: - app: otel-lgtm - spec: - containers: - - name: otel-lgtm - image: grafana/otel-lgtm - ports: - - containerPort: 4317 - - containerPort: 4318 - - containerPort: 3000 -``` - -Next, apply the manifest: - -```shell -kubectl apply --filename otel-lgtm.yaml -``` - -Your output should return the resources: - -```shell -namespace/observability created - service/otel-lgtm created - deployment.apps/otel-lgtm created -``` - -To verify your resources deployed, use `kubectl get` to display resources with -an `ACTIVE` or `READY` status. - -Next, forward the Grafana port: - -```shell -kubectl port-forward svc/otel-lgtm --namespace observability 3000:3000 -``` - -Now you can access the Grafana UI at http://localhost:3000. - - -## Create an audit-enabled control plane - -To enable audit logging for a control plane, you need to label it so the -`SharedTelemetryConfig` can identify and apply audit settings. This section -creates a new control plane with the `audit-enabled: "true"` label. The -`audit-enabled: "true"` label marks this control plane for audit logging. The -`SharedTelemetryConfig` (created in the next section) finds control planes with -this label and enables audit logging on them. - -Create a new manifest file and paste the configuration below: - -
-```yaml title="ctp-audit.yaml" -apiVersion: v1 -kind: Namespace -metadata: - name: audit-test ---- -apiVersion: spaces.upbound.io/v1beta1 -kind: ControlPlane -metadata: - labels: - audit-enabled: "true" - name: ctp1 - namespace: audit-test -spec: - writeConnectionSecretToRef: - name: kubeconfig-ctp1 - namespace: audit-test -``` -
- -The `metadata.labels` section contains the `audit-enabled` setting. - -Apply the manifest: - -```shell -kubectl apply --filename ctp-audit.yaml -``` - -Confirm your control plane reaches the `READY` status: - -```shell -kubectl get --filename ctp-audit.yaml -``` - -## Create a `SharedTelemetryConfig` - -The `SharedTelemetryConfig` applies to all control plane objects in a namespace -and enables audit logging and routes logs to your `OTEL` endpoint. - -Create a `SharedTelemetryConfig` manifest file and paste the configuration -below: - -
-```yaml title="sharedtelemetryconfig.yaml" -apiVersion: observability.spaces.upbound.io/v1alpha1 -kind: SharedTelemetryConfig -metadata: - name: apiserver-audit - namespace: audit-test -spec: - apiServer: - audit: - enabled: true - exporters: - otlphttp: - endpoint: http://otel-lgtm.observability:4318 - exportPipeline: - logs: [otlphttp] - controlPlaneSelector: - labelSelectors: - - matchLabels: - audit-enabled: "true" -``` -
-
-This configuration:
-
-* Sets `apiServer.audit.enabled` to `true`
-* Configures the `otlphttp` exporter to point to the `docker-otel-lgtm` service
-* Uses `controlPlaneSelector` to match any control plane in the namespace with the `audit-enabled` label set to `true`
-
-:::note
-You can configure the `SharedTelemetryConfig` to select control planes in
-several ways. For more information on control plane selection, see the [control
-plane selection][ctp-selection] documentation.
-:::
-
-Apply the `SharedTelemetryConfig`:
-
-```shell
-kubectl apply --filename sharedtelemetryconfig.yaml
-```
-
-Confirm the configuration selected the control plane:
-
-```shell
-kubectl get --filename sharedtelemetryconfig.yaml
-```
-
-The output should return `SELECTED` as `1` and `VALIDATED` as `TRUE`.
-
-For more detailed status information, use `kubectl get`:
-
-```shell
-kubectl get --filename sharedtelemetryconfig.yaml --output yaml | yq .status
-```
-
-## Generate and monitor audit events
-
-You enabled telemetry on your new control plane and can now generate events to
-test the audit logging. This guide uses the `nop-provider` to simulate resource
-operations.
-
-Switch your `up` context to the new control plane:
-
-```shell
-up ctx <profile>/<space>/<group>/<control-plane>
-```
-
-Create a new Provider manifest:
-
-```yaml title="provider-nop.yaml"
-apiVersion: pkg.crossplane.io/v1
-kind: Provider
-metadata:
-  name: crossplane-contrib-provider-nop
-spec:
-  package: xpkg.upbound.io/crossplane-contrib/provider-nop:v0.4.0
-```
-
-Apply the provider manifest:
-
-```shell
-kubectl apply --filename provider-nop.yaml
-```
-
-Verify the provider installed and that its `HEALTHY` status reports `True`.
-
-Apply an example resource to kick off event generation:
-
-```shell
-kubectl apply --filename https://raw.githubusercontent.com/crossplane-contrib/provider-nop/refs/heads/main/examples/nopresource.yaml
-```
-
-In your Grafana dashboard, navigate to **Drilldown** > **Logs** under the
-Grafana menu.
-
-Filter for `controlplane-audit` log messages.
-
-Create a query to find `create` events on `nopresources` by filtering:
-
-* The `verb` field for `create` events
-* The `objectRef_resource` field to match the Kind `nopresources`
-
-Review the audit log results. The log stream displays:
-
-* The client applying the create operation
-* The resource kind
-* Client details
-* The response code
-
-Expand the example below for an audit log entry:
-
-<details>
-<summary>Audit log entry</summary>
-
-```json
-{
-  "level": "Metadata",
-  "auditID": "51bbe609-14ad-4874-be78-1289c10d506a",
-  "stage": "ResponseComplete",
-  "requestURI": "/apis/nop.crossplane.io/v1alpha1/nopresources?fieldManager=kubectl-client-side-apply&fieldValidation=Strict",
-  "verb": "create",
-  "user": {
-    "username": "kubernetes-admin",
-    "groups": ["system:masters", "system:authenticated"]
-  },
-  "impersonatedUser": {
-    "username": "upbound:spaces:host:masterclient",
-    "groups": [
-      "system:authenticated",
-      "upbound:controlplane:admin",
-      "upbound:spaces:host:system:masters"
-    ]
-  },
-  "sourceIPs": ["10.244.0.135", "127.0.0.1"],
-  "userAgent": "kubectl/v1.32.2 (darwin/arm64) kubernetes/67a30c0",
-  "objectRef": {
-    "resource": "nopresources",
-    "name": "example",
-    "apiGroup": "nop.crossplane.io",
-    "apiVersion": "v1alpha1"
-  },
-  "responseStatus": { "metadata": {}, "code": 201 },
-  "requestReceivedTimestamp": "2025-09-19T23:03:24.540067Z",
-  "stageTimestamp": "2025-09-19T23:03:24.557583Z",
-  "annotations": {
-    "authorization.k8s.io/decision": "allow",
-    "authorization.k8s.io/reason": "RBAC: allowed by ClusterRoleBinding \"controlplane-admin\" of ClusterRole \"controlplane-admin\" to Group \"upbound:controlplane:admin\""
-  }
-}
-```
-
-</details>
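-
-If you're querying the bundled Loki directly instead of using the Grafana UI,
-the same filter can be expressed in LogQL. A sketch, assuming Grafana's
-`logcli` is configured against the `otel-lgtm` Loki and that the logs carry
-the `controlplane-audit` service name shown above:
-
-```shell
-logcli query '{service_name="controlplane-audit"} | json | verb="create" | objectRef_resource="nopresources"'
-```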
- -## Customize the audit policy - -Spaces `v1.14.0` includes a default audit policy. You can customize this policy -by creating a configuration file and passing the values to -`observability.collectors.apiServer.auditPolicy` in the helm values file. - -An example custom audit policy: - -```yaml -observability: - controlPlanes: - apiServer: - auditPolicy: | - apiVersion: audit.k8s.io/v1 - kind: Policy - rules: - # ============================================================================ - # RULE 1: Exclude health check and version endpoints - # ============================================================================ - - level: None - nonResourceURLs: - - '/healthz*' - - '/readyz*' - - /version - # ============================================================================ - # RULE 2: ConfigMaps - Write operations only - # ============================================================================ - - level: Metadata - resources: - - group: "" - resources: - - configmaps - verbs: - - create - - update - - patch - - delete - omitStages: - - RequestReceived - - ResponseStarted - # ============================================================================ - # RULE 3: Secrets - ALL operations - # ============================================================================ - - level: Metadata - resources: - - group: "" - resources: - - secrets - verbs: - - get - - list - - watch - - create - - update - - patch - - delete - omitStages: - - RequestReceived - - ResponseStarted - # ============================================================================ - # RULE 4: Global exclusion of read-only operations - # ============================================================================ - - level: None - verbs: - - get - - list - - watch - # ========================================================================== - # RULE 5: Exclude standard Kubernetes resources from write operation logging - # ========================================================================== - - level: None - resources: - - group: "" - - group: "apps" - - group: "networking.k8s.io" - - group: "policy" - - group: "rbac.authorization.k8s.io" - - group: "storage.k8s.io" - - group: "batch" - - group: "autoscaling" - - group: "metrics.k8s.io" - - group: "node.k8s.io" - - group: "scheduling.k8s.io" - - group: "coordination.k8s.io" - - group: "discovery.k8s.io" - - group: "events.k8s.io" - - group: "flowcontrol.apiserver.k8s.io" - - group: "internal.apiserver.k8s.io" - - group: "authentication.k8s.io" - - group: "authorization.k8s.io" - - group: "admissionregistration.k8s.io" - verbs: - - create - - update - - patch - - delete - # ============================================================================ - # RULE 6: Catch-all for ALL custom resources and any missed resources - # ============================================================================ - - level: Metadata - verbs: - - create - - update - - patch - - delete - omitStages: - - RequestReceived - - ResponseStarted - # ============================================================================ - # RULE 7: Final catch-all - exclude everything else - # ============================================================================ - - level: None - omitStages: - - RequestReceived - - ResponseStarted -``` -You can apply this policy during Spaces installation or upgrade using the helm values file. - -Audit policies use rules evaluated in order from top to bottom where the first -matching rule applies. 
Control plane audit policies follow Kubernetes conventions and use the
-following logging levels:
-
-* **None** - Don't log events matching this rule
-* **Metadata** - Log request metadata (user, timestamp, resource, verb) but not request or response bodies
-* **Request** - Log metadata and request body but not response body
-* **RequestResponse** - Log metadata, request body, and response body
-
-For more information, review the Kubernetes [Auditing] documentation.
-
-## Disable audit logging
-
-You can disable audit logging on a control plane by removing it from the
-`SharedTelemetryConfig` selector or by deleting the `SharedTelemetryConfig`.
-
-### Disable for specific control planes
-
-Remove the `audit-enabled` label from control planes that should stop sending audit logs:
-
-```bash
-kubectl label controlplane <controlplane-name> --namespace <namespace> audit-enabled-
-```
-
-The `SharedTelemetryConfig` no longer selects this control plane, and audit log collection stops.
-
-### Disable for all control planes
-
-Delete the `SharedTelemetryConfig` to stop audit logging for all control planes it manages:
-
-```bash
-kubectl delete sharedtelemetryconfig <name> --namespace <namespace>
-```
-
-[ctp-selection]: /spaces/howtos/observability/#control-plane-selection
-[Auditing]: https://kubernetes.io/docs/tasks/debug/debug-cluster/audit/
diff --git a/spaces_versioned_docs/version-v1.9/howtos/self-hosted/declarative-ctps.md b/spaces_versioned_docs/version-v1.9/howtos/self-hosted/declarative-ctps.md
deleted file mode 100644
index 2c3e5331b..000000000
--- a/spaces_versioned_docs/version-v1.9/howtos/self-hosted/declarative-ctps.md
+++ /dev/null
@@ -1,110 +0,0 @@
----
-title: Declaratively create control planes
-sidebar_position: 99
-description: A tutorial to configure a Space with Argo to declaratively create and
-  manage control planes
----
-
-In this tutorial, you learn how to configure [Argo CD][argo-cd] to communicate with a self-hosted Space. This flow allows you to declaratively create and manage control planes from Git. Argo CD is a continuous delivery tool for Kubernetes that you can use to drive GitOps flows for your control plane infrastructure.
-
-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments.
-
-For API specifications on ControlPlane resources and their declarative creation, see the [Spaces API Reference](../../../../../reference/apis/spaces-api/). For version compatibility details, see the .
-:::
-
-## Prerequisites
-
-To complete this tutorial, you need the following:
-
-- Have already deployed an Upbound Space.
-- Have already deployed an instance of Argo CD on a Kubernetes cluster.
-
-## Connect your Space to Argo CD
-
-Fetch the kubeconfig for the Space cluster, the Kubernetes cluster where you installed the Upbound Spaces software. You must add the Space cluster as a context to Argo.
-
-```ini
-export SPACES_CLUSTER_SERVER="https://url"
-export SPACES_CLUSTER_NAME="cluster"
-```
-
-Switch contexts to the Kubernetes cluster where you've installed Argo. Create a secret on the Argo cluster whose data contains the connection details of the Space cluster.
-
-:::important
-Make sure the following commands are executed against your **Argo** cluster, not your Space cluster.
-:::
-
-Run the following command in a terminal:
-
-```yaml
-cat <
-```
-
-When you install a Crossplane provider on a control plane, memory gets consumed
-according to the number of custom resources it defines.
Upbound [Official Provider families][official-provider-families] provide higher fidelity control
-to platform teams to install providers for only the resources they need,
-reducing the bloat of needlessly installing unused custom resources. Still, you
-must factor provider memory usage into your calculations to ensure you've
-rightsized the memory available in your Spaces cluster.
-
-:::important
-Be careful not to conflate `managed resource` with `custom resource definition`.
-The former is an "instance" of an external resource in Crossplane, while the
-latter defines the API schema of that resource.
-:::
-
-It's estimated that each custom resource definition consumes ~3 MB of memory.
-The calculation is:
-
-```bash
-number_of_managed_resources_defined_in_provider x 3 MB = memory_required
-```
-
-For example, if you plan to use [provider-aws-ec2][provider-aws-ec2], [provider-aws-s3][provider-aws-s3], and [provider-aws-iam][provider-aws-iam], the resulting calculation is:
-
-```bash
-provider-aws-ec2: 98 x 3 MB = 294 MB
-provider-aws-s3:  23 x 3 MB = 69 MB
-provider-aws-iam: 22 x 3 MB = 66 MB
----
-total memory: 429 MB
-```
-
-In this scenario, you should budget ~430 MB of memory for provider usage on this control plane.
-
-:::tip
-Do this calculation for each provider you plan to install on your control plane.
-Then do this calculation for each control plane you plan to run in your Space.
-:::
-
-#### Total memory usage
-
-Add the memory usage from the previous sections. Given the preceding examples,
-they result in a recommendation to budget ~1 GB memory for each control plane
-you plan to run in the Space.
-
-:::important
-The 1 GB recommendation is an example.
-You should input your own provider requirements to arrive at a final number for
-your own deployment.
-:::
-
-### CPU considerations
-
-#### Managed resource CPU usage
-
-The number of managed resources under management by a control plane is the largest contributing factor for CPU usage in a Space. CPU usage scales linearly according to the number of managed resources under management by your control plane. In Upbound's testing, CPU usage requirements _do_ vary from provider to provider. Using the Upbound Official Provider families as a baseline:
-
-| Provider | MR create operation (CPU core seconds) | MR update or reconciliation operation (CPU core seconds) |
-| ---- | ---- | ---- |
-| provider-family-aws | 10 | 2 to 3 |
-| provider-family-gcp | 7 | 1.5 |
-| provider-family-azure | 7 to 10 | 1.5 to 3 |
-
-When resources are in a non-ready state, Crossplane providers reconcile often (as fast as every 15 seconds). Once a resource reaches `READY`, each Crossplane provider defaults to a 10 minute poll interval. Given this, a 16-core machine has `16x10x60 = 9600` CPU core seconds available in each 10 minute poll window. Interpreting this table:
-
-- A single control plane that needs to create 100 AWS MRs concurrently would consume 1000 CPU core seconds, or about 1.5 cores.
-- A single control plane that continuously reconciles 100 AWS MRs once they've reached a `READY` state would consume 300 CPU core seconds, or a little under half a core.
-
-Since `provider-family-aws` has the highest recorded numbers for CPU time required, you can use that as an upper limit in your calculations.
-
-Using these calculations and extrapolating values, given a 16 core machine, it's recommended you don't exceed a single control plane managing 1000 MRs. Suppose you plan to run 10 control planes, each managing 1000 MRs.
You want to make sure your node pool has capacity for 160 cores. If you are using a machine type that has 16 cores per machine, that would mean having a node pool of size 10. If you are using a machine type that has 32 cores per machine, that would mean having a node pool of size 5.
-
-#### Cloud API latency
-
-Oftentimes, you are using Crossplane providers to talk to external cloud APIs. Those external cloud APIs often have global API rate limits (examples: [Azure limits][azure-limits], [AWS EC2 limits][aws-ec2-limits]).
-
-For Crossplane providers built on [Upjet][upjet] (such as Upbound Official Provider families), these providers use Terraform under the covers. They expose some knobs (such as `--max-reconcile-rate`) you can use to tweak reconciliation rates.
-
-### Resource buffers
-
-The guidance in the preceding sections explains how to calculate CPU and memory usage requirements for:
-
-- a set of control planes in a Space
-- tuned to the number of providers you plan to use
-- according to the number of managed resource instances you plan to have managed by your control planes
-
-Upbound recommends adding an extra buffer of 20% to your resource capacity calculations. The numbers shared in the preceding sections don't account for peaks or surges since they're based on average measurements. Budgeting this buffer helps absorb those spikes.
-
-## Deploying more than one Space
-
-You are welcome to deploy more than one Space. You just need to make sure you have a 1:1 mapping of Space to Kubernetes cluster. Spaces are by their nature constrained to a single Kubernetes cluster, which is a regional entity. If you want to offer control planes in multiple cloud environments, or in multiple public clouds entirely, those are justifications for deploying more than one Space.
-
-## Cert-manager
-
-A Spaces deployment uses the [Certificate Custom Resource] from cert-manager to
-provision certificates within the Space. This establishes a nice API boundary
-between what your platform may need and the Certificate requirements of a
-Space.
-
-In the event you would like more control over the issuing Certificate Authority
-for your deployment or the deployment of cert-manager itself, this guide is for
-you.
-
-### Deploying
-
-An Upbound Space deployment doesn't have any special requirements for the
-cert-manager deployment itself. The only expectation is that cert-manager and
-the corresponding Custom Resources exist in the cluster.
-
-You should be free to install cert-manager in the cluster in any way that makes
-sense for your organization. You can find some [installation ideas] in the
-cert-manager docs.
-
-### Issuers
-
-A default Upbound Space install includes a [ClusterIssuer]. This `ClusterIssuer`
-is a `selfSigned` issuer that other certificates are minted from. You have a
-couple of options available to you for changing the default deployment of the
-Issuer:
-1. Changing the issuer name.
-2. Providing your own ClusterIssuer.
-
-#### Changing the issuer name
-
-The `ClusterIssuer` name is controlled by the `certificates.space.clusterIssuer`
-Helm property. You can adjust this during installation by providing the
-following parameter (assuming your new name is 'SpaceClusterIssuer'):
-```shell
---set "certificates.space.clusterIssuer=SpaceClusterIssuer"
-```
-
-#### Providing your own ClusterIssuer
-
-To provide your own `ClusterIssuer`, you need to first set up your own
-`ClusterIssuer` in the cluster.
The cert-manager docs have a variety of options
-for providing your own. See the [Issuer Configuration] docs for more details.
-
-Once you have your own `ClusterIssuer` set up in the cluster, you need to turn
-off the deployment of the `ClusterIssuer` included in the Spaces deployment.
-To do that, provide the following parameter during installation:
-```shell
---set "certificates.provision=false"
-```
-
-##### Considerations
-
-If your `ClusterIssuer` has a name that's different from the default name that
-the Spaces installation expects ('spaces-selfsigned'), you need to also specify
-your `ClusterIssuer` name during install using:
-```shell
---set "certificates.space.clusterIssuer=<your-cluster-issuer-name>"
-```
-
-## Ingress
-
-To route requests from an external client (kubectl, ArgoCD, etc) to a
-control plane, a Spaces deployment includes a default [Ingress] manifest. In
-order to ease getting started scenarios, the current `Ingress` includes
-configurations (properties and annotations) that assume that you installed the
-commonly used [ingress-nginx ingress controller] in the cluster. This section
-walks you through using a different `Ingress`, if that's something that your
-organization needs.
-
-### Default manifest
-
-An example of what the current `Ingress` manifest included in a Spaces install
-is below:
-
-```yaml
-apiVersion: networking.k8s.io/v1
-kind: Ingress
-metadata:
-  name: mxe-router-ingress
-  namespace: upbound-system
-  annotations:
-    nginx.ingress.kubernetes.io/use-regex: "true"
-    nginx.ingress.kubernetes.io/ssl-redirect: "false"
-    nginx.ingress.kubernetes.io/proxy-send-timeout: "3600"
-    nginx.ingress.kubernetes.io/proxy-read-timeout: "3600"
-    nginx.ingress.kubernetes.io/proxy-request-buffering: "off"
-    nginx.ingress.kubernetes.io/proxy-body-size: "0"
-    nginx.ingress.kubernetes.io/proxy-http-version: "1.1"
-    nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
-    nginx.ingress.kubernetes.io/proxy-ssl-verify: "on"
-    nginx.ingress.kubernetes.io/proxy-ssl-secret: "upbound-system/mxp-hostcluster-certs"
-    nginx.ingress.kubernetes.io/proxy-ssl-name: spaces-router
-    nginx.ingress.kubernetes.io/configuration-snippet: |
-      more_set_headers "X-Request-Id: $req_id";
-      more_set_headers "Request-Id: $req_id";
-      more_set_headers "Audit-Id: $req_id";
-spec:
-  ingressClassName: nginx
-  tls:
-    - hosts:
-        - {{ .Values.ingress.host }}
-      secretName: mxe-router-tls
-  rules:
-    - host: {{ .Values.ingress.host }}
-      http:
-        paths:
-          - path: "/v1/controlPlanes"
-            pathType: Prefix
-            backend:
-              service:
-                name: spaces-router
-                port:
-                  name: http
-```
-
-The notable pieces are:
-
-1. Namespace
-
-This property represents the namespace that the spaces-router is deployed to.
-In most cases this is `upbound-system`.
-
-2. proxy-ssl-* annotations
-
-The spaces-router pod terminates TLS using certificates located in the
-mxp-hostcluster-certs `Secret` located in the `upbound-system` `Namespace`.
-
-3. proxy-* annotations
-
-Requests coming into the ingress-controller can be variable depending on what
-the client is requesting. For example, `kubectl get crds` has different
-requirements for the connection compared to a 'watch', such as
-`kubectl get pods -w`. The ingress-controller is configured to be able to
-account for either scenario.
-
-4. configuration-snippets
-
-These commands add headers to the incoming requests that help with telemetry
-and diagnosing problems within the system.
-
-5.
5. Rules

Requests coming into the control planes use a `/v1/controlPlanes` prefix and need to be routed to the spaces-router.

### Using a different ingress manifest

Operators can choose to use an `Ingress` manifest and ingress controller that make the most sense for their organization. To turn off deploying the default `Ingress` manifest, provide the following parameter during installation:

```shell
--set "ingress.provision=false"
```

#### Considerations

Operators need to take the following considerations into account when disabling the default `Ingress` deployment.

1. Ensure the custom `Ingress` manifest is placed in the same namespace as the `spaces-router` pod.
2. Ensure that the ingress is configured to use the `spaces-router` as a secure backend and that the secret used is the mxp-hostcluster-certs secret.
3. Ensure that the ingress is configured to handle long-lived connections.
4. Ensure that the routing rule sends requests prefixed with `/v1/controlPlanes` to the `spaces-router` using the `http` port.

[cert-manager]: https://cert-manager.io/
[Certificate Custom Resource]: https://cert-manager.io/docs/usage/certificate/
[ClusterIssuer]: https://cert-manager.io/docs/concepts/issuer/
[ingress-nginx ingress controller]: https://kubernetes.github.io/ingress-nginx/deploy/
[installation ideas]: https://cert-manager.io/docs/installation/
[Ingress]: https://kubernetes.io/docs/concepts/services-networking/ingress/
[Issuer Configuration]: https://cert-manager.io/docs/configuration/
[official-provider-families]: /manuals/packages/providers/provider-families
[aws-eks]: https://aws.amazon.com/eks/
[google-cloud-gke]: https://cloud.google.com/kubernetes-engine
[microsoft-aks]: https://azure.microsoft.com/en-us/products/kubernetes-service
[upbound-account]: https://www.upbound.io/register/?utm_source=docs&utm_medium=cta&utm_campaign=docs_spaces
[provider-aws-ec2]: https://marketplace.upbound.io/providers/upbound/provider-aws-ec2
[provider-aws-s3]: https://marketplace.upbound.io/providers/upbound/provider-aws-s3
[provider-aws-iam]: https://marketplace.upbound.io/providers/upbound/provider-aws-iam
[azure-limits]: https://learn.microsoft.com/en-us/azure/azure-resource-manager/management/request-limits-and-throttling
[aws-ec2-limits]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/throttling.html#throttling-limits-rate-based
[upjet]: https://github.com/upbound/upjet

diff --git a/spaces_versioned_docs/version-v1.9/howtos/self-hosted/dr.md b/spaces_versioned_docs/version-v1.9/howtos/self-hosted/dr.md
deleted file mode 100644
index 67ecbfecf..000000000
--- a/spaces_versioned_docs/version-v1.9/howtos/self-hosted/dr.md
+++ /dev/null
@@ -1,412 +0,0 @@
---
title: Disaster Recovery
sidebar_position: 13
description: Configure Space-wide backups for disaster recovery.
---

:::info API Version Information
This guide applies to **Spaces v1.9.0 and later** (Self-Hosted only). Space Backups is enabled by default starting in v1.14.0.

- **v1.9.0-v1.13.x**: Alpha feature (requires manual enablement)
- **v1.14.0+**: GA (enabled by default)

For control-plane backups, see [Backup and Restore](../backup-and-restore.md).
:::

:::important
For Connected and Disconnected Spaces, this feature requires Spaces `v1.9.0` and, starting with `v1.14.0`, Spaces enables it by default.
To enable it on versions earlier than `v1.14.0`, set `features.alpha.spaceBackup.enabled=true` when you install Spaces.

```bash
up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
  ...
  --set "features.alpha.spaceBackup.enabled=true"
```
:::

Upbound's _Space Backups_ is a built-in Space-wide backup and restore feature. This guide explains how to configure Space Backups and how to restore from one of them for disaster recovery.

This feature is meant for Space administrators. Group or Control Plane users can leverage [Shared Backups][shared-backups] to back up and restore their ControlPlanes.

## Benefits

The Space Backups feature provides the following benefits:

* Automatic backups for all resources in a Space and all resources in control planes, without any operational overhead.
* Backup schedules.
* Selectors to specify resources to back up.

## Prerequisites

The Space Backups feature must be enabled in the Space:

- Cloud Spaces: Not accessible to users.
- Connected Spaces: A Space administrator must enable this feature.
- Disconnected Spaces: A Space administrator must enable this feature.

## Configure a Space Backup Config

[SpaceBackupConfig][spacebackupconfig] is a cluster-scoped resource. This resource configures the storage details and provider. Whenever a backup executes (either by schedule or manually initiated), it references a SpaceBackupConfig to tell it where to store the snapshot.

### Backup config provider

The `spec.objectStorage.provider` and `spec.objectStorage.config` fields configure:

* The object storage provider
* The path to the provider
* The credentials needed to communicate with the provider

You can only set one provider. Upbound currently supports AWS, Azure, and GCP as providers.

`spec.objectStorage.config` is a freeform map of configuration options for the object storage provider. See [Thanos object storage][thanos-object-storage] for more information on the formats for each supported cloud provider. `spec.bucket` and `spec.provider` override the required values in the config.

#### AWS as a storage provider

This example demonstrates how to use AWS as a storage provider for your backups:

```yaml
apiVersion: admin.spaces.upbound.io/v1alpha1
kind: SpaceBackupConfig
metadata:
  name: default
spec:
  objectStorage:
    provider: AWS
    bucket: spaces-backup-bucket
    config:
      endpoint: s3.eu-west-2.amazonaws.com
      region: eu-west-2
    credentials:
      source: Secret
      secretRef:
        name: bucket-creds
        namespace: upbound-system
        key: creds
```

This example assumes you've already created an S3 bucket called `spaces-backup-bucket` in the `eu-west-2` AWS region. To access the bucket, define the account credentials as a Secret in the specified Namespace (`upbound-system` in this example).

#### Azure as a storage provider

This example demonstrates how to use Azure as a storage provider for your backups:

```yaml
apiVersion: admin.spaces.upbound.io/v1alpha1
kind: SpaceBackupConfig
metadata:
  name: default
spec:
  objectStorage:
    provider: Azure
    bucket: upbound-backups
    config:
      storage_account: upbackupstore
      container: upbound-backups
      endpoint: blob.core.windows.net
    credentials:
      source: Secret
      secretRef:
        name: bucket-creds
        namespace: upbound-system
        key: creds
```

This example assumes you've already created an Azure storage account called `upbackupstore` and blob container `upbound-backups`. To access the blob, define the account credentials as a Secret in the specified Namespace (`upbound-system` in this example).
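In these examples, the referenced `bucket-creds` Secret holds the credentials in whatever format the provider expects (see the Thanos object storage formats linked above). As a sketch, assuming you saved the provider credentials in a local file named `creds`:

```shell
# Create the Secret referenced by the SpaceBackupConfig examples above.
# The file contents are provider-specific, for example AWS access keys or
# an Azure storage account key in the expected format.
kubectl create secret generic bucket-creds -n upbound-system \
  --from-file=creds=./creds
```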
#### GCP as a storage provider

This example demonstrates how to use Google Cloud Storage as a storage provider for your backups:

```yaml
apiVersion: admin.spaces.upbound.io/v1alpha1
kind: SpaceBackupConfig
metadata:
  name: default
spec:
  objectStorage:
    provider: GCP
    bucket: spaces-backup-bucket
    credentials:
      source: Secret
      secretRef:
        name: bucket-creds
        namespace: upbound-system
        key: creds
```

This example assumes you've already created a Cloud Storage bucket called `spaces-backup-bucket` and a service account with access to this bucket. Define the key file as a Secret in the specified Namespace (`upbound-system` in this example).

## Configure a Space Backup Schedule

[SpaceBackupSchedule][spacebackupschedule] is a cluster-scoped resource. This resource defines a backup schedule for the whole Space.

Below is an example of a Space Backup Schedule that runs every day. It backs up all groups with the `environment: production` label and all control planes in those groups with the `backup: please` label.

```yaml
apiVersion: admin.spaces.upbound.io/v1alpha1
kind: SpaceBackupSchedule
metadata:
  name: daily-schedule
spec:
  schedule: "@daily"
  configRef:
    kind: SpaceBackupConfig
    name: default
  match:
    groups:
      labelSelectors:
        - matchLabels:
            environment: production
    controlPlanes:
      labelSelectors:
        - matchLabels:
            backup: please
```

### Define a schedule

The `spec.schedule` field is a [Cron-formatted][cron-formatted] string. Some common examples are below:

| Entry             | Description                                                                                       |
| ----------------- | ------------------------------------------------------------------------------------------------- |
| `@hourly`         | Run once an hour.                                                                                 |
| `@daily`          | Run once a day.                                                                                   |
| `@weekly`         | Run once a week.                                                                                  |
| `0 0/4 * * *`     | Run every 4 hours.                                                                                |
| `0/15 * * * 1-5`  | Run every 15 minutes, Monday through Friday.                                                      |
| `@every 1h30m10s` | Run every 1 hour, 30 minutes, and 10 seconds. Hour is the largest measurement of time for @every. |

### Suspend a schedule

Use the `spec.suspend` field to suspend the schedule. A suspended schedule creates no new backups, but allows running backups to complete.

```yaml
apiVersion: admin.spaces.upbound.io/v1alpha1
kind: SpaceBackupSchedule
metadata:
  name: daily-schedule
spec:
  suspend: true
...
```

### Garbage collect backups when the schedule gets deleted

Set the `spec.useOwnerReferencesInBackup` field to `true` to garbage collect the associated `SpaceBackup` resources when their `SpaceBackupSchedule` gets deleted.

### Set the time to live

Set the `spec.ttl` field to define the time to live for the backup. After this time, the backup is eligible for garbage collection. If this field isn't set, the backup isn't garbage collected.

The time to live is a duration, for example, `168h` for 7 days.

```yaml
apiVersion: admin.spaces.upbound.io/v1alpha1
kind: SpaceBackupSchedule
metadata:
  name: daily-schedule
spec:
  ttl: 168h # Backup is garbage collected after 7 days
...
```

## Selecting space resources to back up

By default, a SpaceBackup selects all groups and, for each of them, all control planes, secrets, and any other group-scoped resources.

By setting `spec.match`, you can include only specific groups, control planes, secrets, or other Space resources in the backup.
By setting `spec.exclude`, you can filter out some matched Space API resources from the backup.

### Including space resources in a backup

Different fields are available to include resources based on labels or names:

- `spec.match.groups` to include only some groups in the backup.
- `spec.match.controlPlanes` to include only some control planes in the backup.
- `spec.match.secrets` to include only some secrets in the backup.
- `spec.match.extras` to include only some extra resources in the backup.

```yaml
apiVersion: admin.spaces.upbound.io/v1alpha1
kind: SpaceBackup
metadata:
  name: my-backup
spec:
  configRef:
    kind: SpaceBackupConfig
    name: default
  match:
    groups:
      labelSelectors:
        - matchLabels:
            environment: production
    controlPlanes:
      labelSelectors:
        - matchLabels:
            backup: please
    secrets:
      names:
        - my-secret
    extras:
      - apiGroup: "spaces.upbound.io"
        kind: "SharedBackupConfig"
        names:
          - my-shared-backup
```

### Excluding Space resources from the backup

Use the `spec.exclude` field to exclude matched Space API resources from the backup.

Different fields are available to exclude resources based on labels or names:

- `spec.exclude.groups` to exclude some groups from the backup.
- `spec.exclude.controlPlanes` to exclude some control planes from the backup.
- `spec.exclude.secrets` to exclude some secrets from the backup.
- `spec.exclude.extras` to exclude some extra resources from the backup.

```yaml
apiVersion: admin.spaces.upbound.io/v1alpha1
kind: SpaceBackup
metadata:
  name: my-backup
spec:
  ttl: 168h # Backup is garbage collected after 7 days
  configRef:
    kind: SpaceBackupConfig
    name: default
  match:
    groups:
      labelSelectors:
        - matchLabels:
            environment: production
  exclude:
    groups:
      names:
        - not-this-one-please
```

### Exclude resources in control planes' backups

By default, a backup includes all resources in each selected control plane.

Use the `spec.controlPlaneBackups.excludedResources` field to exclude resources from control planes' backups.

```yaml
apiVersion: admin.spaces.upbound.io/v1alpha1
kind: SpaceBackup
metadata:
  name: my-backup
spec:
  ttl: 168h # Backup is garbage collected after 7 days
  configRef:
    kind: SpaceBackupConfig
    name: default
  controlPlaneBackups:
    excludedResources:
      - secrets
      - buckets.s3.aws.upbound.io
```

## Create a manual backup

[SpaceBackup][spacebackup] is a cluster-scoped resource that causes a single backup to occur for the whole Space.

Below is an example of a manual SpaceBackup:

```yaml
apiVersion: admin.spaces.upbound.io/v1alpha1
kind: SpaceBackup
metadata:
  name: my-backup
spec:
  configRef:
    kind: SpaceBackupConfig
    name: default
  deletionPolicy: Delete
```

The `spec.deletionPolicy` field defines what happens when you delete the backup resource, including whether the backup file is deleted from the bucket. The value defaults to `Orphan`. Set it to `Delete` to remove uploaded files from the bucket.

For more information on the backup and restore process, review the [Spaces API documentation][spaces-api-documentation].

### Set the time to live

Set the `spec.ttl` field to define the time to live for the backup. After this time, the backup is eligible for garbage collection. If this field isn't set, the backup isn't garbage collected. The time to live is a duration, for example, `168h` for 7 days.
```yaml
apiVersion: admin.spaces.upbound.io/v1alpha1
kind: SpaceBackup
metadata:
  name: my-backup
spec:
  ttl: 168h # Backup is garbage collected after 7 days
...
```

## Restore from a space backup

Space Backup and Restore focuses only on disaster recovery. The restore procedure assumes a new Space installation with no existing resources. The restore procedure is idempotent, so you can run it multiple times without side effects in case of failures.

To restore a Space from an existing Space Backup, follow these steps:

1. Install Spaces from scratch as needed.
2. Create a `SpaceBackupConfig` as needed to access the SpaceBackup from the object storage, for example named `my-backup-config`.
3. Select the backup you want to restore from, for example `my-backup`.
4. Run the following command to restore the Space:

```shell
export SPACE_BACKUP_CONFIG=my-backup-config
export SPACE_BACKUP=my-backup
kubectl exec -ti -n upbound-system deployments/spaces-controller -c spaces -- hyperspace restore $SPACE_BACKUP $SPACE_BACKUP_CONFIG
```

### Restore specific control planes

:::important
This feature is available from Spaces v1.11.
:::

Instead of restoring the whole Space, you can choose to restore specific control planes from a backup using the `--controlplanes` flag. You can also use the `--skip-space-restore` flag to skip restoring Space objects. This allows Spaces admins to restore individual control planes without needing to restore the entire Space.

```shell
export SPACE_BACKUP_CONFIG=my-backup-config
export SPACE_BACKUP=my-backup
kubectl exec -ti -n upbound-system deployments/spaces-controller -c spaces -- hyperspace restore $SPACE_BACKUP $SPACE_BACKUP_CONFIG --controlplanes default/ctp1,default/ctp2 --skip-space-restore
```

[shared-backups]: /spaces/howtos/self-hosted/workload-id/backup-restore-config/
[spacebackupconfig]: /reference/apis/spaces-api/v1_9
[thanos-object-storage]: https://thanos.io/tip/thanos/storage.md/
[spacebackupschedule]: /reference/apis/spaces-api/v1_9
[cron-formatted]: https://en.wikipedia.org/wiki/Cron
[spacebackup]: /reference/apis/spaces-api/v1_9
[spaces-api-documentation]: /reference/apis/spaces-api/v1_9

diff --git a/spaces_versioned_docs/version-v1.9/howtos/self-hosted/gitops-with-argocd.md b/spaces_versioned_docs/version-v1.9/howtos/self-hosted/gitops-with-argocd.md
deleted file mode 100644
index 004247a10..000000000
--- a/spaces_versioned_docs/version-v1.9/howtos/self-hosted/gitops-with-argocd.md
+++ /dev/null
@@ -1,142 +0,0 @@
---
title: GitOps with ArgoCD in Self-Hosted Spaces
sidebar_position: 80
description: Set up GitOps workflows with Argo CD in self-hosted Spaces
plan: "business"
---

:::info Deployment Model
This guide applies to **self-hosted Spaces** deployments. For Upbound Cloud Spaces, see [GitOps with Upbound Control Planes](/spaces/howtos/cloud-spaces/gitops-on-upbound/).
:::

GitOps is an approach for managing a system by declaratively describing desired resources' configurations in Git and using controllers to realize the desired state. Upbound's control planes are compatible with this pattern, and it's strongly recommended that you integrate GitOps into the platforms you build on Upbound.

## Integrate with Argo CD

[Argo CD][argo-cd] is a project in the Kubernetes ecosystem commonly used for GitOps. You can use it in tandem with Upbound control planes to achieve GitOps flows. The sections below explain how to integrate these tools with Upbound.
### Configure connection secrets for control planes

You can configure control planes to write their connection details to a secret. Do this by setting the [`spec.writeConnectionSecretToRef`][spec-writeconnectionsecrettoref] field in a control plane manifest. For example:

```yaml
apiVersion: spaces.upbound.io/v1beta1
kind: ControlPlane
metadata:
  name: ctp1
  namespace: default
spec:
  writeConnectionSecretToRef:
    name: kubeconfig-ctp1
    namespace: default
```

### Configure Argo CD

To configure Argo CD for annotation resource tracking, edit the Argo CD ConfigMap in the Argo CD namespace. Add `application.resourceTrackingMethod: annotation` to the data section as below.

Next, configure the [auto respect RBAC for the Argo CD controller][auto-respect-rbac-for-the-argo-cd-controller-1]. By default, Argo CD attempts to discover some Kubernetes resource types that don't exist in a control plane. You must configure Argo CD to respect the cluster's RBAC rules so that Argo CD can sync. Add `resource.respectRBAC: normal` to the data section as below.

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: argocd-cm
data:
  ...
  application.resourceTrackingMethod: annotation
  resource.respectRBAC: normal
```

:::tip
The `resource.respectRBAC` configuration above tells Argo to respect RBAC for _all_ cluster contexts. If you're using an Argo CD instance to manage more than only control planes, consider changing the `clusters` string match for the configuration to apply only to control planes. For example, if every control plane context name followed the convention of being named `controlplane-<name>`, you could set the string match to `controlplane-*`.
:::

### Create a cluster context definition

Once the control plane is ready, extract the following values from the secret containing the kubeconfig:

```bash
kubeconfig_content=$(kubectl get secrets kubeconfig-ctp1 -n default -o jsonpath='{.data.kubeconfig}' | base64 -d)
server=$(echo "$kubeconfig_content" | grep 'server:' | awk '{print $2}')
bearer_token=$(echo "$kubeconfig_content" | grep 'token:' | awk '{print $2}')
ca_data=$(echo "$kubeconfig_content" | grep 'certificate-authority-data:' | awk '{print $2}')
```

Generate a new secret in the cluster where you installed Argo, using the prior values extracted:

```shell
# A sketch of the standard Argo CD declarative cluster Secret; the
# ctp1-cluster name and the argocd namespace are illustrative.
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Secret
metadata:
  name: ctp1-cluster
  namespace: argocd
  labels:
    argocd.argoproj.io/secret-type: cluster
type: Opaque
stringData:
  name: ctp1
  server: ${server}
  config: |
    {
      "bearerToken": "${bearer_token}",
      "tlsClientConfig": {
        "insecure": false,
        "caData": "${ca_data}"
      }
    }
EOF
```

import GlobalLanguageSelector, { CodeBlock } from '@site/src/components/GlobalLanguageSelector';

:::important
This feature is only available for select Business Critical customers. You can't set up your own Managed Space without the assistance of Upbound. If you're interested in this deployment mode, please [contact us][contact].
:::

A Managed Space deployed on AWS is a single-tenant deployment of a control plane space in your AWS organization in an isolated sub-account. With Managed Spaces, you can use the same API, CLI, and Console that Upbound offers, with the benefit of running entirely in a cloud account that you own and Upbound manages for you.

The following guide walks you through setting up a Managed Space in your AWS organization. If you have any questions while working through this guide, contact your Upbound Account Representative for help.

A Managed Space deployed on GCP is a single-tenant deployment of a control plane space in your GCP organization in an isolated project. With Managed Spaces, you can use the same API, CLI, and Console that Upbound offers, with the benefit of running entirely in a cloud account that you own and Upbound manages for you.
The following guide walks you through setting up a Managed Space in your GCP organization. If you have any questions while working through this guide, contact your Upbound Account Representative for help.

## Managed Space on your cloud architecture

A Managed Space is a deployment of the Upbound Spaces software inside an Upbound-controlled sub-account in your AWS cloud environment. The Spaces software runs in this sub-account, orchestrated by Kubernetes. Backups and billing data get stored inside bucket or blob storage in the same sub-account. The control planes deployed and controlled by the Spaces software run on the Kubernetes cluster deployed into the sub-account.

The diagram below illustrates the high-level architecture of Upbound Managed Spaces:

![Upbound Managed Spaces arch](/img/managed-arch-aws.png)

The Spaces software gets deployed on an EKS cluster in the region of your choice. This EKS cluster is where your control planes ultimately run. Upbound also deploys two buckets: one for the collection of billing data and one for control plane backups.

Upbound doesn't have access to other sub-accounts or your organization-level settings in your cloud environment. Outside of your cloud organization, Upbound runs the Upbound Console, which includes the Upbound API and web application, including the dashboard you see at `console.upbound.io`. By default, all connections are encrypted, but public. Optionally, you can use private network connectivity through [AWS PrivateLink][aws-privatelink].

A Managed Space is a deployment of the Upbound Spaces software inside an Upbound-controlled project in your GCP cloud environment. The Spaces software runs in this project, orchestrated by Kubernetes. Backups and billing data get stored inside bucket or blob storage in the same project. The control planes deployed and controlled by the Spaces software run on the Kubernetes cluster deployed into the project.

The diagram below illustrates the high-level architecture of Upbound Managed Spaces:

![Upbound Managed Spaces arch](/img/managed-arch-gcp.png)

The Spaces software gets deployed on a GKE cluster in the region of your choice. This GKE cluster is where your control planes ultimately run. Upbound also deploys two cloud buckets: one for the collection of billing data and one for control plane backups.

Upbound doesn't have access to other projects or your organization-level settings in your cloud environment. Outside of your cloud organization, Upbound runs the Upbound Console, which includes the Upbound API and web application, including the dashboard you see at `console.upbound.io`. By default, all connections are encrypted, but public. Optionally, you can use private network connectivity through [GCP Private Service Connect][gcp-private-service-connect].

## Prerequisites

- An organization created on Upbound

- You should have a preexisting AWS organization to complete this guide.
- You must create a new AWS sub-account. Read the [AWS documentation][aws-documentation] to learn how to create a new sub-account in an existing organization on AWS, or use the AWS CLI as in the sketch below.
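As a sketch, creating the sub-account with the AWS CLI might look like the following; the email address and account name are placeholders, and the command assumes AWS Organizations permissions in your management account:

```shell
# Create a new member account in your AWS organization.
aws organizations create-account \
  --email aws-upbound-space@example.com \
  --account-name "upbound-managed-space"

# Account creation is asynchronous; poll its status with:
aws organizations list-create-account-status
```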
After the sub-account information gets provided to Upbound, **don't change it any further.** Any changes made to the sub-account or the resources created by Upbound for the Managed Space deployment void the SLA you have with Upbound. If you want to make configuration changes, contact your Upbound Solutions Architect.

- You should have a preexisting GCP organization with an active Cloud Billing account to complete this guide.
- You must create a new GCP project. Read the [GCP documentation][gcp-documentation] to learn how to create a new project in an existing organization on GCP.

After the project information gets provided to Upbound, **don't change it any further.** Any changes made to the project or the resources created by Upbound for the Managed Space deployment void the SLA you have with Upbound. If you want to make configuration changes, contact your Upbound Solutions Architect.

## Set up cross-account management

Upbound supports using AWS Key Management Service with cross-account IAM permissions. This enables the isolation of keys, so the infrastructure operated by Upbound has limited access to symmetric keys.

In the KMS key's account, apply the baseline key policy:

```json
{
  "Sid": "Allow Upbound to use this key",
  "Effect": "Allow",
  "Principal": {
    "AWS": ["[Managed Space sub-account ID]"]
  },
  "Action": ["kms:Encrypt", "kms:Decrypt", "kms:ReEncrypt*", "kms:GenerateDataKey*", "kms:DescribeKey"],
  "Resource": "*"
}
```

You need another key policy to let the sub-account create persistent resources with the KMS key:

```json
{
  "Sid": "Allow attachment of persistent resources for an Upbound Managed Space",
  "Effect": "Allow",
  "Principal": {
    "AWS": "[Managed Space sub-account ID]"
  },
  "Action": ["kms:CreateGrant", "kms:ListGrants", "kms:RevokeGrant"],
  "Resource": "*",
  "Condition": {
    "Bool": {
      "kms:GrantIsForAWSResource": "true"
    }
  }
}
```

### Configure PrivateLink

By default, all connections to the Upbound Console are encrypted, but public. AWS PrivateLink is a feature that allows VPC peering whereby your traffic doesn't traverse the public internet. To have this configured, contact your Upbound Account Representative.

## Enable APIs

Enable the following APIs in the new project:

- Kubernetes Engine API
- Cloud Resource Manager API
- Compute Engine API
- Cloud DNS API

:::tip
Read how to enable APIs in a GCP project [here][here].
:::

## Create a service account

Create a service account in the new project. Name the service account `upbound-sa`. Give the service account the following roles:

- Compute Admin
- Project IAM Admin
- Service Account Admin
- DNS Administrator
- Editor

Select the service account you just created, then select **Keys**. Add a new key and select JSON. The key gets downloaded to your machine. Save it for later.

## Create a DNS Zone

Create a DNS Zone and set the **Zone type** to `Public`.

### Configure Private Service Connect

By default, all connections to the Upbound Console are encrypted, but public. GCP Private Service Connect is a feature that allows VPC peering whereby your traffic doesn't traverse the public internet. To have this configured, contact your Upbound Account Representative.

## Provide information to Upbound

Once these policies get attached to the key, tell your Upbound Account Representative, providing them the following:
- The full ARN of the KMS key.
- The name of the organization that you created in Upbound. Use the up CLI command `up org list` to see this information.
- Confirmation of which region in AWS you want the deployment to target.

- The service account JSON key.
- The NS records associated with the DNS name created in the last step.
- The name of the organization that you created in Upbound. Use the up CLI command `up org list` to see this information.
- Confirmation of which region in GCP you want the deployment to target.

Once Upbound has this information, the request gets processed within one business day.

## Use your Managed Space

Once the Managed Space gets deployed, you can see it in the Space selector when browsing your environment on [`console.upbound.io`][console-upbound-io].

[contact]: https://www.upbound.io/contact-us
[aws-privatelink]: #configure-privatelink
[aws-documentation]: https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_create.html#orgs_manage_accounts_create-new
[gcp-private-service-connect]: #configure-private-service-connect
[gcp-documentation]: https://cloud.google.com/resource-manager/docs/creating-managing-organization
[here]: https://cloud.google.com/apis/docs/getting-started#enabling_apis
[console-upbound-io]: https://console.upbound.io/

diff --git a/spaces_versioned_docs/version-v1.9/howtos/self-hosted/oidc-configuration.md b/spaces_versioned_docs/version-v1.9/howtos/self-hosted/oidc-configuration.md
deleted file mode 100644
index cbef4dc42..000000000
--- a/spaces_versioned_docs/version-v1.9/howtos/self-hosted/oidc-configuration.md
+++ /dev/null
@@ -1,289 +0,0 @@
---
title: Configure OIDC
sidebar_position: 20
description: Configure OIDC in your Space
---

:::important
This guide is only applicable for administrators who've deployed self-hosted Spaces. For general RBAC in Upbound, read [Upbound RBAC][upbound-rbac].
:::

Upbound uses the Kubernetes [Structured Authentication Configuration][structured-auth-config] to validate OIDC tokens sent to the API. Upbound stores this configuration as a `ConfigMap` and provides it to the Upbound router component during installation with Helm.

This guide walks you through how to create and apply an authentication configuration to validate Upbound with an external identity provider. Each section focuses on a specific part of the configuration file.

:::info API Version Information
This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments.

For related platform authentication features, see the [Platform manual](../../../../platform/).
:::

## Creating the `AuthenticationConfiguration` file

First, create a file called `config.yaml` with an `AuthenticationConfiguration` kind. The `AuthenticationConfiguration` is the initial authentication structure necessary for Upbound to communicate with your chosen identity provider.

```yaml
apiVersion: apiserver.config.k8s.io/v1beta1
kind: AuthenticationConfiguration
jwt:
- issuer:
    url: oidc-issuer-url
    audiences:
    - oidc-client-id
  claimMappings: # optional
    username:
      claim: oidc-username-claim
      prefix: oidc-username-prefix
    groups:
      claim: oidc-groups-claim
      prefix: oidc-groups-prefix
```

For detailed configuration options, including the CEL-based token validation, review the feature [documentation][structured-auth-config].
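As one illustration of that CEL-based validation, a `claimValidationRules` entry can reject tokens before any claim mapping happens. This sketch, not specific to any identity provider, rejects tokens whose total lifetime exceeds 24 hours:

```yaml
jwt:
- issuer:
    url: oidc-issuer-url
    audiences:
    - oidc-client-id
  # Rules are evaluated against the token's claims; all must pass.
  claimValidationRules:
  - expression: 'claims.exp - claims.nbf <= 86400'
    message: 'total token lifetime must not exceed 24 hours'
```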
The `AuthenticationConfiguration` allows you to configure multiple JWT authenticators as separate issuers.

### Configure an issuer

The `jwt` array requires an `issuer` specification and typically contains:

- A `username` claim mapping
- A `groups` claim mapping

Optionally, the configuration may also include:

- A set of claim validation rules
- A set of user validation rules

The `issuer` URL must be unique across all configured authenticators.

```yaml
issuer:
  url: https://example.com
  discoveryUrl: https://discovery.example.com/.well-known/openid-configuration
  certificateAuthority: |-
    <PEM-encoded CA certificate>
  audiences:
  - client-id-a
  - client-id-b
  audienceMatchPolicy: MatchAny
```

By default, the authenticator assumes the OIDC Discovery URL is `{issuer.url}/.well-known/openid-configuration`. Most identity providers follow this structure, and you can omit the `discoveryUrl` field. To use a separate discovery service, specify the full path to the discovery endpoint in this field.

If the CA for the Issuer isn't public, provide the PEM-encoded CA for the Discovery URL.

At least one of the `audiences` entries must match the `aud` claim in the JWT. For OIDC tokens, this is the Client ID of the application attempting to access the Upbound API. Having multiple values set allows the same configuration to apply to multiple client applications, for example the `kubectl` CLI and an Internal Developer Portal.

If you specify multiple `audiences`, `audienceMatchPolicy` must equal `MatchAny`.

### Configure `claimMappings`

#### Username claim mapping

By default, the authenticator uses the `sub` claim as the user name. To override this, either:

- specify *both* `claim` and `prefix` (`prefix` may be explicitly set to the empty string), or
- specify a CEL `expression` to calculate the user name.

```yaml
claimMappings:
  username:
    claim: "sub"
    prefix: "keycloak"
    # Alternatively, use a CEL expression instead of claim/prefix:
    # expression: 'claims.username + ":external-user"'
```

#### Groups claim mapping

By default, this configuration doesn't map groups, unless you either:

- specify both `claim` and `prefix` (`prefix` may be explicitly set to the empty string), or
- specify a CEL `expression` that returns a string or list of strings.

```yaml
claimMappings:
  groups:
    claim: "groups"
    prefix: ""
    # Alternatively, use a CEL expression instead of claim/prefix:
    # expression: 'claims.roles.split(",")'
```

### Validation rules

Validation rules are outside the scope of this document. Review the [documentation][structured-auth-config] for more information. Examples include using CEL expressions to validate authentication such as:

- Validating that a token claim has a specific value
- Validating that a token has a limited lifetime
- Ensuring usernames and groups don't contain reserved prefixes

## Required claims

To interact with Space and ControlPlane APIs, users must have the `upbound.io/aud` claim set to one of the following:

| Upbound.io Audience                                      | Notes                                                       |
| -------------------------------------------------------- | ----------------------------------------------------------- |
| `[]`                                                      | No access to Space-level or ControlPlane APIs               |
| `['upbound:spaces:api']`                                  | This identity is only for Space-level APIs                  |
| `['upbound:spaces:controlplanes']`                        | This identity is only for ControlPlane APIs                 |
| `['upbound:spaces:api', 'upbound:spaces:controlplanes']`  | This identity is for both Space-level and ControlPlane APIs |

You can set this claim in two ways:

- In the identity provider, mapped in the ID token.
- Injected in the authenticator with the `jwt.claimMappings.extra` array.

For example:

```yaml
apiVersion: apiserver.config.k8s.io/v1beta1
kind: AuthenticationConfiguration
jwt:
- issuer:
    url: https://keycloak:8443/realms/master
    certificateAuthority: |-
      <PEM-encoded CA certificate>
    audiences:
    - master-realm
    audienceMatchPolicy: MatchAny
  claimMappings:
    username:
      claim: "preferred_username"
      prefix: "keycloak:"
    groups:
      claim: "groups"
      prefix: ""
    extra:
    - key: 'upbound.io/aud'
      valueExpression: "['upbound:spaces:controlplanes', 'upbound:spaces:api']"
```

## Install the `AuthenticationConfiguration`

Once you create an `AuthenticationConfiguration` file, specify this file as a `ConfigMap` in the host cluster for the Upbound Space.

```sh
kubectl create configmap <configmap-name> -n upbound-system --from-file=config.yaml=./path/to/config.yaml
```

To enable OIDC authentication and disable Upbound IAM when installing the Space, reference the configuration and pass an empty value to the Upbound IAM issuer parameter:

```sh
up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
  ...
  --set "authentication.structuredConfig=<configmap-name>" \
  --set "router.controlPlane.extraArgs[0]=--upbound-iam-issuer-url="
```

## Configure RBAC

In this scenario, the external identity provider handles authentication, but permissions for Spaces and ControlPlane APIs use standard RBAC objects.

### Spaces APIs

The Spaces APIs include:

```yaml
- apiGroups:
  - spaces.upbound.io
  resources:
  - controlplanes
  - sharedexternalsecrets
  - sharedsecretstores
  - backups
  - backupschedules
  - sharedbackups
  - sharedbackupconfigs
  - sharedbackupschedules
- apiGroups:
  - observability.spaces.upbound.io
  resources:
  - sharedtelemetryconfigs
```

### ControlPlane APIs

Crossplane specifies three [roles][crossplane-managed-clusterroles] for a ControlPlane: admin, editor, and viewer. These map to the verbs `admin`, `edit`, and `view` on the `controlplanes/k8s` resource in the `spaces.upbound.io` API group.

### Control access

The `groups` claim in the `AuthenticationConfiguration` allows you to control resource access when you create a `ClusterRoleBinding`. A `ClusterRole` defines the role parameters, and a `ClusterRoleBinding` grants the role to subjects.
The example below allows `admin` permissions for all ControlPlanes to members of the `ctp-admins` group:

```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: allow-ctp-admin
rules:
- apiGroups:
  - spaces.upbound.io
  resources:
  - controlplanes/k8s
  verbs:
  - admin
```

And the corresponding `ClusterRoleBinding` for the `ctp-admins` group:

```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: allow-ctp-admin
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: allow-ctp-admin
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: ctp-admins
```

[structured-auth-config]: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#using-authentication-configuration
[crossplane-managed-clusterroles]: https://github.com/crossplane/crossplane/blob/master/design/design-doc-rbac-manager.md#managed-rbac-clusterroles
[upbound-rbac]: /manuals/platform/concepts/authorization/upbound-rbac

diff --git a/spaces_versioned_docs/version-v1.9/howtos/self-hosted/proxies-config.md b/spaces_versioned_docs/version-v1.9/howtos/self-hosted/proxies-config.md
deleted file mode 100644
index 3802e4cb0..000000000
--- a/spaces_versioned_docs/version-v1.9/howtos/self-hosted/proxies-config.md
+++ /dev/null
@@ -1,31 +0,0 @@
---
title: Proxied configuration
sidebar_position: 20
description: Configure Upbound within a proxied environment
---

:::info API Version Information
This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments in proxied environments. Proxy configuration procedures are consistent across versions.
:::

When you install Upbound with Helm in a proxied environment, update the specified registry values to point to your internal registry.

```bash
helm -n upbound-system upgrade --install spaces \
  oci://xpkg.upbound.io/spaces-artifacts/spaces \
  --version "${SPACES_VERSION}" \
  --set "ingress.host=${SPACES_ROUTER_HOST}" \
  --set "account=${UPBOUND_ACCOUNT}" \
  --set "authentication.hubIdentities=true" \
  --set "authorization.hubRBAC=true" \
  --set "registry=registry.company.corp/spaces" \
  --set "controlPlanes.uxp.registryOverride=registry.company.corp/xpkg.upbound.io" \
  --set "controlPlanes.uxp.repository=registry.company.corp/spaces" \
  --wait
```

diff --git a/spaces_versioned_docs/version-v1.9/howtos/self-hosted/query-api.md b/spaces_versioned_docs/version-v1.9/howtos/self-hosted/query-api.md
deleted file mode 100644
index c112e9001..000000000
--- a/spaces_versioned_docs/version-v1.9/howtos/self-hosted/query-api.md
+++ /dev/null
@@ -1,396 +0,0 @@
---
title: Deploy Query API infrastructure
weight: 130
description: Query API
aliases:
  - /all-spaces/self-hosted-spaces/query-api
  - /self-hosted-spaces/query-api
  - all-spaces/self-hosted-spaces/query-api
---

:::info API Version Information
This guide applies to all supported versions (v1.9-v1.14+). Query API has evolved across versions:

- **Cloud Spaces**: Available since v1.6 (enabled by default)
- **Self-Hosted**: Available since v1.8 (requires manual enablement)
:::

:::important

This feature is in preview. The Query API is available in the Cloud Space offering in `v1.6` and enabled by default.

Since `v1.8.0`, this feature is required to connect a Space. It's off by default; see below to enable it.
:::

Upbound's Query API allows users to inspect objects and resources within their control planes. The read-only `up alpha query` and `up alpha get` CLI commands allow you to gather information on your control planes in a fast and efficient package. These commands follow the [`kubectl` conventions][kubectl-conventions] for filtering, sorting, and retrieving information from your Space.

The Query API requires a PostgreSQL database to store the data. You can use the default PostgreSQL instance provided by Upbound or bring your own PostgreSQL instance.

## Managed setup

:::tip
If you don't have specific requirements for your setup, Upbound recommends following this approach.
:::

To enable this feature, set `features.alpha.apollo.enabled=true` and `apollo.apollo.storage.postgres.create=true` when installing Spaces.

You also need to install CloudNativePG (CNPG) to provide the PostgreSQL instance. You can let the `up` CLI do this for you, or install it manually.

For more customization, see the [Helm chart reference][helm-chart-reference]. You can modify the number of PostgreSQL instances, pooling instances, storage size, and more.

If you have specific requirements not addressed in the Helm chart, see below for more information on how to bring your own [PostgreSQL setup][postgresql-setup].

### Using the up CLI

Before you begin, make sure you have the most recent version of the [`up` CLI installed][up-cli-installed].

To enable this feature, set `features.alpha.apollo.enabled=true` and `apollo.apollo.storage.postgres.create=true` when installing Spaces:

```bash
up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
  ...
  --set "features.alpha.apollo.enabled=true" \
  --set "apollo.apollo.storage.postgres.create=true"
```

`up space init` and `up space upgrade` install CloudNativePG automatically, if needed.

### Helm chart

If you are installing the Helm chart in some other way, you can manually install CloudNativePG in one of the [supported ways][supported-ways], for example:

```shell
kubectl apply --server-side -f \
  https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.24.1/cnpg-1.24.1.yaml
kubectl rollout status -n cnpg-system deployment cnpg-controller-manager -w --timeout 120s
```

Next, install the Spaces Helm chart with the necessary values, for example:

```shell
helm -n upbound-system upgrade --install spaces \
  oci://xpkg.upbound.io/spaces-artifacts/spaces \
  --version "${SPACES_VERSION}" \
  ...
  --set "features.alpha.apollo.enabled=true" \
  --set "apollo.apollo.storage.postgres.create=true" \
  --wait
```

## Self-hosted PostgreSQL configuration

If your workflow requires more customization, you can provide your own PostgreSQL instance and configure credentials manually.

Using your own PostgreSQL instance requires careful architecture consideration. Review the architecture and requirements guidelines.

### Architecture

The Query API architecture uses two components besides the PostgreSQL database:

* **Apollo Syncers**: Watch etcd for changes and sync them to PostgreSQL. One or more per control plane.
* **Apollo Server**: Serves the Query API out of the data in PostgreSQL. One or more per Space.

The default setup also uses the `PgBouncer` connection pooler to manage connections from the syncers.
```mermaid
graph LR
    User[User]

    subgraph Cluster["Cluster (Spaces)"]
        direction TB
        Apollo[apollo]

        subgraph ControlPlanes["Control Planes"]
            APIServer[API Server]
            Syncer[apollo-syncer]
        end
    end

    PostgreSQL[(PostgreSQL)]

    User -->|requests| Apollo

    Apollo -->|connects| PostgreSQL
    Apollo -->|creates schemas & users| PostgreSQL

    Syncer -->|watches| APIServer
    Syncer -->|writes| PostgreSQL

    PostgreSQL -->|data| Apollo

    style PostgreSQL fill:#e1f5ff,stroke:#333,stroke-width:2px,color:#000
    style Apollo fill:#ffe1e1,stroke:#333,stroke-width:2px,color:#000
    style Cluster fill:#f0f0f0,stroke:#333,stroke-width:2px,color:#000
    style ControlPlanes fill:#fff,stroke:#666,stroke-width:1px,stroke-dasharray: 5 5,color:#000
```

Each component needs to connect to the PostgreSQL database.

In the event of database issues, you can provide a new database and the syncers automatically repopulate the data.

### Requirements

* A PostgreSQL 16 instance or cluster.
* A database, for example named `upbound`.
* **Optional**: A dedicated user for the Apollo Syncers, for example named `syncer`. Otherwise, the Spaces Controller generates a dedicated set of credentials per syncer with the necessary permissions.
* A dedicated **superuser or admin account** for the Apollo Server.
* **Optional**: A connection pooler, like PgBouncer, to manage connections from the Apollo Syncers. If you didn't provide the optional users, you might have to configure the pooler to allow users to connect using the same credentials as PostgreSQL.
* **Optional**: A read replica for the Apollo Syncers to connect to, to reduce load on the primary database. This might cause a slight delay in the data being available through the Query API.

Below you can find examples of setups to get you started. You can mix and match the examples to suit your needs.

### In-cluster setup

:::tip

If you don't have strong opinions on your setup, but still want full control over the created resources for customizations the managed setup doesn't support, Upbound recommends the in-cluster setup.

:::

For more customization than the managed setup, you can use CloudNativePG for PostgreSQL in the same cluster.

For the in-cluster setup, manually deploy the operator in one of the [supported ways][supported-ways-1], for example:

```shell
kubectl apply --server-side -f \
  https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.24.1/cnpg-1.24.1.yaml
kubectl rollout status -n cnpg-system deployment cnpg-controller-manager -w --timeout 120s
```

Then create a `Cluster` and `Pooler` in the `upbound-system` namespace, for example:

```shell
kubectl create ns upbound-system

kubectl apply -f - <<EOF
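# What follows is a minimal sketch assuming CNPG's postgresql.cnpg.io/v1 API;
# the names, instance counts, and storage size are illustrative and should be
# tuned to your workload.
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: apollo-pg
  namespace: upbound-system
spec:
  instances: 3
  storage:
    size: 20Gi
---
apiVersion: postgresql.cnpg.io/v1
kind: Pooler
metadata:
  name: apollo-pg-pooler
  namespace: upbound-system
spec:
  cluster:
    name: apollo-pg
  instances: 2
  type: rw
  pgbouncer:
    poolMode: session
EOF
```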
### External setup

:::tip

If you want to run your PostgreSQL instance outside the cluster, but are fine with credentials being managed by the `apollo` user, this is the suggested way to proceed.

:::

When using this setup, you must manually create the required Secrets in the `upbound-system` namespace. The `apollo` user must have permissions to create schemas and users.

```shell
kubectl create ns upbound-system

# A Secret containing the necessary credentials to connect to the PostgreSQL instance
kubectl create secret generic spaces-apollo-pg-app -n upbound-system \
  --from-literal=password=supersecret

# A Secret containing the necessary CA certificate to verify the connection to the PostgreSQL instance
kubectl create secret generic spaces-apollo-pg-ca -n upbound-system \
  --from-file=ca.crt=/path/to/ca.crt
```

Next, install Spaces with the necessary settings:

```shell
export PG_URL=your-postgres-host:5432
export PG_POOLED_URL=your-pgbouncer-host:5432 # this could be the same as above

helm upgrade --install ... \
  --set "features.alpha.apollo.enabled=true" \
  --set "apollo.apollo.storage.postgres.create=false" \
  --set "apollo.apollo.storage.postgres.connection.url=$PG_URL" \
  --set "apollo.apollo.storage.postgres.connection.credentials.secret.name=spaces-apollo-pg-app" \
  --set "apollo.apollo.storage.postgres.connection.credentials.format=basicauth" \
  --set "apollo.apollo.storage.postgres.connection.ca.name=spaces-apollo-pg-ca" \
  --set "apollo.apollo.storage.postgres.connection.syncer.url=$PG_POOLED_URL"
```

### External setup with all custom credentials

For custom credentials for the Apollo Syncers or Server, create the following Secrets in the `upbound-system` namespace:

```shell
export APOLLO_SYNCER_USER=syncer
export APOLLO_SERVER_USER=apollo

kubectl create ns upbound-system

# A Secret containing the necessary credentials to connect to the PostgreSQL instance
kubectl create secret generic spaces-apollo-pg-app -n upbound-system \
  --from-literal=password=supersecret

# A Secret containing the necessary CA certificate to verify the connection to the PostgreSQL instance
kubectl create secret generic spaces-apollo-pg-ca -n upbound-system \
  --from-file=ca.crt=/path/to/ca.crt

# A Secret containing the necessary credentials for the Apollo Syncers to connect to the PostgreSQL instance.
# These will be used by all Syncers in the Space.
kubectl create secret generic spaces-apollo-pg-syncer -n upbound-system \
  --from-literal=username=$APOLLO_SYNCER_USER \
  --from-literal=password=supersecret

# A Secret containing the necessary credentials for the Apollo Server to connect to the PostgreSQL instance.
kubectl create secret generic spaces-apollo-pg-apollo -n upbound-system \
  --from-literal=username=$APOLLO_SERVER_USER \
  --from-literal=password=supersecret
```

Next, install Spaces with the necessary settings:

```shell
export PG_URL=your-postgres-host:5432
export PG_POOLED_URL=your-pgbouncer-host:5432 # this could be the same as above

# The connection.syncer.* values configure credentials for the Apollo Syncers;
# the connection.apollo.* values configure credentials for the Apollo Server.
helm ... \
  --set "features.alpha.apollo.enabled=true" \
  --set "apollo.apollo.storage.postgres.create=false" \
  --set "apollo.apollo.storage.postgres.connection.url=$PG_URL" \
  --set "apollo.apollo.storage.postgres.connection.credentials.secret.name=spaces-apollo-pg-app" \
  --set "apollo.apollo.storage.postgres.connection.credentials.format=basicauth" \
  --set "apollo.apollo.storage.postgres.connection.ca.name=spaces-apollo-pg-ca" \
  --set "apollo.apollo.storage.postgres.connection.syncer.url=$PG_POOLED_URL" \
  --set "apollo.apollo.storage.postgres.connection.syncer.credentials.format=basicauth" \
  --set "apollo.apollo.storage.postgres.connection.syncer.credentials.user=$APOLLO_SYNCER_USER" \
  --set "apollo.apollo.storage.postgres.connection.syncer.credentials.secret.name=spaces-apollo-pg-syncer" \
  --set "apollo.apollo.storage.postgres.connection.apollo.credentials.format=basicauth" \
  --set "apollo.apollo.storage.postgres.connection.apollo.credentials.user=$APOLLO_SERVER_USER" \
  --set "apollo.apollo.storage.postgres.connection.apollo.credentials.secret.name=spaces-apollo-pg-apollo" \
  --set "apollo.apollo.storage.postgres.connection.apollo.url=$PG_POOLED_URL"
```

## Using the Query API

See the [Query API documentation][query-api-documentation] for more information on how to use the Query API.

[postgresql-setup]: #self-hosted-postgresql-configuration
[up-cli-installed]: /manuals/cli/overview
[query-api-documentation]: /spaces/howtos/query-api
[helm-chart-reference]: /reference/helm-reference
[kubectl-conventions]: https://kubernetes.io/docs/reference/kubectl/generated/kubectl_get/
[supported-ways]: https://cloudnative-pg.io/documentation/current/installation_upgrade/
[supported-ways-1]: https://cloudnative-pg.io/documentation/current/installation_upgrade/
[cloudnativepg-documentation]: https://cloudnative-pg.io/documentation/1.24/storage/#configuration-via-a-pvc-template
[postgresql-cluster]: https://cloudnative-pg.io/documentation/1.24/resource_management/
[pooler]: https://cloudnative-pg.io/documentation/1.24/connection_pooling/#pod-templates
[postgresql-cluster-2]: https://cloudnative-pg.io/documentation/1.24/replication/
[pooler-3]: https://cloudnative-pg.io/documentation/1.24/connection_pooling/#high-availability-ha
[postgresql-cluster-4]: https://cloudnative-pg.io/documentation/1.24/operator_capability_levels/#override-of-operand-images-through-the-crd
[pooler-5]: https://cloudnative-pg.io/documentation/1.24/connection_pooling/#pod-templates
[cloudnativepg-documentation-6]: https://cloudnative-pg.io/documentation/1.24/postgresql_conf/

diff --git a/spaces_versioned_docs/version-v1.9/howtos/self-hosted/scaling-resources.md b/spaces_versioned_docs/version-v1.9/howtos/self-hosted/scaling-resources.md
deleted file mode 100644
index 7bb04d2c2..000000000
--- a/spaces_versioned_docs/version-v1.9/howtos/self-hosted/scaling-resources.md
+++ /dev/null
@@ -1,184 +0,0 @@
---
title: Scaling vCluster and etcd Resources
weight: 950
description: A guide for scaling vCluster and etcd resources in self-hosted Spaces
aliases:
  - /all-spaces/self-hosted-spaces/scaling-resources
  - /spaces/scaling-resources
---

With large workloads or during control plane migration, you may encounter performance-impacting resource constraints. This guide explains how to scale vCluster and `etcd` resources for optimal performance in your self-hosted Space.

:::info API Version Information
This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Scaling procedures are consistent across versions.
:::

## Signs of resource constraints

You may need to scale your vCluster or `etcd` resources if you observe:

- API server timeout errors such as `http: Handler timeout`
- Error messages about `too many requests` and requests to `try again later`
- Operations like provider installation failing with errors like `cannot apply provider package secret`
- vCluster pods experiencing continuous restarts
- API performance degrading as resource volume grows
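If you see these symptoms, it helps to check where CPU and memory are actually going before raising limits. A quick sketch, assuming the Kubernetes metrics-server is installed in the host cluster:

```shell
# Show the heaviest pods across all namespaces, sorted by memory use.
kubectl top pods -A --sort-by=memory | head -n 20

# Check node-level headroom.
kubectl top nodes
```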
## Scaling vCluster resources

The vCluster component handles Kubernetes API requests for your control planes. Deployments with multiple control planes or providers may exceed default resource allocations.

```yaml
# Default settings
controlPlanes.vcluster.resources.limits.cpu: "3000m"
controlPlanes.vcluster.resources.limits.memory: "3960Mi"
controlPlanes.vcluster.resources.requests.cpu: "170m"
controlPlanes.vcluster.resources.requests.memory: "1320Mi"
```

For larger workloads, like migrating from an existing control plane with several providers, increase these resource limits in your Spaces `values.yaml` file.

```yaml
controlPlanes:
  vcluster:
    resources:
      limits:
        cpu: "4000m"    # Increase to 4 cores
        memory: "6Gi"   # Increase to 6GB memory
      requests:
        cpu: "500m"     # Increase baseline CPU request
        memory: "2Gi"   # Increase baseline memory request
```

## Scaling `etcd` storage

Kubernetes performance relies heavily on `etcd`, and `etcd` is sensitive to IOPS (input/output operations per second) bottlenecks. Upbound allocates `50Gi` volumes for `etcd` in cloud environments to ensure adequate IOPS performance.

```yaml
# Default setting
controlPlanes.etcd.persistence.size: "5Gi"
```

For production environments, or when migrating large control planes, increase the `etcd` volume size and specify an appropriate storage class:

```yaml
controlPlanes:
  etcd:
    persistence:
      size: "50Gi"                  # Recommended for production
      storageClassName: "fast-ssd"  # Use a high-performance storage class
```

### Storage class considerations

For AWS:

- Use GP3 volumes with adequate IOPS
- For GP3 volumes, IOPS scale with volume size (3,000 IOPS baseline)
- For optimal performance, provision at least 32Gi to support up to 16,000 IOPS

For GCP and Azure:

- Use SSD-based persistent disk types for optimal performance
- Consider premium storage options for high-throughput workloads

## Scaling Crossplane resources

Crossplane manages provider resources in your control planes. You may need to increase provider resources for larger deployments:

```yaml
# Default settings
controlPlanes.uxp.resourcesCrossplane.requests.cpu: "370m"
controlPlanes.uxp.resourcesCrossplane.requests.memory: "400Mi"
```

For environments with many providers or managed resources:

```yaml
controlPlanes:
  uxp:
    resourcesCrossplane:
      limits:
        cpu: "1000m"     # Add CPU limit
        memory: "1Gi"    # Add memory limit
      requests:
        cpu: "500m"      # Increase CPU request
        memory: "512Mi"  # Increase memory request
```

## High availability configuration

For production environments, enable High Availability mode to ensure resilience:

```yaml
controlPlanes:
  ha:
    enabled: true
```

## Best practices for migration scenarios

When migrating from existing control planes into a self-hosted Space:

1. **Pre-scale resources**: Scale up resources before performing the migration
2. **Monitor resource usage**: Watch resource consumption during and after migration with `kubectl top pods`
3. **Scale incrementally**: If issues persist, increase resources incrementally until performance stabilizes
-
-## Helm values configuration
-
-Apply these settings through your Spaces Helm values file:
-
-```yaml
-controlPlanes:
-  vcluster:
-    resources:
-      limits:
-        cpu: "4000m"
-        memory: "6Gi"
-      requests:
-        cpu: "500m"
-        memory: "2Gi"
-  etcd:
-    persistence:
-      size: "50Gi"
-      storageClassName: "gp3"  # Use your cloud provider's fast storage class
-  uxp:
-    resourcesCrossplane:
-      limits:
-        cpu: "1000m"
-        memory: "1Gi"
-      requests:
-        cpu: "500m"
-        memory: "512Mi"
-  ha:
-    enabled: true  # For production environments
-```
-
-Apply the configuration using Helm:
-
-```bash
-helm upgrade --install spaces oci://xpkg.upbound.io/spaces-artifacts/spaces \
-  -f values.yaml \
-  -n upbound-system
-```
-
-## Considerations
-
-- **Provider count**: Each provider adds resource overhead - consider using provider families to optimize resource usage
-- **Managed resources**: The number of managed resources impacts CPU usage more than memory
-- **Vertical pod autoscaling**: Consider using vertical pod autoscaling in Kubernetes to automatically adjust resources based on usage
-- **Storage performance**: Storage performance is as important as capacity for etcd
-- **Network latency**: Low-latency connections between components improve performance
-
diff --git a/spaces_versioned_docs/version-v1.9/howtos/self-hosted/self-hosted-spaces-deployment.md b/spaces_versioned_docs/version-v1.9/howtos/self-hosted/self-hosted-spaces-deployment.md
deleted file mode 100644
index e549e3939..000000000
--- a/spaces_versioned_docs/version-v1.9/howtos/self-hosted/self-hosted-spaces-deployment.md
+++ /dev/null
@@ -1,461 +0,0 @@
----
-title: Deployment Workflow
-sidebar_position: 3
-description: A quickstart guide for Upbound Spaces
-tier: "business"
----
-import GlobalLanguageSelector, { CodeBlock } from '@site/src/components/GlobalLanguageSelector';
-
-
-This guide deploys a self-hosted Upbound cluster in AWS.
-
-
-This guide deploys a self-hosted Upbound cluster in Azure.
-
-
-This guide deploys a self-hosted Upbound cluster in GCP.
-
-
-Disconnected Spaces allows you to host control planes in your preferred environment.
-
-## Prerequisites
-
-To get started deploying your own Disconnected Space, you need:
-
-- An Upbound organization account string, provided by your Upbound account representative
-- A `token.json` license, provided by your Upbound account representative
-
-
-- An AWS account and the AWS CLI
-
-
-- An Azure account and the Azure CLI
-
-
-- A GCP account and the GCP CLI
-
-
-:::important
-Disconnected Spaces are a business-critical feature of Upbound and require a license token to successfully complete the installation. [Contact Upbound][contact-upbound] if you want to try out Upbound with Disconnected Spaces.
-:::
-
-## Provision the hosting environment
-
-### Create a cluster
-
-
-Configure the name and target region you want the EKS cluster deployed to.
-
-```ini
-export SPACES_CLUSTER_NAME=upbound-space-quickstart
-export SPACES_REGION=us-east-1
-```
-
-Provision a 3-node cluster using eksctl.
-
-```bash
-cat <
-```
-
-
-Configure the name and target region you want the AKS cluster deployed to.
-
-```ini
-export SPACES_RESOURCE_GROUP_NAME=upbound-space-quickstart
-export SPACES_CLUSTER_NAME=upbound-space-quickstart
-export SPACES_LOCATION=westus
-```
-
-Provision a new Azure resource group.
- -```bash -az group create --name ${SPACES_RESOURCE_GROUP_NAME} --location ${SPACES_LOCATION} -``` - -Provision a 3-node cluster. - -```bash -az aks create -g ${SPACES_RESOURCE_GROUP_NAME} -n ${SPACES_CLUSTER_NAME} \ - --enable-managed-identity \ - --node-count 3 \ - --node-vm-size Standard_D4s_v4 \ - --enable-addons monitoring \ - --enable-msi-auth-for-monitoring \ - --generate-ssh-keys \ - --network-plugin kubenet \ - --network-policy calico -``` - -Get the kubeconfig of your AKS cluster. - -```bash -az aks get-credentials --resource-group ${SPACES_RESOURCE_GROUP_NAME} --name ${SPACES_CLUSTER_NAME} -``` - - - - - -Configure the name and target region you want the GKE cluster deployed to. - -```ini -export SPACES_PROJECT_NAME=upbound-spaces-project -export SPACES_CLUSTER_NAME=upbound-spaces-quickstart -export SPACES_LOCATION=us-west1-a -``` - -Create a new project and set it as the current project. - -```bash -gcloud projects create ${SPACES_PROJECT_NAME} -gcloud config set project ${SPACES_PROJECT_NAME} -``` - -Provision a 3-node cluster. - -```bash -gcloud container clusters create ${SPACES_CLUSTER_NAME} \ - --enable-network-policy \ - --num-nodes=3 \ - --zone=${SPACES_LOCATION} \ - --machine-type=e2-standard-4 -``` - -Get the kubeconfig of your GKE cluster. - -```bash -gcloud container clusters get-credentials ${SPACES_CLUSTER_NAME} --zone=${SPACES_LOCATION} -``` - - - -## Configure the pre-install - -### Set your Upbound organization account details - -Set your Upbound organization account string as an environment variable for use in future steps - -```ini -export UPBOUND_ACCOUNT= -``` - -### Set up pre-install configurations - -Export the path of the license token JSON file provided by your Upbound account representative. - -```ini {copy-lines="2"} -# Change the path to where you saved the token. -export SPACES_TOKEN_PATH="/path/to/token.json" -``` - -Set the version of Spaces software you want to install. - -```ini -export SPACES_VERSION= -``` - -Set the router host and cluster type. The `SPACES_ROUTER_HOST` is the domain name that's used to access the control plane instances. It's used by the ingress controller to route requests. - -```ini -export SPACES_ROUTER_HOST="proxy.upbound-127.0.0.1.nip.io" -``` - -:::important -Make sure to replace the placeholder text in `SPACES_ROUTER_HOST` and provide a real domain that you own. -::: - - -## Install the Spaces software - - -### Install cert-manager - -Install cert-manager. - -```bash -kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.11.0/cert-manager.yaml -kubectl wait deployment -n cert-manager cert-manager-webhook --for condition=Available=True --timeout=360s -``` - - - -### Install ALB Load Balancer - -```bash -helm install aws-load-balancer-controller aws-load-balancer-controller --namespace kube-system \ - --repo https://aws.github.io/eks-charts \ - --set clusterName=${SPACES_CLUSTER_NAME} \ - --set serviceAccount.create=false \ - --set serviceAccount.name=aws-load-balancer-controller \ - --wait -``` - - - -### Install ingress-nginx - -Starting with Spaces v1.10.0, you need to configure the ingress-nginx -controller to allow SSL-passthrough mode. You can do so by passing the -`--enable-ssl-passthrough=true` command-line option to the controller. 
-The following Helm install command enables this with the `controller.extraArgs` -parameter: - - - -```bash -helm upgrade --install ingress-nginx ingress-nginx \ - --create-namespace --namespace ingress-nginx \ - --repo https://kubernetes.github.io/ingress-nginx \ - --version 4.12.1 \ - --set 'controller.service.type=LoadBalancer' \ - --set 'controller.extraArgs.enable-ssl-passthrough=true' \ - --set 'controller.service.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-type=external' \ - --set 'controller.service.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-scheme=internet-facing' \ - --set 'controller.service.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-nlb-target-type=ip' \ - --set 'controller.service.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-healthcheck-protocol=http' \ - --set 'controller.service.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-healthcheck-path=/healthz' \ - --set 'controller.service.annotations.service\.beta\.kubernetes\.io/aws-load-balancer-healthcheck-port=10254' \ - --wait -``` - - - - - -```bash -helm upgrade --install ingress-nginx ingress-nginx \ - --create-namespace --namespace ingress-nginx \ - --repo https://kubernetes.github.io/ingress-nginx \ - --version 4.12.1 \ - --set 'controller.service.type=LoadBalancer' \ - --set 'controller.extraArgs.enable-ssl-passthrough=true' \ - --set 'controller.service.annotations.service\.beta\.kubernetes\.io/azure-load-balancer-health-probe-request-path=/healthz' \ - --wait -``` - - - - - -```bash -helm upgrade --install ingress-nginx ingress-nginx \ - --create-namespace --namespace ingress-nginx \ - --repo https://kubernetes.github.io/ingress-nginx \ - --version 4.12.1 \ - --set 'controller.service.type=LoadBalancer' \ - --set 'controller.extraArgs.enable-ssl-passthrough=true' \ - --wait -``` - - - -### Install Upbound Spaces software - -Create an image pull secret so that the cluster can pull Upbound Spaces images. - -```bash -kubectl create ns upbound-system -kubectl -n upbound-system create secret docker-registry upbound-pull-secret \ - --docker-server=https://xpkg.upbound.io \ - --docker-username="$(jq -r .accessId $SPACES_TOKEN_PATH)" \ - --docker-password="$(jq -r .token $SPACES_TOKEN_PATH)" -``` - -Log in with Helm to be able to pull chart images for the installation commands. - -```bash -jq -r .token $SPACES_TOKEN_PATH | helm registry login xpkg.upbound.io -u $(jq -r .accessId $SPACES_TOKEN_PATH) --password-stdin -``` - -Install the Spaces software. - -```bash -helm -n upbound-system upgrade --install spaces \ - oci://xpkg.upbound.io/spaces-artifacts/spaces \ - --version "${SPACES_VERSION}" \ - --set "ingress.host=${SPACES_ROUTER_HOST}" \ - --set "account=${UPBOUND_ACCOUNT}" \ - --set "authentication.hubIdentities=true" \ - --set "authorization.hubRBAC=true" \ - --wait -``` - -### Create a DNS record - -:::important -If you chose to create a public ingress, you also need to create a DNS record for the load balancer of the public facing ingress. Do this before you create your first control plane. -::: - -Create a DNS record for the load balancer of the public facing ingress. 
To get the address for the Ingress, run the following: - - - -```bash -kubectl get ingress \ - -n upbound-system mxe-router-ingress \ - -o jsonpath='{.status.loadBalancer.ingress[0].hostname}' -``` - - - - - -```bash -kubectl get ingress \ - -n upbound-system mxe-router-ingress \ - -o jsonpath='{.status.loadBalancer.ingress[0].ip}' -``` - - - - - -```bash -kubectl get ingress \ - -n upbound-system mxe-router-ingress \ - -o jsonpath='{.status.loadBalancer.ingress[0].ip}' -``` - - - -If the preceding command doesn't return a load balancer address then your provider may not have allocated it yet. Once it's available, add a DNS record for the `ROUTER_HOST` to point to the given load balancer address. If it's an IPv4 address, add an A record. If it's a domain name, add a CNAME record. - -## Configure the up CLI - -With your kubeconfig pointed at the Kubernetes cluster where you installed -Upbound Spaces, create a new profile in the `up` CLI. This profile interacts -with your Space: - -```bash -up profile create --use ${SPACES_CLUSTER_NAME} --type=disconnected --organization ${UPBOUND_ACCOUNT} -``` - -Optionally, log in to your Upbound account using the new profile so you can use the Upbound Marketplace with this profile as well: - -```bash -up login -``` - - -## Connect to your Space - - -Use `up ctx` to create a kubeconfig context pointed at your new Space: - -```bash -up ctx disconnected/$(kubectl config current-context) -``` - -## Create your first control plane - -You can now create a control plane with the `up` CLI: - -```bash -up ctp create ctp1 -``` - -You can also create a control plane with kubectl: - -```yaml -cat < -```yaml -observability: - spacesCollector: - env: - - name: API_KEY - valueFrom: - secretKeyRef: - name: my-secret - key: api-key - config: - exporters: - otlphttp: - endpoint: "" - headers: - api-key: ${env:API_KEY} - exportPipeline: - logs: - - otlphttp - metrics: - - otlphttp - traces: - - otlphttp -``` - - -You can export metrics, logs, and traces from your Crossplane installation, Spaces -infrastructure (controller, API, router, etc.), provider-helm, and -provider-kubernetes. - -### Router metrics - -The Spaces router component uses Envoy as a reverse proxy and exposes detailed -metrics about request handling, circuit breakers, and connection pooling. -Upbound collects these metrics in your Space after you enable Space-level -observability. - -Envoy metrics in Upbound include: - -- **Upstream cluster metrics** - Request status codes, timeouts, retries, and latency for traffic to control planes and services -- **Circuit breaker metrics** - Connection and request circuit breaker state for both `DEFAULT` and `HIGH` priority levels -- **Downstream listener metrics** - Client connections and requests received -- **HTTP connection manager metrics** - End-to-end HTTP request processing and latency - -For a complete list of available router metrics and example PromQL queries, see the [Router metrics reference][router-ref]. - -### Router tracing - -The Spaces router generates distributed traces through OpenTelemetry integration, -providing end-to-end visibility into request flow across the system. Use these -traces to debug latency issues, understand request paths, and correlate errors -across services. 
-
-The router uses:
-
-- **Protocol**: OTLP (OpenTelemetry Protocol) over gRPC
-- **Service name**: `spaces-router`
-- **Transport**: TLS-encrypted connection to telemetry collector
-
-#### Trace configuration
-
-Enable tracing and configure the sampling rate with the following Helm values:
-
-```yaml
-observability:
-  enabled: true
-  tracing:
-    enabled: true
-    sampling:
-      rate: 0.1 # Sample 10% of new traces (0.0-1.0)
-```
-
-The sampling behavior depends on whether a parent trace context exists:
-
-- **With parent context**: If a `traceparent` header is present, the parent's
-  sampling decision is respected, enabling proper distributed tracing across services.
-- **Root spans**: For new traces without a parent, Envoy samples based on
-  `x-request-id` hashing. The default sampling rate is 10%.
-
-#### TLS configuration for external collectors
-
-To send traces to an external OTLP collector, configure the endpoint and TLS settings:
-
-```yaml
-observability:
-  enabled: true
-  tracing:
-    enabled: true
-    endpoint: "otlp-gateway.example.com"
-    port: 443
-    tls:
-      caBundleSecretRef: "custom-ca-secret"
-```
-
-If `caBundleSecretRef` is set, the router uses the CA bundle from the referenced
-Kubernetes secret. The secret must contain a key named `ca.crt` with the
-PEM-encoded CA bundle. If not set, the router uses the Spaces CA for the
-in-cluster collector.
-
-#### Custom trace tags
-
-The router adds custom tags to every span to enable filtering and grouping by
-control plane:
-
-| Tag | Source | Description |
-|-----|--------|-------------|
-| `controlplane.id` | `x-upbound-mxp-id` header | Control plane UUID |
-| `controlplane.name` | `x-upbound-mxp-host` header | Internal vcluster hostname |
-| `hostcluster.id` | `x-upbound-hostcluster-id` header | Host cluster identifier |
-
-These tags enable queries like "show all slow requests to control plane X" or
-"find errors for control planes in host cluster Y."
-
-#### Example trace
-
-The following example shows the attributes from a successful GET request:
-
-```text
-Span: ingress
-├─ Service: spaces-router
-├─ Duration: 8.025ms
-├─ Attributes:
-│  ├─ http.method: GET
-│  ├─ http.status_code: 200
-│  ├─ upstream_cluster: ctp-b2b37aaa-ee55-492c-ba0c-4d561a6325fa-api-cluster
-│  ├─ controlplane.id: b2b37aaa-ee55-492c-ba0c-4d561a6325fa
-│  ├─ controlplane.name: vcluster.mxp-b2b37aaa-ee55-492c-ba0c-4d561a6325fa-system
-│  └─ response_size: 1827
-```
-
-## Available metrics
-
-Space-level observability collects metrics from multiple infrastructure components:
-
-### Infrastructure component metrics
-
-- Crossplane controller metrics
-- Spaces controller, API, and router metrics
-- Provider metrics (provider-helm, provider-kubernetes)
-
-### Router metrics
-
-The router component exposes Envoy proxy metrics for monitoring traffic flow and
-service health.
Key metric categories include:
-
-- `envoy_cluster_upstream_rq_*` - Upstream request metrics (status codes, timeouts, retries, latency)
-- `envoy_cluster_circuit_breakers_*` - Circuit breaker state and capacity
-- `envoy_listener_downstream_*` - Client connection and request metrics
-- `envoy_http_downstream_*` - HTTP request processing metrics
-
-Example query to monitor total request rate:
-
-```promql
-sum(rate(envoy_cluster_upstream_rq_total{job="spaces-router-envoy"}[5m]))
-```
-
-Example query for P95 latency:
-
-```promql
-histogram_quantile(
-  0.95,
-  sum by (le) (
-    rate(envoy_cluster_upstream_rq_time_bucket{job="spaces-router-envoy"}[5m])
-  )
-)
-```
-
-For detailed router metrics documentation and more query examples, see the [Router metrics reference][router-ref].
-
-## OpenTelemetryCollector image
-
-Control plane (`SharedTelemetry`) and Space observability deploy the same custom
-OpenTelemetry Collector image. This image supports the `otlphttp`, `datadog`,
-and `debug` exporters.
-
-For more information on observability configuration, review the [Helm chart reference][helm-chart-reference].
-
-## Observability in control planes
-
-Read the [observability documentation][observability-documentation] to learn
-about the features Upbound offers for collecting telemetry from control planes.
-
-## Router metrics reference {#router-ref}
-
-To avoid overwhelming observability tools with hundreds of Envoy metrics, an
-allow-list filters metrics to only the following metric families.
-
-### Upstream cluster metrics
-
-Metrics tracking requests sent from Envoy to configured upstream clusters.
-Individual control planes, spaces-api, and other services are each considered
-an upstream cluster. Use these metrics to monitor service health, identify
-upstream errors, and measure backend latency.
-
-| Metric | Description |
-|--------|-------------|
-| `envoy_cluster_upstream_rq_xx_total` | HTTP status codes (2xx, 3xx, 4xx, 5xx) with label `envoy_response_code_class` |
-| `envoy_cluster_upstream_rq_timeout_total` | Requests that timed out waiting for upstream |
-| `envoy_cluster_upstream_rq_retry_limit_exceeded_total` | Requests that exhausted retry attempts |
-| `envoy_cluster_upstream_rq_total` | Total upstream requests |
-| `envoy_cluster_upstream_rq_time_bucket` | Latency histogram (for P50/P95/P99 calculations) |
-| `envoy_cluster_upstream_rq_time_sum` | Sum of request durations |
-| `envoy_cluster_upstream_rq_time_count` | Count of requests |
-
-### Circuit breaker metrics
-
-Metrics tracking circuit breaker state and remaining capacity. Circuit breakers
-prevent cascading failures by limiting connections and concurrent requests to
-unhealthy upstreams. Two priority levels exist: `DEFAULT` for watch requests and
-`HIGH` for API requests.
- - -| Name | Description | -|--------|-------------| -| `envoy_cluster_circuit_breakers_default_cx_open` | `DEFAULT` priority connection circuit breaker open (gauge) | -| `envoy_cluster_circuit_breakers_default_rq_open` | `DEFAULT` priority request circuit breaker open (gauge) | -| `envoy_cluster_circuit_breakers_default_remaining_cx` | Available `DEFAULT` priority connections (gauge) | -| `envoy_cluster_circuit_breakers_default_remaining_rq` | Available `DEFAULT` priority request slots (gauge) | -| `envoy_cluster_circuit_breakers_high_cx_open` | `HIGH` priority connection circuit breaker open (gauge) | -| `envoy_cluster_circuit_breakers_high_rq_open` | `HIGH` priority request circuit breaker open (gauge) | -| `envoy_cluster_circuit_breakers_high_remaining_cx` | Available `HIGH` priority connections (gauge) | -| `envoy_cluster_circuit_breakers_high_remaining_rq` | Available `HIGH` priority request slots (gauge) | - -### Downstream listener metrics - -Metrics tracking requests received from clients such as kubectl and API consumers. -Use these metrics to monitor client connection patterns, overall request volume, -and responses sent to external users. - -| Name | Description | -|--------|-------------| -| `envoy_listener_downstream_rq_xx_total` | HTTP status codes for responses sent to clients | -| `envoy_listener_downstream_rq_total` | Total requests received from clients | -| `envoy_listener_downstream_cx_total` | Total connections from clients | -| `envoy_listener_downstream_cx_active` | Currently active client connections (gauge) | - - - -### HTTP connection manager metrics - - -Metrics from Envoy's HTTP connection manager tracking end-to-end request -processing. These metrics provide a comprehensive view of the HTTP request -lifecycle including status codes and client-perceived latency. - -| Name | Description | -|--------|-------------| -| `envoy_http_downstream_rq_xx` | HTTP status codes (note: no `_total` suffix for this metric family) | -| `envoy_http_downstream_rq_total` | Total HTTP requests received | -| `envoy_http_downstream_rq_time_bucket` | Downstream request latency histogram | -| `envoy_http_downstream_rq_time_sum` | Sum of downstream request durations | -| `envoy_http_downstream_rq_time_count` | Count of downstream requests | - -[router-ref]: #router-ref -[observability-documentation]: /spaces/howtos/observability -[opentelemetry-collector]: https://opentelemetry.io/docs/collector/ -[opentelemetry-operator]: https://opentelemetry.io/docs/kubernetes/operator/ -[helm-chart-reference]: /reference/helm-reference diff --git a/spaces_versioned_docs/version-v1.9/howtos/self-hosted/spaces-management.md b/spaces_versioned_docs/version-v1.9/howtos/self-hosted/spaces-management.md deleted file mode 100644 index 3df61c306..000000000 --- a/spaces_versioned_docs/version-v1.9/howtos/self-hosted/spaces-management.md +++ /dev/null @@ -1,219 +0,0 @@ ---- -title: Interacting with Disconnected Spaces -sidebar_position: 10 -description: Common operations in Spaces ---- - -:::info API Version Information -This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Spaces management procedures are consistent across versions. - -For version compatibility details, see the . -::: - -## Spaces management - -### Create a Space - -To install an Upbound Space into a cluster, it's recommended you dedicate an entire Kubernetes cluster for the Space. You can use [up space init][up-space-init] to install an Upbound Space. 
Below is an example:
-
-```bash
-up space init "v1.9.0"
-```
-:::tip
-For a full guide to get started with Spaces, read the [quickstart][quickstart] guide.
-:::
-
-You can also install the helm chart for Spaces directly. In order for a Spaces install to succeed, you must first install and configure some prerequisites. This includes:
-
-- UXP
-- provider-helm and provider-kubernetes
-- cert-manager
-
-Furthermore, the Spaces chart requires a pull secret, which Upbound must provide to you.
-
-```bash
-helm -n upbound-system upgrade --install spaces \
-  oci://xpkg.upbound.io/spaces-artifacts/spaces \
-  --version "v1.9.0" \
-  --set "ingress.host=your-host.com" \
-  --set "clusterType=eks" \
-  --set "account=your-upbound-account" \
-  --wait
-```
-For a complete tutorial of the helm install, read one of the deployment guides for [AWS][aws], [Azure][azure], or [GCP][gcp], which cover the step-by-step process.
-
-### Upgrade a Space
-
-To upgrade a Space from one version to the next, use [up space upgrade][up-space-upgrade]. Spaces supports upgrading from version `ver x.N.*` to version `ver x.N+1.*`.
-
-```bash
-up space upgrade "v1.9.0"
-```
-
-You can also upgrade a Space by manually bumping the Helm chart version. Before
-upgrading, review the release notes for any breaking changes or
-special requirements:
-
-1. Review the release notes for the target version in the [Spaces Release Notes][spaces-release-notes]
-2. Upgrade the Space by updating the helm chart version:
-
-```bash
-helm -n upbound-system upgrade spaces \
-  oci://xpkg.upbound.io/spaces-artifacts/spaces \
-  --version "v1.9.0" \
-  --reuse-values \
-  --wait
-```
-
-For major version upgrades or configuration changes, extract your current values
-and adjust:
-
-```bash
-# Extract current values to a file
-helm -n upbound-system get values spaces > spaces-values.yaml
-
-# Upgrade with modified values
-helm -n upbound-system upgrade spaces \
-  oci://xpkg.upbound.io/spaces-artifacts/spaces \
-  --version "v1.9.0" \
-  -f spaces-values.yaml \
-  --wait
-```
-
-### Downgrade a Space
-
-To roll back a Space from one version to the previous, use [up space upgrade][up-space-upgrade-1]. Spaces supports downgrading from version `ver x.N.*` to version `ver x.N-1.*`.
-
-```bash
-up space upgrade --rollback
-```
-
-You can also downgrade a Space manually using Helm by specifying an earlier version:
-
-```bash
-helm -n upbound-system upgrade spaces \
-  oci://xpkg.upbound.io/spaces-artifacts/spaces \
-  --version "v1.8.0" \
-  --reuse-values \
-  --wait
-```
-
-When downgrading, make sure to:
-1. Check the [release notes][release-notes] for specific downgrade instructions
-2. Verify compatibility between the downgraded Space and any control planes
-3. Back up any critical data before proceeding
-
-### Uninstall a Space
-
-To uninstall a Space from a Kubernetes cluster, use [up space destroy][up-space-destroy]. A destroy operation uninstalls core components and orphans control planes and their associated resources.
-
-```bash
-up space destroy
-```
-
-## Control plane management
-
-You can manage control planes in a Space via the [up CLI][up-cli] or the Spaces-local Kubernetes API. When you install a Space, it defines a new API type, `kind: ControlPlane`, that you can use to create and manage control planes in the Space.
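-
-As a quick sanity check, you can confirm the Space serves this API group before
-creating anything. A minimal sketch using standard kubectl discovery commands,
-run against the Spaces cluster:
-
-```bash
-# List the control plane APIs the Space exposes.
-kubectl api-resources --api-group=spaces.upbound.io
-
-# Inspect the ControlPlane schema served by the Space.
-kubectl explain controlplane --api-version=spaces.upbound.io/v1beta1
-```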
-
-### Create a control plane
-
-To create a control plane in a Space using `up`, run the following:
-
-```bash
-up ctp create ctp1
-```
-
-You can also declare a new control plane like the example below and apply it to your Spaces cluster:
-
-```yaml
-apiVersion: spaces.upbound.io/v1beta1
-kind: ControlPlane
-metadata:
-  name: ctp1
-  namespace: default
-spec:
-  writeConnectionSecretToRef:
-    name: kubeconfig-ctp1
-    namespace: default
-```
-
-This manifest:
-
-- Creates a new control plane in the space called `ctp1`.
-- Publishes the kubeconfig used to connect to the control plane to a secret in the Spaces cluster, called `kubeconfig-ctp1`
-
-### Connect to a control plane
-
-To connect to a control plane in a Space using `up`, run the following:
-
-```bash
-up ctp connect new-control-plane
-```
-
-The command changes your kubeconfig's current context to the control plane you specify. If you want to change your kubeconfig back to a previous context, run:
-
-```bash
-up ctp disconnect
-```
-
-If you configured your control plane to publish connection details, you can also access it this way. Once the control plane is ready, use the secret (containing connection details) to connect to the API server of your control plane.
-
-```bash
-kubectl get secret <secret-name> -n default -o jsonpath='{.data.kubeconfig}' | base64 -d > /tmp/<control-plane-name>.yaml
-```
-
-Reference the kubeconfig whenever you want to interact directly with the API server of the control plane (vs the Space's API server):
-
-```bash
-kubectl get providers --kubeconfig=/tmp/<control-plane-name>.yaml
-```
-
-### Configure a control plane
-
-Spaces offers a built-in feature that allows you to connect a control plane to a Git source. This experience mirrors how a control plane runs in [Upbound's SaaS environment][upbound-s-saas-environment]. Upbound recommends using the built-in Git integration to drive configuration of your control planes in a Space.
-
-Learn more in the [Spaces Git integration][spaces-git-integration] documentation.
-
-### List control planes
-
-To list all control planes in a Space using `up`, run the following:
-
-```bash
-up ctp list
-```
-
-Or you can use Kubernetes-style semantics to list the control planes:
-
-```bash
-kubectl get controlplanes
-```
-
-### Delete a control plane
-
-To delete a control plane in a Space using `up`, run the following:
-
-```bash
-up ctp delete ctp1
-```
-
-Or you can use Kubernetes-style semantics to delete the control plane:
-
-```bash
-kubectl delete controlplane ctp1
-```
-
-[up-space-init]: /reference/cli-reference
-[quickstart]: /
-[aws]: /spaces/howtos/self-hosted/self-hosted-spaces-deployment
-[azure]: /spaces/howtos/self-hosted/self-hosted-spaces-deployment
-[gcp]: /spaces/howtos/self-hosted/self-hosted-spaces-deployment
-[up-space-upgrade]: /reference/cli-reference
-[spaces-release-notes]: /reference/release-notes/spaces
-[up-space-upgrade-1]: /reference/cli-reference
-[release-notes]: /reference/release-notes/spaces
-[up-space-destroy]: /reference/cli-reference
-[up-cli]: /reference/cli-reference
-[upbound-s-saas-environment]: /spaces/howtos/self-hosted/spaces-management
-[spaces-git-integration]: /spaces/howtos/self-hosted/gitops
diff --git a/spaces_versioned_docs/version-v1.9/howtos/self-hosted/troubleshooting.md b/spaces_versioned_docs/version-v1.9/howtos/self-hosted/troubleshooting.md
deleted file mode 100644
index 8d1ca6517..000000000
--- a/spaces_versioned_docs/version-v1.9/howtos/self-hosted/troubleshooting.md
+++ /dev/null
@@ -1,132 +0,0 @@
----
-title: Troubleshooting
-sidebar_position: 100
-description: A guide for troubleshooting an issue that occurs in a Space
----
-
-Use the guidance below to find solutions for issues you encounter when deploying and using an Upbound Space. Treat these tips as a supplement to the observability metrics discussed on the [Observability][observability] page.
-
-## General tips
-
-Most issues fall into two general categories:
-
-1. issues with the Spaces management plane
-2. issues on a control plane
-
-If your control plane doesn't reach a `Ready` state, it's indicative of the former. If your control plane is in a created and running state, but resources aren't reconciling, it's indicative of the latter.
-
-### Spaces component layout
-
-Run `kubectl get pods -A` against the cluster hosting a Space. You should see a variety of pods across several namespaces.
It should look something like this: - -```bash -NAMESPACE NAME READY STATUS RESTARTS AGE -cert-manager cert-manager-6d6769565c-mc5df 1/1 Running 0 25m -cert-manager cert-manager-cainjector-744bb89575-nw4fg 1/1 Running 0 25m -cert-manager cert-manager-webhook-759d6dcbf7-ps4mq 1/1 Running 0 25m -ingress-nginx ingress-nginx-controller-7f8ccfccc6-6szlp 1/1 Running 0 25m -kube-system coredns-5d78c9869d-4p477 1/1 Running 0 26m -kube-system coredns-5d78c9869d-pdxt6 1/1 Running 0 26m -kube-system etcd-kind-control-plane 1/1 Running 0 26m -kube-system kindnet-8s7pq 1/1 Running 0 26m -kube-system kube-apiserver-kind-control-plane 1/1 Running 0 26m -kube-system kube-controller-manager-kind-control-plane 1/1 Running 0 26m -kube-system kube-proxy-l68r8 1/1 Running 0 26m -kube-system kube-scheduler-kind-control-plane 1/1 Running 0 26m -local-path-storage local-path-provisioner-6bc4bddd6b-qsdjt 1/1 Running 0 26m -mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system coredns-5dc69d6447-f56rh-x-kube-system-x-vcluster 1/1 Running 0 21m -mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system crossplane-6b6d67bc66-6b8nx-x-upbound-system-x-vcluster 1/1 Running 0 20m -mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system crossplane-rbac-manager-78f6fc7cb4-pjkhc-x-upbound-s-12253c3c4e 1/1 Running 0 20m -mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system kube-state-metrics-7f8f4dcc5b-8p8c4 1/1 Running 0 22m -mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system mxp-gateway-68f546b9c8-xnz5j-x-upbound-system-x-vcluster 1/1 Running 0 20m -mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system mxp-ksm-config-54655667bb-hv9br 1/1 Running 0 22m -mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system mxp-readyz-5f7f97d967-b98bw 1/1 Running 0 22m -mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system otlp-collector-56d7d46c8d-g5sh5-x-upbound-system-x-vcluster 1/1 Running 0 20m -mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system vcluster-67c9fb8959-ppb2m 1/1 Running 0 22m -mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system vcluster-api-6bfbccc49d-ffgpj 1/1 Running 0 22m -mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system vcluster-controller-7cc6855656-8c46b 1/1 Running 0 22m -mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system vcluster-etcd-0 1/1 Running 0 22m -mxp-706c49fa-5bb8-4a7e-9f41-2fc38ef4b065-system vector-754b494b84-wljw4 1/1 Running 0 22m -mxp-system mxp-charts-chartmuseum-7587f77558-8tltb 1/1 Running 0 23m -upbound-system crossplane-b4dc7b4c9-6hjh5 1/1 Running 0 25m -upbound-system crossplane-contrib-provider-helm-ce18dd03e6e4-7945d8985-4gcwr 1/1 Running 0 24m -upbound-system crossplane-contrib-provider-kubernetes-1f1e32c1957d-577756gs2x4 1/1 Running 0 24m -upbound-system crossplane-rbac-manager-d8cb49cbc-gbvvf 1/1 Running 0 25m -upbound-system spaces-controller-6647677cf9-5zl5q 1/1 Running 0 24m -upbound-system spaces-router-bc78c96d7-kzts2 2/2 Running 0 24m -``` - -What you are seeing is: - -- Pods in the `upbound-system` namespace are components required to run the management plane of the Space. This includes the `spaces-controller`, `spaces-router`, and install of UXP. -- Pods in the `mxp-{GUID}-system` namespace are components that collectively power a control plane. Notable call outs include pod names that look like `vcluster-api-{GUID}` and `vcluster-controller-{GUID}`, which are integral components of a control plane. -- Pods in other notable namespaces, including `cert-manager` and `ingress-nginx`, are prerequisite components that support a Space's successful operation. 
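-
-To map this layout in your own Space, a small sketch that only assumes the
-`mxp-` namespace naming convention described above (substitute a real namespace
-from the first command into the second):
-
-```bash
-# List the host namespaces backing individual control planes.
-kubectl get namespaces | grep '^mxp-'
-
-# Inspect the components of one control plane.
-kubectl get pods -n mxp-<guid>-system
-```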
-
-### Troubleshooting tips for the Spaces management plane
-
-Start by getting the status of all the pods in a Space:
-
-1. Make sure the current context of your kubeconfig points at the Kubernetes cluster hosting your Space
-2. Get the status of all the pods in the Space:
-```bash
-kubectl get pods -A
-```
-3. Scan the `Status` column to see if any of the pods report a status besides `Running`.
-4. Scan the `Restarts` column to see if any of the pods have restarted.
-5. If you notice a status other than `Running` or see pods that restarted, you should investigate their events by running
-```bash
-kubectl describe pod <pod-name> -n <namespace>
-```
-
-Next, inspect the status of objects and releases:
-
-1. Make sure the current context of your kubeconfig points at the Kubernetes cluster hosting your Space
-2. Inspect the objects in your Space. If any are unhealthy, describe those objects to get the events:
-```bash
-kubectl get objects
-```
-3. Inspect the releases in your Space. If any are unhealthy, describe those releases to get the events:
-```bash
-kubectl get releases
-```
-
-### Troubleshooting tips for control planes in a Space
-
-General troubleshooting in a control plane starts by fetching the events of the control plane:
-
-1. Make sure the current context of your kubeconfig points at the Kubernetes cluster hosting your Space
-2. Run the following to fetch your control planes.
-```bash
-kubectl get ctp
-```
-3. Describe the control plane by providing its name, found in the preceding instruction.
-```bash
-kubectl describe controlplanes.spaces.upbound.io <control-plane-name>
-```
-
-## Issues
-
-### Your control plane is stuck in a 'creating' state
-
-#### Error: unknown field "ports" in io.k8s.api.networking.v1.NetworkPolicySpec
-
-The Spaces software emits this error when it attempts to install a Helm release named `control-plane-host-policies`. The full error is:
-
-_CannotCreateExternalResource failed to install release: unable to build kubernetes objects from release manifest: error validating "": error validating data: ValidationError(NetworkPolicy.spec): unknown field "ports" in io.k8s.api.networking.v1.NetworkPolicySpec_
-
-This error may be caused by running a Space on an earlier version of Kubernetes than is supported (`v1.26` or later). To resolve this issue, upgrade the host Kubernetes cluster to version 1.26 or later.
-
-### Your Spaces install fails
-
-#### Error: You tried to install a Space on a previous Crossplane installation
-
-If you try to install a Space on an existing cluster that previously had Crossplane or UXP on it, you may encounter errors. Due to how the Spaces installer tests for the presence of UXP, it may detect orphaned CRDs that weren't cleaned up by the previous uninstall of Crossplane. You may need to manually [remove old Crossplane CRDs][remove-old-crossplane-crds] for the installer to properly detect the UXP prerequisite.
-
-[observability]: /spaces/howtos/observability
-[remove-old-crossplane-crds]: https://docs.crossplane.io/latest/guides/uninstall-crossplane/
diff --git a/spaces_versioned_docs/version-v1.9/howtos/self-hosted/use-argo.md b/spaces_versioned_docs/version-v1.9/howtos/self-hosted/use-argo.md
deleted file mode 100644
index d58f7db44..000000000
--- a/spaces_versioned_docs/version-v1.9/howtos/self-hosted/use-argo.md
+++ /dev/null
@@ -1,228 +0,0 @@
----
-title: Use ArgoCD Plugin
-sidebar_position: 15
-description: A guide for integrating Argo with control planes in a Space.
-aliases:
-  - /all-spaces/self-hosted-spaces/use-argo
-  - /deploy/disconnected-spaces/use-argo-flux
-  - /all-spaces/self-hosted-spaces/use-argo-flux
-  - /connect/use-argo
----
-
-:::info API Version Information
-This guide covers the ArgoCD Plugin, a preview feature available in all supported versions (v1.9-v1.14+) for self-hosted deployments.
-
-For details on GitOps patterns and related features across versions, see [Automation and GitOps Overview](/spaces/howtos/automation-and-gitops/overview/).
-:::
-
-:::important
-This feature is in preview and is off by default. To enable it, set `features.alpha.argocdPlugin.enabled=true` when installing Spaces:
-
-```bash
-up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
-  ...
-  --set "features.alpha.argocdPlugin.enabled=true"
-```
-:::
-
-Spaces provides an optional plugin to assist with integrating a control plane in a Space with Argo CD. You must enable the plugin for the entire Space at Spaces install or upgrade time. The plugin's job is to propagate the connection details of each control plane in a Space to Argo CD. By default, Upbound stores these connection details in a Kubernetes secret named after the control plane. To run Argo CD across multiple namespaces, Upbound recommends enabling the `features.alpha.argocdPlugin.useUIDFormatForCTPSecrets` flag to use a UID-based format for secret names to avoid conflicts.
-
-:::tip
-For general guidance on integrating Upbound with GitOps flows, see [GitOps with Control Planes][gitops-with-control-planes].
-:::
-
-## On cluster Argo CD
-
-If you are running Argo CD on the same cluster as the Space, run the following to enable the plugin:
-
-
-```bash {hl_lines="3-4"}
-up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
-  --set "account=${UPBOUND_ACCOUNT}" \
-  --set "features.alpha.argocdPlugin.enabled=true" \
-  --set "features.alpha.argocdPlugin.useUIDFormatForCTPSecrets=true" \
-  --set "features.alpha.argocdPlugin.target.secretNamespace=argocd"
-```
-
-
-```bash {hl_lines="7-8"}
-helm -n upbound-system upgrade --install spaces \
-  oci://xpkg.upbound.io/spaces-artifacts/spaces \
-  --version "${SPACES_VERSION}" \
-  --set "ingress.host=${SPACES_ROUTER_HOST}" \
-  --set "account=${UPBOUND_ACCOUNT}" \
-  --set "features.alpha.argocdPlugin.enabled=true" \
-  --set "features.alpha.argocdPlugin.useUIDFormatForCTPSecrets=true" \
-  --set "features.alpha.argocdPlugin.target.secretNamespace=argocd" \
-  --wait
-```
-
-
-The important flags are:
-
-- `features.alpha.argocdPlugin.enabled=true`
-- `features.alpha.argocdPlugin.useUIDFormatForCTPSecrets=true`
-- `features.alpha.argocdPlugin.target.secretNamespace=argocd`
-
-The first flag enables the feature, the second switches control plane secrets to a UID-based naming format to avoid conflicts, and the third indicates the namespace on the cluster where you installed Argo CD.
-
-Be sure to [configure Argo][configure-argo] after it's installed.
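-
-To verify the plugin is propagating connection details, you can list the
-secrets it writes into the Argo CD namespace. A minimal check, assuming the
-plugin registers control planes as standard Argo CD cluster secrets, which
-carry the well-known `argocd.argoproj.io/secret-type` label:
-
-```bash
-# Cluster secrets Argo CD recognizes carry this label.
-kubectl get secrets -n argocd -l argocd.argoproj.io/secret-type=cluster
-```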
-
-## External cluster Argo CD
-
-If you are running Argo CD on an external cluster from where you installed your Space, you need to provide some extra flags:
-
-
-```bash {hl_lines="3-7"}
-up space init --token-file="${SPACES_TOKEN_PATH}" "v${SPACES_VERSION}" \
-  --set "account=${UPBOUND_ACCOUNT}" \
-  --set "features.alpha.argocdPlugin.enabled=true" \
-  --set "features.alpha.argocdPlugin.useUIDFormatForCTPSecrets=true" \
-  --set "features.alpha.argocdPlugin.target.secretNamespace=argocd" \
-  --set "features.alpha.argocdPlugin.target.externalCluster.enabled=true" \
-  --set "features.alpha.argocdPlugin.target.externalCluster.secret.name=my-argo-cluster" \
-  --set "features.alpha.argocdPlugin.target.externalCluster.secret.key=kubeconfig"
-```
-
-
-```bash {hl_lines="7-11"}
-helm -n upbound-system upgrade --install spaces \
-  oci://xpkg.upbound.io/spaces-artifacts/spaces \
-  --version "${SPACES_VERSION}" \
-  --set "ingress.host=${SPACES_ROUTER_HOST}" \
-  --set "account=${UPBOUND_ACCOUNT}" \
-  --set "features.alpha.argocdPlugin.enabled=true" \
-  --set "features.alpha.argocdPlugin.useUIDFormatForCTPSecrets=true" \
-  --set "features.alpha.argocdPlugin.target.secretNamespace=argocd" \
-  --set "features.alpha.argocdPlugin.target.externalCluster.enabled=true" \
-  --set "features.alpha.argocdPlugin.target.externalCluster.secret.name=my-argo-cluster" \
-  --set "features.alpha.argocdPlugin.target.externalCluster.secret.key=kubeconfig" \
-  --wait
-```
-
-
-```bash
-helm -n upbound-system upgrade --install spaces \
-  oci://xpkg.upbound.io/spaces-artifacts/spaces \
-  --version "${SPACES_VERSION}" \
-  --set "ingress.host=${SPACES_ROUTER_HOST}" \
-  --set "account=${UPBOUND_ACCOUNT}" \
-  --set "features.alpha.argocdPlugin.enabled=true" \
-  --set "features.alpha.argocdPlugin.useUIDFormatForCTPSecrets=true" \
-  --set "features.alpha.argocdPlugin.target.secretNamespace=argocd" \
-  --set "features.alpha.argocdPlugin.target.externalCluster.enabled=true" \
-  --set "features.alpha.argocdPlugin.target.externalCluster.secret.name=my-argo-cluster" \
-  --set "features.alpha.argocdPlugin.target.externalCluster.secret.key=kubeconfig" \
-  --wait
-```
-
-The extra flags are:
-
-- `features.alpha.argocdPlugin.target.externalCluster.enabled=true`
-- `features.alpha.argocdPlugin.useUIDFormatForCTPSecrets=true`
-- `features.alpha.argocdPlugin.target.externalCluster.secret.name=my-argo-cluster`
-- `features.alpha.argocdPlugin.target.externalCluster.secret.key=kubeconfig`
-
-These flags tell the plugin (running in Spaces) where your Argo CD instance is. After you've done this at install time, you also need to create a `Secret` on the Spaces cluster. This secret must contain a kubeconfig pointing to your Argo CD instance. The secret needs to be in the same namespace as the `spaces-controller`, which is `upbound-system`.
-
-Once you enable the plugin and configure it, the plugin automatically propagates connection details for your control planes to your Argo CD instance. You can then target the control plane and use Argo to sync Crossplane-related objects to it.
-
-Be sure to [configure Argo][configure-argo-1] after it's installed.
-
-## Configure Argo
-
-Argo's default configuration causes it to try to query for resource kinds that don't exist in control planes. You should configure Argo's [general configmap][general-configmap] to include the resource group/kinds which make sense in the context of control planes. For example, the concept of `nodes` isn't exposed in control planes.
- -To configure Argo CD, connect to the cluster where you've installed it and edit the configmap: - -```bash -kubectl edit configmap argocd-cm -n argocd -``` - -Adjust the resource inclusions and exclusions under the `data` field of the configmap: - -```yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: argocd-cm - namespace: argocd -data: - resource.exclusions: | - - apiGroups: - - "*" - kinds: - - "*" - clusters: - - "*" - resource.inclusions: | - - apiGroups: - - "*" - kinds: - - Provider - - Configuration - clusters: - - "*" -``` - -The preceding configuration causes Argo to exclude syncing **all** resource group/kinds--except Crossplane `providers` and `configurations`--for **all** control planes. You're encouraged to adjust the `resource.inclusions` to include the types that make sense for your control plane, such as an `XRD` you've built with Crossplane. You're also encouraged to customize the `clusters` pattern to selectively apply these exclusions/inclusions to control planes (for example, `control-plane-prod-*`). - -## Control plane connection secrets - -To deploy control planes through Argo CD, you need to configure the `writeConnectionSecretToRef` field in your control plane spec. This field specifies where to store the control plane's `kubeconfig` and makes connection details available to Argo CD. - -### Basic Configuration - -In your control plane manifest, include the `writeConnectionSecretToRef` field: - -```yaml -apiVersion: spaces.upbound.io/v1beta1 -kind: ControlPlane -metadata: - name: my-control-plane - namespace: my-control-plane-group -spec: - writeConnectionSecretToRef: - name: kubeconfig-my-control-plane - namespace: my-control-plane-group - # ... other control plane configuration -``` - -### Parameters - -The `writeConnectionSecretToRef` field requires two parameters: - -- `name`: A unique name for the secret containing the kubeconfig (`kubeconfig-my-control-plane`) -- `namespace`: The Kubernetes namespace where you store the secret, which must match the metadata namespace. The system copies it into the `argocd` namespace when you set the `features.alpha.argocdPlugin.target.secretNamespace=argocd` configuration parameter. - -Control plane labels automatically propagate to the connection secret, which allows you to use label selectors in Argo CD for automated discovery and management. - -This configuration enables Argo CD to automatically discover and manage resources on your control planes. 
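-
-Because the plugin registers each control plane as a cluster secret and control
-plane labels propagate to that secret, you can also drive discovery
-declaratively. The following is a minimal, hypothetical `ApplicationSet` sketch
-using Argo CD's cluster generator; the `environment: prod` label and the
-repository URL are illustrative assumptions, not values the plugin sets:
-
-```yaml
-apiVersion: argoproj.io/v1alpha1
-kind: ApplicationSet
-metadata:
-  name: ctp-configurations
-  namespace: argocd
-spec:
-  generators:
-    # Select every registered control plane cluster carrying this label.
-    - clusters:
-        selector:
-          matchLabels:
-            environment: prod
-  template:
-    metadata:
-      name: '{{name}}-config'
-    spec:
-      project: default
-      source:
-        repoURL: https://github.com/example-org/control-plane-config # hypothetical repo
-        targetRevision: main
-        path: manifests
-      destination:
-        server: '{{server}}'
-      syncPolicy:
-        automated: {}
-```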
- - -[gitops-with-control-planes]: /spaces/howtos/cloud-spaces/gitops -[configure-argo]: #configure-argo -[configure-argo-1]: #configure-argo -[general-configmap]: https://argo-cd.readthedocs.io/en/stable/operator-manual/argocd-cm-yaml/ diff --git a/spaces_versioned_docs/version-v1.9/howtos/self-hosted/workload-id/_category_.json b/spaces_versioned_docs/version-v1.9/howtos/self-hosted/workload-id/_category_.json deleted file mode 100644 index c5ecc93f6..000000000 --- a/spaces_versioned_docs/version-v1.9/howtos/self-hosted/workload-id/_category_.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "label": "Workload Identity Configuration", - "position": 2, - "collapsed": true, - "customProps": { - "plan": "business" - } - -} - - diff --git a/spaces_versioned_docs/version-v1.9/howtos/self-hosted/workload-id/backup-restore-config.md b/spaces_versioned_docs/version-v1.9/howtos/self-hosted/workload-id/backup-restore-config.md deleted file mode 100644 index 935ca69ec..000000000 --- a/spaces_versioned_docs/version-v1.9/howtos/self-hosted/workload-id/backup-restore-config.md +++ /dev/null @@ -1,384 +0,0 @@ ---- -title: Backup and Restore Workload ID -weight: 1 -description: Configure workload identity for Spaces Backup and Restore ---- -import GlobalLanguageSelector, { CodeBlock } from '@site/src/components/GlobalLanguageSelector'; - - - - - - - -Workload-identity authentication lets you use access policies to grant temporary -AWS credentials to your Kubernetes pod with a service account. Assigning IAM roles and service accounts allows the pod to assume the IAM role dynamically and much more securely than static credentials. - -This guide walks you through creating an IAM trust role policy and applying it -to your EKS cluster to handle backup and restore storage. - - - - - -Workload-identity authentication lets you use access policies to grant your -self-hosted Space cluster access to your cloud providers. Workload identity -authentication grants temporary Azure credentials to your Kubernetes pod based on -a service account. Assigning managed identities and service accounts allows the pod to -authenticate with Azure resources dynamically and much more securely than static credentials. - -This guide walks you through creating a managed identity and federated credential for your AKS -cluster to handle backup and restore storage. - - - - - -Workload-identity authentication lets you use access policies to grant your -self-hosted Space cluster access to your cloud providers. Workload identity -authentication grants temporary GCP credentials to your Kubernetes pod based on -a service account. Assigning IAM roles and service accounts allows the pod to -access cloud resources dynamically and much more securely than static credentials. - -This guide walks you through configuring workload identity for your GKE -cluster to handle backup and restore storage. - - - -## Prerequisites - - -To set up a workload-identity, you'll need: - - -- A self-hosted Space cluster -- Administrator access in your cloud provider -- Helm and `kubectl` - -## About the backup and restore component - -The `mxp-controller` component handles backup and restore workloads. It needs to -access your cloud storage to store and retrieve backups. By default, this -component runs in each control plane's host namespace. - -## Configuration - - - -Upbound supports workload-identity configurations in AWS with IAM Roles for -Service Accounts and EKS pod identity association. 
-
-#### IAM Roles for Service Accounts (IRSA)
-
-With IRSA, you can associate a Kubernetes service account in an EKS cluster with
-an AWS IAM role. Upbound authenticates workloads with that service account as
-the IAM role using temporary credentials instead of static role credentials.
-IRSA relies on the AWS STS `AssumeRoleWithWebIdentity` operation to exchange OIDC
-ID tokens for the IAM role's temporary credentials. IRSA uses the
-`eks.amazonaws.com/role-arn` annotation to link the service account and the IAM role.
-
-First, create an IAM role with appropriate permissions to access your S3 bucket:
-
-```json
-{
-  "Version": "2012-10-17",
-  "Statement": [
-    {
-      "Effect": "Allow",
-      "Action": [
-        "s3:GetObject",
-        "s3:PutObject",
-        "s3:ListBucket",
-        "s3:DeleteObject"
-      ],
-      "Resource": [
-        "arn:aws:s3:::${YOUR_BACKUP_BUCKET}",
-        "arn:aws:s3:::${YOUR_BACKUP_BUCKET}/*"
-      ]
-    }
-  ]
-}
-```
-
-Next, ensure your EKS cluster has an OIDC identity provider:
-
-```shell
-eksctl utils associate-iam-oidc-provider --cluster ${YOUR_CLUSTER_NAME} --approve
-```
-
-Configure the IAM role trust policy with the namespace for each
-provisioned control plane.
-
-```json
-{
-  "Version": "2012-10-17",
-  "Statement": [
-    {
-      "Effect": "Allow",
-      "Principal": {
-        "Federated": "arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:oidc-provider/${YOUR_OIDC_PROVIDER}"
-      },
-      "Action": "sts:AssumeRoleWithWebIdentity",
-      "Condition": {
-        "StringEquals": {
-          "${YOUR_OIDC_PROVIDER}:aud": "sts.amazonaws.com",
-          "${YOUR_OIDC_PROVIDER}:sub": "system:serviceaccount:${YOUR_NAMESPACE}:mxp-controller"
-        }
-      }
-    }
-  ]
-}
-```
-
-In your control plane, pass the `--set` flag with the Spaces Helm chart
-parameters for the Backup and Restore component:
-
-```shell
---set controlPlanes.mxpController.serviceAccount.annotations."eks\.amazonaws\.com/role-arn"="${SPACES_BR_IAM_ROLE_ARN}"
-```
-
-This command allows the backup and restore component to authenticate with your
-dedicated IAM role in your EKS cluster environment.
-
-#### EKS pod identities
-
-Upbound also supports EKS Pod Identity configuration. EKS Pod Identities allow
-you to create a pod identity association with your Kubernetes namespace, a
-service account, and an IAM role, which allows the EKS control plane to
-automatically handle the credential exchange.
-
-First, create an IAM role with appropriate permissions to access your S3 bucket:
-
-```json
-{
-  "Version": "2012-10-17",
-  "Statement": [
-    {
-      "Effect": "Allow",
-      "Action": [
-        "s3:GetObject",
-        "s3:PutObject",
-        "s3:ListBucket",
-        "s3:DeleteObject"
-      ],
-      "Resource": [
-        "arn:aws:s3:::${YOUR_BACKUP_BUCKET}",
-        "arn:aws:s3:::${YOUR_BACKUP_BUCKET}/*"
-      ]
-    }
-  ]
-}
-```
-
-When you install or upgrade your Space with Helm, add the backup/restore values:
-
-```shell
-helm upgrade spaces spaces-helm-chart \
-  --set "billing.enabled=true" \
-  --set "backup.enabled=true" \
-  --set "backup.storage.provider=aws" \
-  --set "backup.storage.aws.region=${YOUR_AWS_REGION}" \
-  --set "backup.storage.aws.bucket=${YOUR_BACKUP_BUCKET}"
-```
-
-After Upbound provisions your control plane, create a Pod Identity Association
-with the `aws` CLI:
-
-```shell
-aws eks create-pod-identity-association \
-  --cluster-name ${YOUR_CLUSTER_NAME} \
-  --namespace ${YOUR_CONTROL_PLANE_NAMESPACE} \
-  --service-account mxp-controller \
-  --role-arn arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:role/backup-restore-role
-```
-
-Upbound supports workload-identity configurations in Azure with Azure's built-in
-workload identity feature.
-
-#### Prepare your cluster
-
-First, enable the OIDC issuer and workload identity in your AKS cluster:
-
-```shell
-az aks update --resource-group ${YOUR_RESOURCE_GROUP} --name ${YOUR_AKS_CLUSTER_NAME} --enable-oidc-issuer --enable-workload-identity
-```
-
-Next, find and store the OIDC issuer URL as an environment variable:
-
-```shell
-export AKS_OIDC_ISSUER="$(az aks show --name ${YOUR_AKS_CLUSTER_NAME} --resource-group ${YOUR_RESOURCE_GROUP} --query "oidcIssuerProfile.issuerUrl" --output tsv)"
-```
-
-#### Create a User-Assigned Managed Identity
-
-Create a new managed identity to associate with the backup and restore component:
-
-```shell
-az identity create --name backup-restore-identity --resource-group ${YOUR_RESOURCE_GROUP} --location ${YOUR_LOCATION}
-```
-
-Retrieve the client ID and store it as an environment variable:
-
-```shell
-export USER_ASSIGNED_CLIENT_ID="$(az identity show --name backup-restore-identity --resource-group ${YOUR_RESOURCE_GROUP} --query clientId -otsv)"
-```
-
-Grant the managed identity you created access to your Azure Storage account:
-
-```shell
-az role assignment create \
-  --role "Storage Blob Data Contributor" \
-  --assignee ${USER_ASSIGNED_CLIENT_ID} \
-  --scope /subscriptions/${YOUR_SUBSCRIPTION_ID}/resourceGroups/${YOUR_RESOURCE_GROUP}/providers/Microsoft.Storage/storageAccounts/${YOUR_STORAGE_ACCOUNT}
-```
-
-#### Apply the managed identity role
-
-In your control plane, pass the `--set` flag with the Spaces Helm chart
-parameters for the backup and restore component:
-
-```shell
---set controlPlanes.mxpController.serviceAccount.annotations."azure\.workload\.identity/client-id"="${YOUR_USER_ASSIGNED_CLIENT_ID}"
---set controlPlanes.mxpController.pod.customLabels."azure\.workload\.identity/use"="true"
-```
-
-#### Create a Federated Identity credential
-
-```shell
-az identity federated-credential create \
-  --name backup-restore-federated-identity \
-  --identity-name backup-restore-identity \
-  --resource-group ${YOUR_RESOURCE_GROUP} \
-  --issuer ${AKS_OIDC_ISSUER} \
-  --subject system:serviceaccount:${YOUR_CONTROL_PLANE_NAMESPACE}:mxp-controller
-```
-
-Upbound supports workload-identity configurations in GCP with IAM principal
-identifiers and service account impersonation.
- -#### Prepare your cluster - -First, enable Workload Identity Federation on your GKE cluster: - -```shell -gcloud container clusters update ${YOUR_CLUSTER_NAME} \ - --workload-pool=${YOUR_PROJECT_ID}.svc.id.goog \ - --region=${YOUR_REGION} -``` - -#### Create a Google Service Account - -Create a service account for the backup and restore component: - -```shell -gcloud iam service-accounts create backup-restore-sa \ - --display-name "Backup Restore Service Account" \ - --project ${YOUR_PROJECT_ID} -``` - -Grant the service account access to your Google Cloud Storage bucket: - -```shell -gcloud projects add-iam-policy-binding ${YOUR_PROJECT_ID} \ - --member "serviceAccount:backup-restore-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com" \ - --role "roles/storage.objectAdmin" -``` - -#### Configure Workload Identity - -Create an IAM binding to grant the Kubernetes service account access to the Google service account: - -```shell -gcloud iam service-accounts add-iam-policy-binding \ - backup-restore-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com \ - --role roles/iam.workloadIdentityUser \ - --member "serviceAccount:${YOUR_PROJECT_ID}.svc.id.goog[${YOUR_CONTROL_PLANE_NAMESPACE}/mxp-controller]" -``` - -#### Apply the service account configuration - -In your control plane, pass the `--set` flag with the Spaces Helm chart -parameters for the backup and restore component: - -```shell ---set controlPlanes.mxpController.serviceAccount.annotations."iam\.gke\.io/gcp-service-account"="backup-restore-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com" -``` - - - -## Verify your configuration - -After you apply the configuration use `kubectl` to verify the service account -has the correct annotation: - -```shell -kubectl get serviceaccount mxp-controller -n ${YOUR_CONTROL_PLANE_NAMESPACE} -o yaml -``` - -Verify the `mxp-controller` pod is running: - -```shell -kubectl get pods -n ${YOUR_CONTROL_PLANE_NAMESPACE} | grep mxp-controller -``` - -## Restart workload - -You must manually restart a workload's pod when you add the workload identity annotations to the running pod's service account. - - - -This restart enables the EKS pod identity webhook to inject the necessary -environment for using IRSA. - - - - - -This restart enables the workload identity webhook to inject the necessary -environment for using Azure workload identity. - - - - - -This restart enables the workload identity webhook to inject the necessary -environment for using GCP workload identity. - - - -```shell -kubectl rollout restart deployment mxp-controller -n ${YOUR_CONTROL_PLANE_NAMESPACE} -``` - -## Use cases - - -Configuring backup and restore with workload identity eliminates the need for -static credentials in your cluster and the overhead of credential rotation. -These benefits are helpful in: - -* Disaster recovery scenarios -* Control plane migration -* Compliance requirements -* Rollbacks after unsuccessful upgrades - -## Next steps - -Now that you have a workload identity configured for the backup and restore -component, visit the [Backup Configuration][backup-restore-guide] documentation. 
-
-Other workload identity guides are:
-* [Billing][billing]
-* [Shared Secrets][secrets]
-
-[backup-restore-guide]: /spaces/howtos/backup-and-restore
-[billing]: /spaces/howtos/self-hosted/workload-id/billing-config
-[secrets]: /spaces/howtos/self-hosted/workload-id/eso-config
diff --git a/spaces_versioned_docs/version-v1.9/howtos/self-hosted/workload-id/billing-config.md b/spaces_versioned_docs/version-v1.9/howtos/self-hosted/workload-id/billing-config.md
deleted file mode 100644
index 323a6122f..000000000
--- a/spaces_versioned_docs/version-v1.9/howtos/self-hosted/workload-id/billing-config.md
+++ /dev/null
@@ -1,454 +0,0 @@
----
-title: Billing Workload ID
-weight: 1
-description: Configure workload identity for Spaces Billing
----
-import GlobalLanguageSelector, { CodeBlock } from '@site/src/components/GlobalLanguageSelector';
-
-
-
-
-
-
-Workload-identity authentication lets you use access policies to grant your
-self-hosted Space cluster access to your cloud providers. Workload identity
-authentication grants temporary AWS credentials to your Kubernetes pod based on
-a service account. Assigning IAM roles and service accounts allows the pod to
-assume the IAM role dynamically and much more securely than static credentials.
-
-This guide walks you through creating an IAM trust role policy and applying it to your EKS
-cluster for billing in your Space cluster.
-
-
-
-
-Workload-identity authentication lets you use access policies to grant your
-self-hosted Space cluster access to your cloud providers. Workload identity
-authentication grants temporary Azure credentials to your Kubernetes pod based on
-a service account. Assigning managed identities and service accounts allows the pod to
-authenticate with Azure resources dynamically and much more securely than static credentials.
-
-This guide walks you through creating a managed identity and federated credential for your AKS
-cluster for billing in your Space cluster.
-
-
-
-
-Workload-identity authentication lets you use access policies to grant your
-self-hosted Space cluster access to your cloud providers. Workload identity
-authentication grants temporary GCP credentials to your Kubernetes pod based on
-a service account. Assigning IAM roles and service accounts allows the pod to
-access cloud resources dynamically and much more securely than static
-credentials.
-
-This guide walks you through configuring workload identity for your GKE
-cluster's billing component.
-
-
-
-## Prerequisites
-
-
-To set up workload identity, you'll need:
-
-
-- A self-hosted Space cluster
-- Administrator access in your cloud provider
-- Helm and `kubectl`
-
-## About the billing component
-
-The `vector.dev` component handles billing metrics collection in Spaces. It
-stores account data in your cloud storage. By default, this component runs in
-each control plane's host namespace.
-
-## Configuration
-
-
-
-Upbound supports workload-identity configurations in AWS with IAM Roles for
-Service Accounts and EKS pod identity association.
-
-#### IAM Roles for Service Accounts (IRSA)
-
-With IRSA, you can associate a Kubernetes service account in an EKS cluster with
-an AWS IAM role. Upbound authenticates workloads with that service account as
-the IAM role using temporary credentials instead of static role credentials.
-IRSA relies on AWS `AssumeRoleWithWebIdentity` `STS` to exchange OIDC ID tokens with
-the IAM role's temporary credentials. IRSA uses the `eks.amazonaws.com/role-arn`
-annotation to link the service account and the IAM role.
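-
-The trust policy below references your cluster's OIDC provider as
-`${YOUR_OIDC_PROVIDER}`. If you don't have it handy, you can look up the issuer
-first and strip the leading `https://` to get the provider name. This lookup is
-a convenience step and assumes the `aws` CLI is configured for the right
-account:
-
-```shell
-aws eks describe-cluster --name ${YOUR_CLUSTER_NAME} \
-  --query "cluster.identity.oidc.issuer" --output text
-```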
-
-**Create an IAM role and trust policy**
-
-First, create an IAM role with appropriate permissions to access your S3 bucket:
-
-```json
-{
-  "Version": "2012-10-17",
-  "Statement": [
-    {
-      "Effect": "Allow",
-      "Action": [
-        "s3:GetObject",
-        "s3:PutObject",
-        "s3:ListBucket",
-        "s3:DeleteObject"
-      ],
-      "Resource": [
-        "arn:aws:s3:::${YOUR_BILLING_BUCKET}",
-        "arn:aws:s3:::${YOUR_BILLING_BUCKET}/*"
-      ]
-    }
-  ]
-}
-```
-
-You must configure the IAM role trust policy with the exact match for each
-provisioned control plane. An example of a trust policy for a single control
-plane is below:
-
-```json
-{
-  "Version": "2012-10-17",
-  "Statement": [
-    {
-      "Effect": "Allow",
-      "Principal": {
-        "Federated": "arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:oidc-provider/${YOUR_OIDC_PROVIDER}"
-      },
-      "Action": "sts:AssumeRoleWithWebIdentity",
-      "Condition": {
-        "StringEquals": {
-          "${YOUR_OIDC_PROVIDER}:aud": "sts.amazonaws.com",
-          "${YOUR_OIDC_PROVIDER}:sub": "system:serviceaccount:${YOUR_NAMESPACE}:vector"
-        }
-      }
-    }
-  ]
-}
-```
-
-**Configure the EKS OIDC provider**
-
-Next, ensure your EKS cluster has an OIDC identity provider:
-
-```shell
-eksctl utils associate-iam-oidc-provider --cluster ${YOUR_CLUSTER_NAME} --approve
-```
-
-**Apply the IAM role**
-
-In your control plane, pass the `--set` flag with the Spaces Helm chart
-parameters for the Billing component:
-
-```shell
---set "billing.enabled=true"
---set "billing.storage.provider=aws"
---set "billing.storage.aws.region=${YOUR_AWS_REGION}"
---set "billing.storage.aws.bucket=${YOUR_BILLING_BUCKET}"
---set "billing.storage.secretRef.name="
---set controlPlanes.vector.serviceAccount.customAnnotations."eks\.amazonaws\.com/role-arn"="arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:role/${YOUR_BILLING_ROLE_NAME}"
-```
-
-:::important
-You **must** set the `billing.storage.secretRef.name` to an empty string to
-enable workload identity for the billing component.
-:::
-
-#### EKS pod identities
-
-Upbound also supports EKS Pod Identity configuration. EKS Pod Identities allow
-you to create a pod identity association with your Kubernetes namespace, a
-service account, and an IAM role, which allows the EKS control plane to
-automatically handle the credential exchange.
-
-**Create an IAM role**
-
-First, create an IAM role with appropriate permissions to access your S3 bucket:
-
-```json
-{
-  "Version": "2012-10-17",
-  "Statement": [
-    {
-      "Effect": "Allow",
-      "Action": [
-        "s3:GetObject",
-        "s3:PutObject",
-        "s3:ListBucket"
-      ],
-      "Resource": [
-        "arn:aws:s3:::${YOUR_BILLING_BUCKET}",
-        "arn:aws:s3:::${YOUR_BILLING_BUCKET}/*"
-      ]
-    }
-  ]
-}
-```
-
-**Configure your Space with Helm**
-
-When you install or upgrade your Space with Helm, add the billing values:
-
-```shell
-helm upgrade spaces spaces-helm-chart \
-  --set "billing.enabled=true" \
-  --set "billing.storage.provider=aws" \
-  --set "billing.storage.aws.region=${YOUR_AWS_REGION}" \
-  --set "billing.storage.aws.bucket=${YOUR_BILLING_BUCKET}" \
-  --set "billing.storage.secretRef.name="
-```
-
-**Create a Pod Identity Association**
-
-After Upbound provisions your control plane, create a Pod Identity Association
-with the `aws` CLI:
-
-```shell
-aws eks create-pod-identity-association \
-  --cluster-name ${YOUR_CLUSTER_NAME} \
-  --namespace ${YOUR_CONTROL_PLANE_NAMESPACE} \
-  --service-account vector \
-  --role-arn arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:role/${YOUR_BILLING_ROLE_NAME}
-```
-
-
-
-
-Upbound supports workload-identity configurations in Azure with Azure's built-in
-workload identity feature.
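-
-If you want to confirm whether the cluster already has both features enabled
-before updating it, you can query the relevant fields first. The field paths
-below follow the AKS managed cluster API and this check is optional:
-
-```shell
-az aks show --name ${YOUR_AKS_CLUSTER_NAME} \
-  --resource-group ${YOUR_RESOURCE_GROUP} \
-  --query "{oidcIssuer: oidcIssuerProfile.enabled, workloadIdentity: securityProfile.workloadIdentity.enabled}" \
-  --output table
-```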
-
-First, enable the OIDC issuer and workload identity in your AKS cluster:
-
-```shell
-az aks update --resource-group ${YOUR_RESOURCE_GROUP} --name ${YOUR_AKS_CLUSTER_NAME} --enable-oidc-issuer --enable-workload-identity
-```
-
-Next, find and store the OIDC issuer URL as an environment variable:
-
-```shell
-export AKS_OIDC_ISSUER="$(az aks show --name ${YOUR_AKS_CLUSTER_NAME} --resource-group ${YOUR_RESOURCE_GROUP} --query "oidcIssuerProfile.issuerUrl" --output tsv)"
-```
-
-Create a new managed identity to associate with the billing component:
-
-```shell
-az identity create --name billing-identity --resource-group ${YOUR_RESOURCE_GROUP} --location ${YOUR_LOCATION}
-```
-
-Retrieve the client ID and store it as an environment variable:
-
-```shell
-export USER_ASSIGNED_CLIENT_ID="$(az identity show --name billing-identity --resource-group ${YOUR_RESOURCE_GROUP} --query clientId -otsv)"
-```
-
-Grant the managed identity you created access to your Azure Storage account:
-
-```shell
-az role assignment create --role "Storage Blob Data Contributor" --assignee $USER_ASSIGNED_CLIENT_ID --scope /subscriptions/${YOUR_SUBSCRIPTION_ID}/resourceGroups/${YOUR_RESOURCE_GROUP}/providers/Microsoft.Storage/storageAccounts/${YOUR_STORAGE_ACCOUNT}
-```
-
-In your control plane, pass the `--set` flag with the Spaces Helm chart
-parameters for the billing component:
-
-```shell
---set "billing.enabled=true"
---set "billing.storage.provider=azure"
---set "billing.storage.azure.storageAccount=${YOUR_STORAGE_ACCOUNT}"
---set "billing.storage.azure.container=${YOUR_STORAGE_CONTAINER}"
---set "billing.storage.secretRef.name="
---set controlPlanes.vector.serviceAccount.customAnnotations."azure\.workload\.identity/client-id"="${USER_ASSIGNED_CLIENT_ID}"
---set controlPlanes.vector.pod.customLabels."azure\.workload\.identity/use"="true"
-```
-
-Create a federated credential to establish trust between the managed identity
-and your AKS OIDC provider:
-
-```shell
-az identity federated-credential create \
-  --name billing-federated-identity \
-  --identity-name billing-identity \
-  --resource-group ${YOUR_RESOURCE_GROUP} \
-  --issuer ${AKS_OIDC_ISSUER} \
-  --subject system:serviceaccount:${YOUR_CONTROL_PLANE_NAMESPACE}:vector
-```
-
-
-
-
-Upbound supports workload-identity configurations in GCP with IAM principal
-identifiers or service account impersonation.
-
-#### IAM principal identifiers
-
-IAM principal identifiers allow you to grant permissions directly to
-Kubernetes service accounts without additional annotation. Upbound recommends
-this approach for ease-of-use and flexibility.
-
-First, enable Workload Identity Federation on your GKE cluster:
-
-```shell
-gcloud container clusters update ${YOUR_CLUSTER_NAME} \
-  --workload-pool=${YOUR_PROJECT_ID}.svc.id.goog \
-  --region=${YOUR_REGION}
-```
-
-Next, configure your Spaces installation with the Spaces Helm chart parameters:
-
-```shell
---set "billing.enabled=true"
---set "billing.storage.provider=gcp"
---set "billing.storage.gcp.bucket=${YOUR_BILLING_BUCKET}"
---set "billing.storage.secretRef.name="
-```
-
-:::important
-You **must** set the `billing.storage.secretRef.name` to an empty string to
-enable workload identity for the billing component.
-:::
-
-Grant the necessary permissions to your Kubernetes service account:
-
-```shell
-gcloud projects add-iam-policy-binding ${YOUR_PROJECT_ID} \
-  --member="principalSet://iam.googleapis.com/projects/${YOUR_PROJECT_NUMBER}/locations/global/workloadIdentityPools/${YOUR_PROJECT_ID}.svc.id.goog/attribute.kubernetes_namespace/${YOUR_CONTROL_PLANE_NAMESPACE}/attribute.kubernetes_service_account/vector" \
-  --role="roles/storage.objectAdmin"
-```
-
-Enable uniform bucket-level access on your storage bucket:
-
-```shell
-gcloud storage buckets update gs://${YOUR_BILLING_BUCKET} --uniform-bucket-level-access
-```
-
-#### Service account impersonation
-
-Service account impersonation allows you to link a Kubernetes service account to
-a GCP service account. The Kubernetes service account assumes the permissions of
-the GCP service account you specify.
-
-Enable Workload Identity Federation on your GKE cluster:
-
-```shell
-gcloud container clusters update ${YOUR_CLUSTER_NAME} \
-  --workload-pool=${YOUR_PROJECT_ID}.svc.id.goog \
-  --region=${YOUR_REGION}
-```
-
-Next, create a dedicated service account for your billing operations:
-
-```shell
-gcloud iam service-accounts create billing-sa \
-  --project=${YOUR_PROJECT_ID}
-```
-
-Grant storage permissions to the service account you created:
-
-```shell
-gcloud projects add-iam-policy-binding ${YOUR_PROJECT_ID} \
-  --member="serviceAccount:billing-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com" \
-  --role="roles/storage.objectAdmin"
-```
-
-Link the Kubernetes service account to the GCP service account:
-
-```shell
-gcloud iam service-accounts add-iam-policy-binding \
-  billing-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com \
-  --role="roles/iam.workloadIdentityUser" \
-  --member="serviceAccount:${YOUR_PROJECT_ID}.svc.id.goog[${YOUR_CONTROL_PLANE_NAMESPACE}/vector]"
-```
-
-In your control plane, pass the `--set` flag with the Spaces Helm chart
-parameters for the billing component:
-
-```shell
---set "billing.enabled=true"
---set "billing.storage.provider=gcp"
---set "billing.storage.gcp.bucket=${YOUR_BILLING_BUCKET}"
---set "billing.storage.secretRef.name="
---set controlPlanes.vector.serviceAccount.customAnnotations."iam\.gke\.io/gcp-service-account"="billing-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com"
-```
-
-
-
-## Verify your configuration
-
-After you apply the configuration, use `kubectl` to verify the service account
-has the correct annotation:
-
-```shell
-kubectl get serviceaccount vector -n ${YOUR_CONTROL_PLANE_NAMESPACE} -o yaml
-```
-
-Verify the `vector` pod is running:
-
-```shell
-kubectl get pods -n ${YOUR_CONTROL_PLANE_NAMESPACE} | grep vector
-```
-
-## Restart workload
-
-
-
-You must manually restart a workload's pod when you add the
-`eks.amazonaws.com/role-arn` annotation to the running pod's service
-account.
-
-This restart enables the EKS pod identity webhook to inject the necessary
-environment for using IRSA.
-
-
-
-
-You must manually restart a workload's pod when you add the workload identity annotations to the running pod's service account.
-
-This restart enables the workload identity webhook to inject the necessary
-environment for using Azure workload identity.
-
-
-
-
-GCP workload identity doesn't require pod restarts after configuration changes.
-If you do need to restart the workload, use the `kubectl` command to force the -component restart: - - - -```shell -kubectl rollout restart deployment vector -``` - - -## Use cases - - -Using workload identity authentication for billing eliminates the need for static -credentials in your cluster as well as the overhead of credential rotation. -These benefits are helpful in: - -* Resource usage tracking across teams/projects -* Cost allocation for multi-tenant environments -* Financial auditing requirements -* Capacity billing and resource optimization -* Automated billing workflows - -## Next steps - -Now that you have workload identity configured for the billing component, visit -the [Billing guide][billing-guide] for more information. - -Other workload identity guides are: -* [Backup and restore][backuprestore] -* [Shared Secrets][secrets] - -[billing-guide]: /spaces/howtos/self-hosted/billing -[backuprestore]: /spaces/howtos/self-hosted/workload-id/backup-restore-config -[secrets]: /spaces/howtos/self-hosted/workload-id/eso-config diff --git a/spaces_versioned_docs/version-v1.9/howtos/self-hosted/workload-id/eso-config.md b/spaces_versioned_docs/version-v1.9/howtos/self-hosted/workload-id/eso-config.md deleted file mode 100644 index c1418c171..000000000 --- a/spaces_versioned_docs/version-v1.9/howtos/self-hosted/workload-id/eso-config.md +++ /dev/null @@ -1,503 +0,0 @@ ---- -title: Shared Secrets Workload ID -weight: 1 -description: Configure workload identity for Spaces Shared Secrets ---- -import GlobalLanguageSelector, { CodeBlock } from '@site/src/components/GlobalLanguageSelector'; - - - - - - - -Workload-identity authentication lets you use access policies to grant your -self-hosted Space cluster access to your cloud providers. Workload identity -authentication grants temporary AWS credentials to your Kubernetes pod based on -a service account. Assigning IAM roles and service accounts allows the pod to -assume the IAM role dynamically and much more securely than static credentials. - -This guide walks you through creating an IAM trust role policy and applying it to your EKS -cluster for secret sharing with Kubernetes. - - - - - -Workload-identity authentication lets you use access policies to grant your -self-hosted Space cluster access to your cloud providers. Workload identity -authentication grants temporary Azure credentials to your Kubernetes pod based on -a service account. Assigning managed identities and service accounts allows the pod to -authenticate with Azure resources dynamically and much more securely than static credentials. - -This guide walks you through creating a managed identity and federated credential for your AKS -cluster for shared secrets in your Space cluster. - - - - - -Workload-identity authentication lets you use access policies to grant your -self-hosted Space cluster access to your cloud providers. Workload identity -authentication grants temporary GCP credentials to your Kubernetes pod based on -a service account. Assigning IAM roles and service accounts allows the pod to -access cloud resources dynamically and much more securely than static -credentials. - -This guide walks you through configuring workload identity for your GKE -cluster's Shared Secrets component. 
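-
-The GCP steps below also assume the Secret Manager API is enabled in your
-project. You can confirm this up front, and enable it with
-`gcloud services enable secretmanager.googleapis.com` if the list comes back
-empty:
-
-```shell
-gcloud services list --enabled --project ${YOUR_PROJECT_ID} \
-  --filter="name:secretmanager.googleapis.com"
-```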
-
-
-
-## Prerequisites
-
-
-To set up workload identity, you'll need:
-
-
-- A self-hosted Space cluster
-- Administrator access in your cloud provider
-- Helm and `kubectl`
-
-
-## About the Shared Secrets component
-
-
-
-
-The External Secrets Operator (ESO) runs in each control plane's host namespace as `external-secrets-controller`. It needs access to
-your external secrets management service, such as AWS Secrets Manager.
-
-To configure your shared secrets workflow controller, you must:
-
-* Annotate the Kubernetes service account to associate it with a cloud-side
-  principal (such as an IAM role, service account, or enterprise application). The workload must then
-  use this service account.
-* Label the workload (pod) to allow the injection of a temporary credential set,
-  enabling authentication.
-
-
-
-
-The External Secrets Operator (ESO) component runs in each control plane's host
-namespace as `external-secrets-controller`. It synchronizes secrets from
-external APIs into Kubernetes secrets. Shared secrets allow you to manage
-credentials outside your Kubernetes cluster while making them available to your
-application.
-
-
-
-
-The External Secrets Operator (ESO) component runs in each control plane's host
-namespace as `external-secrets-controller`. It synchronizes secrets from
-external APIs into Kubernetes secrets. Shared secrets allow you to manage
-credentials outside your Kubernetes cluster while making them available to your
-application.
-
-
-
-## Configuration
-
-
-
-Upbound supports workload-identity configurations in AWS with IAM Roles for
-Service Accounts or EKS pod identity association.
-
-#### IAM Roles for Service Accounts (IRSA)
-
-With IRSA, you can associate a Kubernetes service account in an EKS cluster with
-an AWS IAM role. Upbound authenticates workloads with that service account as
-the IAM role using temporary credentials instead of static role credentials.
-IRSA relies on AWS `AssumeRoleWithWebIdentity` `STS` to exchange OIDC ID tokens with
-the IAM role's temporary credentials. IRSA uses the `eks.amazonaws.com/role-arn`
-annotation to link the service account and the IAM role.
-
-**Create an IAM role and trust policy**
-
-First, create an IAM role with appropriate permissions to access AWS Secrets Manager:
-
-```json
-{
-  "Version": "2012-10-17",
-  "Statement": [
-    {
-      "Effect": "Allow",
-      "Action": [
-        "secretsmanager:GetSecretValue",
-        "secretsmanager:DescribeSecret",
-        "ssm:GetParameter"
-      ],
-      "Resource": [
-        "arn:aws:secretsmanager:${YOUR_REGION}:${YOUR_AWS_ACCOUNT_ID}:secret:${YOUR_SECRET_PREFIX}*",
-        "arn:aws:ssm:${YOUR_REGION}:${YOUR_AWS_ACCOUNT_ID}:parameter/${YOUR_PARAMETER_PREFIX}*"
-      ]
-    }
-  ]
-}
-```
-
-You must configure the IAM role trust policy with the exact match for each
-provisioned control plane.
-An example of a trust policy for a single control plane is below:
-
-```json
-{
-  "Version": "2012-10-17",
-  "Statement": [
-    {
-      "Effect": "Allow",
-      "Principal": {
-        "Federated": "arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:oidc-provider/${YOUR_OIDC_PROVIDER}"
-      },
-      "Action": "sts:AssumeRoleWithWebIdentity",
-      "Condition": {
-        "StringEquals": {
-          "${YOUR_OIDC_PROVIDER}:aud": "sts.amazonaws.com"
-        },
-        "StringLike": {
-          "${YOUR_OIDC_PROVIDER}:sub": "system:serviceaccount:*:external-secrets-controller"
-        }
-      }
-    }
-  ]
-}
-```
-
-**Configure the EKS OIDC provider**
-
-Next, ensure your EKS cluster has an OIDC identity provider:
-
-```shell
-eksctl utils associate-iam-oidc-provider --cluster ${YOUR_CLUSTER_NAME} --approve
-```
-
-**Apply the IAM role**
-
-In your control plane, pass the `--set` flag with the Spaces Helm chart
-parameters for the shared secrets component:
-
-```shell
---set controlPlanes.sharedSecrets.serviceAccount.customAnnotations."eks\.amazonaws\.com/role-arn"="arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:role/${YOUR_ESO_ROLE_NAME}"
-```
-
-This command allows the shared secrets component to authenticate with your
-dedicated IAM role in your EKS cluster environment.
-
-#### EKS pod identities
-
-Upbound also supports EKS Pod Identity configuration. EKS Pod Identities allow
-you to create a pod identity association with your Kubernetes namespace, a
-service account, and an IAM role, which allows the EKS control plane to
-automatically handle the credential exchange.
-
-**Create an IAM role**
-
-First, create an IAM role with appropriate permissions to access AWS Secrets Manager:
-
-```json
-{
-  "Version": "2012-10-17",
-  "Statement": [
-    {
-      "Effect": "Allow",
-      "Action": [
-        "secretsmanager:GetSecretValue",
-        "secretsmanager:DescribeSecret",
-        "ssm:GetParameter"
-      ],
-      "Resource": [
-        "arn:aws:secretsmanager:${YOUR_AWS_REGION}:${YOUR_AWS_ACCOUNT_ID}:secret:${YOUR_SECRET_PREFIX}*",
-        "arn:aws:ssm:${YOUR_AWS_REGION}:${YOUR_AWS_ACCOUNT_ID}:parameter/${YOUR_PARAMETER_PREFIX}*"
-      ]
-    }
-  ]
-}
-```
-
-**Configure your Space with Helm**
-
-When you install or upgrade your Space with Helm, add the shared secrets value:
-
-```shell
-helm upgrade spaces spaces-helm-chart \
-  --set "sharedSecrets.enabled=true"
-```
-
-**Create a Pod Identity Association**
-
-After Upbound provisions your control plane, create a Pod Identity Association
-with the `aws` CLI:
-
-```shell
-aws eks create-pod-identity-association \
-  --cluster-name ${YOUR_CLUSTER_NAME} \
-  --namespace ${YOUR_CONTROL_PLANE_NAMESPACE} \
-  --service-account external-secrets-controller \
-  --role-arn arn:aws:iam::${YOUR_AWS_ACCOUNT_ID}:role/${YOUR_ROLE_NAME}
-```
-
-
-
-
-Upbound supports workload-identity configurations in Azure with Azure's built-in
-workload identity feature.
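-
-After you finish the steps below, you can audit which Kubernetes service
-accounts a managed identity trusts by listing its federated credentials. The
-identity name matches the `secrets-identity` created in this section:
-
-```shell
-az identity federated-credential list \
-  --identity-name secrets-identity \
-  --resource-group ${YOUR_RESOURCE_GROUP} \
-  --output table
-```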
-
-First, enable the OIDC issuer and workload identity in your AKS cluster:
-
-```shell
-az aks update --resource-group ${YOUR_RESOURCE_GROUP} --name ${YOUR_AKS_CLUSTER_NAME} --enable-oidc-issuer --enable-workload-identity
-```
-
-Next, find and store the OIDC issuer URL as an environment variable:
-
-```shell
-export AKS_OIDC_ISSUER="$(az aks show --name ${YOUR_AKS_CLUSTER_NAME} --resource-group ${YOUR_RESOURCE_GROUP} --query "oidcIssuerProfile.issuerUrl" --output tsv)"
-```
-
-Create a new managed identity to associate with the shared secrets component:
-
-```shell
-az identity create --name secrets-identity --resource-group ${YOUR_RESOURCE_GROUP} --location ${YOUR_LOCATION}
-```
-
-Retrieve the client ID and store it as an environment variable:
-
-```shell
-export USER_ASSIGNED_CLIENT_ID="$(az identity show --name secrets-identity --resource-group ${YOUR_RESOURCE_GROUP} --query clientId -otsv)"
-```
-
-Grant the managed identity you created access to read secrets from your Azure
-Key Vault:
-
-```shell
-az keyvault set-policy --name ${YOUR_KEY_VAULT_NAME} \
-  --resource-group ${YOUR_RESOURCE_GROUP} \
-  --object-id $(az identity show --name secrets-identity --resource-group ${YOUR_RESOURCE_GROUP} --query principalId -otsv) \
-  --secret-permissions get list
-```
-
-In your control plane, pass the `--set` flag with the Spaces Helm chart
-parameters for the shared secrets component:
-
-```shell
---set controlPlanes.sharedSecrets.serviceAccount.customAnnotations."azure\.workload\.identity/client-id"="${USER_ASSIGNED_CLIENT_ID}"
---set controlPlanes.sharedSecrets.pod.customLabels."azure\.workload\.identity/use"="true"
-```
-
-Next, create a federated credential to establish trust between the managed identity
-and your AKS OIDC provider:
-
-```shell
-az identity federated-credential create \
-  --name secrets-federated-identity \
-  --identity-name secrets-identity \
-  --resource-group ${YOUR_RESOURCE_GROUP} \
-  --issuer ${AKS_OIDC_ISSUER} \
-  --subject system:serviceaccount:${YOUR_CONTROL_PLANE_NAMESPACE}:external-secrets-controller
-```
-
-
-
-
-Upbound supports workload-identity configurations in GCP with IAM principal
-identifiers or service account impersonation.
-
-#### IAM principal identifiers
-
-IAM principal identifiers allow you to grant permissions directly to
-Kubernetes service accounts without additional annotation. Upbound recommends
-this approach for ease-of-use and flexibility.
-
-First, enable Workload Identity Federation on your GKE cluster:
-
-```shell
-gcloud container clusters update ${YOUR_CLUSTER_NAME} \
-  --workload-pool=${YOUR_PROJECT_ID}.svc.id.goog \
-  --region=${YOUR_REGION}
-```
-
-Next, grant the necessary permissions to your Kubernetes service account:
-
-```shell
-gcloud projects add-iam-policy-binding ${YOUR_PROJECT_ID} \
-  --member="principalSet://iam.googleapis.com/projects/${YOUR_PROJECT_NUMBER}/locations/global/workloadIdentityPools/${YOUR_PROJECT_ID}.svc.id.goog/attribute.kubernetes_namespace/${YOUR_CONTROL_PLANE_NAMESPACE}/attribute.kubernetes_service_account/external-secrets-controller" \
-  --role="roles/secretmanager.secretAccessor"
-```
-
-#### Service account impersonation
-
-Service account impersonation allows you to link a Kubernetes service account to
-a GCP service account. The Kubernetes service account assumes the permissions of
-the GCP service account you specify.
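-
-Once the steps below are in place, you can confirm the link by reading the GCP
-service account's IAM policy and checking for the
-`roles/iam.workloadIdentityUser` binding:
-
-```shell
-gcloud iam service-accounts get-iam-policy \
-  secrets-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com
-```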
-
-Enable Workload Identity Federation on your GKE cluster:
-
-```shell
-gcloud container clusters update ${YOUR_CLUSTER_NAME} \
-  --workload-pool=${YOUR_PROJECT_ID}.svc.id.goog \
-  --region=${YOUR_REGION}
-```
-
-Next, create a dedicated service account for your secrets operations:
-
-```shell
-gcloud iam service-accounts create secrets-sa \
-  --project=${YOUR_PROJECT_ID}
-```
-
-Grant secret access permissions to the service account you created:
-
-```shell
-gcloud projects add-iam-policy-binding ${YOUR_PROJECT_ID} \
-  --member="serviceAccount:secrets-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com" \
-  --role="roles/secretmanager.secretAccessor"
-```
-
-Link the Kubernetes service account to the GCP service account:
-
-```shell
-gcloud iam service-accounts add-iam-policy-binding \
-  secrets-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com \
-  --role="roles/iam.workloadIdentityUser" \
-  --member="serviceAccount:${YOUR_PROJECT_ID}.svc.id.goog[${YOUR_CONTROL_PLANE_NAMESPACE}/external-secrets-controller]"
-```
-
-In your control plane, pass the `--set` flag with the Spaces Helm chart
-parameters for the shared secrets component:
-
-```shell
---set controlPlanes.sharedSecrets.serviceAccount.customAnnotations."iam\.gke\.io/gcp-service-account"="secrets-sa@${YOUR_PROJECT_ID}.iam.gserviceaccount.com"
-```
-
-
-
-## Verify your configuration
-
-After you apply the configuration, use `kubectl` to verify the service account
-has the correct annotation:
-
-```shell
-kubectl get serviceaccount external-secrets-controller -n ${YOUR_CONTROL_PLANE_NAMESPACE} -o yaml
-```
-
-
-
-Verify the `external-secrets` pod is running correctly:
-
-```shell
-kubectl get pods -n ${YOUR_CONTROL_PLANE_NAMESPACE} | grep external-secrets
-```
-
-
-
-
-
-Verify the External Secrets Operator pod is running correctly:
-
-```shell
-kubectl get pods -n ${YOUR_CONTROL_PLANE_NAMESPACE} | grep external-secrets
-```
-
-
-
-
-
-Verify the `external-secrets` pod is running correctly:
-
-```shell
-kubectl get pods -n ${YOUR_CONTROL_PLANE_NAMESPACE} | grep external-secrets
-```
-
-
-
-## Restart workload
-
-
-
-You must manually restart a workload's pod when you add the
-`eks.amazonaws.com/role-arn` annotation to the running pod's service
-account.
-
-This restart enables the EKS pod identity webhook to inject the necessary
-environment for using IRSA.
-
-
-
-
-You must manually restart a workload's pod when you add the workload identity annotations to the running pod's service account.
-
-This restart enables the workload identity webhook to inject the necessary
-environment for using Azure workload identity.
-
-
-
-
-GCP workload identity doesn't require pod restarts after configuration changes.
-If you do need to restart the workload, use the `kubectl` command to force the
-component restart:
-
-
-
-```shell
-kubectl rollout restart deployment external-secrets
-```
-
-## Use cases
-
-
-
-
-Configuring shared secrets with workload identity eliminates the need for static
-credentials in your cluster. These benefits are particularly helpful in:
-
-* Secure application credentials management
-* Database connection string storage
-* API token management
-* Compliance with secret rotation security standards
-* Multi-environment configuration with centralized secret management
-
-
-
-
-Using workload identity authentication for shared secrets eliminates the need for static
-credentials in your cluster as well as the overhead of credential rotation.
-These benefits are particularly helpful in:
-
-* Secure application credentials management
-* Database connection string storage
-* API token management
-* Compliance with secret rotation security standards
-
-
-
-
-Configuring the external secrets operator with workload identity eliminates the need for
-static credentials in your cluster and the overhead of credential rotation.
-These benefits are particularly helpful in:
-
-* Secure application credentials management
-* Database connection string storage
-* API token management
-* Compliance with secret rotation security standards
-
-
-
-## Next steps
-
-Now that you have workload identity configured for the shared secrets component, visit
-the [Shared Secrets][eso-guide] guide for more information.
-
-Other workload identity guides are:
-* [Backup and restore][backuprestore]
-* [Billing][billing]
-
-[eso-guide]: /spaces/howtos/secrets-management
-[backuprestore]: /spaces/howtos/self-hosted/workload-id/backup-restore-config
-[billing]: /spaces/howtos/self-hosted/workload-id/billing-config
diff --git a/spaces_versioned_docs/version-v1.9/howtos/simulations.md b/spaces_versioned_docs/version-v1.9/howtos/simulations.md
deleted file mode 100644
index 26cb0e657..000000000
--- a/spaces_versioned_docs/version-v1.9/howtos/simulations.md
+++ /dev/null
@@ -1,110 +0,0 @@
----
-title: Simulate changes to your Control Plane Projects
-sidebar_position: 100
-description: Use the Up CLI to mock operations before deploying to your environments.
----
-
-:::info API Version Information
-This guide covers Simulations, available in v1.10+ (GA since v1.13).
-
-For version-specific availability and API specifications of the Simulations
-CRD, see the [Spaces API Reference](../../../reference/apis/spaces-api/).
-:::
-
-:::important
-The Simulations feature is in private preview. For more information, [reach out to Upbound][reach-out-to-upbound].
-:::
-
-Control plane simulations allow you to preview changes to your resources before
-applying them to your control planes. Like a plan or dry-run operation,
-simulations expose the impact of updates to compositions or claims without
-changing your actual resources.
-
-A control plane simulation creates a temporary copy of your control plane and
-returns a preview of the desired changes. The simulation change plan helps you
-reduce the risk of unexpected behavior based on your changes.
-
-## Simulation benefits
-
-Control planes are dynamic systems that automatically reconcile resources to
-match your desired state. Simulations provide visibility into this
-reconciliation process by showing:
-
-
-* New resources to create
-* Existing resources to change
-* Existing resources to delete
-* How configuration changes propagate through the system
-
-These insights are crucial when planning complex changes or upgrading Crossplane
-packages.
-
-## Requirements
-
-Simulations are available to select customers on Upbound Cloud with Team
-Tier or higher. For more information, [reach out to Upbound][reach-out-to-upbound-1].
-
-## How to simulate your control planes
-
-Before you start a simulation, build your project and use the `up
-project run` command to run your control plane.
-
-Use the `up project simulate` command with your control plane name to start the
-simulation:
-
-```shell
-up project simulate --complete-after=60s --terminate-on-finish
-```
-
-The `complete-after` flag determines how long to run the simulation before it completes and calculates the results.
-Depending on the change, a simulation may not complete within your defined
-interval, leaving unaffected resources marked as `unchanged`.
-
-The `terminate-on-finish` flag ends the simulation after the time
-you set and deletes the control plane that ran the simulation.
-
-At the end of your simulation, your CLI returns:
-* A summary of the resources created, modified, or deleted
-* Diffs for each resource affected
-
-## View your simulation in the Upbound Console
-You can also view your simulation results in the Upbound Console:
-
-1. Navigate to your base control plane in the Upbound Console
-2. Select the "Simulations" tab in the menu
-3. Select a simulation object to see a change list of all
-   affected resources.
-
-The Console provides visual indications of changes:
-
-- Created Resources: Marked with green
-- Modified Resources: Marked with yellow
-- Deleted Resources: Marked with red
-- Unchanged Resources: Displayed in gray
-
-![Upbound Console Simulation](/img/simulations.png)
-
-## Considerations
-
-Simulations is a **private preview** feature.
-
-Be aware of the following limitations:
-
-- Simulations can't predict the exact behavior of external systems due to the
-  complexity and non-deterministic reconciliation pattern in Crossplane.
-
-- The only completion criterion for a simulation is time. Your simulation may not
-  receive a conclusive result within that interval. Upbound recommends the
-  default `60s` value.
-
-- Providers don't run in simulations. Simulations can't compose resources that
-  rely on the status of Managed Resources.
-
-
-The Upbound team is working to improve these limitations. Your feedback is always appreciated.
-
-## Next steps
-
-For more information, follow the [tutorial][tutorial] on Simulations.
-
-
-[tutorial]: /manuals/cli/howtos/simulations
-[reach-out-to-upbound]: https://www.upbound.io/contact-us
-[reach-out-to-upbound-1]: https://www.upbound.io/contact-us
diff --git a/spaces_versioned_docs/version-v1.9/overview/_category_.json b/spaces_versioned_docs/version-v1.9/overview/_category_.json
deleted file mode 100644
index 54bb16430..000000000
--- a/spaces_versioned_docs/version-v1.9/overview/_category_.json
+++ /dev/null
@@ -1,4 +0,0 @@
-{
-  "label": "Overview",
-  "position": 0
-}
diff --git a/spaces_versioned_docs/version-v1.9/overview/index.md b/spaces_versioned_docs/version-v1.9/overview/index.md
deleted file mode 100644
index 7b79f6e44..000000000
--- a/spaces_versioned_docs/version-v1.9/overview/index.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-title: Spaces Overview
-sidebar_position: 0
----
-
-# Upbound Spaces
-
-Welcome to the Upbound Spaces documentation. This section contains comprehensive documentation for the Spaces API and Spaces operations across all supported versions (v1.9 through v1.15).
- -## Get Started - -- **[Concepts](/spaces/concepts/control-planes)** - Core concepts for Spaces -- **[How-To Guides](/spaces/howtos/auto-upgrade)** - Step-by-step guides for operating Spaces -- **[API Reference](/spaces/reference/)** - API specifications and resources diff --git a/spaces_versioned_docs/version-v1.9/reference/_category_.json b/spaces_versioned_docs/version-v1.9/reference/_category_.json deleted file mode 100644 index 4a6a139c4..000000000 --- a/spaces_versioned_docs/version-v1.9/reference/_category_.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "label": "Spaces API", - "position": 1, - "collapsed": true -} diff --git a/spaces_versioned_docs/version-v1.9/reference/index.md b/spaces_versioned_docs/version-v1.9/reference/index.md deleted file mode 100644 index 5e68b0768..000000000 --- a/spaces_versioned_docs/version-v1.9/reference/index.md +++ /dev/null @@ -1,72 +0,0 @@ ---- -title: Spaces API Reference -description: Documentation for the Spaces API resources (v1.15 - Latest) -sidebar_position: 1 ---- -import CrdDocViewer from '@site/src/components/CrdViewer'; - - -This page documents the Custom Resource Definitions (CRDs) for the Spaces API. - - -## Control Planes -### Control Planes - - -## Observability -### Shared Telemetry Configs - - -## `pkg` -### Controller Revisions - - -### Controller Runtime Configs - - -### Controllers - - -### Remote Configuration Revisions - - -### Remote Configurations - - -## Policy -### Shared Upbound Policies - - -## References -### Referenced Objects - - -## Scheduling -### Environments - - -## Secrets -### Shared External Secrets - - -### Shared Secret Stores - - -## Simulations - - -## Spaces Backups -### Backups - - -### Backup Schedules - - -### Shared Backup Configs - - -### Shared Backups - - -### Shared Backup Schedules - diff --git a/spaces_versioned_sidebars/version-v1.13-sidebars.json b/spaces_versioned_sidebars/version-1.13-sidebars.json similarity index 100% rename from spaces_versioned_sidebars/version-v1.13-sidebars.json rename to spaces_versioned_sidebars/version-1.13-sidebars.json diff --git a/spaces_versioned_sidebars/version-v1.14-sidebars.json b/spaces_versioned_sidebars/version-1.14-sidebars.json similarity index 100% rename from spaces_versioned_sidebars/version-v1.14-sidebars.json rename to spaces_versioned_sidebars/version-1.14-sidebars.json diff --git a/spaces_versioned_sidebars/version-v1.15-sidebars.json b/spaces_versioned_sidebars/version-1.15-sidebars.json similarity index 100% rename from spaces_versioned_sidebars/version-v1.15-sidebars.json rename to spaces_versioned_sidebars/version-1.15-sidebars.json diff --git a/spaces_versioned_sidebars/version-v1.10-sidebars.json b/spaces_versioned_sidebars/version-v1.10-sidebars.json deleted file mode 100644 index 063f5e7c8..000000000 --- a/spaces_versioned_sidebars/version-v1.10-sidebars.json +++ /dev/null @@ -1,89 +0,0 @@ -{ - "sidebar": [ - { - "type": "doc", - "id": "overview/index", - "label": "Overview" - }, - { - "type": "category", - "label": "Concepts", - "items": [ - "concepts/control-planes", - "concepts/deployment-modes", - "concepts/groups" - ] - }, - { - "type": "category", - "label": "How-tos", - "items": [ - "howtos/auto-upgrade", - "howtos/backup-and-restore", - "howtos/ctp-connector", - "howtos/debugging-a-ctp", - "howtos/managed-service", - "howtos/mcp-connector-guide", - "howtos/migrating-to-mcps", - "howtos/observability", - "howtos/query-api", - "howtos/secrets-management", - { - "type": "category", - "label": "Automation and GitOps", - "items": [ - 
"howtos/automation-and-gitops/overview" - ] - }, - { - "type": "category", - "label": "Cloud Spaces", - "items": [ - "howtos/cloud-spaces/dedicated-spaces-deployment", - "howtos/cloud-spaces/gitops-on-upbound" - ] - }, - { - "type": "category", - "label": "Self-Hosted", - "items": [ - "howtos/self-hosted/administer-features", - "howtos/self-hosted/attach-detach", - "howtos/self-hosted/billing", - "howtos/self-hosted/capacity-licensing", - "howtos/self-hosted/certs", - "howtos/self-hosted/configure-ha", - "howtos/self-hosted/controllers", - "howtos/self-hosted/declarative-ctps", - "howtos/self-hosted/deployment-reqs", - "howtos/self-hosted/dr", - "howtos/self-hosted/gitops-with-argocd", - "howtos/self-hosted/managed-spaces-deployment", - "howtos/self-hosted/oidc-configuration", - "howtos/self-hosted/proxies-config", - "howtos/self-hosted/query-api", - "howtos/self-hosted/scaling-resources", - "howtos/self-hosted/self-hosted-spaces-deployment", - "howtos/self-hosted/space-observability", - "howtos/self-hosted/spaces-management", - "howtos/self-hosted/troubleshooting", - { - "type": "category", - "label": "Workload ID", - "items": [ - "howtos/self-hosted/workload-id/backup-restore-config", - "howtos/self-hosted/workload-id/billing-config", - "howtos/self-hosted/workload-id/eso-config" - ] - } - ] - } - ] - }, - { - "type": "doc", - "id": "reference/index", - "label": "API Reference" - } - ] -} diff --git a/spaces_versioned_sidebars/version-v1.11-sidebars.json b/spaces_versioned_sidebars/version-v1.11-sidebars.json deleted file mode 100644 index 063f5e7c8..000000000 --- a/spaces_versioned_sidebars/version-v1.11-sidebars.json +++ /dev/null @@ -1,89 +0,0 @@ -{ - "sidebar": [ - { - "type": "doc", - "id": "overview/index", - "label": "Overview" - }, - { - "type": "category", - "label": "Concepts", - "items": [ - "concepts/control-planes", - "concepts/deployment-modes", - "concepts/groups" - ] - }, - { - "type": "category", - "label": "How-tos", - "items": [ - "howtos/auto-upgrade", - "howtos/backup-and-restore", - "howtos/ctp-connector", - "howtos/debugging-a-ctp", - "howtos/managed-service", - "howtos/mcp-connector-guide", - "howtos/migrating-to-mcps", - "howtos/observability", - "howtos/query-api", - "howtos/secrets-management", - { - "type": "category", - "label": "Automation and GitOps", - "items": [ - "howtos/automation-and-gitops/overview" - ] - }, - { - "type": "category", - "label": "Cloud Spaces", - "items": [ - "howtos/cloud-spaces/dedicated-spaces-deployment", - "howtos/cloud-spaces/gitops-on-upbound" - ] - }, - { - "type": "category", - "label": "Self-Hosted", - "items": [ - "howtos/self-hosted/administer-features", - "howtos/self-hosted/attach-detach", - "howtos/self-hosted/billing", - "howtos/self-hosted/capacity-licensing", - "howtos/self-hosted/certs", - "howtos/self-hosted/configure-ha", - "howtos/self-hosted/controllers", - "howtos/self-hosted/declarative-ctps", - "howtos/self-hosted/deployment-reqs", - "howtos/self-hosted/dr", - "howtos/self-hosted/gitops-with-argocd", - "howtos/self-hosted/managed-spaces-deployment", - "howtos/self-hosted/oidc-configuration", - "howtos/self-hosted/proxies-config", - "howtos/self-hosted/query-api", - "howtos/self-hosted/scaling-resources", - "howtos/self-hosted/self-hosted-spaces-deployment", - "howtos/self-hosted/space-observability", - "howtos/self-hosted/spaces-management", - "howtos/self-hosted/troubleshooting", - { - "type": "category", - "label": "Workload ID", - "items": [ - "howtos/self-hosted/workload-id/backup-restore-config", - 
"howtos/self-hosted/workload-id/billing-config", - "howtos/self-hosted/workload-id/eso-config" - ] - } - ] - } - ] - }, - { - "type": "doc", - "id": "reference/index", - "label": "API Reference" - } - ] -} diff --git a/spaces_versioned_sidebars/version-v1.12-sidebars.json b/spaces_versioned_sidebars/version-v1.12-sidebars.json deleted file mode 100644 index 073b7b92f..000000000 --- a/spaces_versioned_sidebars/version-v1.12-sidebars.json +++ /dev/null @@ -1,90 +0,0 @@ -{ - "sidebar": [ - { - "type": "doc", - "id": "overview/index", - "label": "Overview" - }, - { - "type": "category", - "label": "Concepts", - "items": [ - "concepts/control-planes", - "concepts/deployment-modes", - "concepts/groups" - ] - }, - { - "type": "category", - "label": "How-tos", - "items": [ - "howtos/auto-upgrade", - "howtos/backup-and-restore", - "howtos/control-plane-topologies", - "howtos/ctp-connector", - "howtos/debugging-a-ctp", - "howtos/managed-service", - "howtos/mcp-connector-guide", - "howtos/migrating-to-mcps", - "howtos/observability", - "howtos/query-api", - "howtos/secrets-management", - { - "type": "category", - "label": "Automation and GitOps", - "items": [ - "howtos/automation-and-gitops/overview" - ] - }, - { - "type": "category", - "label": "Cloud Spaces", - "items": [ - "howtos/cloud-spaces/dedicated-spaces-deployment", - "howtos/cloud-spaces/gitops-on-upbound" - ] - }, - { - "type": "category", - "label": "Self-Hosted", - "items": [ - "howtos/self-hosted/administer-features", - "howtos/self-hosted/attach-detach", - "howtos/self-hosted/billing", - "howtos/self-hosted/capacity-licensing", - "howtos/self-hosted/certs", - "howtos/self-hosted/configure-ha", - "howtos/self-hosted/controllers", - "howtos/self-hosted/declarative-ctps", - "howtos/self-hosted/deployment-reqs", - "howtos/self-hosted/dr", - "howtos/self-hosted/gitops-with-argocd", - "howtos/self-hosted/managed-spaces-deployment", - "howtos/self-hosted/oidc-configuration", - "howtos/self-hosted/proxies-config", - "howtos/self-hosted/query-api", - "howtos/self-hosted/scaling-resources", - "howtos/self-hosted/self-hosted-spaces-deployment", - "howtos/self-hosted/space-observability", - "howtos/self-hosted/spaces-management", - "howtos/self-hosted/troubleshooting", - { - "type": "category", - "label": "Workload ID", - "items": [ - "howtos/self-hosted/workload-id/backup-restore-config", - "howtos/self-hosted/workload-id/billing-config", - "howtos/self-hosted/workload-id/eso-config" - ] - } - ] - } - ] - }, - { - "type": "doc", - "id": "reference/index", - "label": "API Reference" - } - ] -} diff --git a/spaces_versioned_sidebars/version-v1.9-sidebars.json b/spaces_versioned_sidebars/version-v1.9-sidebars.json deleted file mode 100644 index 063f5e7c8..000000000 --- a/spaces_versioned_sidebars/version-v1.9-sidebars.json +++ /dev/null @@ -1,89 +0,0 @@ -{ - "sidebar": [ - { - "type": "doc", - "id": "overview/index", - "label": "Overview" - }, - { - "type": "category", - "label": "Concepts", - "items": [ - "concepts/control-planes", - "concepts/deployment-modes", - "concepts/groups" - ] - }, - { - "type": "category", - "label": "How-tos", - "items": [ - "howtos/auto-upgrade", - "howtos/backup-and-restore", - "howtos/ctp-connector", - "howtos/debugging-a-ctp", - "howtos/managed-service", - "howtos/mcp-connector-guide", - "howtos/migrating-to-mcps", - "howtos/observability", - "howtos/query-api", - "howtos/secrets-management", - { - "type": "category", - "label": "Automation and GitOps", - "items": [ - "howtos/automation-and-gitops/overview" - ] - }, 
- { - "type": "category", - "label": "Cloud Spaces", - "items": [ - "howtos/cloud-spaces/dedicated-spaces-deployment", - "howtos/cloud-spaces/gitops-on-upbound" - ] - }, - { - "type": "category", - "label": "Self-Hosted", - "items": [ - "howtos/self-hosted/administer-features", - "howtos/self-hosted/attach-detach", - "howtos/self-hosted/billing", - "howtos/self-hosted/capacity-licensing", - "howtos/self-hosted/certs", - "howtos/self-hosted/configure-ha", - "howtos/self-hosted/controllers", - "howtos/self-hosted/declarative-ctps", - "howtos/self-hosted/deployment-reqs", - "howtos/self-hosted/dr", - "howtos/self-hosted/gitops-with-argocd", - "howtos/self-hosted/managed-spaces-deployment", - "howtos/self-hosted/oidc-configuration", - "howtos/self-hosted/proxies-config", - "howtos/self-hosted/query-api", - "howtos/self-hosted/scaling-resources", - "howtos/self-hosted/self-hosted-spaces-deployment", - "howtos/self-hosted/space-observability", - "howtos/self-hosted/spaces-management", - "howtos/self-hosted/troubleshooting", - { - "type": "category", - "label": "Workload ID", - "items": [ - "howtos/self-hosted/workload-id/backup-restore-config", - "howtos/self-hosted/workload-id/billing-config", - "howtos/self-hosted/workload-id/eso-config" - ] - } - ] - } - ] - }, - { - "type": "doc", - "id": "reference/index", - "label": "API Reference" - } - ] -} diff --git a/spaces_versions.json b/spaces_versions.json index 07dee0e04..1a26c5a3f 100644 --- a/spaces_versions.json +++ b/spaces_versions.json @@ -1 +1 @@ -["v1.15","v1.14", "v1.13", "v1.12", "v1.11", "v1.10", "v1.9"] +["1.15","1.14", "1.13"] diff --git a/src/sidebars/spaces.js b/src/sidebars/spaces.js index 4b746709a..4e526c633 100644 --- a/src/sidebars/spaces.js +++ b/src/sidebars/spaces.js @@ -16,7 +16,7 @@ module.exports = { }, { type: 'category', - label: 'How-To Guides', + label: 'How-tos ', items: [ 'howtos/api-connector', 'howtos/auto-upgrade', diff --git a/src/theme/DocItem/Layout/index.js b/src/theme/DocItem/Layout/index.js index 37307ff17..b77352e82 100644 --- a/src/theme/DocItem/Layout/index.js +++ b/src/theme/DocItem/Layout/index.js @@ -3,24 +3,21 @@ import Layout from '@theme-original/DocItem/Layout'; import { useLocation } from '@docusaurus/router'; import styles from './layout.module.css'; +const versionsJson = require('../../../../spaces_versions.json'); + +const versions = versionsJson.map((version, index) => ({ + label: index === 0 ? `${version} (Latest)` : version, + value: index === 0 ? '' : version, +})); + export default function LayoutWrapper(props) { const location = useLocation(); const isSpacesPage = location.pathname.startsWith('/spaces'); - const versions = [ - { label: '1.15 (Latest)', value: '' }, - { label: '1.14', value: 'v1.14' }, - { label: '1.13', value: 'v1.13' }, - { label: '1.12', value: 'v1.12' }, - { label: '1.11', value: 'v1.11' }, - { label: '1.10', value: 'v1.10' }, - { label: '1.9', value: 'v1.9' }, - ]; - const getCurrentVersion = () => { const pathSegments = location.pathname.split('/').filter(Boolean); if (pathSegments[0] === 'spaces') { - if (/^v\d+\.\d+$/.test(pathSegments[1])) { + if (/^\d+\.\d+$/.test(pathSegments[1])) { return pathSegments[1]; } } @@ -33,7 +30,7 @@ export default function LayoutWrapper(props) { let newPath = location.pathname; if (pathSegments[0] === 'spaces') { - if (/^v\d+\.\d+$/.test(pathSegments[1])) { + if (/^\d+\.\d+$/.test(pathSegments[1])) { const contentPath = '/' + pathSegments.slice(2).join('/'); newPath = selectedVersion ? 
`/spaces/${selectedVersion}${contentPath}` : `/spaces${contentPath}`; } else { diff --git a/src/theme/DocItem/Layout/layout.module.css b/src/theme/DocItem/Layout/layout.module.css index dae1030f0..3891f57db 100644 --- a/src/theme/DocItem/Layout/layout.module.css +++ b/src/theme/DocItem/Layout/layout.module.css @@ -1,57 +1,34 @@ .versionSelector { - background-color: #f8f9fa; - border-bottom: 1px solid #dee2e6; - padding: 12px 20px; + margin-bottom: 1rem; + padding: 0.75rem; + background-color: var(--ifm-background-color); + border: 1px solid var(--ifm-border-color); + border-radius: 0.25rem; display: flex; align-items: center; - gap: 10px; - margin-bottom: 16px; -} - -html[data-theme='dark'] .versionSelector { - background-color: #2d2d2d; - border-bottom-color: #444; + gap: 0.5rem; } .label { font-weight: 500; - font-size: 14px; - color: #333; margin: 0; } -html[data-theme='dark'] .label { - color: #d0d0d0; -} - .select { - padding: 6px 12px; - border: 1px solid #ced4da; - border-radius: 4px; - font-size: 14px; + padding: 0.375rem 0.75rem; + border: 1px solid var(--ifm-border-color); + border-radius: 0.25rem; + background-color: var(--ifm-background-surface); + color: var(--ifm-text); cursor: pointer; - background-color: white; - color: #333; - font-weight: 500; - min-width: 120px; -} - -html[data-theme='dark'] .select { - background-color: #3d3d3d; - color: #e0e0e0; - border-color: #555; } .select:hover { - border-color: #999; -} - -html[data-theme='dark'] .select:hover { - border-color: #888; + border-color: var(--ifm-color-primary); } .select:focus { outline: none; - border-color: #80bdff; - box-shadow: 0 0 0 0.2rem rgba(0, 123, 255, 0.25); + border-color: var(--ifm-color-primary); + box-shadow: 0 0 0 2px var(--ifm-color-primary-light); } diff --git a/static/crds/space/v1.10/admin.spaces.upbound.io_spacebackupconfigs.yaml b/static/crds/space/v1.10/admin.spaces.upbound.io_spacebackupconfigs.yaml deleted file mode 100644 index 9feedd34f..000000000 --- a/static/crds/space/v1.10/admin.spaces.upbound.io_spacebackupconfigs.yaml +++ /dev/null @@ -1,177 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.3 - name: spacebackupconfigs.admin.spaces.upbound.io -spec: - group: admin.spaces.upbound.io - names: - categories: - - spaces - kind: SpaceBackupConfig - listKind: SpaceBackupConfigList - plural: spacebackupconfigs - singular: spacebackupconfig - scope: Cluster - versions: - - additionalPrinterColumns: - - jsonPath: .spec.objectStorage.provider - name: Provider - type: string - - jsonPath: .spec.objectStorage.bucket - name: Bucket - type: string - - jsonPath: .spec.objectStorage.credentials.source - name: Auth - type: string - - jsonPath: .metadata.annotations.spacebackupconfig\.admin\.internal\.spaces\.upbound\.io/secret - name: Secret - type: string - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - name: v1alpha1 - schema: - openAPIV3Schema: - description: SpaceBackupConfig defines the configuration to backup a Space. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. 
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: |- - A SpaceBackupConfigSpec represents the configuration to backup or restore - a Space. - properties: - objectStorage: - description: ObjectStorage specifies the object storage configuration - for the given provider. - properties: - bucket: - description: Bucket is the name of the bucket to store backups - in. - minLength: 1 - type: string - config: - description: |- - Config is a free-form map of configuration options for the object storage provider. - See https://github.com/thanos-io/objstore?tab=readme-ov-file for more - information on the formats for each supported cloud provider. Bucket and - Provider will override the required values in the config. - type: object - x-kubernetes-preserve-unknown-fields: true - credentials: - description: Credentials specifies the credentials to access the - object storage. - properties: - env: - description: |- - Env is a reference to an environment variable that contains credentials - that must be used to connect to the provider. - properties: - name: - description: Name is the name of an environment variable. - type: string - required: - - name - type: object - fs: - description: |- - Fs is a reference to a filesystem location that contains credentials that - must be used to connect to the provider. - properties: - path: - description: Path is a filesystem path. - type: string - required: - - path - type: object - secretRef: - description: |- - A SecretRef is a reference to a secret key that contains the credentials - that must be used to connect to the provider. - properties: - key: - default: credentials - description: The key to select. - type: string - name: - description: Name of the secret. - type: string - namespace: - description: Namespace of the secret. - type: string - required: - - key - - name - - namespace - type: object - source: - allOf: - - enum: - - Secret - - InjectedIdentity - - enum: - - Secret - - InjectedIdentity - description: |- - Source of the credentials. - Source "Secret" requires "get" permissions on the referenced Secret. - type: string - required: - - source - type: object - x-kubernetes-validations: - - message: secretRef.name and namespace must be set when source - is Secret - rule: self.source != 'Secret' || (has(self.secretRef) && has(self.secretRef.name) - && has(self.secretRef.__namespace__)) - prefix: - description: |- - Prefix is the prefix to use for all backups using this - SharedBackupConfig, e.g. "prod/cluster1", resulting in backups for - controlplane "ctp1" in namespace "ns1" being stored in - "prod/cluster1/ns1/ctp1". - type: string - provider: - description: Provider is the name of the object storage provider. 
- enum: - - AWS - - Azure - - GCP - type: string - required: - - bucket - - credentials - - provider - type: object - x-kubernetes-validations: - - message: credentials.secretRef.name must be set when source is Secret - rule: self.credentials.source != 'Secret' || (has(self.credentials.secretRef) - && has(self.credentials.secretRef.name)) - required: - - objectStorage - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/static/crds/space/v1.10/admin.spaces.upbound.io_spacebackups.yaml b/static/crds/space/v1.10/admin.spaces.upbound.io_spacebackups.yaml deleted file mode 100644 index 2e0824038..000000000 --- a/static/crds/space/v1.10/admin.spaces.upbound.io_spacebackups.yaml +++ /dev/null @@ -1,787 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.3 - name: spacebackups.admin.spaces.upbound.io -spec: - group: admin.spaces.upbound.io - names: - categories: - - spaces - kind: SpaceBackup - listKind: SpaceBackupList - plural: spacebackups - singular: spacebackup - scope: Cluster - versions: - - additionalPrinterColumns: - - jsonPath: .status.phase - name: Phase - type: string - - jsonPath: .status.retries - name: Retries - type: integer - - jsonPath: .spec.ttl - name: TTL - type: string - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - name: v1alpha1 - schema: - openAPIV3Schema: - description: SpaceBackup represents a backup of a Space. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: SpaceBackupSpec defines a backup over a set of Match - properties: - configRef: - description: |- - ConfigRef is a reference to the space backup configuration. - ApiGroup is optional and defaults to "spaces.upbound.io". - Kind is required, and the only supported value is "SpaceBackupConfig" at - the moment. - Name is required. - properties: - apiGroup: - description: APIGroup is the group for the resource being referenced. - type: string - kind: - description: Kind is the type of resource being referenced. - minLength: 1 - type: string - name: - description: Name is the name of resource being referenced. - minLength: 1 - type: string - type: object - x-kubernetes-map-type: atomic - x-kubernetes-validations: - - message: backup config ref must be a reference to a SpaceBackupConfig - rule: (!has(self.apiGroup) || self.apiGroup == 'admin.spaces.upbound.io') - && self.kind == 'SpaceBackupConfig' - - message: backup config ref must have a name - rule: size(self.name) > 0 - controlPlaneBackups: - description: ControlPlaneBackups is the definition of the control - plane backups, - properties: - excludedResources: - description: |- - ExcludedResources is a slice of resource names that are not - included in the backup. 
Used to filter the included extra resources. - items: - type: string - type: array - x-kubernetes-list-type: set - type: object - deletionPolicy: - default: Orphan - description: DeletionPolicy is the policy for the backup. - enum: - - Orphan - - Delete - type: string - exclude: - description: |- - Exclude is the selector for resources that should be excluded from the backup. - If both Match and Exclude are specified, the Exclude selector will be applied - after the Match selector. - By default, only SpaceBackups are excluded. - properties: - controlPlanes: - description: |- - ControlPlanes specifies the control planes selected. - A control plane is matched if any of the control plane selectors matches, if not specified - any control plane in the selected groups is matched. - properties: - labelSelectors: - description: |- - A resource is matched if any of the label selector matches. - In case when the list is empty, resource is matched too. - items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - names: - description: |- - A resource is selected if its metadata.name matches any of the provided names. - In case when the list is empty, resource is matched too. - items: - type: string - type: array - x-kubernetes-list-type: set - type: object - extras: - description: Extras specifies the extra resources selected. - items: - description: GenericSpaceBackupResourceSelector represents a - generic resource selector. - properties: - apiGroup: - description: APIGroup is the group of the resource. - type: string - kind: - description: Kind is the kind of the resource. - type: string - labelSelectors: - description: |- - A resource is matched if any of the label selector matches. - In case when the list is empty, resource is matched too. - items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. 
A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - names: - description: |- - A resource is selected if its metadata.name matches any of the provided names. - In case when the list is empty, resource is matched too. - items: - type: string - type: array - x-kubernetes-list-type: set - required: - - apiGroup - - kind - type: object - type: array - groups: - description: |- - Groups specifies the groups selected. - A group is matched if any of the group selectors matches, if not specified - any group is matched. Group selector is ANDed with all other selectors, so no resource in - a group not matching the group selector will be included in the backup. - properties: - labelSelectors: - description: |- - A resource is matched if any of the label selector matches. - In case when the list is empty, resource is matched too. - items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. 
- items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - names: - description: |- - A resource is selected if its metadata.name matches any of the provided names. - In case when the list is empty, resource is matched too. - items: - type: string - type: array - x-kubernetes-list-type: set - type: object - secrets: - description: Spaces specifies the spaces selected. - properties: - labelSelectors: - description: |- - A resource is matched if any of the label selector matches. - In case when the list is empty, resource is matched too. - items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - names: - description: |- - A resource is selected if its metadata.name matches any of the provided names. - In case when the list is empty, resource is matched too. - items: - type: string - type: array - x-kubernetes-list-type: set - type: object - type: object - match: - description: |- - Match is the selector for resources that should be included in the backup. - By default, we'll back up all Groups and for each Group: - - All ControlPlanes. - - All Secrets. - - All other Space API resources, e.g. SharedBackupConfigs, SharedUpboundPolicies, Backups, etc... - properties: - controlPlanes: - description: |- - ControlPlanes specifies the control planes selected. - A control plane is matched if any of the control plane selectors matches, if not specified - any control plane in the selected groups is matched. 
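
Since the same selector block recurs throughout these schemas, a short sketch of its semantics may help: entries in labelSelectors are ORed against each other, while matchLabels and matchExpressions inside a single entry are ANDed, and an empty names list matches everything. The labels and values below are invented for the example:

controlPlanes:
  labelSelectors:                   # a control plane matches if ANY entry matches
    - matchLabels:
        env: prod                   # ANDed with the expression below
      matchExpressions:
        - key: team
          operator: In              # valid operators: In, NotIn, Exists, DoesNotExist
          values:
            - payments
            - billing
  names: []                         # an empty list places no name restriction
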
- properties: - labelSelectors: - description: |- - A resource is matched if any of the label selector matches. - In case when the list is empty, resource is matched too. - items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - names: - description: |- - A resource is selected if its metadata.name matches any of the provided names. - In case when the list is empty, resource is matched too. - items: - type: string - type: array - x-kubernetes-list-type: set - type: object - extras: - description: Extras specifies the extra resources selected. - items: - description: GenericSpaceBackupResourceSelector represents a - generic resource selector. - properties: - apiGroup: - description: APIGroup is the group of the resource. - type: string - kind: - description: Kind is the kind of the resource. - type: string - labelSelectors: - description: |- - A resource is matched if any of the label selector matches. - In case when the list is empty, resource is matched too. - items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. 
If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - names: - description: |- - A resource is selected if its metadata.name matches any of the provided names. - In case when the list is empty, resource is matched too. - items: - type: string - type: array - x-kubernetes-list-type: set - required: - - apiGroup - - kind - type: object - type: array - groups: - description: |- - Groups specifies the groups selected. - A group is matched if any of the group selectors matches, if not specified - any group is matched. Group selector is ANDed with all other selectors, so no resource in - a group not matching the group selector will be included in the backup. - properties: - labelSelectors: - description: |- - A resource is matched if any of the label selector matches. - In case when the list is empty, resource is matched too. - items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - names: - description: |- - A resource is selected if its metadata.name matches any of the provided names. - In case when the list is empty, resource is matched too. - items: - type: string - type: array - x-kubernetes-list-type: set - type: object - secrets: - description: Spaces specifies the spaces selected. 
- properties: - labelSelectors: - description: |- - A resource is matched if any of the label selector matches. - In case when the list is empty, resource is matched too. - items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - names: - description: |- - A resource is selected if its metadata.name matches any of the provided names. - In case when the list is empty, resource is matched too. - items: - type: string - type: array - x-kubernetes-list-type: set - type: object - type: object - ttl: - description: |- - TTL is the time to live for the backup. After this time, the backup - will be eligible for garbage collection. If not set, the backup will - not be garbage collected. - type: string - required: - - configRef - type: object - x-kubernetes-validations: - - message: spec.configRef can't be changed or set after creation - rule: '!has(self.configRef) && !has(oldSelf.configRef) || (self.configRef - == oldSelf.configRef) ' - - message: spec.match can't be changed or set after creation - rule: '!has(self.match) && !has(oldSelf.match) || (self.match == oldSelf.match) ' - - message: spec.exclude can't be changed or set after creation - rule: '!has(self.exclude) && !has(oldSelf.exclude) || (self.exclude - == oldSelf.exclude) ' - - message: spec.controlPlaneBackups can't be changed or set after creation - rule: '!has(self.controlPlaneBackups) && !has(oldSelf.controlPlaneBackups) - || (self.controlPlaneBackups == oldSelf.controlPlaneBackups) ' - status: - description: SpaceBackupStatus represents the observed state of a SpaceBackup. - properties: - conditions: - description: Conditions of the resource. - items: - description: A Condition that may apply to a resource. - properties: - lastTransitionTime: - description: |- - LastTransitionTime is the last time this condition transitioned from one - status to another. 
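
To see how configRef, match, and exclude compose, here is a hypothetical SpaceBackup built from the fields above; every name in it is illustrative:

apiVersion: admin.spaces.upbound.io/v1alpha1
kind: SpaceBackup
metadata:
  name: prod-backup                 # illustrative name
spec:
  configRef:
    kind: SpaceBackupConfig         # the only supported kind at the moment
    name: default                   # the SpaceBackupConfig sketched earlier
  deletionPolicy: Orphan            # default; the other accepted value is Delete
  ttl: 720h                         # eligible for garbage collection after 30 days
  match:
    controlPlanes:
      labelSelectors:
        - matchLabels:
            tier: prod              # only control planes labeled tier=prod
  exclude:
    groups:
      names:
        - scratch                   # illustrative group to leave out of the backup

Per the validations above, spec.configRef, spec.match, spec.exclude, and spec.controlPlaneBackups are all immutable once the backup is created.
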
- format: date-time - type: string - message: - description: |- - A Message containing details about this condition's last transition from - one status to another, if any. - type: string - observedGeneration: - description: |- - ObservedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - type: integer - reason: - description: A Reason for this condition's last transition from - one status to another. - type: string - status: - description: Status of this condition; is it currently True, - False, or Unknown? - type: string - type: - description: |- - Type of this condition. At most one of each condition type may apply to - a resource at any point in time. - type: string - required: - - lastTransitionTime - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - observedGeneration: - description: |- - ObservedGeneration is the latest metadata.generation - which resulted in either a ready state, or stalled due to error - it can not recover from without human intervention. - format: int64 - type: integer - phase: - default: Pending - description: Phase is the current phase of the backup. - enum: - - Pending - - InProgress - - Failed - - Completed - - Deleted - type: string - retries: - description: Retries is the number of times the backup has been retried. - format: int32 - type: integer - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/static/crds/space/v1.10/admin.spaces.upbound.io_spacebackupschedules.yaml b/static/crds/space/v1.10/admin.spaces.upbound.io_spacebackupschedules.yaml deleted file mode 100644 index bf66b7856..000000000 --- a/static/crds/space/v1.10/admin.spaces.upbound.io_spacebackupschedules.yaml +++ /dev/null @@ -1,789 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.3 - name: spacebackupschedules.admin.spaces.upbound.io -spec: - group: admin.spaces.upbound.io - names: - categories: - - spaces - kind: SpaceBackupSchedule - listKind: SpaceBackupScheduleList - plural: spacebackupschedules - singular: spacebackupschedule - scope: Cluster - versions: - - additionalPrinterColumns: - - jsonPath: .spec.schedule - name: Schedule - type: string - - jsonPath: .status.lastBackup - name: LastBackup - type: date - - jsonPath: .spec.ttl - name: TTL - type: string - - jsonPath: .spec.suspend - name: Suspended - type: boolean - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - name: v1alpha1 - schema: - openAPIV3Schema: - description: SpaceBackupSchedule represents a schedule to backup a Space. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. 
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: SpaceBackupScheduleSpec defines a space backup schedule. - properties: - configRef: - description: |- - ConfigRef is a reference to the space backup configuration. - ApiGroup is optional and defaults to "spaces.upbound.io". - Kind is required, and the only supported value is "SpaceBackupConfig" at - the moment. - Name is required. - properties: - apiGroup: - description: APIGroup is the group for the resource being referenced. - type: string - kind: - description: Kind is the type of resource being referenced. - minLength: 1 - type: string - name: - description: Name is the name of resource being referenced. - minLength: 1 - type: string - type: object - x-kubernetes-map-type: atomic - x-kubernetes-validations: - - message: backup config ref must be a reference to a SpaceBackupConfig - rule: (!has(self.apiGroup) || self.apiGroup == 'admin.spaces.upbound.io') - && self.kind == 'SpaceBackupConfig' - - message: backup config ref must have a name - rule: size(self.name) > 0 - controlPlaneBackups: - description: ControlPlaneBackups is the definition of the control - plane backups, - properties: - excludedResources: - description: |- - ExcludedResources is a slice of resource names that are not - included in the backup. Used to filter the included extra resources. - items: - type: string - type: array - x-kubernetes-list-type: set - type: object - deletionPolicy: - default: Orphan - description: DeletionPolicy is the policy for the backup. - enum: - - Orphan - - Delete - type: string - exclude: - description: |- - Exclude is the selector for resources that should be excluded from the backup. - If both Match and Exclude are specified, the Exclude selector will be applied - after the Match selector. - By default, only SpaceBackups are excluded. - properties: - controlPlanes: - description: |- - ControlPlanes specifies the control planes selected. - A control plane is matched if any of the control plane selectors matches, if not specified - any control plane in the selected groups is matched. - properties: - labelSelectors: - description: |- - A resource is matched if any of the label selector matches. - In case when the list is empty, resource is matched too. - items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. 
- items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - names: - description: |- - A resource is selected if its metadata.name matches any of the provided names. - In case when the list is empty, resource is matched too. - items: - type: string - type: array - x-kubernetes-list-type: set - type: object - extras: - description: Extras specifies the extra resources selected. - items: - description: GenericSpaceBackupResourceSelector represents a - generic resource selector. - properties: - apiGroup: - description: APIGroup is the group of the resource. - type: string - kind: - description: Kind is the kind of the resource. - type: string - labelSelectors: - description: |- - A resource is matched if any of the label selector matches. - In case when the list is empty, resource is matched too. - items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - names: - description: |- - A resource is selected if its metadata.name matches any of the provided names. - In case when the list is empty, resource is matched too. - items: - type: string - type: array - x-kubernetes-list-type: set - required: - - apiGroup - - kind - type: object - type: array - groups: - description: |- - Groups specifies the groups selected. - A group is matched if any of the group selectors matches, if not specified - any group is matched. 
Group selector is ANDed with all other selectors, so no resource in - a group not matching the group selector will be included in the backup. - properties: - labelSelectors: - description: |- - A resource is matched if any of the label selector matches. - In case when the list is empty, resource is matched too. - items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - names: - description: |- - A resource is selected if its metadata.name matches any of the provided names. - In case when the list is empty, resource is matched too. - items: - type: string - type: array - x-kubernetes-list-type: set - type: object - secrets: - description: Spaces specifies the spaces selected. - properties: - labelSelectors: - description: |- - A resource is matched if any of the label selector matches. - In case when the list is empty, resource is matched too. - items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. 
- items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - names: - description: |- - A resource is selected if its metadata.name matches any of the provided names. - In case when the list is empty, resource is matched too. - items: - type: string - type: array - x-kubernetes-list-type: set - type: object - type: object - match: - description: |- - Match is the selector for resources that should be included in the backup. - By default, we'll back up all Groups and for each Group: - - All ControlPlanes. - - All Secrets. - - All other Space API resources, e.g. SharedBackupConfigs, SharedUpboundPolicies, Backups, etc... - properties: - controlPlanes: - description: |- - ControlPlanes specifies the control planes selected. - A control plane is matched if any of the control plane selectors matches, if not specified - any control plane in the selected groups is matched. - properties: - labelSelectors: - description: |- - A resource is matched if any of the label selector matches. - In case when the list is empty, resource is matched too. - items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - names: - description: |- - A resource is selected if its metadata.name matches any of the provided names. - In case when the list is empty, resource is matched too. - items: - type: string - type: array - x-kubernetes-list-type: set - type: object - extras: - description: Extras specifies the extra resources selected. 
- items: - description: GenericSpaceBackupResourceSelector represents a - generic resource selector. - properties: - apiGroup: - description: APIGroup is the group of the resource. - type: string - kind: - description: Kind is the kind of the resource. - type: string - labelSelectors: - description: |- - A resource is matched if any of the label selector matches. - In case when the list is empty, resource is matched too. - items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - names: - description: |- - A resource is selected if its metadata.name matches any of the provided names. - In case when the list is empty, resource is matched too. - items: - type: string - type: array - x-kubernetes-list-type: set - required: - - apiGroup - - kind - type: object - type: array - groups: - description: |- - Groups specifies the groups selected. - A group is matched if any of the group selectors matches, if not specified - any group is matched. Group selector is ANDed with all other selectors, so no resource in - a group not matching the group selector will be included in the backup. - properties: - labelSelectors: - description: |- - A resource is matched if any of the label selector matches. - In case when the list is empty, resource is matched too. - items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. 
- type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - names: - description: |- - A resource is selected if its metadata.name matches any of the provided names. - In case when the list is empty, resource is matched too. - items: - type: string - type: array - x-kubernetes-list-type: set - type: object - secrets: - description: Spaces specifies the spaces selected. - properties: - labelSelectors: - description: |- - A resource is matched if any of the label selector matches. - In case when the list is empty, resource is matched too. - items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - names: - description: |- - A resource is selected if its metadata.name matches any of the provided names. - In case when the list is empty, resource is matched too. - items: - type: string - type: array - x-kubernetes-list-type: set - type: object - type: object - schedule: - description: Schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. 
- minLength: 1 - type: string - suspend: - description: |- - Suspend specifies whether the schedule is suspended. If true, no - SpaceBackups will be created, but running backups will be allowed to - complete. - type: boolean - ttl: - description: |- - TTL is the time to live for the backup. After this time, the backup - will be eligible for garbage collection. If not set, the backup will - not be garbage collected. - type: string - useOwnerReferencesInBackup: - description: |- - UseOwnerReferencesInBackup specifies whether an ownership chain should be - established between this resource and the Backup it creates. - If set to true, the Backup will be garbage collected when this resource - is deleted. - type: boolean - required: - - configRef - - schedule - type: object - status: - description: SpaceBackupScheduleStatus represents the observed state of - a SpaceBackupSchedule. - properties: - conditions: - description: Conditions of the resource. - items: - description: A Condition that may apply to a resource. - properties: - lastTransitionTime: - description: |- - LastTransitionTime is the last time this condition transitioned from one - status to another. - format: date-time - type: string - message: - description: |- - A Message containing details about this condition's last transition from - one status to another, if any. - type: string - observedGeneration: - description: |- - ObservedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - type: integer - reason: - description: A Reason for this condition's last transition from - one status to another. - type: string - status: - description: Status of this condition; is it currently True, - False, or Unknown? - type: string - type: - description: |- - Type of this condition. At most one of each condition type may apply to - a resource at any point in time. - type: string - required: - - lastTransitionTime - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - lastBackup: - description: |- - LastBackup is the last time a Backup was run for this - schedule. - format: date-time - type: string - observedGeneration: - description: |- - ObservedGeneration is the latest metadata.generation - which resulted in either a ready state, or stalled due to error - it can not recover from without human intervention.
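
Putting the schedule-specific fields together, a nightly schedule might be declared like this sketch (the name and cron expression are assumptions):

apiVersion: admin.spaces.upbound.io/v1alpha1
kind: SpaceBackupSchedule
metadata:
  name: nightly                      # illustrative name
spec:
  schedule: "0 2 * * *"              # Cron format: every day at 02:00
  suspend: false                     # set to true to pause; running backups still complete
  ttl: 720h                          # created backups become garbage-collectable after 30 days
  useOwnerReferencesInBackup: true   # created SpaceBackups are garbage collected with this schedule
  configRef:
    kind: SpaceBackupConfig
    name: default                    # assumed SpaceBackupConfig
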
- format: int64 - type: integer - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/static/crds/space/v1.10/authorization.spaces.upbound.io_objectrolebindings.yaml b/static/crds/space/v1.10/authorization.spaces.upbound.io_objectrolebindings.yaml deleted file mode 100644 index dcca4418f..000000000 --- a/static/crds/space/v1.10/authorization.spaces.upbound.io_objectrolebindings.yaml +++ /dev/null @@ -1,153 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.3 - name: objectrolebindings.authorization.spaces.upbound.io -spec: - group: authorization.spaces.upbound.io - names: - categories: - - iam - kind: ObjectRoleBinding - listKind: ObjectRoleBindingList - plural: objectrolebindings - singular: objectrolebinding - scope: Namespaced - versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - description: |- - A ObjectRoleBinding binds a namespaced API object to a set of subjects, at varying access levels. - For now, there can be at most one ObjectRoleBinding pointing to each API object. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: ObjectRoleBindingSpec is ObjectRoleBinding's spec. - properties: - object: - description: |- - Object references the object to which the listed subjects should have access at varying levels. - The object value is immutable after creation. - properties: - apiGroup: - description: |- - APIGroup defines the apiGroup of the object being pointed to. - With some minor differences, this is essentially matched as a DNS subdomain, like how Kubernetes validates it. - The Kubernetes legacy core group is denoted as "core". - maxLength: 64 - pattern: ^[a-z][a-z0-9-]{0,61}[a-z0-9](\.[a-z][a-z0-9-]{0,61}[a-z0-9])*$ - type: string - x-kubernetes-validations: - - message: apiGroup is immutable - rule: self == oldSelf - - message: apiGroup must be 'core' for now. This will change in - the future. - rule: self == 'core' - name: - description: |- - Name points to the .metadata.name of the object targeted. - Kubernetes validates this as a DNS 1123 subdomain. - maxLength: 253 - pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ - type: string - x-kubernetes-validations: - - message: name is immutable - rule: self == oldSelf - resource: - description: |- - Resource defines the resource type (often kind in plural, e.g. - controlplanes) being pointed to. - With some minor differences, this is essentially matched as a DNS label, like how Kubernetes validates it. - maxLength: 63 - pattern: ^[a-z][a-z0-9-]{1,61}[a-z0-9]$ - type: string - x-kubernetes-validations: - - message: resource is immutable - rule: self == oldSelf - - message: resource must be 'namespaces' for now. This will change - in the future. 
- rule: self == 'namespaces' - required: - - apiGroup - - name - - resource - type: object - subjects: - description: Subjects should be a map type with both kind+name as - a key - items: - description: |- - SubjectBinding contains a reference to the object or user identities a role - binding applies to. - properties: - kind: - description: |- - Kind of subject being referenced. Values defined by this API group are - for now only "UpboundTeam". - enum: - - UpboundTeam - type: string - x-kubernetes-validations: - - message: kind must be 'UpboundTeam' for now. This will change - in the future. - rule: self == 'UpboundTeam' - name: - description: |- - Name (identifier) of the subject (of the specified kind) being referenced. - The identifier must be 2-100 chars, [a-zA-Z0-9-], no repeating dashes, can't start/end with a dash. - Notably, a UUID fits that format. - maxLength: 100 - pattern: ^([a-zA-Z0-9]+-?)+[a-zA-Z0-9]$ - type: string - role: - description: |- - Role this subject has on the associated Object. - The list of valid roles is defined for each target API resource separately. - For namespaces, valid values are "viewer", "editor", and "admin". - The format of this is essentially a RFC 1035 label with underscores instead of dashes, minimum three characters long. - maxLength: 63 - pattern: ^[a-z][a-z0-9_]{1,62}[a-z0-9]$ - type: string - required: - - kind - - name - - role - type: object - type: array - x-kubernetes-list-map-keys: - - kind - - name - x-kubernetes-list-type: map - required: - - object - - subjects - type: object - status: - description: ObjectRoleBindingStatus is RoleBindings' status. - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/static/crds/space/v1.10/embed.go b/static/crds/space/v1.10/embed.go deleted file mode 100644 index d6ea58626..000000000 --- a/static/crds/space/v1.10/embed.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2024 Upbound Inc. 
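
As a concrete reading of the ObjectRoleBinding schema, the hypothetical binding below grants one Upbound team admin access to a group namespace; the names and the team UUID are invented:

apiVersion: authorization.spaces.upbound.io/v1alpha1
kind: ObjectRoleBinding
metadata:
  name: platform-team-admin         # illustrative name
  namespace: my-group               # ObjectRoleBindings are namespaced
spec:
  object:                           # immutable after creation
    apiGroup: core                  # must be 'core' for now
    resource: namespaces            # must be 'namespaces' for now
    name: my-group                  # the namespace being shared
  subjects:
    - kind: UpboundTeam             # the only kind defined so far
      name: 7b2e4c1a-9f3d-4e2b-8c5a-1d6f0a9b3c7e   # invented team UUID
      role: admin                   # namespaces accept viewer, editor, or admin
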
-// All rights reserved - -package crds - -import ( - "embed" -) - -//go:embed *.yaml -var Manifests embed.FS diff --git a/static/crds/space/v1.10/observability.spaces.upbound.io_sharedtelemetryconfigs.yaml b/static/crds/space/v1.10/observability.spaces.upbound.io_sharedtelemetryconfigs.yaml deleted file mode 100644 index 4fb97688b..000000000 --- a/static/crds/space/v1.10/observability.spaces.upbound.io_sharedtelemetryconfigs.yaml +++ /dev/null @@ -1,360 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.3 - name: sharedtelemetryconfigs.observability.spaces.upbound.io -spec: - group: observability.spaces.upbound.io - names: - categories: - - observability - kind: SharedTelemetryConfig - listKind: SharedTelemetryConfigList - plural: sharedtelemetryconfigs - shortNames: - - stc - singular: sharedtelemetryconfig - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .metadata.annotations.sharedtelemetryconfig\.internal\.spaces\.upbound\.io/selected - name: Selected - type: string - - jsonPath: .metadata.annotations.sharedtelemetryconfig\.internal\.spaces\.upbound\.io/failed - name: Failed - type: string - - jsonPath: .metadata.annotations.sharedtelemetryconfig\.internal\.spaces\.upbound\.io/provisioned - name: Provisioned - type: string - - jsonPath: .status.conditions[?(@.type=='Validated')].status - name: Validated - type: string - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - name: v1alpha1 - schema: - openAPIV3Schema: - description: SharedTelemetryConfig defines a telemetry configuration over - a set of ControlPlanes. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: SharedTelemetryConfigSpec defines a telemetry configuration - over a set of ControlPlanes. - properties: - configPatchSecretRefs: - description: |- - ConfigPatchSecretRefs allows defining patches sourced from secrets to be - applied to the telemetry configuration. - items: - description: |- - ConfigPatchSecretRef defines a config patch sourced from a secret to be - applied to the telemetry configuration. - properties: - key: - description: Key in the secret from which to source the patch. - type: string - name: - description: Name of the secret. - type: string - path: - description: |- - Path to the field in the telemetry configuration to patch. - Currently, we only support patching exporters, so the path - needs to start with "exporters". - type: string - x-kubernetes-validations: - - message: Only 'exporters' patching is supported, path must - start with 'exporters.' 
- rule: self.startsWith('exporters.') - required: - - key - - name - - path - type: object - type: array - controlPlaneSelector: - description: |- - ControlPlaneSelector defines the selector for ControlPlanes on which to - configure telemetry. - properties: - labelSelectors: - description: |- - A resource is matched if any of the label selector matches. - In case when the list is empty, resource is matched too. - items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - names: - description: |- - A resource is selected if its metadata.name matches any of the provided names. - In case when the list is empty, resource is matched too. - items: - type: string - type: array - x-kubernetes-list-type: set - type: object - x-kubernetes-validations: - - message: either names or a labelSelector must be specified - rule: (has(self.labelSelectors) || has(self.names)) && (size(self.labelSelectors) - > 0 || size(self.names) > 0) - exportPipeline: - description: |- - ExportPipeline defines the telemetry exporter pipeline to configure on - the selected ControlPlanes. - properties: - logs: - description: |- - Logs defines the logs exporter pipeline to configure on the - selected ControlPlanes. The value has to be present in the - spec.exporters field. - items: - type: string - maxItems: 10 - type: array - metrics: - description: |- - Metrics defines the metrics exporter pipeline to configure on the - selected ControlPlanes. The value has to be present in the - spec.exporters field. - items: - type: string - maxItems: 10 - type: array - traces: - description: |- - Traces defines the traces exporter pipeline to configure on the - selected ControlPlanes. The value has to be present in the - spec.exporters field. - items: - type: string - maxItems: 10 - type: array - type: object - exporters: - description: |- - Exporters defines the exporters to configure on the selected ControlPlanes. 
- Untyped as we use the underlying OpenTelemetryOperator to configure the - OpenTelemetry collector's exporters. Use the OpenTelemetry Collector - documentation to configure the exporters. - Currently only supported exporters are push based exporters. - type: object - x-kubernetes-preserve-unknown-fields: true - required: - - controlPlaneSelector - - exportPipeline - - exporters - type: object - status: - description: SharedTelemetryConfigStatus represents the observed state - of a SharedTelemetryConfig. - properties: - conditions: - description: Conditions of the resource. - items: - description: A Condition that may apply to a resource. - properties: - lastTransitionTime: - description: |- - LastTransitionTime is the last time this condition transitioned from one - status to another. - format: date-time - type: string - message: - description: |- - A Message containing details about this condition's last transition from - one status to another, if any. - type: string - observedGeneration: - description: |- - ObservedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - type: integer - reason: - description: A Reason for this condition's last transition from - one status to another. - type: string - status: - description: Status of this condition; is it currently True, - False, or Unknown? - type: string - type: - description: |- - Type of this condition. At most one of each condition type may apply to - a resource at any point in time. - type: string - required: - - lastTransitionTime - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - failed: - description: list of provisioning failures. - items: - description: SharedTelemetryConfigProvisioningFailure defines configuration - provisioning failure. - properties: - conditions: - description: Conditions of the resource. - items: - description: A Condition that may apply to a resource. - properties: - lastTransitionTime: - description: |- - LastTransitionTime is the last time this condition transitioned from one - status to another. - format: date-time - type: string - message: - description: |- - A Message containing details about this condition's last transition from - one status to another, if any. - type: string - observedGeneration: - description: |- - ObservedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - type: integer - reason: - description: A Reason for this condition's last transition - from one status to another. - type: string - status: - description: Status of this condition; is it currently - True, False, or Unknown? - type: string - type: - description: |- - Type of this condition. At most one of each condition type may apply to - a resource at any point in time. - type: string - required: - - lastTransitionTime - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - controlPlane: - description: ControlPlane name where the failure occurred. 
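# For orientation while reading the schema above: a minimal SharedTelemetryConfig
# sketch assembled from the spec fields it defines. The name, namespace, labels,
# Secret, and collector endpoint are illustrative assumptions, not values from
# this repository.
#
# apiVersion: observability.spaces.upbound.io/v1alpha1
# kind: SharedTelemetryConfig
# metadata:
#   name: prod-telemetry            # hypothetical
#   namespace: default              # the resource is namespaced
# spec:
#   controlPlaneSelector:           # names or labelSelectors must be non-empty
#     labelSelectors:
#       - matchLabels:
#           env: prod
#   exporters:                      # free-form OpenTelemetry Collector exporter
#     otlphttp:                     # config; only push-based exporters work
#       endpoint: https://collector.example.com:4318
#   exportPipeline:                 # entries must name keys under spec.exporters
#     metrics: [otlphttp]
#     traces: [otlphttp]
#   configPatchSecretRefs:          # optional; path must start with "exporters."
#     - name: otlp-credentials      # hypothetical Secret
#       key: token
#       path: exporters.otlphttp.headers.authorization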
- type: string - observedGeneration: - description: |- - ObservedGeneration is the latest metadata.generation - which resulted in either a ready state, or stalled due to error - it can not recover from without human intervention. - format: int64 - type: integer - required: - - controlPlane - type: object - type: array - x-kubernetes-list-map-keys: - - controlPlane - x-kubernetes-list-type: map - observedGeneration: - description: |- - ObservedGeneration is the latest metadata.generation - which resulted in either a ready state, or stalled due to error - it can not recover from without human intervention. - format: int64 - type: integer - provisioned: - description: List of successfully provisioned targets. - items: - type: string - type: array - x-kubernetes-list-type: set - selectedControlPlanes: - description: SelectedControlPlanes represents the names of the selected - ControlPlanes. - items: - type: string - type: array - x-kubernetes-list-type: set - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/static/crds/space/v1.10/policy.spaces.upbound.io_sharedupboundpolicies.yaml b/static/crds/space/v1.10/policy.spaces.upbound.io_sharedupboundpolicies.yaml deleted file mode 100644 index 30f732e75..000000000 --- a/static/crds/space/v1.10/policy.spaces.upbound.io_sharedupboundpolicies.yaml +++ /dev/null @@ -1,4303 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.3 - name: sharedupboundpolicies.policy.spaces.upbound.io -spec: - group: policy.spaces.upbound.io - names: - categories: - - policies - kind: SharedUpboundPolicy - listKind: SharedUpboundPolicyList - plural: sharedupboundpolicies - shortNames: - - sup - singular: sharedupboundpolicy - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .metadata.annotations.sharedupboundpolicies\.internal\.spaces\.upbound\.io/provisioned-total - name: Provisioned - type: string - - jsonPath: .metadata.creationTimestamp - name: AGE - type: date - name: v1alpha1 - schema: - openAPIV3Schema: - description: |- - SharedUpboundPolicy specifies a shared Kyverno policy projected into the specified - ControlPlanes of the same namespace as SharedUpboundPolicy. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: SharedUpboundPolicySpec defines the desired state of SharedUpboundPolicy. - properties: - admission: - default: true - description: |- - Admission controls if rules are applied during admission. - Optional. Default value is "true". - type: boolean - applyRules: - description: |- - ApplyRules controls how rules in a policy are applied. Rule are processed in - the order of declaration. When set to `One` processing stops after a rule has - been applied i.e. 
the rule matches and results in a pass, fail, or error. When - set to `All` all rules in the policy are processed. The default is `All`. - enum: - - All - - One - type: string - background: - default: true - description: |- - Background controls if rules are applied to existing resources during a background scan. - Optional. Default value is "true". The value must be set to "false" if the policy rule - uses variables that are only available in the admission review request (e.g. user name). - type: boolean - controlPlaneSelector: - description: |- - The policy is projected only to control planes - matching the provided selector. Either names or a labelSelector must be specified. - properties: - labelSelectors: - description: |- - A resource is matched if any of the label selector matches. - In case when the list is empty, resource is matched too. - items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - names: - description: |- - A resource is selected if its metadata.name matches any of the provided names. - In case when the list is empty, resource is matched too. - items: - type: string - type: array - x-kubernetes-list-type: set - type: object - x-kubernetes-validations: - - message: either names or a labelSelector must be specified - rule: (has(self.labelSelectors) || has(self.names)) && (size(self.labelSelectors) - > 0 || size(self.names) > 0) - failurePolicy: - description: |- - FailurePolicy defines how unexpected policy errors and webhook response timeout errors are handled. - Rules within the same policy share the same failure behavior. - This field should not be accessed directly, instead `GetFailurePolicy()` should be used. - Allowed values are Ignore or Fail. Defaults to Fail. 
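# Pulling the top-level fields above together: a skeleton SharedUpboundPolicy,
# with hypothetical names and control plane references; rules are elided since
# their schema follows below.
#
# apiVersion: policy.spaces.upbound.io/v1alpha1
# kind: SharedUpboundPolicy
# metadata:
#   name: require-team-label        # hypothetical
#   namespace: default
# spec:
#   policyName: require-team-label  # optional; immutable once set
#   failurePolicy: Ignore           # Ignore or Fail; defaults to Fail
#   admission: true                 # evaluate rules at admission (default)
#   background: true                # also scan existing resources (default)
#   controlPlaneSelector:           # names or labelSelectors must be non-empty
#     names:
#       - dev-ctp-1                 # hypothetical control plane
#   rules: []                       # see the rule schema that follows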
- enum: - - Ignore - - Fail - type: string - generateExisting: - description: |- - GenerateExisting controls whether to trigger generate rule in existing resources - If is set to "true" generate rule will be triggered and applied to existing matched resources. - Defaults to "false" if not specified. - type: boolean - generateExistingOnPolicyUpdate: - description: Deprecated, use generateExisting instead - type: boolean - mutateExistingOnPolicyUpdate: - description: |- - MutateExistingOnPolicyUpdate controls if a mutateExisting policy is applied on policy events. - Default value is "false". - type: boolean - policyMetadata: - description: The metadata of the policy to be created. - properties: - annotations: - additionalProperties: - type: string - description: Annotations that are set on projected resource. - type: object - labels: - additionalProperties: - type: string - description: Labels that are set on projected resource. - type: object - type: object - policyName: - description: |- - PolicyName is the name to use when creating policy within a control plane. - optional, if not set, SharedUpboundPolicy name will be used. - When set, it is immutable. - maxLength: 253 - minLength: 1 - type: string - x-kubernetes-validations: - - message: policyName is immutable - rule: self == oldSelf - rules: - description: |- - Rules is a list of Rule instances. A Policy contains multiple rules and - each rule can validate, mutate, or generate resources. - items: - description: |- - Rule defines a validation, mutation, or generation control for matching resources. - Each rules contains a match declaration to select resources, and an optional exclude - declaration to specify which resources to exclude. - properties: - celPreconditions: - description: |- - CELPreconditions are used to determine if a policy rule should be applied by evaluating a - set of CEL conditions. It can only be used with the validate.cel subrule - items: - description: MatchCondition represents a condition which must - by fulfilled for a request to be sent to a webhook. - properties: - expression: - description: |- - Expression represents the expression which will be evaluated by CEL. Must evaluate to bool. - CEL expressions have access to the contents of the AdmissionRequest and Authorizer, organized into CEL variables: - - 'object' - The object from the incoming request. The value is null for DELETE requests. - 'oldObject' - The existing object. The value is null for CREATE requests. - 'request' - Attributes of the admission request(/pkg/apis/admission/types.go#AdmissionRequest). - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. - See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the - request resource. - Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ - - Required. - type: string - name: - description: |- - Name is an identifier for this match condition, used for strategic merging of MatchConditions, - as well as providing an identifier for logging purposes. A good name should be descriptive of - the associated expression. - Name must be a qualified name consisting of alphanumeric characters, '-', '_' or '.', and - must start and end with an alphanumeric character (e.g. 
'MyName', or 'my.name', or - '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an - optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName') - - Required. - type: string - required: - - expression - - name - type: object - type: array - context: - description: Context defines variables and data sources that - can be used during rule execution. - items: - description: |- - ContextEntry adds variables and data sources to a rule Context. Either a - ConfigMap reference or a APILookup must be provided. - properties: - apiCall: - description: |- - APICall is an HTTP request to the Kubernetes API server, or other JSON web service. - The data returned is stored in the context with the name for the context entry. - properties: - data: - description: Data specifies the POST data sent to - the server. - items: - description: RequestData contains the HTTP POST - data - properties: - key: - description: Key is a unique identifier for - the data value - type: string - value: - description: Value is the data value - x-kubernetes-preserve-unknown-fields: true - required: - - key - - value - type: object - type: array - jmesPath: - description: |- - JMESPath is an optional JSON Match Expression that can be used to - transform the JSON response returned from the server. For example - a JMESPath of "items | length(@)" applied to the API server response - for the URLPath "/apis/apps/v1/deployments" will return the total count - of deployments across all namespaces. - type: string - method: - default: GET - description: Method is the HTTP request type (GET - or POST). - enum: - - GET - - POST - type: string - service: - description: Service is an API call to a JSON web - service - properties: - caBundle: - description: |- - CABundle is a PEM encoded CA bundle which will be used to validate - the server certificate. - type: string - url: - description: |- - URL is the JSON web service URL. A typical form is - `https://{service}.{namespace}:{port}/{path}`. - type: string - required: - - url - type: object - urlPath: - description: |- - URLPath is the URL path to be used in the HTTP GET or POST request to the - Kubernetes API server (e.g. "/api/v1/namespaces" or "/apis/apps/v1/deployments"). - The format required is the same format used by the `kubectl get --raw` command. - See https://kyverno.io/docs/writing-policies/external-data-sources/#variables-from-kubernetes-api-server-calls - for details. - type: string - type: object - configMap: - description: ConfigMap is the ConfigMap reference. - properties: - name: - description: Name is the ConfigMap name. - type: string - namespace: - description: Namespace is the ConfigMap namespace. - type: string - required: - - name - type: object - imageRegistry: - description: |- - ImageRegistry defines requests to an OCI/Docker V2 registry to fetch image - details. - properties: - imageRegistryCredentials: - description: ImageRegistryCredentials provides credentials - that will be used for authentication with registry - properties: - allowInsecureRegistry: - description: AllowInsecureRegistry allows insecure - access to a registry. - type: boolean - providers: - description: |- - Providers specifies a list of OCI Registry names, whose authentication providers are provided. - It can be of one of these values: default,google,azure,amazon,github. - items: - description: ImageRegistryCredentialsProvidersType - provides the list of credential providers - required. 
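# The celPreconditions and context entries above, in use: a sketch with
# hypothetical variable, namespace, and ConfigMap names; the JMESPath
# expression is the one quoted in the apiCall description.
#
# celPreconditions:                 # only valid with validate.cel subrules
#   - name: target-is-prod
#     expression: "object.metadata.namespace == 'prod'"
# context:
#   - name: deploymentCount         # variable available to the rule
#     apiCall:
#       urlPath: /apis/apps/v1/deployments
#       jmesPath: items | length(@) # count across all namespaces
#   - name: clusterDefaults
#     configMap:
#       name: policy-defaults       # hypothetical ConfigMap
#       namespace: default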
- enum: - - default - - amazon - - azure - - google - - github - type: string - type: array - secrets: - description: |- - Secrets specifies a list of secrets that are provided for credentials. - Secrets must live in the Kyverno namespace. - items: - type: string - type: array - type: object - jmesPath: - description: |- - JMESPath is an optional JSON Match Expression that can be used to - transform the ImageData struct returned as a result of processing - the image reference. - type: string - reference: - description: |- - Reference is image reference to a container image in the registry. - Example: ghcr.io/kyverno/kyverno:latest - type: string - required: - - reference - type: object - name: - description: Name is the variable name. - type: string - variable: - description: Variable defines an arbitrary JMESPath context - variable that can be defined inline. - properties: - default: - description: |- - Default is an optional arbitrary JSON object that the variable may take if the JMESPath - expression evaluates to nil - x-kubernetes-preserve-unknown-fields: true - jmesPath: - description: |- - JMESPath is an optional JMESPath Expression that can be used to - transform the variable. - type: string - value: - description: Value is any arbitrary JSON object representable - in YAML or JSON form. - x-kubernetes-preserve-unknown-fields: true - type: object - type: object - type: array - exclude: - description: |- - ExcludeResources defines when this policy rule should not be applied. The exclude - criteria can include resource information (e.g. kind, name, namespace, labels) - and admission review request information like the name or role. - properties: - all: - description: All allows specifying resources which will - be ANDed - items: - description: ResourceFilter allow users to "AND" or "OR" - between resources - properties: - clusterRoles: - description: ClusterRoles is the list of cluster-wide - role names for the user. - items: - type: string - type: array - resources: - description: ResourceDescription contains information - about the resource being created or modified. - properties: - annotations: - additionalProperties: - type: string - description: |- - Annotations is a map of annotations (key-value pairs of type string). Annotation keys - and values support the wildcard characters "*" (matches zero or many characters) and - "?" (matches at least one character). - type: object - kinds: - description: Kinds is a list of resource kinds. - items: - type: string - type: array - name: - description: |- - Name is the name of the resource. The name supports wildcard characters - "*" (matches zero or many characters) and "?" (at least one character). - NOTE: "Name" is being deprecated in favor of "Names". - type: string - names: - description: |- - Names are the names of the resources. Each name supports wildcard characters - "*" (matches zero or many characters) and "?" (at least one character). - items: - type: string - type: array - namespaceSelector: - description: |- - NamespaceSelector is a label selector for the resource namespace. Label keys and values - in `matchLabels` support the wildcard characters `*` (matches zero or many characters) - and `?` (matches one character).Wildcards allows writing label selectors like - ["storage.k8s.io/*": "*"]. Note that using ["*" : "*"] matches any key and value but - does not match an empty label set. - properties: - matchExpressions: - description: matchExpressions is a list of - label selector requirements. The requirements - are ANDed. 
- items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that - the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - namespaces: - description: |- - Namespaces is a list of namespaces names. Each name supports wildcard characters - "*" (matches zero or many characters) and "?" (at least one character). - items: - type: string - type: array - operations: - description: Operations can contain values ["CREATE, - "UPDATE", "CONNECT", "DELETE"], which are used - to match a specific action. - items: - description: AdmissionOperation can have one - of the values CREATE, UPDATE, CONNECT, DELETE, - which are used to match a specific action. - enum: - - CREATE - - CONNECT - - UPDATE - - DELETE - type: string - type: array - selector: - description: |- - Selector is a label selector. Label keys and values in `matchLabels` support the wildcard - characters `*` (matches zero or many characters) and `?` (matches one character). - Wildcards allows writing label selectors like ["storage.k8s.io/*": "*"]. Note that - using ["*" : "*"] matches any key and value but does not match an empty label set. - properties: - matchExpressions: - description: matchExpressions is a list of - label selector requirements. The requirements - are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that - the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". 
The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: object - roles: - description: Roles is the list of namespaced role - names for the user. - items: - type: string - type: array - subjects: - description: Subjects is the list of subject names - like users, user groups, and service accounts. - items: - description: |- - Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference, - or a value for non-objects such as user and group names. - properties: - apiGroup: - description: |- - APIGroup holds the API group of the referenced subject. - Defaults to "" for ServiceAccount subjects. - Defaults to "rbac.authorization.k8s.io" for User and Group subjects. - type: string - kind: - description: |- - Kind of object being referenced. Values defined by this API group are "User", "Group", and "ServiceAccount". - If the Authorizer does not recognized the kind value, the Authorizer should report an error. - type: string - name: - description: Name of the object being referenced. - type: string - namespace: - description: |- - Namespace of the referenced object. If the object kind is non-namespace, such as "User" or "Group", and this value is not empty - the Authorizer should report an error. - type: string - required: - - kind - - name - type: object - x-kubernetes-map-type: atomic - type: array - type: object - type: array - any: - description: Any allows specifying resources which will - be ORed - items: - description: ResourceFilter allow users to "AND" or "OR" - between resources - properties: - clusterRoles: - description: ClusterRoles is the list of cluster-wide - role names for the user. - items: - type: string - type: array - resources: - description: ResourceDescription contains information - about the resource being created or modified. - properties: - annotations: - additionalProperties: - type: string - description: |- - Annotations is a map of annotations (key-value pairs of type string). Annotation keys - and values support the wildcard characters "*" (matches zero or many characters) and - "?" (matches at least one character). - type: object - kinds: - description: Kinds is a list of resource kinds. - items: - type: string - type: array - name: - description: |- - Name is the name of the resource. The name supports wildcard characters - "*" (matches zero or many characters) and "?" (at least one character). - NOTE: "Name" is being deprecated in favor of "Names". - type: string - names: - description: |- - Names are the names of the resources. Each name supports wildcard characters - "*" (matches zero or many characters) and "?" (at least one character). - items: - type: string - type: array - namespaceSelector: - description: |- - NamespaceSelector is a label selector for the resource namespace. Label keys and values - in `matchLabels` support the wildcard characters `*` (matches zero or many characters) - and `?` (matches one character).Wildcards allows writing label selectors like - ["storage.k8s.io/*": "*"]. Note that using ["*" : "*"] matches any key and value but - does not match an empty label set. - properties: - matchExpressions: - description: matchExpressions is a list of - label selector requirements. The requirements - are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. 
- properties: - key: - description: key is the label key that - the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - namespaces: - description: |- - Namespaces is a list of namespaces names. Each name supports wildcard characters - "*" (matches zero or many characters) and "?" (at least one character). - items: - type: string - type: array - operations: - description: Operations can contain values ["CREATE, - "UPDATE", "CONNECT", "DELETE"], which are used - to match a specific action. - items: - description: AdmissionOperation can have one - of the values CREATE, UPDATE, CONNECT, DELETE, - which are used to match a specific action. - enum: - - CREATE - - CONNECT - - UPDATE - - DELETE - type: string - type: array - selector: - description: |- - Selector is a label selector. Label keys and values in `matchLabels` support the wildcard - characters `*` (matches zero or many characters) and `?` (matches one character). - Wildcards allows writing label selectors like ["storage.k8s.io/*": "*"]. Note that - using ["*" : "*"] matches any key and value but does not match an empty label set. - properties: - matchExpressions: - description: matchExpressions is a list of - label selector requirements. The requirements - are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that - the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. 
- type: object - type: object - x-kubernetes-map-type: atomic - type: object - roles: - description: Roles is the list of namespaced role - names for the user. - items: - type: string - type: array - subjects: - description: Subjects is the list of subject names - like users, user groups, and service accounts. - items: - description: |- - Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference, - or a value for non-objects such as user and group names. - properties: - apiGroup: - description: |- - APIGroup holds the API group of the referenced subject. - Defaults to "" for ServiceAccount subjects. - Defaults to "rbac.authorization.k8s.io" for User and Group subjects. - type: string - kind: - description: |- - Kind of object being referenced. Values defined by this API group are "User", "Group", and "ServiceAccount". - If the Authorizer does not recognized the kind value, the Authorizer should report an error. - type: string - name: - description: Name of the object being referenced. - type: string - namespace: - description: |- - Namespace of the referenced object. If the object kind is non-namespace, such as "User" or "Group", and this value is not empty - the Authorizer should report an error. - type: string - required: - - kind - - name - type: object - x-kubernetes-map-type: atomic - type: array - type: object - type: array - clusterRoles: - description: ClusterRoles is the list of cluster-wide role - names for the user. - items: - type: string - type: array - resources: - description: |- - ResourceDescription contains information about the resource being created or modified. - Requires at least one tag to be specified when under MatchResources. - Specifying ResourceDescription directly under match is being deprecated. - Please specify under "any" or "all" instead. - properties: - annotations: - additionalProperties: - type: string - description: |- - Annotations is a map of annotations (key-value pairs of type string). Annotation keys - and values support the wildcard characters "*" (matches zero or many characters) and - "?" (matches at least one character). - type: object - kinds: - description: Kinds is a list of resource kinds. - items: - type: string - type: array - name: - description: |- - Name is the name of the resource. The name supports wildcard characters - "*" (matches zero or many characters) and "?" (at least one character). - NOTE: "Name" is being deprecated in favor of "Names". - type: string - names: - description: |- - Names are the names of the resources. Each name supports wildcard characters - "*" (matches zero or many characters) and "?" (at least one character). - items: - type: string - type: array - namespaceSelector: - description: |- - NamespaceSelector is a label selector for the resource namespace. Label keys and values - in `matchLabels` support the wildcard characters `*` (matches zero or many characters) - and `?` (matches one character).Wildcards allows writing label selectors like - ["storage.k8s.io/*": "*"]. Note that using ["*" : "*"] matches any key and value but - does not match an empty label set. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. 
- properties: - key: - description: key is the label key that the - selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - namespaces: - description: |- - Namespaces is a list of namespaces names. Each name supports wildcard characters - "*" (matches zero or many characters) and "?" (at least one character). - items: - type: string - type: array - operations: - description: Operations can contain values ["CREATE, - "UPDATE", "CONNECT", "DELETE"], which are used to - match a specific action. - items: - description: AdmissionOperation can have one of the - values CREATE, UPDATE, CONNECT, DELETE, which are - used to match a specific action. - enum: - - CREATE - - CONNECT - - UPDATE - - DELETE - type: string - type: array - selector: - description: |- - Selector is a label selector. Label keys and values in `matchLabels` support the wildcard - characters `*` (matches zero or many characters) and `?` (matches one character). - Wildcards allows writing label selectors like ["storage.k8s.io/*": "*"]. Note that - using ["*" : "*"] matches any key and value but does not match an empty label set. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the - selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. 
- type: object - type: object - x-kubernetes-map-type: atomic - type: object - roles: - description: Roles is the list of namespaced role names - for the user. - items: - type: string - type: array - subjects: - description: Subjects is the list of subject names like - users, user groups, and service accounts. - items: - description: |- - Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference, - or a value for non-objects such as user and group names. - properties: - apiGroup: - description: |- - APIGroup holds the API group of the referenced subject. - Defaults to "" for ServiceAccount subjects. - Defaults to "rbac.authorization.k8s.io" for User and Group subjects. - type: string - kind: - description: |- - Kind of object being referenced. Values defined by this API group are "User", "Group", and "ServiceAccount". - If the Authorizer does not recognized the kind value, the Authorizer should report an error. - type: string - name: - description: Name of the object being referenced. - type: string - namespace: - description: |- - Namespace of the referenced object. If the object kind is non-namespace, such as "User" or "Group", and this value is not empty - the Authorizer should report an error. - type: string - required: - - kind - - name - type: object - x-kubernetes-map-type: atomic - type: array - type: object - generate: - description: Generation is used to create new resources. - properties: - apiVersion: - description: APIVersion specifies resource apiVersion. - type: string - clone: - description: |- - Clone specifies the source resource used to populate each generated resource. - At most one of Data or Clone can be specified. If neither are provided, the generated - resource will be created with default data only. - properties: - name: - description: Name specifies name of the resource. - type: string - namespace: - description: Namespace specifies source resource namespace. - type: string - type: object - cloneList: - description: CloneList specifies the list of source resource - used to populate each generated resource. - properties: - kinds: - description: Kinds is a list of resource kinds. - items: - type: string - type: array - namespace: - description: Namespace specifies source resource namespace. - type: string - selector: - description: |- - Selector is a label selector. Label keys and values in `matchLabels`. - wildcard characters are not supported. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the - selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. 
- items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: object - data: - description: |- - Data provides the resource declaration used to populate each generated resource. - At most one of Data or Clone must be specified. If neither are provided, the generated - resource will be created with default data only. - x-kubernetes-preserve-unknown-fields: true - kind: - description: Kind specifies resource kind. - type: string - name: - description: Name specifies the resource name. - type: string - namespace: - description: Namespace specifies resource namespace. - type: string - synchronize: - description: |- - Synchronize controls if generated resources should be kept in-sync with their source resource. - If Synchronize is set to "true" changes to generated resources will be overwritten with resource - data from Data or the resource specified in the Clone declaration. - Optional. Defaults to "false" if not specified. - type: boolean - uid: - description: UID specifies the resource uid. - type: string - type: object - imageExtractors: - additionalProperties: - items: - properties: - jmesPath: - description: |- - JMESPath is an optional JMESPath expression to apply to the image value. - This is useful when the extracted image begins with a prefix like 'docker://'. - The 'trim_prefix' function may be used to trim the prefix: trim_prefix(@, 'docker://'). - Note - Image digest mutation may not be used when applying a JMESPAth to an image. - type: string - key: - description: |- - Key is an optional name of the field within 'path' that will be used to uniquely identify an image. - Note - this field MUST be unique. - type: string - name: - description: |- - Name is the entry the image will be available under 'images.' in the context. - If this field is not defined, image entries will appear under 'images.custom'. - type: string - path: - description: |- - Path is the path to the object containing the image field in a custom resource. - It should be slash-separated. Each slash-separated key must be a valid YAML key or a wildcard '*'. - Wildcard keys are expanded in case of arrays or objects. - type: string - value: - description: |- - Value is an optional name of the field within 'path' that points to the image URI. - This is useful when a custom 'key' is also defined. - type: string - required: - - path - type: object - type: array - description: |- - ImageExtractors defines a mapping from kinds to ImageExtractorConfigs. - This config is only valid for verifyImages rules. - type: object - match: - description: |- - MatchResources defines when this policy rule should be applied. The match - criteria can include resource information (e.g. kind, name, namespace, labels) - and admission review request information like the user name or role. - At least one kind is required. 
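# Before the match schema that follows: a generate rule as described above,
# sketched with hypothetical names. It clones a source Secret into a target
# namespace and keeps the copy in sync.
#
# generate:
#   apiVersion: v1
#   kind: Secret
#   name: registry-creds            # hypothetical target
#   namespace: team-namespace       # hypothetical target namespace
#   synchronize: true               # overwrite drift from the clone source
#   clone:
#     name: registry-creds
#     namespace: kube-system        # hypothetical source namespace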
- properties: - all: - description: All allows specifying resources which will - be ANDed - items: - description: ResourceFilter allow users to "AND" or "OR" - between resources - properties: - clusterRoles: - description: ClusterRoles is the list of cluster-wide - role names for the user. - items: - type: string - type: array - resources: - description: ResourceDescription contains information - about the resource being created or modified. - properties: - annotations: - additionalProperties: - type: string - description: |- - Annotations is a map of annotations (key-value pairs of type string). Annotation keys - and values support the wildcard characters "*" (matches zero or many characters) and - "?" (matches at least one character). - type: object - kinds: - description: Kinds is a list of resource kinds. - items: - type: string - type: array - name: - description: |- - Name is the name of the resource. The name supports wildcard characters - "*" (matches zero or many characters) and "?" (at least one character). - NOTE: "Name" is being deprecated in favor of "Names". - type: string - names: - description: |- - Names are the names of the resources. Each name supports wildcard characters - "*" (matches zero or many characters) and "?" (at least one character). - items: - type: string - type: array - namespaceSelector: - description: |- - NamespaceSelector is a label selector for the resource namespace. Label keys and values - in `matchLabels` support the wildcard characters `*` (matches zero or many characters) - and `?` (matches one character).Wildcards allows writing label selectors like - ["storage.k8s.io/*": "*"]. Note that using ["*" : "*"] matches any key and value but - does not match an empty label set. - properties: - matchExpressions: - description: matchExpressions is a list of - label selector requirements. The requirements - are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that - the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - namespaces: - description: |- - Namespaces is a list of namespaces names. Each name supports wildcard characters - "*" (matches zero or many characters) and "?" (at least one character). - items: - type: string - type: array - operations: - description: Operations can contain values ["CREATE, - "UPDATE", "CONNECT", "DELETE"], which are used - to match a specific action. 
- items: - description: AdmissionOperation can have one - of the values CREATE, UPDATE, CONNECT, DELETE, - which are used to match a specific action. - enum: - - CREATE - - CONNECT - - UPDATE - - DELETE - type: string - type: array - selector: - description: |- - Selector is a label selector. Label keys and values in `matchLabels` support the wildcard - characters `*` (matches zero or many characters) and `?` (matches one character). - Wildcards allows writing label selectors like ["storage.k8s.io/*": "*"]. Note that - using ["*" : "*"] matches any key and value but does not match an empty label set. - properties: - matchExpressions: - description: matchExpressions is a list of - label selector requirements. The requirements - are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that - the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: object - roles: - description: Roles is the list of namespaced role - names for the user. - items: - type: string - type: array - subjects: - description: Subjects is the list of subject names - like users, user groups, and service accounts. - items: - description: |- - Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference, - or a value for non-objects such as user and group names. - properties: - apiGroup: - description: |- - APIGroup holds the API group of the referenced subject. - Defaults to "" for ServiceAccount subjects. - Defaults to "rbac.authorization.k8s.io" for User and Group subjects. - type: string - kind: - description: |- - Kind of object being referenced. Values defined by this API group are "User", "Group", and "ServiceAccount". - If the Authorizer does not recognized the kind value, the Authorizer should report an error. - type: string - name: - description: Name of the object being referenced. - type: string - namespace: - description: |- - Namespace of the referenced object. If the object kind is non-namespace, such as "User" or "Group", and this value is not empty - the Authorizer should report an error. 
- type: string - required: - - kind - - name - type: object - x-kubernetes-map-type: atomic - type: array - type: object - type: array - any: - description: Any allows specifying resources which will - be ORed - items: - description: ResourceFilter allow users to "AND" or "OR" - between resources - properties: - clusterRoles: - description: ClusterRoles is the list of cluster-wide - role names for the user. - items: - type: string - type: array - resources: - description: ResourceDescription contains information - about the resource being created or modified. - properties: - annotations: - additionalProperties: - type: string - description: |- - Annotations is a map of annotations (key-value pairs of type string). Annotation keys - and values support the wildcard characters "*" (matches zero or many characters) and - "?" (matches at least one character). - type: object - kinds: - description: Kinds is a list of resource kinds. - items: - type: string - type: array - name: - description: |- - Name is the name of the resource. The name supports wildcard characters - "*" (matches zero or many characters) and "?" (at least one character). - NOTE: "Name" is being deprecated in favor of "Names". - type: string - names: - description: |- - Names are the names of the resources. Each name supports wildcard characters - "*" (matches zero or many characters) and "?" (at least one character). - items: - type: string - type: array - namespaceSelector: - description: |- - NamespaceSelector is a label selector for the resource namespace. Label keys and values - in `matchLabels` support the wildcard characters `*` (matches zero or many characters) - and `?` (matches one character).Wildcards allows writing label selectors like - ["storage.k8s.io/*": "*"]. Note that using ["*" : "*"] matches any key and value but - does not match an empty label set. - properties: - matchExpressions: - description: matchExpressions is a list of - label selector requirements. The requirements - are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that - the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - namespaces: - description: |- - Namespaces is a list of namespaces names. Each name supports wildcard characters - "*" (matches zero or many characters) and "?" (at least one character). 
- items: - type: string - type: array - operations: - description: Operations can contain values ["CREATE, - "UPDATE", "CONNECT", "DELETE"], which are used - to match a specific action. - items: - description: AdmissionOperation can have one - of the values CREATE, UPDATE, CONNECT, DELETE, - which are used to match a specific action. - enum: - - CREATE - - CONNECT - - UPDATE - - DELETE - type: string - type: array - selector: - description: |- - Selector is a label selector. Label keys and values in `matchLabels` support the wildcard - characters `*` (matches zero or many characters) and `?` (matches one character). - Wildcards allows writing label selectors like ["storage.k8s.io/*": "*"]. Note that - using ["*" : "*"] matches any key and value but does not match an empty label set. - properties: - matchExpressions: - description: matchExpressions is a list of - label selector requirements. The requirements - are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that - the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: object - roles: - description: Roles is the list of namespaced role - names for the user. - items: - type: string - type: array - subjects: - description: Subjects is the list of subject names - like users, user groups, and service accounts. - items: - description: |- - Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference, - or a value for non-objects such as user and group names. - properties: - apiGroup: - description: |- - APIGroup holds the API group of the referenced subject. - Defaults to "" for ServiceAccount subjects. - Defaults to "rbac.authorization.k8s.io" for User and Group subjects. - type: string - kind: - description: |- - Kind of object being referenced. Values defined by this API group are "User", "Group", and "ServiceAccount". - If the Authorizer does not recognized the kind value, the Authorizer should report an error. - type: string - name: - description: Name of the object being referenced. - type: string - namespace: - description: |- - Namespace of the referenced object. If the object kind is non-namespace, such as "User" or "Group", and this value is not empty - the Authorizer should report an error. 
- type: string - required: - - kind - - name - type: object - x-kubernetes-map-type: atomic - type: array - type: object - type: array - clusterRoles: - description: ClusterRoles is the list of cluster-wide role - names for the user. - items: - type: string - type: array - resources: - description: |- - ResourceDescription contains information about the resource being created or modified. - Requires at least one tag to be specified when under MatchResources. - Specifying ResourceDescription directly under match is being deprecated. - Please specify under "any" or "all" instead. - properties: - annotations: - additionalProperties: - type: string - description: |- - Annotations is a map of annotations (key-value pairs of type string). Annotation keys - and values support the wildcard characters "*" (matches zero or many characters) and - "?" (matches at least one character). - type: object - kinds: - description: Kinds is a list of resource kinds. - items: - type: string - type: array - name: - description: |- - Name is the name of the resource. The name supports wildcard characters - "*" (matches zero or many characters) and "?" (at least one character). - NOTE: "Name" is being deprecated in favor of "Names". - type: string - names: - description: |- - Names are the names of the resources. Each name supports wildcard characters - "*" (matches zero or many characters) and "?" (at least one character). - items: - type: string - type: array - namespaceSelector: - description: |- - NamespaceSelector is a label selector for the resource namespace. Label keys and values - in `matchLabels` support the wildcard characters `*` (matches zero or many characters) - and `?` (matches one character).Wildcards allows writing label selectors like - ["storage.k8s.io/*": "*"]. Note that using ["*" : "*"] matches any key and value but - does not match an empty label set. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the - selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - namespaces: - description: |- - Namespaces is a list of namespaces names. Each name supports wildcard characters - "*" (matches zero or many characters) and "?" (at least one character). 
- items: - type: string - type: array - operations: - description: Operations can contain values ["CREATE, - "UPDATE", "CONNECT", "DELETE"], which are used to - match a specific action. - items: - description: AdmissionOperation can have one of the - values CREATE, UPDATE, CONNECT, DELETE, which are - used to match a specific action. - enum: - - CREATE - - CONNECT - - UPDATE - - DELETE - type: string - type: array - selector: - description: |- - Selector is a label selector. Label keys and values in `matchLabels` support the wildcard - characters `*` (matches zero or many characters) and `?` (matches one character). - Wildcards allows writing label selectors like ["storage.k8s.io/*": "*"]. Note that - using ["*" : "*"] matches any key and value but does not match an empty label set. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the - selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: object - roles: - description: Roles is the list of namespaced role names - for the user. - items: - type: string - type: array - subjects: - description: Subjects is the list of subject names like - users, user groups, and service accounts. - items: - description: |- - Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference, - or a value for non-objects such as user and group names. - properties: - apiGroup: - description: |- - APIGroup holds the API group of the referenced subject. - Defaults to "" for ServiceAccount subjects. - Defaults to "rbac.authorization.k8s.io" for User and Group subjects. - type: string - kind: - description: |- - Kind of object being referenced. Values defined by this API group are "User", "Group", and "ServiceAccount". - If the Authorizer does not recognized the kind value, the Authorizer should report an error. - type: string - name: - description: Name of the object being referenced. - type: string - namespace: - description: |- - Namespace of the referenced object. If the object kind is non-namespace, such as "User" or "Group", and this value is not empty - the Authorizer should report an error. 
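As the descriptions above note, specifying `resources` directly under `match` or `exclude` is deprecated in favor of the `any`/`all` forms. A sketch of the preferred style for exclusions, combining `clusterRoles` and `subjects` filters (the service account is hypothetical):

```yaml
# Fragment of a ClusterPolicy rule: skip requests from trusted principals.
exclude:
  any:                           # either filter exempts the request
  - clusterRoles:
    - cluster-admin              # skip requests made with cluster-admin
  - subjects:
    - kind: ServiceAccount
      name: ci-deployer          # hypothetical CI service account
      namespace: ci
```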
- type: string - required: - - kind - - name - type: object - x-kubernetes-map-type: atomic - type: array - type: object - mutate: - description: Mutation is used to modify matching resources. - properties: - foreach: - description: ForEach applies mutation rules to a list of - sub-elements by creating a context for each entry in the - list and looping over it to apply the specified logic. - items: - description: ForEachMutation applies mutation rules to - a list of sub-elements by creating a context for each - entry in the list and looping over it to apply the specified - logic. - properties: - context: - description: Context defines variables and data sources - that can be used during rule execution. - items: - description: |- - ContextEntry adds variables and data sources to a rule Context. Either a - ConfigMap reference or a APILookup must be provided. - properties: - apiCall: - description: |- - APICall is an HTTP request to the Kubernetes API server, or other JSON web service. - The data returned is stored in the context with the name for the context entry. - properties: - data: - description: Data specifies the POST data - sent to the server. - items: - description: RequestData contains the - HTTP POST data - properties: - key: - description: Key is a unique identifier - for the data value - type: string - value: - description: Value is the data value - x-kubernetes-preserve-unknown-fields: true - required: - - key - - value - type: object - type: array - jmesPath: - description: |- - JMESPath is an optional JSON Match Expression that can be used to - transform the JSON response returned from the server. For example - a JMESPath of "items | length(@)" applied to the API server response - for the URLPath "/apis/apps/v1/deployments" will return the total count - of deployments across all namespaces. - type: string - method: - default: GET - description: Method is the HTTP request - type (GET or POST). - enum: - - GET - - POST - type: string - service: - description: Service is an API call to a - JSON web service - properties: - caBundle: - description: |- - CABundle is a PEM encoded CA bundle which will be used to validate - the server certificate. - type: string - url: - description: |- - URL is the JSON web service URL. A typical form is - `https://{service}.{namespace}:{port}/{path}`. - type: string - required: - - url - type: object - urlPath: - description: |- - URLPath is the URL path to be used in the HTTP GET or POST request to the - Kubernetes API server (e.g. "/api/v1/namespaces" or "/apis/apps/v1/deployments"). - The format required is the same format used by the `kubectl get --raw` command. - See https://kyverno.io/docs/writing-policies/external-data-sources/#variables-from-kubernetes-api-server-calls - for details. - type: string - type: object - configMap: - description: ConfigMap is the ConfigMap reference. - properties: - name: - description: Name is the ConfigMap name. - type: string - namespace: - description: Namespace is the ConfigMap - namespace. - type: string - required: - - name - type: object - imageRegistry: - description: |- - ImageRegistry defines requests to an OCI/Docker V2 registry to fetch image - details. - properties: - imageRegistryCredentials: - description: ImageRegistryCredentials provides - credentials that will be used for authentication - with registry - properties: - allowInsecureRegistry: - description: AllowInsecureRegistry allows - insecure access to a registry. 
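A `ContextEntry`, as defined above, pulls data into named variables before a rule evaluates: an `apiCall` against the Kubernetes API (optionally reshaped with JMESPath), a `configMap` lookup, an `imageRegistry` query, or an inline `variable`. A minimal sketch with hypothetical variable and ConfigMap names:

```yaml
# Fragment of a ClusterPolicy rule.
context:
- name: deploymentCount              # hypothetical variable name
  apiCall:
    urlPath: "/apis/apps/v1/deployments"
    jmesPath: "items | length(@)"    # total deployments, per the field docs
- name: policyFlags
  configMap:
    name: policy-flags               # hypothetical ConfigMap
    namespace: kyverno
- name: targetNamespace
  variable:
    jmesPath: "request.object.metadata.namespace"
    default: "default"               # used if the expression evaluates to nil
```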
- type: boolean - providers: - description: |- - Providers specifies a list of OCI Registry names, whose authentication providers are provided. - It can be of one of these values: default,google,azure,amazon,github. - items: - description: ImageRegistryCredentialsProvidersType - provides the list of credential - providers required. - enum: - - default - - amazon - - azure - - google - - github - type: string - type: array - secrets: - description: |- - Secrets specifies a list of secrets that are provided for credentials. - Secrets must live in the Kyverno namespace. - items: - type: string - type: array - type: object - jmesPath: - description: |- - JMESPath is an optional JSON Match Expression that can be used to - transform the ImageData struct returned as a result of processing - the image reference. - type: string - reference: - description: |- - Reference is image reference to a container image in the registry. - Example: ghcr.io/kyverno/kyverno:latest - type: string - required: - - reference - type: object - name: - description: Name is the variable name. - type: string - variable: - description: Variable defines an arbitrary JMESPath - context variable that can be defined inline. - properties: - default: - description: |- - Default is an optional arbitrary JSON object that the variable may take if the JMESPath - expression evaluates to nil - x-kubernetes-preserve-unknown-fields: true - jmesPath: - description: |- - JMESPath is an optional JMESPath Expression that can be used to - transform the variable. - type: string - value: - description: Value is any arbitrary JSON - object representable in YAML or JSON form. - x-kubernetes-preserve-unknown-fields: true - type: object - type: object - type: array - foreach: - description: Foreach declares a nested foreach iterator - x-kubernetes-preserve-unknown-fields: true - list: - description: |- - List specifies a JMESPath expression that results in one or more elements - to which the validation logic is applied. - type: string - order: - description: |- - Order defines the iteration order on the list. - Can be Ascending to iterate from first to last element or Descending to iterate in from last to first element. - enum: - - Ascending - - Descending - type: string - patchStrategicMerge: - description: |- - PatchStrategicMerge is a strategic merge patch used to modify resources. - See https://kubernetes.io/docs/tasks/manage-kubernetes-objects/update-api-object-kubectl-patch/ - and https://kubectl.docs.kubernetes.io/references/kustomize/patchesstrategicmerge/. - x-kubernetes-preserve-unknown-fields: true - patchesJson6902: - description: |- - PatchesJSON6902 is a list of RFC 6902 JSON Patch declarations used to modify resources. - See https://tools.ietf.org/html/rfc6902 and https://kubectl.docs.kubernetes.io/references/kustomize/patchesjson6902/. - type: string - preconditions: - description: |- - AnyAllConditions are used to determine if a policy rule should be applied by evaluating a - set of conditions. The declaration can contain nested `any` or `all` statements. - See: https://kyverno.io/docs/writing-policies/preconditions/ - properties: - all: - description: |- - AllConditions enable variable-based conditional rule execution. This is useful for - finer control of when an rule is applied. A condition can reference object data - using JMESPath notation. - Here, all of the conditions need to pass - items: - description: Condition defines variable-based - conditional criteria for rule execution. 
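The `foreach` mutation described here iterates a JMESPath `list`, exposing each entry as `{{element}}`; `order: Descending` matters mainly when patches remove elements. A minimal sketch that hardens every container in a Pod, following the documented anchor pattern:

```yaml
# Fragment of a ClusterPolicy rule matching Pods.
mutate:
  foreach:
  - list: "request.object.spec.containers"
    patchStrategicMerge:
      spec:
        containers:
        - (name): "{{ element.name }}"   # anchor: patch the current element
          securityContext:
            allowPrivilegeEscalation: false
```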
- properties: - key: - description: Key is the context entry (using - JMESPath) for conditional rule evaluation. - x-kubernetes-preserve-unknown-fields: true - message: - description: Message is an optional display - message - type: string - operator: - description: |- - Operator is the conditional operation to perform. Valid operators are: - Equals, NotEquals, In, AnyIn, AllIn, NotIn, AnyNotIn, AllNotIn, GreaterThanOrEquals, - GreaterThan, LessThanOrEquals, LessThan, DurationGreaterThanOrEquals, DurationGreaterThan, - DurationLessThanOrEquals, DurationLessThan - enum: - - Equals - - NotEquals - - In - - AnyIn - - AllIn - - NotIn - - AnyNotIn - - AllNotIn - - GreaterThanOrEquals - - GreaterThan - - LessThanOrEquals - - LessThan - - DurationGreaterThanOrEquals - - DurationGreaterThan - - DurationLessThanOrEquals - - DurationLessThan - type: string - value: - description: |- - Value is the conditional value, or set of values. The values can be fixed set - or can be variables declared using JMESPath. - x-kubernetes-preserve-unknown-fields: true - type: object - type: array - any: - description: |- - AnyConditions enable variable-based conditional rule execution. This is useful for - finer control of when an rule is applied. A condition can reference object data - using JMESPath notation. - Here, at least one of the conditions need to pass - items: - description: Condition defines variable-based - conditional criteria for rule execution. - properties: - key: - description: Key is the context entry (using - JMESPath) for conditional rule evaluation. - x-kubernetes-preserve-unknown-fields: true - message: - description: Message is an optional display - message - type: string - operator: - description: |- - Operator is the conditional operation to perform. Valid operators are: - Equals, NotEquals, In, AnyIn, AllIn, NotIn, AnyNotIn, AllNotIn, GreaterThanOrEquals, - GreaterThan, LessThanOrEquals, LessThan, DurationGreaterThanOrEquals, DurationGreaterThan, - DurationLessThanOrEquals, DurationLessThan - enum: - - Equals - - NotEquals - - In - - AnyIn - - AllIn - - NotIn - - AnyNotIn - - AllNotIn - - GreaterThanOrEquals - - GreaterThan - - LessThanOrEquals - - LessThan - - DurationGreaterThanOrEquals - - DurationGreaterThan - - DurationLessThanOrEquals - - DurationLessThan - type: string - value: - description: |- - Value is the conditional value, or set of values. The values can be fixed set - or can be variables declared using JMESPath. - x-kubernetes-preserve-unknown-fields: true - type: object - type: array - type: object - x-kubernetes-preserve-unknown-fields: true - type: object - type: array - patchStrategicMerge: - description: |- - PatchStrategicMerge is a strategic merge patch used to modify resources. - See https://kubernetes.io/docs/tasks/manage-kubernetes-objects/update-api-object-kubectl-patch/ - and https://kubectl.docs.kubernetes.io/references/kustomize/patchesstrategicmerge/. - x-kubernetes-preserve-unknown-fields: true - patchesJson6902: - description: |- - PatchesJSON6902 is a list of RFC 6902 JSON Patch declarations used to modify resources. - See https://tools.ietf.org/html/rfc6902 and https://kubectl.docs.kubernetes.io/references/kustomize/patchesjson6902/. - type: string - targets: - description: Targets defines the target resources to be - mutated. - items: - description: TargetResourceSpec defines targets for mutating - existing resources. - properties: - apiVersion: - description: APIVersion specifies resource apiVersion. 
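The `Condition` shape above (`key`, `operator`, `value`) is how preconditions gate a rule: everything under `all` must pass, and at least one entry under `any` must pass. A sketch, assuming a hypothetical `env` label:

```yaml
# Fragment of a ClusterPolicy rule.
preconditions:
  all:
  - key: "{{ request.operation }}"
    operator: AnyIn                 # passes if the key matches any value
    value:
    - CREATE
    - UPDATE
  any:
  - key: "{{ request.object.metadata.labels.env || '' }}"
    operator: Equals
    value: "production"             # hypothetical label value
```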
- type: string - context: - description: Context defines variables and data sources - that can be used during rule execution. - items: - description: |- - ContextEntry adds variables and data sources to a rule Context. Either a - ConfigMap reference or a APILookup must be provided. - properties: - apiCall: - description: |- - APICall is an HTTP request to the Kubernetes API server, or other JSON web service. - The data returned is stored in the context with the name for the context entry. - properties: - data: - description: Data specifies the POST data - sent to the server. - items: - description: RequestData contains the - HTTP POST data - properties: - key: - description: Key is a unique identifier - for the data value - type: string - value: - description: Value is the data value - x-kubernetes-preserve-unknown-fields: true - required: - - key - - value - type: object - type: array - jmesPath: - description: |- - JMESPath is an optional JSON Match Expression that can be used to - transform the JSON response returned from the server. For example - a JMESPath of "items | length(@)" applied to the API server response - for the URLPath "/apis/apps/v1/deployments" will return the total count - of deployments across all namespaces. - type: string - method: - default: GET - description: Method is the HTTP request - type (GET or POST). - enum: - - GET - - POST - type: string - service: - description: Service is an API call to a - JSON web service - properties: - caBundle: - description: |- - CABundle is a PEM encoded CA bundle which will be used to validate - the server certificate. - type: string - url: - description: |- - URL is the JSON web service URL. A typical form is - `https://{service}.{namespace}:{port}/{path}`. - type: string - required: - - url - type: object - urlPath: - description: |- - URLPath is the URL path to be used in the HTTP GET or POST request to the - Kubernetes API server (e.g. "/api/v1/namespaces" or "/apis/apps/v1/deployments"). - The format required is the same format used by the `kubectl get --raw` command. - See https://kyverno.io/docs/writing-policies/external-data-sources/#variables-from-kubernetes-api-server-calls - for details. - type: string - type: object - configMap: - description: ConfigMap is the ConfigMap reference. - properties: - name: - description: Name is the ConfigMap name. - type: string - namespace: - description: Namespace is the ConfigMap - namespace. - type: string - required: - - name - type: object - imageRegistry: - description: |- - ImageRegistry defines requests to an OCI/Docker V2 registry to fetch image - details. - properties: - imageRegistryCredentials: - description: ImageRegistryCredentials provides - credentials that will be used for authentication - with registry - properties: - allowInsecureRegistry: - description: AllowInsecureRegistry allows - insecure access to a registry. - type: boolean - providers: - description: |- - Providers specifies a list of OCI Registry names, whose authentication providers are provided. - It can be of one of these values: default,google,azure,amazon,github. - items: - description: ImageRegistryCredentialsProvidersType - provides the list of credential - providers required. - enum: - - default - - amazon - - azure - - google - - github - type: string - type: array - secrets: - description: |- - Secrets specifies a list of secrets that are provided for credentials. - Secrets must live in the Kyverno namespace. 
- items: - type: string - type: array - type: object - jmesPath: - description: |- - JMESPath is an optional JSON Match Expression that can be used to - transform the ImageData struct returned as a result of processing - the image reference. - type: string - reference: - description: |- - Reference is image reference to a container image in the registry. - Example: ghcr.io/kyverno/kyverno:latest - type: string - required: - - reference - type: object - name: - description: Name is the variable name. - type: string - variable: - description: Variable defines an arbitrary JMESPath - context variable that can be defined inline. - properties: - default: - description: |- - Default is an optional arbitrary JSON object that the variable may take if the JMESPath - expression evaluates to nil - x-kubernetes-preserve-unknown-fields: true - jmesPath: - description: |- - JMESPath is an optional JMESPath Expression that can be used to - transform the variable. - type: string - value: - description: Value is any arbitrary JSON - object representable in YAML or JSON form. - x-kubernetes-preserve-unknown-fields: true - type: object - type: object - type: array - kind: - description: Kind specifies resource kind. - type: string - name: - description: Name specifies the resource name. - type: string - namespace: - description: Namespace specifies resource namespace. - type: string - preconditions: - description: |- - Preconditions are used to determine if a policy rule should be applied by evaluating a - set of conditions. The declaration can contain nested `any` or `all` statements. A direct list - of conditions (without `any` or `all` statements is supported for backwards compatibility but - will be deprecated in the next major release. - See: https://kyverno.io/docs/writing-policies/preconditions/ - x-kubernetes-preserve-unknown-fields: true - uid: - description: UID specifies the resource uid. - type: string - type: object - type: array - type: object - name: - description: Name is a label to identify the rule, It must be - unique within the policy. - maxLength: 63 - type: string - preconditions: - description: |- - Preconditions are used to determine if a policy rule should be applied by evaluating a - set of conditions. The declaration can contain nested `any` or `all` statements. A direct list - of conditions (without `any` or `all` statements is supported for backwards compatibility but - will be deprecated in the next major release. - See: https://kyverno.io/docs/writing-policies/preconditions/ - x-kubernetes-preserve-unknown-fields: true - skipBackgroundRequests: - default: true - description: |- - SkipBackgroundRequests bypasses admission requests that are sent by the background controller. - The default value is set to "true", it must be set to "false" to apply - generate and mutateExisting rules to those requests. - type: boolean - validate: - description: Validation is used to validate matching resources. - properties: - anyPattern: - description: |- - AnyPattern specifies list of validation patterns. At least one of the patterns - must be satisfied for the validation rule to succeed. - x-kubernetes-preserve-unknown-fields: true - cel: - description: CEL allows validation checks using the Common - Expression Language (https://kubernetes.io/docs/reference/using-api/cel/). - properties: - auditAnnotations: - description: AuditAnnotations contains CEL expressions - which are used to produce audit annotations for the - audit event of the API request. 
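`mutate.targets`, defined above, patches resources *other than* the one that triggered the rule (the "mutate existing" pattern); note `skipBackgroundRequests` defaults to `true`, so generate/mutate-existing rules ignore the background controller's own requests unless you flip it. A minimal sketch with a hypothetical target ConfigMap:

```yaml
# Fragment of a ClusterPolicy rule: when the trigger resource changes,
# patch an existing ConfigMap in the same namespace.
mutate:
  targets:
  - apiVersion: v1
    kind: ConfigMap
    name: app-config               # hypothetical existing ConfigMap
    namespace: "{{ request.object.metadata.namespace }}"
  patchStrategicMerge:
    data:
      lastSyncedBy: "{{ request.object.metadata.name }}"
```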
- items: - description: AuditAnnotation describes how to produce - an audit annotation for an API request. - properties: - key: - description: |- - key specifies the audit annotation key. The audit annotation keys of - a ValidatingAdmissionPolicy must be unique. The key must be a qualified - name ([A-Za-z0-9][-A-Za-z0-9_.]*) no more than 63 bytes in length. - - The key is combined with the resource name of the - ValidatingAdmissionPolicy to construct an audit annotation key: - "{ValidatingAdmissionPolicy name}/{key}". - - If an admission webhook uses the same resource name as this ValidatingAdmissionPolicy - and the same audit annotation key, the annotation key will be identical. - In this case, the first annotation written with the key will be included - in the audit event and all subsequent annotations with the same key - will be discarded. - - Required. - type: string - valueExpression: - description: |- - valueExpression represents the expression which is evaluated by CEL to - produce an audit annotation value. The expression must evaluate to either - a string or null value. If the expression evaluates to a string, the - audit annotation is included with the string value. If the expression - evaluates to null or empty string the audit annotation will be omitted. - The valueExpression may be no longer than 5kb in length. - If the result of the valueExpression is more than 10kb in length, it - will be truncated to 10kb. - - If multiple ValidatingAdmissionPolicyBinding resources match an - API request, then the valueExpression will be evaluated for - each binding. All unique values produced by the valueExpressions - will be joined together in a comma-separated list. - - Required. - type: string - required: - - key - - valueExpression - type: object - type: array - expressions: - description: Expressions is a list of CELExpression - types. - items: - description: Validation specifies the CEL expression - which is used to apply the validation. - properties: - expression: - description: "Expression represents the expression - which will be evaluated by CEL.\nref: https://github.com/google/cel-spec\nCEL - expressions have access to the contents of the - API request/response, organized into CEL variables - as well as some other useful variables:\n\n- - 'object' - The object from the incoming request. - The value is null for DELETE requests.\n- 'oldObject' - - The existing object. The value is null for - CREATE requests.\n- 'request' - Attributes of - the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)).\n- - 'params' - Parameter resource referred to by - the policy binding being evaluated. Only populated - if the policy has a ParamKind.\n- 'namespaceObject' - - The namespace object that the incoming object - belongs to. The value is null for cluster-scoped - resources.\n- 'variables' - Map of composited - variables, from its name to its lazily evaluated - value.\n For example, a variable named 'foo' - can be accessed as 'variables.foo'.\n- 'authorizer' - - A CEL Authorizer. May be used to perform authorization - checks for the principal (user or service account) - of the request.\n See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz\n- - 'authorizer.requestResource' - A CEL ResourceCheck - constructed from the 'authorizer' and configured - with the\n request resource.\n\nThe `apiVersion`, - `kind`, `metadata.name` and `metadata.generateName` - are always accessible from the root of the\nobject. 
- No other metadata properties are accessible.\n\nOnly - property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` - are accessible.\nAccessible property names are - escaped according to the following rules when - accessed in the expression:\n- '__' escapes - to '__underscores__'\n- '.' escapes to '__dot__'\n- - '-' escapes to '__dash__'\n- '/' escapes to - '__slash__'\n- Property names that exactly match - a CEL RESERVED keyword escape to '__{keyword}__'. - The keywords are:\n\t \"true\", \"false\", - \"null\", \"in\", \"as\", \"break\", \"const\", - \"continue\", \"else\", \"for\", \"function\", - \"if\",\n\t \"import\", \"let\", \"loop\", - \"package\", \"namespace\", \"return\".\nExamples:\n - \ - Expression accessing a property named \"namespace\": - {\"Expression\": \"object.__namespace__ > 0\"}\n - \ - Expression accessing a property named \"x-prop\": - {\"Expression\": \"object.x__dash__prop > 0\"}\n - \ - Expression accessing a property named \"redact__d\": - {\"Expression\": \"object.redact__underscores__d - > 0\"}\n\nEquality on arrays with list type - of 'set' or 'map' ignores element order, i.e. - [1, 2] == [2, 1].\nConcatenation on arrays with - x-kubernetes-list-type use the semantics of - the list type:\n - 'set': `X + Y` performs - a union where the array positions of all elements - in `X` are preserved and\n non-intersecting - elements in `Y` are appended, retaining their - partial order.\n - 'map': `X + Y` performs - a merge where the array positions of all keys - in `X` are preserved but the values\n are - overwritten by values in `Y` when the key sets - of `X` and `Y` intersect. Elements in `Y` with\n - \ non-intersecting keys are appended, retaining - their partial order.\nRequired." - type: string - message: - description: |- - Message represents the message displayed when validation fails. The message is required if the Expression contains - line breaks. The message must not contain line breaks. - If unset, the message is "failed rule: {Rule}". - e.g. "must be a URL with the host matching spec.host" - If the Expression contains line breaks. Message is required. - The message must not contain line breaks. - If unset, the message is "failed Expression: {Expression}". - type: string - messageExpression: - description: |- - messageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails. - Since messageExpression is used as a failure message, it must evaluate to a string. - If both message and messageExpression are present on a validation, then messageExpression will be used if validation fails. - If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced - as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string - that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and - the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged. - messageExpression has access to all the same variables as the `expression` except for 'authorizer' and 'authorizer.requestResource'. - Example: - "object.x must be less than max ("+string(params.max)+")" - type: string - reason: - description: |- - Reason represents a machine-readable description of why this validation failed. 
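The `validate.cel` block embeds ValidatingAdmissionPolicy-style CEL expressions, with `message` or `messageExpression` shaping the failure response. A minimal sketch (the replica limit and label key are hypothetical):

```yaml
# Fragment of a ClusterPolicy rule matching Deployments.
validate:
  cel:
    expressions:
    - expression: "!has(object.spec.replicas) || object.spec.replicas <= 5"
      message: "spec.replicas must be 5 or fewer."
    - expression: "has(object.metadata.labels) && 'owner' in object.metadata.labels"
      messageExpression: "'missing owner label on ' + object.metadata.name"
```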
- If this is the first validation in the list to fail, this reason, as well as the - corresponding HTTP response code, are used in the - HTTP response to the client. - The currently supported reasons are: "Unauthorized", "Forbidden", "Invalid", "RequestEntityTooLarge". - If not set, StatusReasonInvalid is used in the response to the client. - type: string - required: - - expression - type: object - type: array - paramKind: - description: ParamKind is a tuple of Group Kind and - Version. - properties: - apiVersion: - description: |- - APIVersion is the API group version the resources belong to. - In format of "group/version". - Required. - type: string - kind: - description: |- - Kind is the API kind the resources belong to. - Required. - type: string - type: object - x-kubernetes-map-type: atomic - paramRef: - description: ParamRef references a parameter resource. - properties: - name: - description: |- - `name` is the name of the resource being referenced. - - `name` and `selector` are mutually exclusive properties. If one is set, - the other must be unset. - type: string - namespace: - description: |- - namespace is the namespace of the referenced resource. Allows limiting - the search for params to a specific namespace. Applies to both `name` and - `selector` fields. - - A per-namespace parameter may be used by specifying a namespace-scoped - `paramKind` in the policy and leaving this field empty. - - - If `paramKind` is cluster-scoped, this field MUST be unset. Setting this - field results in a configuration error. - - - If `paramKind` is namespace-scoped, the namespace of the object being - evaluated for admission will be used when this field is left unset. Take - care that if this is left empty the binding must not match any cluster-scoped - resources, which will result in an error. - type: string - parameterNotFoundAction: - description: |- - `parameterNotFoundAction` controls the behavior of the binding when the resource - exists, and name or selector is valid, but there are no parameters - matched by the binding. If the value is set to `Allow`, then no - matched parameters will be treated as successful validation by the binding. - If set to `Deny`, then no matched parameters will be subject to the - `failurePolicy` of the policy. - - Allowed values are `Allow` or `Deny` - Default to `Deny` - type: string - selector: - description: |- - selector can be used to match multiple param objects based on their labels. - Supply selector: {} to match all resources of the ParamKind. - - If multiple params are found, they are all evaluated with the policy expressions - and the results are ANDed together. - - One of `name` or `selector` must be set, but `name` and `selector` are - mutually exclusive properties. If one is set, the other must be unset. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are - ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that - the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. 
If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: object - x-kubernetes-map-type: atomic - variables: - description: |- - Variables contain definitions of variables that can be used in composition of other expressions. - Each variable is defined as a named CEL expression. - The variables defined here will be available under `variables` in other expressions of the policy. - items: - description: Variable is the definition of a variable - that is used for composition. - properties: - expression: - description: |- - Expression is the expression that will be evaluated as the value of the variable. - The CEL expression has access to the same identifiers as the CEL expressions in Validation. - type: string - name: - description: |- - Name is the name of the variable. The name must be a valid CEL identifier and unique among all variables. - The variable can be accessed in other expressions through `variables` - For example, if name is "foo", the variable will be available as `variables.foo` - type: string - required: - - expression - - name - type: object - type: array - type: object - deny: - description: Deny defines conditions used to pass or fail - a validation rule. - properties: - conditions: - description: |- - Multiple conditions can be declared under an `any` or `all` statement. A direct list - of conditions (without `any` or `all` statements) is also supported for backwards compatibility - but will be deprecated in the next major release. - See: https://kyverno.io/docs/writing-policies/validate/#deny-rules - x-kubernetes-preserve-unknown-fields: true - type: object - foreach: - description: ForEach applies validate rules to a list of - sub-elements by creating a context for each entry in the - list and looping over it to apply the specified logic. - items: - description: ForEachValidation applies validate rules - to a list of sub-elements by creating a context for - each entry in the list and looping over it to apply - the specified logic. - properties: - anyPattern: - description: |- - AnyPattern specifies list of validation patterns. At least one of the patterns - must be satisfied for the validation rule to succeed. - x-kubernetes-preserve-unknown-fields: true - context: - description: Context defines variables and data sources - that can be used during rule execution. - items: - description: |- - ContextEntry adds variables and data sources to a rule Context. Either a - ConfigMap reference or a APILookup must be provided. - properties: - apiCall: - description: |- - APICall is an HTTP request to the Kubernetes API server, or other JSON web service. - The data returned is stored in the context with the name for the context entry. - properties: - data: - description: Data specifies the POST data - sent to the server. 
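`deny` inverts the usual pattern logic: the request is rejected when its conditions evaluate to true, which suits "block unless" rules. A sketch protecting deletes, with a hypothetical pipeline service account:

```yaml
# Fragment of a ClusterPolicy rule.
validate:
  message: "Only the release pipeline may delete protected resources."
  deny:
    conditions:
      all:                          # deny when every condition holds
      - key: "{{ request.operation }}"
        operator: Equals
        value: DELETE
      - key: "{{ request.userInfo.username }}"
        operator: NotEquals
        value: "system:serviceaccount:ci:release"   # hypothetical SA
```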
- items: - description: RequestData contains the - HTTP POST data - properties: - key: - description: Key is a unique identifier - for the data value - type: string - value: - description: Value is the data value - x-kubernetes-preserve-unknown-fields: true - required: - - key - - value - type: object - type: array - jmesPath: - description: |- - JMESPath is an optional JSON Match Expression that can be used to - transform the JSON response returned from the server. For example - a JMESPath of "items | length(@)" applied to the API server response - for the URLPath "/apis/apps/v1/deployments" will return the total count - of deployments across all namespaces. - type: string - method: - default: GET - description: Method is the HTTP request - type (GET or POST). - enum: - - GET - - POST - type: string - service: - description: Service is an API call to a - JSON web service - properties: - caBundle: - description: |- - CABundle is a PEM encoded CA bundle which will be used to validate - the server certificate. - type: string - url: - description: |- - URL is the JSON web service URL. A typical form is - `https://{service}.{namespace}:{port}/{path}`. - type: string - required: - - url - type: object - urlPath: - description: |- - URLPath is the URL path to be used in the HTTP GET or POST request to the - Kubernetes API server (e.g. "/api/v1/namespaces" or "/apis/apps/v1/deployments"). - The format required is the same format used by the `kubectl get --raw` command. - See https://kyverno.io/docs/writing-policies/external-data-sources/#variables-from-kubernetes-api-server-calls - for details. - type: string - type: object - configMap: - description: ConfigMap is the ConfigMap reference. - properties: - name: - description: Name is the ConfigMap name. - type: string - namespace: - description: Namespace is the ConfigMap - namespace. - type: string - required: - - name - type: object - imageRegistry: - description: |- - ImageRegistry defines requests to an OCI/Docker V2 registry to fetch image - details. - properties: - imageRegistryCredentials: - description: ImageRegistryCredentials provides - credentials that will be used for authentication - with registry - properties: - allowInsecureRegistry: - description: AllowInsecureRegistry allows - insecure access to a registry. - type: boolean - providers: - description: |- - Providers specifies a list of OCI Registry names, whose authentication providers are provided. - It can be of one of these values: default,google,azure,amazon,github. - items: - description: ImageRegistryCredentialsProvidersType - provides the list of credential - providers required. - enum: - - default - - amazon - - azure - - google - - github - type: string - type: array - secrets: - description: |- - Secrets specifies a list of secrets that are provided for credentials. - Secrets must live in the Kyverno namespace. - items: - type: string - type: array - type: object - jmesPath: - description: |- - JMESPath is an optional JSON Match Expression that can be used to - transform the ImageData struct returned as a result of processing - the image reference. - type: string - reference: - description: |- - Reference is image reference to a container image in the registry. - Example: ghcr.io/kyverno/kyverno:latest - type: string - required: - - reference - type: object - name: - description: Name is the variable name. - type: string - variable: - description: Variable defines an arbitrary JMESPath - context variable that can be defined inline. 
- properties: - default: - description: |- - Default is an optional arbitrary JSON object that the variable may take if the JMESPath - expression evaluates to nil - x-kubernetes-preserve-unknown-fields: true - jmesPath: - description: |- - JMESPath is an optional JMESPath Expression that can be used to - transform the variable. - type: string - value: - description: Value is any arbitrary JSON - object representable in YAML or JSON form. - x-kubernetes-preserve-unknown-fields: true - type: object - type: object - type: array - deny: - description: Deny defines conditions used to pass - or fail a validation rule. - properties: - conditions: - description: |- - Multiple conditions can be declared under an `any` or `all` statement. A direct list - of conditions (without `any` or `all` statements) is also supported for backwards compatibility - but will be deprecated in the next major release. - See: https://kyverno.io/docs/writing-policies/validate/#deny-rules - x-kubernetes-preserve-unknown-fields: true - type: object - elementScope: - description: |- - ElementScope specifies whether to use the current list element as the scope for validation. Defaults to "true" if not specified. - When set to "false", "request.object" is used as the validation scope within the foreach - block to allow referencing other elements in the subtree. - type: boolean - foreach: - description: Foreach declares a nested foreach iterator - x-kubernetes-preserve-unknown-fields: true - list: - description: |- - List specifies a JMESPath expression that results in one or more elements - to which the validation logic is applied. - type: string - pattern: - description: Pattern specifies an overlay-style pattern - used to check resources. - x-kubernetes-preserve-unknown-fields: true - preconditions: - description: |- - AnyAllConditions are used to determine if a policy rule should be applied by evaluating a - set of conditions. The declaration can contain nested `any` or `all` statements. - See: https://kyverno.io/docs/writing-policies/preconditions/ - properties: - all: - description: |- - AllConditions enable variable-based conditional rule execution. This is useful for - finer control of when an rule is applied. A condition can reference object data - using JMESPath notation. - Here, all of the conditions need to pass - items: - description: Condition defines variable-based - conditional criteria for rule execution. - properties: - key: - description: Key is the context entry (using - JMESPath) for conditional rule evaluation. - x-kubernetes-preserve-unknown-fields: true - message: - description: Message is an optional display - message - type: string - operator: - description: |- - Operator is the conditional operation to perform. Valid operators are: - Equals, NotEquals, In, AnyIn, AllIn, NotIn, AnyNotIn, AllNotIn, GreaterThanOrEquals, - GreaterThan, LessThanOrEquals, LessThan, DurationGreaterThanOrEquals, DurationGreaterThan, - DurationLessThanOrEquals, DurationLessThan - enum: - - Equals - - NotEquals - - In - - AnyIn - - AllIn - - NotIn - - AnyNotIn - - AllNotIn - - GreaterThanOrEquals - - GreaterThan - - LessThanOrEquals - - LessThan - - DurationGreaterThanOrEquals - - DurationGreaterThan - - DurationLessThanOrEquals - - DurationLessThan - type: string - value: - description: |- - Value is the conditional value, or set of values. The values can be fixed set - or can be variables declared using JMESPath. 
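`foreach` validation applies a `pattern` (or `deny`/`anyPattern`) to each element of the JMESPath `list`; by default each element is the pattern's root, and `elementScope: false` keeps `request.object` as the root for cross-element checks. A minimal sketch:

```yaml
# Fragment of a ClusterPolicy rule matching Pods.
validate:
  message: "Every container must set a memory limit."
  foreach:
  - list: "request.object.spec.containers"
    pattern:                        # evaluated against each container
      resources:
        limits:
          memory: "?*"              # must be present and non-empty
```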
- x-kubernetes-preserve-unknown-fields: true - type: object - type: array - any: - description: |- - AnyConditions enable variable-based conditional rule execution. This is useful for - finer control of when an rule is applied. A condition can reference object data - using JMESPath notation. - Here, at least one of the conditions need to pass - items: - description: Condition defines variable-based - conditional criteria for rule execution. - properties: - key: - description: Key is the context entry (using - JMESPath) for conditional rule evaluation. - x-kubernetes-preserve-unknown-fields: true - message: - description: Message is an optional display - message - type: string - operator: - description: |- - Operator is the conditional operation to perform. Valid operators are: - Equals, NotEquals, In, AnyIn, AllIn, NotIn, AnyNotIn, AllNotIn, GreaterThanOrEquals, - GreaterThan, LessThanOrEquals, LessThan, DurationGreaterThanOrEquals, DurationGreaterThan, - DurationLessThanOrEquals, DurationLessThan - enum: - - Equals - - NotEquals - - In - - AnyIn - - AllIn - - NotIn - - AnyNotIn - - AllNotIn - - GreaterThanOrEquals - - GreaterThan - - LessThanOrEquals - - LessThan - - DurationGreaterThanOrEquals - - DurationGreaterThan - - DurationLessThanOrEquals - - DurationLessThan - type: string - value: - description: |- - Value is the conditional value, or set of values. The values can be fixed set - or can be variables declared using JMESPath. - x-kubernetes-preserve-unknown-fields: true - type: object - type: array - type: object - x-kubernetes-preserve-unknown-fields: true - type: object - type: array - manifests: - description: Manifest specifies conditions for manifest - verification - properties: - annotationDomain: - description: AnnotationDomain is custom domain of annotation - for message and signature. Default is "cosign.sigstore.dev". - type: string - attestors: - description: Attestors specified the required attestors - (i.e. authorities) - items: - properties: - count: - description: |- - Count specifies the required number of entries that must match. If the count is null, all entries must match - (a logical AND). If the count is 1, at least one entry must match (a logical OR). If the count contains a - value N, then N must be less than or equal to the size of entries, and at least N entries must match. - minimum: 1 - type: integer - entries: - description: |- - Entries contains the available attestors. An attestor can be a static key, - attributes for keyless verification, or a nested attestor declaration. - items: - properties: - annotations: - additionalProperties: - type: string - description: |- - Annotations are used for image verification. - Every specified key-value pair must exist and match in the verified payload. - The payload may contain other key-value pairs. - type: object - attestor: - description: Attestor is a nested set of - Attestor used to specify a more complex - set of match authorities. - x-kubernetes-preserve-unknown-fields: true - certificates: - description: Certificates specifies one - or more certificates. - properties: - cert: - description: Cert is an optional PEM-encoded - public certificate. - type: string - certChain: - description: CertChain is an optional - PEM encoded set of certificates used - to verify. - type: string - ctlog: - description: |- - CTLog (certificate timestamp log) provides a configuration for validation of Signed Certificate - Timestamps (SCTs). If the value is unset, the default behavior by Cosign is used. 
- properties: - ignoreSCT: - description: |- - IgnoreSCT defines whether to use the Signed Certificate Timestamp (SCT) log to check for a certificate - timestamp. Default is false. Set to true if this was opted out during signing. - type: boolean - pubkey: - description: PubKey, if set, is - used to validate SCTs against - a custom source. - type: string - type: object - rekor: - description: |- - Rekor provides configuration for the Rekor transparency log service. If an empty object - is provided the public instance of Rekor (https://rekor.sigstore.dev) is used. - properties: - ignoreTlog: - description: IgnoreTlog skips transparency - log verification. - type: boolean - pubkey: - description: |- - RekorPubKey is an optional PEM-encoded public key to use for a custom Rekor. - If set, this will be used to validate transparency log signatures from a custom Rekor. - type: string - url: - description: URL is the address - of the transparency log. Defaults - to the public Rekor log instance - https://rekor.sigstore.dev. - type: string - required: - - url - type: object - type: object - keyless: - description: |- - Keyless is a set of attribute used to verify a Sigstore keyless attestor. - See https://github.com/sigstore/cosign/blob/main/KEYLESS.md. - properties: - additionalExtensions: - additionalProperties: - type: string - description: AdditionalExtensions are - certificate-extensions used for keyless - signing. - type: object - ctlog: - description: |- - CTLog (certificate timestamp log) provides a configuration for validation of Signed Certificate - Timestamps (SCTs). If the value is unset, the default behavior by Cosign is used. - properties: - ignoreSCT: - description: |- - IgnoreSCT defines whether to use the Signed Certificate Timestamp (SCT) log to check for a certificate - timestamp. Default is false. Set to true if this was opted out during signing. - type: boolean - pubkey: - description: PubKey, if set, is - used to validate SCTs against - a custom source. - type: string - type: object - issuer: - description: Issuer is the certificate - issuer used for keyless signing. - type: string - rekor: - description: |- - Rekor provides configuration for the Rekor transparency log service. If an empty object - is provided the public instance of Rekor (https://rekor.sigstore.dev) is used. - properties: - ignoreTlog: - description: IgnoreTlog skips transparency - log verification. - type: boolean - pubkey: - description: |- - RekorPubKey is an optional PEM-encoded public key to use for a custom Rekor. - If set, this will be used to validate transparency log signatures from a custom Rekor. - type: string - url: - description: URL is the address - of the transparency log. Defaults - to the public Rekor log instance - https://rekor.sigstore.dev. - type: string - required: - - url - type: object - roots: - description: |- - Roots is an optional set of PEM encoded trusted root certificates. - If not provided, the system roots are used. - type: string - subject: - description: Subject is the verified - identity used for keyless signing, - for example the email address. - type: string - type: object - keys: - description: Keys specifies one or more - public keys. - properties: - ctlog: - description: |- - CTLog (certificate timestamp log) provides a configuration for validation of Signed Certificate - Timestamps (SCTs). If the value is unset, the default behavior by Cosign is used. 
- properties: - ignoreSCT: - description: |- - IgnoreSCT defines whether to use the Signed Certificate Timestamp (SCT) log to check for a certificate - timestamp. Default is false. Set to true if this was opted out during signing. - type: boolean - pubkey: - description: PubKey, if set, is - used to validate SCTs against - a custom source. - type: string - type: object - kms: - description: |- - KMS provides the URI to the public key stored in a Key Management System. See: - https://github.com/sigstore/cosign/blob/main/KMS.md - type: string - publicKeys: - description: |- - Keys is a set of X.509 public keys used to verify image signatures. The keys can be directly - specified or can be a variable reference to a key specified in a ConfigMap (see - https://kyverno.io/docs/writing-policies/variables/), or reference a standard Kubernetes Secret - elsewhere in the cluster by specifying it in the format "k8s:///". - The named Secret must specify a key `cosign.pub` containing the public key used for - verification, (see https://github.com/sigstore/cosign/blob/main/KMS.md#kubernetes-secret). - When multiple keys are specified each key is processed as a separate staticKey entry - (.attestors[*].entries.keys) within the set of attestors and the count is applied across the keys. - type: string - rekor: - description: |- - Rekor provides configuration for the Rekor transparency log service. If an empty object - is provided the public instance of Rekor (https://rekor.sigstore.dev) is used. - properties: - ignoreTlog: - description: IgnoreTlog skips transparency - log verification. - type: boolean - pubkey: - description: |- - RekorPubKey is an optional PEM-encoded public key to use for a custom Rekor. - If set, this will be used to validate transparency log signatures from a custom Rekor. - type: string - url: - description: URL is the address - of the transparency log. Defaults - to the public Rekor log instance - https://rekor.sigstore.dev. - type: string - required: - - url - type: object - secret: - description: Reference to a Secret resource - that contains a public key - properties: - name: - description: Name of the secret. - The provided secret must contain - a key named cosign.pub. - type: string - namespace: - description: Namespace name where - the Secret exists. - type: string - required: - - name - - namespace - type: object - signatureAlgorithm: - default: sha256 - description: Specify signature algorithm - for public keys. Supported values - are sha224, sha256, sha384 and sha512. - type: string - type: object - repository: - description: |- - Repository is an optional alternate OCI repository to use for signatures and attestations that match this rule. - If specified Repository will override other OCI image repository locations for this Attestor. - type: string - type: object - type: array - type: object - type: array - dryRun: - description: DryRun configuration - properties: - enable: - type: boolean - namespace: - type: string - type: object - ignoreFields: - description: Fields which will be ignored while comparing - manifests. - items: - properties: - fields: - items: - type: string - type: array - objects: - items: - properties: - group: - type: string - kind: - type: string - name: - type: string - namespace: - type: string - version: - type: string - type: object - type: array - type: object - type: array - repository: - description: |- - Repository is an optional alternate OCI repository to use for resource bundle reference. 
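`validate.manifests` verifies Sigstore/cosign signatures on the YAML manifests themselves; `count` controls how attestor entries combine (1 behaves as OR, null as AND), and `ignoreFields` tolerates fields that drift after signing. A minimal sketch, assuming a hypothetical cosign public key:

```yaml
# Fragment of a ClusterPolicy rule.
validate:
  manifests:
    attestors:
    - count: 1                      # at least one entry must verify
      entries:
      - keys:
          publicKeys: |-            # hypothetical cosign public key
            -----BEGIN PUBLIC KEY-----
            ...
            -----END PUBLIC KEY-----
    ignoreFields:
    - objects:
      - kind: Deployment
      fields:
      - spec.replicas               # e.g. drift introduced by an autoscaler
```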
- The repository can be overridden per Attestor or Attestation. - type: string - type: object - message: - description: Message specifies a custom message to be displayed - on failure. - type: string - pattern: - description: Pattern specifies an overlay-style pattern - used to check resources. - x-kubernetes-preserve-unknown-fields: true - podSecurity: - description: |- - PodSecurity applies exemptions for Kubernetes Pod Security admission - by specifying exclusions for Pod Security Standards controls. - properties: - exclude: - description: Exclude specifies the Pod Security Standard - controls to be excluded. - items: - description: PodSecurityStandard specifies the Pod - Security Standard controls to be excluded. - properties: - controlName: - description: |- - ControlName specifies the name of the Pod Security Standard control. - See: https://kubernetes.io/docs/concepts/security/pod-security-standards/ - enum: - - HostProcess - - Host Namespaces - - Privileged Containers - - Capabilities - - HostPath Volumes - - Host Ports - - AppArmor - - SELinux - - /proc Mount Type - - Seccomp - - Sysctls - - Volume Types - - Privilege Escalation - - Running as Non-root - - Running as Non-root user - type: string - images: - description: |- - Images selects matching containers and applies the container level PSS. - Each image is the image name consisting of the registry address, repository, image, and tag. - Empty list matches no containers, PSS checks are applied at the pod level only. - Wildcards ('*' and '?') are allowed. See: https://kubernetes.io/docs/concepts/containers/images. - items: - type: string - type: array - required: - - controlName - type: object - type: array - level: - description: |- - Level defines the Pod Security Standard level to be applied to workloads. - Allowed values are privileged, baseline, and restricted. - enum: - - privileged - - baseline - - restricted - type: string - version: - description: |- - Version defines the Pod Security Standard versions that Kubernetes supports. - Allowed values are v1.19, v1.20, v1.21, v1.22, v1.23, v1.24, v1.25, v1.26, latest. Defaults to latest. - enum: - - v1.19 - - v1.20 - - v1.21 - - v1.22 - - v1.23 - - v1.24 - - v1.25 - - v1.26 - - latest - type: string - type: object - type: object - verifyImages: - description: VerifyImages is used to verify image signatures - and mutate them to add a digest - items: - description: |- - ImageVerification validates that images that match the specified pattern - are signed with the supplied public key. Once the image is verified it is - mutated to include the SHA digest retrieved during the registration. - properties: - additionalExtensions: - additionalProperties: - type: string - description: Deprecated. - type: object - annotations: - additionalProperties: - type: string - description: Deprecated. Use annotations per Attestor - instead. - type: object - attestations: - description: |- - Attestations are optional checks for signed in-toto Statements used to verify the image. - See https://github.com/in-toto/attestation. Kyverno fetches signed attestations from the - OCI registry and decodes them into a list of Statement declarations. - items: - description: |- - Attestation are checks for signed in-toto Statements that are used to verify the image. - See https://github.com/in-toto/attestation. Kyverno fetches signed attestations from the - OCI registry and decodes them into a list of Statements. - properties: - attestors: - description: Attestors specify the required attestors - (i.e. 
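`validate.podSecurity` applies a Pod Security Standards profile with targeted exemptions rather than hand-written patterns; each `exclude` entry names a control and, optionally, the images it exempts. A sketch with a hypothetical legacy image:

```yaml
# Fragment of a ClusterPolicy rule matching Pods.
validate:
  podSecurity:
    level: restricted
    version: latest
    exclude:
    - controlName: "Capabilities"
      images:
      - "ghcr.io/example/legacy-agent*"   # hypothetical image needing extra caps
```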
authorities). - items: - properties: - count: - description: |- - Count specifies the required number of entries that must match. If the count is null, all entries must match - (a logical AND). If the count is 1, at least one entry must match (a logical OR). If the count contains a - value N, then N must be less than or equal to the size of entries, and at least N entries must match. - minimum: 1 - type: integer - entries: - description: |- - Entries contains the available attestors. An attestor can be a static key, - attributes for keyless verification, or a nested attestor declaration. - items: - properties: - annotations: - additionalProperties: - type: string - description: |- - Annotations are used for image verification. - Every specified key-value pair must exist and match in the verified payload. - The payload may contain other key-value pairs. - type: object - attestor: - description: Attestor is a nested set - of Attestor used to specify a more - complex set of match authorities. - x-kubernetes-preserve-unknown-fields: true - certificates: - description: Certificates specifies - one or more certificates. - properties: - cert: - description: Cert is an optional - PEM-encoded public certificate. - type: string - certChain: - description: CertChain is an optional - PEM encoded set of certificates - used to verify. - type: string - ctlog: - description: |- - CTLog (certificate timestamp log) provides a configuration for validation of Signed Certificate - Timestamps (SCTs). If the value is unset, the default behavior by Cosign is used. - properties: - ignoreSCT: - description: |- - IgnoreSCT defines whether to use the Signed Certificate Timestamp (SCT) log to check for a certificate - timestamp. Default is false. Set to true if this was opted out during signing. - type: boolean - pubkey: - description: PubKey, if set, - is used to validate SCTs against - a custom source. - type: string - type: object - rekor: - description: |- - Rekor provides configuration for the Rekor transparency log service. If an empty object - is provided the public instance of Rekor (https://rekor.sigstore.dev) is used. - properties: - ignoreTlog: - description: IgnoreTlog skips - transparency log verification. - type: boolean - pubkey: - description: |- - RekorPubKey is an optional PEM-encoded public key to use for a custom Rekor. - If set, this will be used to validate transparency log signatures from a custom Rekor. - type: string - url: - description: URL is the address - of the transparency log. Defaults - to the public Rekor log instance - https://rekor.sigstore.dev. - type: string - required: - - url - type: object - type: object - keyless: - description: |- - Keyless is a set of attribute used to verify a Sigstore keyless attestor. - See https://github.com/sigstore/cosign/blob/main/KEYLESS.md. - properties: - additionalExtensions: - additionalProperties: - type: string - description: AdditionalExtensions - are certificate-extensions used - for keyless signing. - type: object - ctlog: - description: |- - CTLog (certificate timestamp log) provides a configuration for validation of Signed Certificate - Timestamps (SCTs). If the value is unset, the default behavior by Cosign is used. - properties: - ignoreSCT: - description: |- - IgnoreSCT defines whether to use the Signed Certificate Timestamp (SCT) log to check for a certificate - timestamp. Default is false. Set to true if this was opted out during signing. 
- type: boolean - pubkey: - description: PubKey, if set, - is used to validate SCTs against - a custom source. - type: string - type: object - issuer: - description: Issuer is the certificate - issuer used for keyless signing. - type: string - rekor: - description: |- - Rekor provides configuration for the Rekor transparency log service. If an empty object - is provided the public instance of Rekor (https://rekor.sigstore.dev) is used. - properties: - ignoreTlog: - description: IgnoreTlog skips - transparency log verification. - type: boolean - pubkey: - description: |- - RekorPubKey is an optional PEM-encoded public key to use for a custom Rekor. - If set, this will be used to validate transparency log signatures from a custom Rekor. - type: string - url: - description: URL is the address - of the transparency log. Defaults - to the public Rekor log instance - https://rekor.sigstore.dev. - type: string - required: - - url - type: object - roots: - description: |- - Roots is an optional set of PEM encoded trusted root certificates. - If not provided, the system roots are used. - type: string - subject: - description: Subject is the verified - identity used for keyless signing, - for example the email address. - type: string - type: object - keys: - description: Keys specifies one or more - public keys. - properties: - ctlog: - description: |- - CTLog (certificate timestamp log) provides a configuration for validation of Signed Certificate - Timestamps (SCTs). If the value is unset, the default behavior by Cosign is used. - properties: - ignoreSCT: - description: |- - IgnoreSCT defines whether to use the Signed Certificate Timestamp (SCT) log to check for a certificate - timestamp. Default is false. Set to true if this was opted out during signing. - type: boolean - pubkey: - description: PubKey, if set, - is used to validate SCTs against - a custom source. - type: string - type: object - kms: - description: |- - KMS provides the URI to the public key stored in a Key Management System. See: - https://github.com/sigstore/cosign/blob/main/KMS.md - type: string - publicKeys: - description: |- - Keys is a set of X.509 public keys used to verify image signatures. The keys can be directly - specified or can be a variable reference to a key specified in a ConfigMap (see - https://kyverno.io/docs/writing-policies/variables/), or reference a standard Kubernetes Secret - elsewhere in the cluster by specifying it in the format "k8s:///". - The named Secret must specify a key `cosign.pub` containing the public key used for - verification, (see https://github.com/sigstore/cosign/blob/main/KMS.md#kubernetes-secret). - When multiple keys are specified each key is processed as a separate staticKey entry - (.attestors[*].entries.keys) within the set of attestors and the count is applied across the keys. - type: string - rekor: - description: |- - Rekor provides configuration for the Rekor transparency log service. If an empty object - is provided the public instance of Rekor (https://rekor.sigstore.dev) is used. - properties: - ignoreTlog: - description: IgnoreTlog skips - transparency log verification. - type: boolean - pubkey: - description: |- - RekorPubKey is an optional PEM-encoded public key to use for a custom Rekor. - If set, this will be used to validate transparency log signatures from a custom Rekor. - type: string - url: - description: URL is the address - of the transparency log. Defaults - to the public Rekor log instance - https://rekor.sigstore.dev. 
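# A minimal sketch of a keyless attestor entry built from the schema above.
# The issuer and subject values are hypothetical placeholders; per the schema,
# an empty rekor object falls back to the public instance, and the url shown
# is its documented default (https://rekor.sigstore.dev).
attestors:
  - count: 1
    entries:
      - keyless:
          issuer: https://token.actions.githubusercontent.com        # hypothetical OIDC issuer
          subject: https://github.com/example-org/example-repo/*     # hypothetical signing identity
          rekor:
            url: https://rekor.sigstore.dev                          # default public Rekor log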
- type: string - required: - - url - type: object - secret: - description: Reference to a Secret - resource that contains a public - key - properties: - name: - description: Name of the secret. - The provided secret must contain - a key named cosign.pub. - type: string - namespace: - description: Namespace name - where the Secret exists. - type: string - required: - - name - - namespace - type: object - signatureAlgorithm: - default: sha256 - description: Specify signature algorithm - for public keys. Supported values - are sha224, sha256, sha384 and - sha512. - type: string - type: object - repository: - description: |- - Repository is an optional alternate OCI repository to use for signatures and attestations that match this rule. - If specified Repository will override other OCI image repository locations for this Attestor. - type: string - type: object - type: array - type: object - type: array - conditions: - description: |- - Conditions are used to verify attributes within a Predicate. If no Conditions are specified - the attestation check is satisfied as long there are predicates that match the predicate type. - items: - description: |- - AnyAllConditions consists of conditions wrapped denoting a logical criteria to be fulfilled. - AnyConditions get fulfilled when at least one of its sub-conditions passes. - AllConditions get fulfilled only when all of its sub-conditions pass. - properties: - all: - description: |- - AllConditions enable variable-based conditional rule execution. This is useful for - finer control of when an rule is applied. A condition can reference object data - using JMESPath notation. - Here, all of the conditions need to pass - items: - description: Condition defines variable-based - conditional criteria for rule execution. - properties: - key: - description: Key is the context entry - (using JMESPath) for conditional rule - evaluation. - x-kubernetes-preserve-unknown-fields: true - message: - description: Message is an optional - display message - type: string - operator: - description: |- - Operator is the conditional operation to perform. Valid operators are: - Equals, NotEquals, In, AnyIn, AllIn, NotIn, AnyNotIn, AllNotIn, GreaterThanOrEquals, - GreaterThan, LessThanOrEquals, LessThan, DurationGreaterThanOrEquals, DurationGreaterThan, - DurationLessThanOrEquals, DurationLessThan - enum: - - Equals - - NotEquals - - In - - AnyIn - - AllIn - - NotIn - - AnyNotIn - - AllNotIn - - GreaterThanOrEquals - - GreaterThan - - LessThanOrEquals - - LessThan - - DurationGreaterThanOrEquals - - DurationGreaterThan - - DurationLessThanOrEquals - - DurationLessThan - type: string - value: - description: |- - Value is the conditional value, or set of values. The values can be fixed set - or can be variables declared using JMESPath. - x-kubernetes-preserve-unknown-fields: true - type: object - type: array - any: - description: |- - AnyConditions enable variable-based conditional rule execution. This is useful for - finer control of when an rule is applied. A condition can reference object data - using JMESPath notation. - Here, at least one of the conditions need to pass - items: - description: Condition defines variable-based - conditional criteria for rule execution. - properties: - key: - description: Key is the context entry - (using JMESPath) for conditional rule - evaluation. 
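# Sketch of the count semantics described above: with count set to 1 the
# entries act as a logical OR, so an image signed by either key verifies;
# omitting count requires all entries to match (logical AND). Secret names
# are hypothetical; each referenced Secret must contain a cosign.pub key.
attestors:
  - count: 1
    entries:
      - keys:
          secret:
            name: team-a-cosign-key      # hypothetical Secret with cosign.pub
            namespace: verification
      - keys:
          secret:
            name: team-b-cosign-key      # hypothetical Secret with cosign.pub
            namespace: verification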
- x-kubernetes-preserve-unknown-fields: true - message: - description: Message is an optional - display message - type: string - operator: - description: |- - Operator is the conditional operation to perform. Valid operators are: - Equals, NotEquals, In, AnyIn, AllIn, NotIn, AnyNotIn, AllNotIn, GreaterThanOrEquals, - GreaterThan, LessThanOrEquals, LessThan, DurationGreaterThanOrEquals, DurationGreaterThan, - DurationLessThanOrEquals, DurationLessThan - enum: - - Equals - - NotEquals - - In - - AnyIn - - AllIn - - NotIn - - AnyNotIn - - AllNotIn - - GreaterThanOrEquals - - GreaterThan - - LessThanOrEquals - - LessThan - - DurationGreaterThanOrEquals - - DurationGreaterThan - - DurationLessThanOrEquals - - DurationLessThan - type: string - value: - description: |- - Value is the conditional value, or set of values. The values can be fixed set - or can be variables declared using JMESPath. - x-kubernetes-preserve-unknown-fields: true - type: object - type: array - type: object - type: array - predicateType: - description: Deprecated in favour of 'Type', to - be removed soon - type: string - type: - description: Type defines the type of attestation - contained within the Statement. - type: string - type: object - type: array - attestors: - description: Attestors specified the required attestors - (i.e. authorities) - items: - properties: - count: - description: |- - Count specifies the required number of entries that must match. If the count is null, all entries must match - (a logical AND). If the count is 1, at least one entry must match (a logical OR). If the count contains a - value N, then N must be less than or equal to the size of entries, and at least N entries must match. - minimum: 1 - type: integer - entries: - description: |- - Entries contains the available attestors. An attestor can be a static key, - attributes for keyless verification, or a nested attestor declaration. - items: - properties: - annotations: - additionalProperties: - type: string - description: |- - Annotations are used for image verification. - Every specified key-value pair must exist and match in the verified payload. - The payload may contain other key-value pairs. - type: object - attestor: - description: Attestor is a nested set of Attestor - used to specify a more complex set of match - authorities. - x-kubernetes-preserve-unknown-fields: true - certificates: - description: Certificates specifies one or - more certificates. - properties: - cert: - description: Cert is an optional PEM-encoded - public certificate. - type: string - certChain: - description: CertChain is an optional - PEM encoded set of certificates used - to verify. - type: string - ctlog: - description: |- - CTLog (certificate timestamp log) provides a configuration for validation of Signed Certificate - Timestamps (SCTs). If the value is unset, the default behavior by Cosign is used. - properties: - ignoreSCT: - description: |- - IgnoreSCT defines whether to use the Signed Certificate Timestamp (SCT) log to check for a certificate - timestamp. Default is false. Set to true if this was opted out during signing. - type: boolean - pubkey: - description: PubKey, if set, is used - to validate SCTs against a custom - source. - type: string - type: object - rekor: - description: |- - Rekor provides configuration for the Rekor transparency log service. If an empty object - is provided the public instance of Rekor (https://rekor.sigstore.dev) is used. - properties: - ignoreTlog: - description: IgnoreTlog skips transparency - log verification. 
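# Sketch of an attestation check assembled from the fields above: the in-toto
# Statement type (predicateType is deprecated in favour of type) plus a
# JMESPath-keyed condition. The predicate layout and builder identity are
# assumptions for illustration, not values defined by this schema.
attestations:
  - type: https://slsa.dev/provenance/v0.2     # hypothetical in-toto Statement type
    conditions:
      - all:
          - key: "{{ builder.id }}"            # JMESPath into the decoded Statement (assumed layout)
            operator: Equals
            value: "https://example.ci/builder"   # hypothetical builder identity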
- type: boolean - pubkey: - description: |- - RekorPubKey is an optional PEM-encoded public key to use for a custom Rekor. - If set, this will be used to validate transparency log signatures from a custom Rekor. - type: string - url: - description: URL is the address of - the transparency log. Defaults to - the public Rekor log instance https://rekor.sigstore.dev. - type: string - required: - - url - type: object - type: object - keyless: - description: |- - Keyless is a set of attribute used to verify a Sigstore keyless attestor. - See https://github.com/sigstore/cosign/blob/main/KEYLESS.md. - properties: - additionalExtensions: - additionalProperties: - type: string - description: AdditionalExtensions are - certificate-extensions used for keyless - signing. - type: object - ctlog: - description: |- - CTLog (certificate timestamp log) provides a configuration for validation of Signed Certificate - Timestamps (SCTs). If the value is unset, the default behavior by Cosign is used. - properties: - ignoreSCT: - description: |- - IgnoreSCT defines whether to use the Signed Certificate Timestamp (SCT) log to check for a certificate - timestamp. Default is false. Set to true if this was opted out during signing. - type: boolean - pubkey: - description: PubKey, if set, is used - to validate SCTs against a custom - source. - type: string - type: object - issuer: - description: Issuer is the certificate - issuer used for keyless signing. - type: string - rekor: - description: |- - Rekor provides configuration for the Rekor transparency log service. If an empty object - is provided the public instance of Rekor (https://rekor.sigstore.dev) is used. - properties: - ignoreTlog: - description: IgnoreTlog skips transparency - log verification. - type: boolean - pubkey: - description: |- - RekorPubKey is an optional PEM-encoded public key to use for a custom Rekor. - If set, this will be used to validate transparency log signatures from a custom Rekor. - type: string - url: - description: URL is the address of - the transparency log. Defaults to - the public Rekor log instance https://rekor.sigstore.dev. - type: string - required: - - url - type: object - roots: - description: |- - Roots is an optional set of PEM encoded trusted root certificates. - If not provided, the system roots are used. - type: string - subject: - description: Subject is the verified identity - used for keyless signing, for example - the email address. - type: string - type: object - keys: - description: Keys specifies one or more public - keys. - properties: - ctlog: - description: |- - CTLog (certificate timestamp log) provides a configuration for validation of Signed Certificate - Timestamps (SCTs). If the value is unset, the default behavior by Cosign is used. - properties: - ignoreSCT: - description: |- - IgnoreSCT defines whether to use the Signed Certificate Timestamp (SCT) log to check for a certificate - timestamp. Default is false. Set to true if this was opted out during signing. - type: boolean - pubkey: - description: PubKey, if set, is used - to validate SCTs against a custom - source. - type: string - type: object - kms: - description: |- - KMS provides the URI to the public key stored in a Key Management System. See: - https://github.com/sigstore/cosign/blob/main/KMS.md - type: string - publicKeys: - description: |- - Keys is a set of X.509 public keys used to verify image signatures. 
The keys can be directly - specified or can be a variable reference to a key specified in a ConfigMap (see - https://kyverno.io/docs/writing-policies/variables/), or reference a standard Kubernetes Secret - elsewhere in the cluster by specifying it in the format "k8s:///". - The named Secret must specify a key `cosign.pub` containing the public key used for - verification, (see https://github.com/sigstore/cosign/blob/main/KMS.md#kubernetes-secret). - When multiple keys are specified each key is processed as a separate staticKey entry - (.attestors[*].entries.keys) within the set of attestors and the count is applied across the keys. - type: string - rekor: - description: |- - Rekor provides configuration for the Rekor transparency log service. If an empty object - is provided the public instance of Rekor (https://rekor.sigstore.dev) is used. - properties: - ignoreTlog: - description: IgnoreTlog skips transparency - log verification. - type: boolean - pubkey: - description: |- - RekorPubKey is an optional PEM-encoded public key to use for a custom Rekor. - If set, this will be used to validate transparency log signatures from a custom Rekor. - type: string - url: - description: URL is the address of - the transparency log. Defaults to - the public Rekor log instance https://rekor.sigstore.dev. - type: string - required: - - url - type: object - secret: - description: Reference to a Secret resource - that contains a public key - properties: - name: - description: Name of the secret. The - provided secret must contain a key - named cosign.pub. - type: string - namespace: - description: Namespace name where - the Secret exists. - type: string - required: - - name - - namespace - type: object - signatureAlgorithm: - default: sha256 - description: Specify signature algorithm - for public keys. Supported values are - sha224, sha256, sha384 and sha512. - type: string - type: object - repository: - description: |- - Repository is an optional alternate OCI repository to use for signatures and attestations that match this rule. - If specified Repository will override other OCI image repository locations for this Attestor. - type: string - type: object - type: array - type: object - type: array - image: - description: Deprecated. Use ImageReferences instead. - type: string - imageReferences: - description: |- - ImageReferences is a list of matching image reference patterns. At least one pattern in the - list must match the image for the rule to apply. Each image reference consists of a registry - address (defaults to docker.io), repository, image, and tag (defaults to latest). - Wildcards ('*' and '?') are allowed. See: https://kubernetes.io/docs/concepts/containers/images. - items: - type: string - type: array - imageRegistryCredentials: - description: ImageRegistryCredentials provides credentials - that will be used for authentication with registry. - properties: - allowInsecureRegistry: - description: AllowInsecureRegistry allows insecure - access to a registry. - type: boolean - providers: - description: |- - Providers specifies a list of OCI Registry names, whose authentication providers are provided. - It can be of one of these values: default,google,azure,amazon,github. - items: - description: ImageRegistryCredentialsProvidersType - provides the list of credential providers required. - enum: - - default - - amazon - - azure - - google - - github - type: string - type: array - secrets: - description: |- - Secrets specifies a list of secrets that are provided for credentials. 
- Secrets must live in the Kyverno namespace. - items: - type: string - type: array - type: object - issuer: - description: Deprecated. Use KeylessAttestor instead. - type: string - key: - description: Deprecated. Use StaticKeyAttestor instead. - type: string - mutateDigest: - default: true - description: |- - MutateDigest enables replacement of image tags with digests. - Defaults to true. - type: boolean - repository: - description: |- - Repository is an optional alternate OCI repository to use for image signatures and attestations that match this rule. - If specified Repository will override the default OCI image repository configured for the installation. - The repository can also be overridden per Attestor or Attestation. - type: string - required: - default: true - description: Required validates that images are verified - i.e. have matched passed a signature or attestation - check. - type: boolean - roots: - description: Deprecated. Use KeylessAttestor instead. - type: string - subject: - description: Deprecated. Use KeylessAttestor instead. - type: string - type: - description: |- - Type specifies the method of signature validation. The allowed options - are Cosign and Notary. By default Cosign is used if a type is not specified. - enum: - - Cosign - - Notary - type: string - useCache: - default: true - description: UseCache enables caching of image verify - responses for this rule. - type: boolean - verifyDigest: - default: true - description: VerifyDigest validates that images have a - digest. - type: boolean - type: object - type: array - required: - - name - type: object - type: array - schemaValidation: - description: |- - SchemaValidation skips validation checks for policies as well as patched resources. - Optional. The default value is set to "true", it must be set to "false" to disable the validation checks. - type: boolean - useServerSideApply: - description: |- - UseServerSideApply controls whether to use server-side apply for generate rules - If is set to "true" create & update for generate rules will use apply instead of create/update. - Defaults to "false" if not specified. - type: boolean - validationFailureAction: - default: Audit - description: |- - ValidationFailureAction defines if a validation policy rule violation should block - the admission review request (enforce), or allow (audit) the admission review request - and report an error in a policy report. Optional. - Allowed values are audit or enforce. The default value is "Audit". - enum: - - audit - - enforce - - Audit - - Enforce - type: string - validationFailureActionOverrides: - description: |- - ValidationFailureActionOverrides is a Cluster Policy attribute that specifies ValidationFailureAction - namespace-wise. It overrides ValidationFailureAction for the specified namespaces. - items: - properties: - action: - description: ValidationFailureAction defines the policy validation - failure action - enum: - - audit - - enforce - - Audit - - Enforce - type: string - namespaceSelector: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. 
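# Putting the verifyImages fields above together: a minimal, hypothetical rule
# that matches an image reference pattern, verifies a Cosign signature from a
# static key Secret, and replaces the tag with a digest (mutateDigest and
# required both default to true). Registry and Secret names are placeholders.
verifyImages:
  - imageReferences:
      - "registry.example.com/apps/*"    # hypothetical registry/repository pattern
    type: Cosign                         # default when unset; Notary is the other option
    required: true
    mutateDigest: true
    attestors:
      - entries:
          - keys:
              secret:
                name: cosign-public-key  # hypothetical Secret; must contain cosign.pub
                namespace: verification
              signatureAlgorithm: sha256 # default per the schema above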
- properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - namespaces: - items: - type: string - type: array - type: object - type: array - webhookTimeoutSeconds: - description: |- - WebhookTimeoutSeconds specifies the maximum time in seconds allowed to apply this policy. - After the configured time expires, the admission request may fail, or may simply ignore the policy results, - based on the failure policy. The default timeout is 10s, the value must be between 1 and 30 seconds. - format: int32 - type: integer - required: - - controlPlaneSelector - type: object - x-kubernetes-validations: - - message: policyName is immutable - rule: has(self.policyName) == has(oldSelf.policyName) - status: - description: SharedUpboundPolicyStatus defines the observed state of the - projected polcies. - properties: - failed: - description: list of provisioning failures. - items: - description: SharedUpboundPolicyProvisioningFailure defines policy - provisioning failure. - properties: - conditions: - description: List of conditions. - items: - description: Condition contains details for one aspect of - the current state of this API Resource. - properties: - lastTransitionTime: - description: |- - lastTransitionTime is the last time the condition transitioned from one status to another. - This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. - format: date-time - type: string - message: - description: |- - message is a human readable message indicating details about the transition. - This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: |- - observedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: |- - reason contains a programmatic identifier indicating the reason for the condition's last transition. - Producers of specific condition types may define expected values and meanings for this field, - and whether the values are considered a guaranteed API. - The value should be a CamelCase string. - This field may not be empty. 
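# Sketch of the spec-level enforcement knobs described above, with hypothetical
# namespace names: enforce by default, audit-only in the listed namespaces,
# and an admission timeout inside the allowed 1-30 second range.
validationFailureAction: Enforce
validationFailureActionOverrides:
  - action: Audit
    namespaces:
      - dev        # hypothetical namespace
      - sandbox    # hypothetical namespace
webhookTimeoutSeconds: 15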
- maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, - Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. - maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - controlPlane: - description: ControlPlane name where the failure occurred. - type: string - required: - - controlPlane - type: object - type: array - x-kubernetes-list-map-keys: - - controlPlane - x-kubernetes-list-type: map - observedGeneration: - description: observed resource generation. - format: int64 - type: integer - provisioned: - description: List of successfully provisioned targets. - items: - description: SharedUpboundPolicyProvisioningSuccess defines policy - provisioning success. - properties: - controlPlane: - description: ControlPlane name where the external secret got - successfully projected. - type: string - required: - - controlPlane - type: object - type: array - x-kubernetes-list-map-keys: - - controlPlane - x-kubernetes-list-type: map - type: object - type: object - served: true - storage: true - subresources: - status: {} diff --git a/static/crds/space/v1.10/spaces.upbound.io_backups.yaml b/static/crds/space/v1.10/spaces.upbound.io_backups.yaml deleted file mode 100644 index 9f338e5c9..000000000 --- a/static/crds/space/v1.10/spaces.upbound.io_backups.yaml +++ /dev/null @@ -1,200 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.3 - name: backups.spaces.upbound.io -spec: - group: spaces.upbound.io - names: - categories: - - spaces - kind: Backup - listKind: BackupList - plural: backups - singular: backup - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .status.phase - name: Phase - type: string - - jsonPath: .status.retries - name: Retries - type: integer - - jsonPath: .spec.ttl - name: TTL - type: string - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - name: v1alpha1 - schema: - openAPIV3Schema: - description: Backup represents a single backup of a ControlPlane. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: BackupSpec defines a backup over a set of ControlPlanes. - properties: - configRef: - description: |- - ConfigRef is a reference to the backup configuration. - ApiGroup is optional and defaults to "spaces.upbound.io". - Kind is required, and the only supported value is "SharedBackupConfig" at - the moment. - Name is required. 
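# A minimal Backup manifest against the schema above (spaces.upbound.io
# v1alpha1, namespaced). Names are hypothetical; "backup" permission is
# required on the referenced ControlPlane, and the TTL value is an assumed
# duration string since the schema only constrains it to a string.
apiVersion: spaces.upbound.io/v1alpha1
kind: Backup
metadata:
  name: ctp1-adhoc-backup        # hypothetical
  namespace: default
spec:
  controlPlane: ctp1             # hypothetical; immutable after creation
  configRef:
    kind: SharedBackupConfig     # only supported kind; apiGroup defaults to spaces.upbound.io
    name: default-backup-config  # hypothetical
  deletionPolicy: Orphan         # default; Delete also allowed
  ttl: 720h                      # optional; unset means no garbage collection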
- properties: - apiGroup: - description: APIGroup is the group for the resource being referenced. - type: string - kind: - description: Kind is the type of resource being referenced. - minLength: 1 - type: string - name: - description: Name is the name of resource being referenced. - minLength: 1 - type: string - type: object - x-kubernetes-map-type: atomic - x-kubernetes-validations: - - message: backup config ref must be a reference to a SharedBackupConfig - rule: (!has(self.apiGroup) || self.apiGroup == 'spaces.upbound.io') - && self.kind == 'SharedBackupConfig' - - message: backup config ref must have a name - rule: size(self.name) > 0 - controlPlane: - description: |- - ControlPlane is the name of the ControlPlane to backup. - Requires "backup" permission on the referenced ControlPlane. - minLength: 1 - type: string - deletionPolicy: - default: Orphan - description: DeletionPolicy is the policy for the backup. - enum: - - Orphan - - Delete - type: string - excludedResources: - description: |- - ExcludedResources is a slice of resource names that are not - included in the backup. Used to filter the included extra resources. - items: - type: string - type: array - x-kubernetes-list-type: set - ttl: - description: |- - TTL is the time to live for the backup. After this time, the backup - will be eligible for garbage collection. If not set, the backup will - not be garbage collected. - type: string - required: - - configRef - - controlPlane - type: object - x-kubernetes-validations: - - message: backup target controlplane can not be changed after creation - rule: self.controlPlane == oldSelf.controlPlane - - message: backup excluded resources can not be changed after creation - rule: (!has(self.excludedResources) && !has(oldSelf.excludedResources)) - || self.excludedResources == oldSelf.excludedResources - - message: backup config ref can not be changed after creation - rule: self.configRef == oldSelf.configRef - status: - description: BackupStatus represents the observed state of a Backup. - properties: - conditions: - description: Conditions of the resource. - items: - description: A Condition that may apply to a resource. - properties: - lastTransitionTime: - description: |- - LastTransitionTime is the last time this condition transitioned from one - status to another. - format: date-time - type: string - message: - description: |- - A Message containing details about this condition's last transition from - one status to another, if any. - type: string - observedGeneration: - description: |- - ObservedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - type: integer - reason: - description: A Reason for this condition's last transition from - one status to another. - type: string - status: - description: Status of this condition; is it currently True, - False, or Unknown? - type: string - type: - description: |- - Type of this condition. At most one of each condition type may apply to - a resource at any point in time. 
- type: string - required: - - lastTransitionTime - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - observedGeneration: - description: |- - ObservedGeneration is the latest metadata.generation - which resulted in either a ready state, or stalled due to error - it can not recover from without human intervention. - format: int64 - type: integer - phase: - default: Pending - description: Phase is the current phase of the backup. - enum: - - Pending - - InProgress - - Failed - - Completed - - Deleted - type: string - retries: - description: Retries is the number of times the backup has been retried. - format: int32 - type: integer - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/static/crds/space/v1.10/spaces.upbound.io_backupschedules.yaml b/static/crds/space/v1.10/spaces.upbound.io_backupschedules.yaml deleted file mode 100644 index e3dd879ee..000000000 --- a/static/crds/space/v1.10/spaces.upbound.io_backupschedules.yaml +++ /dev/null @@ -1,213 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.3 - name: backupschedules.spaces.upbound.io -spec: - group: spaces.upbound.io - names: - categories: - - spaces - kind: BackupSchedule - listKind: BackupScheduleList - plural: backupschedules - singular: backupschedule - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .spec.schedule - name: Schedule - type: string - - jsonPath: .status.lastBackup - name: LastBackup - type: date - - jsonPath: .spec.ttl - name: TTL - type: string - - jsonPath: .spec.controlPlane - name: ControlPlane - type: string - - jsonPath: .spec.suspend - name: Suspended - type: boolean - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - name: v1alpha1 - schema: - openAPIV3Schema: - description: BackupSchedule represents a single ControlPlane schedule for - Backups. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: BackupScheduleSpec defines a backup schedule over a set of - ControlPlanes. - properties: - configRef: - description: |- - ConfigRef is a reference to the backup configuration. - ApiGroup is optional and defaults to "spaces.upbound.io". - Kind is required, and the only supported value is "SharedBackupConfig" at - the moment. - Name is required. - properties: - apiGroup: - description: APIGroup is the group for the resource being referenced. - type: string - kind: - description: Kind is the type of resource being referenced. - minLength: 1 - type: string - name: - description: Name is the name of resource being referenced. 
- minLength: 1 - type: string - type: object - x-kubernetes-map-type: atomic - x-kubernetes-validations: - - message: backup config ref must be a reference to a SharedBackupConfig - rule: (!has(self.apiGroup) || self.apiGroup == 'spaces.upbound.io') - && self.kind == 'SharedBackupConfig' - - message: backup config ref must have a name - rule: size(self.name) > 0 - controlPlane: - description: |- - ControlPlane is the name of the ControlPlane to which the schedule - applies. - Requires "get" permission on the referenced ControlPlane. - type: string - x-kubernetes-validations: - - message: target can not be changed after creation - rule: self == oldSelf - deletionPolicy: - default: Orphan - description: DeletionPolicy is the policy for the backup. - enum: - - Orphan - - Delete - type: string - excludedResources: - description: |- - ExcludedResources is a slice of resource names that are not - included in the backup. Used to filter the included extra resources. - items: - type: string - type: array - x-kubernetes-list-type: set - schedule: - description: Schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. - minLength: 1 - type: string - suspend: - description: |- - Suspend specifies whether the schedule is suspended. If true, no - Backups will be created, but running backups will be allowed to - complete. - type: boolean - ttl: - description: |- - TTL is the time to live for the backup. After this time, the backup - will be eligible for garbage collection. If not set, the backup will - not be garbage collected. - type: string - useOwnerReferencesInBackup: - description: |- - UseOwnerReferencesBackup specifies whether an ownership chain should be - established between this resource and the Backup it creates. - If set to true, the Backup will be garbage collected when this resource - is deleted. - type: boolean - required: - - configRef - - controlPlane - - schedule - type: object - status: - description: BackupScheduleStatus represents the observed state of a BackupSchedule. - properties: - conditions: - description: Conditions of the resource. - items: - description: A Condition that may apply to a resource. - properties: - lastTransitionTime: - description: |- - LastTransitionTime is the last time this condition transitioned from one - status to another. - format: date-time - type: string - message: - description: |- - A Message containing details about this condition's last transition from - one status to another, if any. - type: string - observedGeneration: - description: |- - ObservedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - type: integer - reason: - description: A Reason for this condition's last transition from - one status to another. - type: string - status: - description: Status of this condition; is it currently True, - False, or Unknown? - type: string - type: - description: |- - Type of this condition. At most one of each condition type may apply to - a resource at any point in time. 
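# A minimal BackupSchedule sketch using the fields above; all names and the
# TTL duration are hypothetical.
apiVersion: spaces.upbound.io/v1alpha1
kind: BackupSchedule
metadata:
  name: ctp1-nightly             # hypothetical
  namespace: default
spec:
  controlPlane: ctp1             # hypothetical; immutable after creation
  configRef:
    kind: SharedBackupConfig
    name: default-backup-config  # hypothetical
  schedule: "0 2 * * *"          # Cron format: nightly at 02:00
  ttl: 168h                      # optional garbage-collection window
  suspend: false                 # true pauses new Backups; running ones complete
  useOwnerReferencesInBackup: true  # created Backups are garbage collected with this schedule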
- type: string - required: - - lastTransitionTime - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - lastBackup: - description: |- - LastBackup is the last time a Backup was run for this - Schedule schedule - format: date-time - type: string - observedGeneration: - description: |- - ObservedGeneration is the latest metadata.generation - which resulted in either a ready state, or stalled due to error - it can not recover from without human intervention. - format: int64 - type: integer - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/static/crds/space/v1.10/spaces.upbound.io_controlplanes.yaml b/static/crds/space/v1.10/spaces.upbound.io_controlplanes.yaml deleted file mode 100644 index 6285bf83d..000000000 --- a/static/crds/space/v1.10/spaces.upbound.io_controlplanes.yaml +++ /dev/null @@ -1,279 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.3 - name: controlplanes.spaces.upbound.io -spec: - group: spaces.upbound.io - names: - categories: - - spaces - kind: ControlPlane - listKind: ControlPlaneList - plural: controlplanes - shortNames: - - ctp - - ctps - singular: controlplane - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .spec.crossplane.version - name: Crossplane - type: string - - jsonPath: .status.conditions[?(@.type=='Ready')].status - name: Ready - type: string - - jsonPath: .status.message - name: Message - type: string - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - name: v1beta1 - schema: - openAPIV3Schema: - description: ControlPlane defines a managed Crossplane instance. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: A ControlPlaneSpec represents the desired state of the ControlPlane. - properties: - class: - default: default - description: |- - [[GATE:EnableControlPlaneClasses]] - Class specifies the class of the control plane. This affects the - control plane sizing, including component replicas and resource - requirements. There are multiple predefined classes, with "default" - being the standard Spaces control plane without any additional class - configuration. Check the Upbound Cloud documentation for a list of all - available classes. Defaults to "default". - type: string - x-kubernetes-validations: - - message: class is immutable - rule: self == oldSelf - crossplane: - description: Crossplane defines the configuration for Crossplane. - properties: - autoUpgrade: - default: - channel: Stable - description: AutoUpgrades defines the auto upgrade configuration - for Crossplane. 
- properties: - channel: - default: Stable - description: |- - Channel defines the upgrade channels for Crossplane. We support the following channels where 'Stable' is the - default: - - None: disables auto-upgrades and keeps the control plane at its current version of Crossplane. - - Patch: automatically upgrades the control plane to the latest supported patch version when it - becomes available while keeping the minor version the same. - - Stable: automatically upgrades the control plane to the latest supported patch release on minor - version N-1, where N is the latest supported minor version. - - Rapid: automatically upgrades the cluster to the latest supported patch release on the latest - supported minor version. - enum: - - None - - Patch - - Stable - - Rapid - type: string - type: object - state: - default: Running - description: |- - State defines the state for crossplane and provider workloads. We support - the following states where 'Running' is the default: - - Running: Starts/Scales up all crossplane and provider workloads in the ControlPlane - - Paused: Pauses/Scales down all crossplane and provider workloads in the ControlPlane - enum: - - Running - - Paused - type: string - version: - description: Version is the version of Universal Crossplane to - install. - type: string - x-kubernetes-validations: - - message: The version must not start with a leading 'v' - rule: (self.matches('^[^v].*')) - type: object - restore: - description: |- - [[GATE:EnableSharedBackup]] THIS IS AN ALPHA FIELD. Do not use it in production. - Restore specifies details about the control planes restore configuration. - properties: - finishedAt: - description: |- - FinishedAt is the time at which the control plane was restored, it's not - meant to be set by the user, but rather by the system when the control - plane is restored. - format: date-time - type: string - source: - description: |- - Source of the Backup or BackupSchedule to restore from. - Require "restore" permission on the referenced Backup or BackupSchedule. - ApiGroup is optional and defaults to "spaces.upbound.io". - Kind is required, and the only supported kinds are Backup and - BackupSchedule at the moment. - Name is required. - properties: - apiGroup: - description: APIGroup is the group for the resource being - referenced. - type: string - kind: - description: Kind is the type of resource being referenced. - minLength: 1 - type: string - name: - description: Name is the name of resource being referenced. - minLength: 1 - type: string - type: object - x-kubernetes-map-type: atomic - x-kubernetes-validations: - - message: source must be a reference to a Backup or BackupSchedule - (v1alpha1) - rule: (!has(self.apiGroup) || self.apiGroup == 'spaces.upbound.io') - && (self.kind == 'Backup' || self.kind == 'BackupSchedule') - - message: source is immutable - rule: oldSelf == self - required: - - source - type: object - x-kubernetes-validations: - - message: finishedAt is immutable once set - rule: '!has(oldSelf.finishedAt) || oldSelf.finishedAt == self.finishedAt' - writeConnectionSecretToRef: - description: |- - WriteConnectionSecretToReference specifies the namespace and name of a - Secret to which any connection details for this managed resource should - be written. Connection details frequently include the endpoint, username, - and password required to connect to the managed resource. - This field is planned to be replaced in a future release in favor of - PublishConnectionDetailsTo. 
Currently, both could be set independently - and connection details would be published to both without affecting - each other. - - If omitted, it is defaulted to the namespace of the ControlPlane. - Deprecated: Use Hub or Upbound identities instead. - properties: - name: - description: Name of the secret. - type: string - namespace: - description: |- - Namespace of the secret. If omitted, it is equal to - the namespace of the resource containing this reference as a field. - type: string - required: - - name - type: object - type: object - x-kubernetes-validations: - - message: '[[GATE:EnableSharedBackup]] restore source can not be unset' - rule: '!has(oldSelf.restore) || has(self.restore)' - - message: '[[GATE:EnableSharedBackup]] restore source can not be set - after creation' - rule: has(oldSelf.restore) || !has(self.restore) - - message: '"version" cannot be empty when upgrade channel is "None"' - rule: '!has(self.crossplane.autoUpgrade) || self.crossplane.autoUpgrade.channel - != "None" || self.crossplane.version != ""' - status: - description: A ControlPlaneStatus represents the observed state of a ControlPlane. - properties: - conditions: - description: Conditions of the resource. - items: - description: A Condition that may apply to a resource. - properties: - lastTransitionTime: - description: |- - LastTransitionTime is the last time this condition transitioned from one - status to another. - format: date-time - type: string - message: - description: |- - A Message containing details about this condition's last transition from - one status to another, if any. - type: string - observedGeneration: - description: |- - ObservedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - type: integer - reason: - description: A Reason for this condition's last transition from - one status to another. - type: string - status: - description: Status of this condition; is it currently True, - False, or Unknown? - type: string - type: - description: |- - Type of this condition. At most one of each condition type may apply to - a resource at any point in time. - type: string - required: - - lastTransitionTime - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - controlPlaneID: - type: string - firstAvailableAt: - description: FirstAvailableAt is the time at which the control plane - was available for the first time. - format: date-time - type: string - message: - description: |- - Message is a human-readable message indicating details about why the - ControlPlane is in this condition. - type: string - observedGeneration: - description: |- - ObservedGeneration is the latest metadata.generation - which resulted in either a ready state, or stalled due to error - it can not recover from without human intervention. 
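# A minimal ControlPlane sketch per the v1beta1 schema above. The version
# must not carry a leading "v", and per the CEL rule it may only be omitted
# when the upgrade channel is not "None". The name and version are
# hypothetical values.
apiVersion: spaces.upbound.io/v1beta1
kind: ControlPlane
metadata:
  name: ctp1                     # hypothetical
  namespace: default
spec:
  crossplane:
    version: "1.16.0"            # hypothetical UXP version; no leading 'v'
    autoUpgrade:
      channel: Stable            # None | Patch | Stable | Rapid
    state: Running               # Paused scales down crossplane/provider workloads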
- format: int64 - type: integer - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/static/crds/space/v1.10/spaces.upbound.io_incontrolplaneoverrides.yaml b/static/crds/space/v1.10/spaces.upbound.io_incontrolplaneoverrides.yaml deleted file mode 100644 index 295fc05d0..000000000 --- a/static/crds/space/v1.10/spaces.upbound.io_incontrolplaneoverrides.yaml +++ /dev/null @@ -1,256 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.3 - name: incontrolplaneoverrides.spaces.upbound.io -spec: - group: spaces.upbound.io - names: - categories: - - spaces - kind: InControlPlaneOverride - listKind: InControlPlaneOverrideList - plural: incontrolplaneoverrides - singular: incontrolplaneoverride - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .status.conditions[?(@.type=='Synced')].status - name: SYNCED - type: string - - jsonPath: .status.conditions[?(@.type=='Ready')].status - name: READY - type: string - name: v1alpha1 - schema: - openAPIV3Schema: - description: |- - InControlPlaneOverride represents resource configuration overrides in - a ControlPlane. The specified override can be applied on single objects - as well as claim/XR object hierarchies. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: |- - InControlPlaneOverrideSpec defines a configuration override - on a target object hierarchy in a target ControlPlane with the - given name. - properties: - controlPlaneName: - description: |- - ControlPlaneName is the name of the target ControlPlane where - the resource configuration overrides will be applied. - minLength: 1 - type: string - x-kubernetes-validations: - - message: controlPlaneName is immutable - rule: self == oldSelf - deletionPolicy: - default: RollBack - description: |- - DeletionPolicy specifies whether when the InControlPlaneOverride object - is deleted, the configuration override should be kept (Keep) or - rolled back (RollBack). - enum: - - RollBack - - Keep - type: string - override: - description: |- - Override denotes the configuration override to be applied on the target - object hierarchy. The fully specified intent is obtained by serializing - the Override. - properties: - metadata: - description: Metadata specifies the patch metadata. - properties: - annotations: - additionalProperties: - type: string - description: |- - Annotations represents the Kube object annotations. 
- Only the following annotations are allowed to be patched: - - crossplane.io/paused - - spaces.upbound.io/force-reconcile-at - type: object - x-kubernetes-validations: - - message: Only the crossplane.io/paused and spaces.upbound.io/force-reconcile-at - annotations are allowed - rule: self.all(k, k == 'crossplane.io/paused' || k == 'spaces.upbound.io/force-reconcile-at') - type: object - type: object - propagationPolicy: - default: None - description: |- - PropagationPolicy specifies whether the configuration override will be - applied only to the object referenced in TargetRef (None), after an - ascending or descending hierarchy traversal will be done starting with - the target object. - enum: - - None - - Ascending - - Descending - type: string - targetRef: - description: |- - TargetRef is the object reference to a Kubernetes API object where the - configuration override will start. The controller will traverse the - target object's hierarchy depending on the PropagationPolicy. If - PropagationPolicy is None, then only the target object will be updated. - properties: - apiVersion: - description: APIVersion of the referenced object. - minLength: 1 - type: string - kind: - description: Kind of the referenced object. - minLength: 1 - type: string - name: - description: Name of the referenced object. - minLength: 1 - type: string - namespace: - description: Namespace of the referenced object. - type: string - required: - - apiVersion - - kind - - name - type: object - required: - - controlPlaneName - - override - - targetRef - type: object - status: - description: |- - InControlPlaneOverrideStatus defines the status of an InControlPlaneOverride - object. - properties: - conditions: - description: Conditions of the resource. - items: - description: A Condition that may apply to a resource. - properties: - lastTransitionTime: - description: |- - LastTransitionTime is the last time this condition transitioned from one - status to another. - format: date-time - type: string - message: - description: |- - A Message containing details about this condition's last transition from - one status to another, if any. - type: string - observedGeneration: - description: |- - ObservedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - type: integer - reason: - description: A Reason for this condition's last transition from - one status to another. - type: string - status: - description: Status of this condition; is it currently True, - False, or Unknown? - type: string - type: - description: |- - Type of this condition. At most one of each condition type may apply to - a resource at any point in time. - type: string - required: - - lastTransitionTime - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - objectRefs: - items: - description: |- - PatchedObjectStatus represents the state of an applied patch to an object - in the target hierarchy. - properties: - apiVersion: - description: APIVersion of the referenced object. - minLength: 1 - type: string - kind: - description: Kind of the referenced object. - minLength: 1 - type: string - message: - description: Message holds an optional detail message detailing - the observed state. 
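# Sketch of an InControlPlaneOverride that pauses a claim hierarchy in a
# target control plane, per the schema above. Only the crossplane.io/paused
# and spaces.upbound.io/force-reconcile-at annotations may be patched; the
# claim API group, kind, and names here are hypothetical.
apiVersion: spaces.upbound.io/v1alpha1
kind: InControlPlaneOverride
metadata:
  name: pause-my-claim           # hypothetical
  namespace: default
spec:
  controlPlaneName: ctp1         # hypothetical; immutable after creation
  deletionPolicy: RollBack       # default; Keep retains the override on delete
  propagationPolicy: Descending  # traverse from the target down its hierarchy
  override:
    metadata:
      annotations:
        crossplane.io/paused: "true"
  targetRef:
    apiVersion: example.org/v1alpha1   # hypothetical claim API
    kind: ExampleClaim                 # hypothetical
    name: my-claim                     # hypothetical
    namespace: default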
-                      type: string
-                    name:
-                      description: Name of the referenced object.
-                      minLength: 1
-                      type: string
-                    namespace:
-                      description: Namespace of the referenced object.
-                      type: string
-                    reason:
-                      description: Reason is the reason for the target objects override
-                        Status.
-                      type: string
-                    status:
-                      description: Status of the configuration override.
-                      enum:
-                      - Success
-                      - Skipped
-                      - Error
-                      type: string
-                    uid:
-                      description: Metadata UID of the patch target object.
-                      type: string
-                  required:
-                  - apiVersion
-                  - kind
-                  - name
-                  - reason
-                  - status
-                  type: object
-                type: array
-              observedGeneration:
-                description: |-
-                  ObservedGeneration is the latest metadata.generation
-                  which resulted in either a ready state, or stalled due to error
-                  it can not recover from without human intervention.
-                format: int64
-                type: integer
-            type: object
-        required:
-        - spec
-        type: object
-    served: true
-    storage: true
-    subresources:
-      status: {}
diff --git a/static/crds/space/v1.10/spaces.upbound.io_sharedbackupconfigs.yaml b/static/crds/space/v1.10/spaces.upbound.io_sharedbackupconfigs.yaml
deleted file mode 100644
index d716be334..000000000
--- a/static/crds/space/v1.10/spaces.upbound.io_sharedbackupconfigs.yaml
+++ /dev/null
@@ -1,143 +0,0 @@
----
-apiVersion: apiextensions.k8s.io/v1
-kind: CustomResourceDefinition
-metadata:
-  annotations:
-    controller-gen.kubebuilder.io/version: v0.16.3
-  name: sharedbackupconfigs.spaces.upbound.io
-spec:
-  group: spaces.upbound.io
-  names:
-    categories:
-    - spaces
-    kind: SharedBackupConfig
-    listKind: SharedBackupConfigList
-    plural: sharedbackupconfigs
-    singular: sharedbackupconfig
-  scope: Namespaced
-  versions:
-  - additionalPrinterColumns:
-    - jsonPath: .spec.objectStorage.provider
-      name: Provider
-      type: string
-    - jsonPath: .spec.objectStorage.bucket
-      name: Bucket
-      type: string
-    - jsonPath: .spec.objectStorage.credentials.source
-      name: Auth
-      type: string
-    - jsonPath: .spec.objectStorage.credentials.secretRef.name
-      name: Secret
-      type: string
-    - jsonPath: .metadata.creationTimestamp
-      name: Age
-      type: date
-    name: v1alpha1
-    schema:
-      openAPIV3Schema:
-        description: SharedBackupConfig defines the configuration to backup and restore
-          ControlPlanes.
-        properties:
-          apiVersion:
-            description: |-
-              APIVersion defines the versioned schema of this representation of an object.
-              Servers should convert recognized schemas to the latest internal value, and
-              may reject unrecognized values.
-              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
-            type: string
-          kind:
-            description: |-
-              Kind is a string value representing the REST resource this object represents.
-              Servers may infer this from the endpoint the client submits requests to.
-              Cannot be updated.
-              In CamelCase.
-              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
-            type: string
-          metadata:
-            type: object
-          spec:
-            description: |-
-              A SharedBackupConfigSpec represents the configuration to backup or restore
-              ControlPlanes.
-            properties:
-              objectStorage:
-                description: ObjectStorage specifies the object storage configuration
-                  for the given provider.
-                properties:
-                  bucket:
-                    description: Bucket is the name of the bucket to store backups
-                      in.
-                    minLength: 1
-                    type: string
-                  config:
-                    description: |-
-                      Config is a free-form map of configuration options for the object storage provider.
-                      See https://github.com/thanos-io/objstore?tab=readme-ov-file for more
-                      information on the formats for each supported cloud provider. Bucket and
-                      Provider will override the required values in the config.
-                    type: object
-                    x-kubernetes-preserve-unknown-fields: true
-                  credentials:
-                    description: Credentials specifies the credentials to access the
-                      object storage.
-                    properties:
-                      secretRef:
-                        description: |-
-                          A SecretRef is a reference to a secret key that contains the credentials
-                          that must be used to connect to the provider.
-                        properties:
-                          key:
-                            default: credentials
-                            description: The key to select.
-                            type: string
-                          name:
-                            description: Name of the secret.
-                            type: string
-                        required:
-                        - key
-                        - name
-                        type: object
-                      source:
-                        description: |-
-                          Source of the credentials.
-                          Source "Secret" requires "get" permissions on the referenced Secret.
-                        enum:
-                        - Secret
-                        - InjectedIdentity
-                        type: string
-                    required:
-                    - source
-                    type: object
-                  prefix:
-                    description: |-
-                      Prefix is the prefix to use for all backups using this
-                      SharedBackupConfig, e.g. "prod/cluster1", resulting in backups for
-                      controlplane "ctp1" in namespace "ns1" being stored in
-                      "prod/cluster1/ns1/ctp1".
-                    type: string
-                  provider:
-                    description: Provider is the name of the object storage provider.
-                    enum:
-                    - AWS
-                    - Azure
-                    - GCP
-                    type: string
-                required:
-                - bucket
-                - credentials
-                - provider
-                type: object
-                x-kubernetes-validations:
-                - message: credentials.secretRef.name must be set when source is Secret
-                  rule: self.credentials.source != 'Secret' || (has(self.credentials.secretRef)
-                    && has(self.credentials.secretRef.name))
-            required:
-            - objectStorage
-            type: object
-        required:
-        - spec
-        type: object
-    served: true
-    storage: true
-    subresources:
-      status: {}
diff --git a/static/crds/space/v1.10/spaces.upbound.io_sharedbackups.yaml b/static/crds/space/v1.10/spaces.upbound.io_sharedbackups.yaml
deleted file mode 100644
index ffa7b41c5..000000000
--- a/static/crds/space/v1.10/spaces.upbound.io_sharedbackups.yaml
+++ /dev/null
@@ -1,291 +0,0 @@
----
-apiVersion: apiextensions.k8s.io/v1
-kind: CustomResourceDefinition
-metadata:
-  annotations:
-    controller-gen.kubebuilder.io/version: v0.16.3
-  name: sharedbackups.spaces.upbound.io
-spec:
-  group: spaces.upbound.io
-  names:
-    categories:
-    - spaces
-    kind: SharedBackup
-    listKind: SharedBackupList
-    plural: sharedbackups
-    singular: sharedbackup
-  scope: Namespaced
-  versions:
-  - additionalPrinterColumns:
-    - jsonPath: .metadata.annotations.sharedbackup\.internal\.spaces\.upbound\.io/completed
-      name: Completed
-      type: string
-    - jsonPath: .metadata.annotations.sharedbackup\.internal\.spaces\.upbound\.io/failed
-      name: Failed
-      type: string
-    - jsonPath: .metadata.annotations.sharedbackup\.internal\.spaces\.upbound\.io/provisioned
-      name: Provisioned
-      type: string
-    - jsonPath: .status.phase
-      name: Phase
-      type: string
-    - jsonPath: .metadata.creationTimestamp
-      name: Age
-      type: date
-    name: v1alpha1
-    schema:
-      openAPIV3Schema:
-        description: SharedBackup defines a backup over a set of ControlPlanes.
-        properties:
-          apiVersion:
-            description: |-
-              APIVersion defines the versioned schema of this representation of an object.
-              Servers should convert recognized schemas to the latest internal value, and
-              may reject unrecognized values.
-              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
-            type: string
-          kind:
-            description: |-
-              Kind is a string value representing the REST resource this object represents.
-              Servers may infer this from the endpoint the client submits requests to.
-              Cannot be updated.
-              In CamelCase.
-              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
-            type: string
-          metadata:
-            type: object
-          spec:
-            description: SharedBackupSpec defines a backup over a set of ControlPlanes.
-            properties:
-              configRef:
-                description: |-
-                  ConfigRef is a reference to the backup configuration.
-                  ApiGroup is optional and defaults to "spaces.upbound.io".
-                  Kind is required, and the only supported value is "SharedBackupConfig" at
-                  the moment.
-                  Name is required.
-                properties:
-                  apiGroup:
-                    description: APIGroup is the group for the resource being referenced.
-                    type: string
-                  kind:
-                    description: Kind is the type of resource being referenced.
-                    minLength: 1
-                    type: string
-                  name:
-                    description: Name is the name of resource being referenced.
-                    minLength: 1
-                    type: string
-                type: object
-                x-kubernetes-map-type: atomic
-                x-kubernetes-validations:
-                - message: backup config ref must be a reference to a SharedBackupConfig
-                  rule: (!has(self.apiGroup) || self.apiGroup == 'spaces.upbound.io')
-                    && self.kind == 'SharedBackupConfig'
-                - message: backup config ref must have a name
-                  rule: size(self.name) > 0
-              controlPlaneSelector:
-                description: |-
-                  ControlPlaneSelector defines the selector for ControlPlanes to backup.
-                  Requires "backup" permission on all ControlPlanes in the same namespace.
-                properties:
-                  labelSelectors:
-                    description: |-
-                      A resource is matched if any of the label selector matches.
-                      In case when the list is empty, resource is matched too.
-                    items:
-                      description: |-
-                        A label selector is a label query over a set of resources. The result of matchLabels and
-                        matchExpressions are ANDed. An empty label selector matches all objects. A null
-                        label selector matches no objects.
-                      properties:
-                        matchExpressions:
-                          description: matchExpressions is a list of label selector
-                            requirements. The requirements are ANDed.
-                          items:
-                            description: |-
-                              A label selector requirement is a selector that contains values, a key, and an operator that
-                              relates the key and values.
-                            properties:
-                              key:
-                                description: key is the label key that the selector
-                                  applies to.
-                                type: string
-                              operator:
-                                description: |-
-                                  operator represents a key's relationship to a set of values.
-                                  Valid operators are In, NotIn, Exists and DoesNotExist.
-                                type: string
-                              values:
-                                description: |-
-                                  values is an array of string values. If the operator is In or NotIn,
-                                  the values array must be non-empty. If the operator is Exists or DoesNotExist,
-                                  the values array must be empty. This array is replaced during a strategic
-                                  merge patch.
-                                items:
-                                  type: string
-                                type: array
-                                x-kubernetes-list-type: atomic
-                            required:
-                            - key
-                            - operator
-                            type: object
-                          type: array
-                          x-kubernetes-list-type: atomic
-                        matchLabels:
-                          additionalProperties:
-                            type: string
-                          description: |-
-                            matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
-                            map is equivalent to an element of matchExpressions, whose key field is "key", the
-                            operator is "In", and the values array contains only "value". The requirements are ANDed.
-                          type: object
-                      type: object
-                      x-kubernetes-map-type: atomic
-                    type: array
-                  names:
-                    description: |-
-                      A resource is selected if its metadata.name matches any of the provided names.
-                      In case when the list is empty, resource is matched too.
-                    items:
-                      type: string
-                    type: array
-                    x-kubernetes-list-type: set
-                type: object
-                x-kubernetes-validations:
-                - message: either names or a labelSelector must be specified
-                  rule: (has(self.labelSelectors) || has(self.names)) && (size(self.labelSelectors)
-                    > 0 || size(self.names) > 0)
-              deletionPolicy:
-                default: Orphan
-                description: DeletionPolicy is the policy for the backup.
-                enum:
-                - Orphan
-                - Delete
-                type: string
-              excludedResources:
-                description: |-
-                  ExcludedResources is a slice of resource names that are not
-                  included in the backup. Used to filter the included extra resources.
-                items:
-                  type: string
-                type: array
-                x-kubernetes-list-type: set
-              ttl:
-                description: |-
-                  TTL is the time to live for the backup. After this time, the backup
-                  will be eligible for garbage collection. If not set, the backup will
-                  not be garbage collected.
-                type: string
-              useOwnerReferencesInBackup:
-                description: |-
-                  UseOwnerReferencesBackup specifies whether an ownership chain should be
-                  established between this resource and the Backup it creates.
-                  If set to true, the Backup will be garbage collected when this resource
-                  is deleted.
-                type: boolean
-            required:
-            - configRef
-            - controlPlaneSelector
-            type: object
-            x-kubernetes-validations:
-            - message: shared backup ControlPlane selectors can not be changed after
-                creation
-              rule: self.controlPlaneSelector == oldSelf.controlPlaneSelector
-            - message: shared backup excluded resources can not be changed after creation
-              rule: (!has(self.excludedResources) && !has(oldSelf.excludedResources))
-                || self.excludedResources == oldSelf.excludedResources
-          status:
-            description: SharedBackupStatus represents the observed state of a SharedBackup.
-            properties:
-              completed:
-                description: Completed is the list of ControlPlanes for which the
-                  backup completed successfully.
-                items:
-                  type: string
-                type: array
-                x-kubernetes-list-type: set
-              conditions:
-                description: Conditions of the resource.
-                items:
-                  description: A Condition that may apply to a resource.
-                  properties:
-                    lastTransitionTime:
-                      description: |-
-                        LastTransitionTime is the last time this condition transitioned from one
-                        status to another.
-                      format: date-time
-                      type: string
-                    message:
-                      description: |-
-                        A Message containing details about this condition's last transition from
-                        one status to another, if any.
-                      type: string
-                    observedGeneration:
-                      description: |-
-                        ObservedGeneration represents the .metadata.generation that the condition was set based upon.
-                        For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
-                        with respect to the current state of the instance.
-                      format: int64
-                      type: integer
-                    reason:
-                      description: A Reason for this condition's last transition from
-                        one status to another.
-                      type: string
-                    status:
-                      description: Status of this condition; is it currently True,
-                        False, or Unknown?
-                      type: string
-                    type:
-                      description: |-
-                        Type of this condition. At most one of each condition type may apply to
-                        a resource at any point in time.
-                      type: string
-                  required:
-                  - lastTransitionTime
-                  - reason
-                  - status
-                  - type
-                  type: object
-                type: array
-                x-kubernetes-list-map-keys:
-                - type
-                x-kubernetes-list-type: map
-              failed:
-                description: Failed is the list of ControlPlanes for which the backup
-                  failed.
-                items:
-                  type: string
-                type: array
-                x-kubernetes-list-type: set
-              observedGeneration:
-                description: |-
-                  ObservedGeneration is the latest metadata.generation
-                  which resulted in either a ready state, or stalled due to error
-                  it can not recover from without human intervention.
-                format: int64
-                type: integer
-              phase:
-                default: Pending
-                description: Phase represents the current phase of the SharedBackup.
-                enum:
-                - Pending
-                - InProgress
-                - Failed
-                - Completed
-                type: string
-              selectedControlPlanes:
-                description: SelectedControlPlanes represents the names of the selected
-                  ControlPlanes.
-                items:
-                  type: string
-                type: array
-                x-kubernetes-list-type: set
-            type: object
-        required:
-        - spec
-        type: object
-    served: true
-    storage: true
-    subresources:
-      status: {}
diff --git a/static/crds/space/v1.10/spaces.upbound.io_sharedbackupschedules.yaml b/static/crds/space/v1.10/spaces.upbound.io_sharedbackupschedules.yaml
deleted file mode 100644
index 1c173c0a8..000000000
--- a/static/crds/space/v1.10/spaces.upbound.io_sharedbackupschedules.yaml
+++ /dev/null
@@ -1,273 +0,0 @@
----
-apiVersion: apiextensions.k8s.io/v1
-kind: CustomResourceDefinition
-metadata:
-  annotations:
-    controller-gen.kubebuilder.io/version: v0.16.3
-  name: sharedbackupschedules.spaces.upbound.io
-spec:
-  group: spaces.upbound.io
-  names:
-    categories:
-    - spaces
-    kind: SharedBackupSchedule
-    listKind: SharedBackupScheduleList
-    plural: sharedbackupschedules
-    singular: sharedbackupschedule
-  scope: Namespaced
-  versions:
-  - additionalPrinterColumns:
-    - jsonPath: .spec.schedule
-      name: Schedule
-      type: string
-    - jsonPath: .spec.suspend
-      name: Suspended
-      type: boolean
-    - jsonPath: .metadata.annotations.sharedbackupschedule\.internal\.spaces\.upbound\.io/provisioned-total
-      name: Provisioned
-      type: string
-    - jsonPath: .metadata.creationTimestamp
-      name: Age
-      type: date
-    name: v1alpha1
-    schema:
-      openAPIV3Schema:
-        description: |-
-          SharedBackupSchedule defines a schedule for SharedBackup on a set of
-          ControlPlanes.
-        properties:
-          apiVersion:
-            description: |-
-              APIVersion defines the versioned schema of this representation of an object.
-              Servers should convert recognized schemas to the latest internal value, and
-              may reject unrecognized values.
-              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
-            type: string
-          kind:
-            description: |-
-              Kind is a string value representing the REST resource this object represents.
-              Servers may infer this from the endpoint the client submits requests to.
-              Cannot be updated.
-              In CamelCase.
-              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
-            type: string
-          metadata:
-            type: object
-          spec:
-            description: SharedBackupScheduleSpec defines the desired state of a SharedBackupSchedule.
-            properties:
-              configRef:
-                description: |-
-                  ConfigRef is a reference to the backup configuration.
-                  ApiGroup is optional and defaults to "spaces.upbound.io".
-                  Kind is required, and the only supported value is "SharedBackupConfig" at
-                  the moment.
-                  Name is required.
-                properties:
-                  apiGroup:
-                    description: APIGroup is the group for the resource being referenced.
-                    type: string
-                  kind:
-                    description: Kind is the type of resource being referenced.
-                    minLength: 1
-                    type: string
-                  name:
-                    description: Name is the name of resource being referenced.
-                    minLength: 1
-                    type: string
-                type: object
-                x-kubernetes-map-type: atomic
-                x-kubernetes-validations:
-                - message: backup config ref must be a reference to a SharedBackupConfig
-                  rule: (!has(self.apiGroup) || self.apiGroup == 'spaces.upbound.io')
-                    && self.kind == 'SharedBackupConfig'
-                - message: backup config ref must have a name
-                  rule: size(self.name) > 0
-              controlPlaneSelector:
-                description: |-
-                  ControlPlaneSelector defines the selector for ControlPlanes to backup.
-                  Requires "backup" permission on all ControlPlanes in the same namespace.
-                properties:
-                  labelSelectors:
-                    description: |-
-                      A resource is matched if any of the label selector matches.
-                      In case when the list is empty, resource is matched too.
-                    items:
-                      description: |-
-                        A label selector is a label query over a set of resources. The result of matchLabels and
-                        matchExpressions are ANDed. An empty label selector matches all objects. A null
-                        label selector matches no objects.
-                      properties:
-                        matchExpressions:
-                          description: matchExpressions is a list of label selector
-                            requirements. The requirements are ANDed.
-                          items:
-                            description: |-
-                              A label selector requirement is a selector that contains values, a key, and an operator that
-                              relates the key and values.
-                            properties:
-                              key:
-                                description: key is the label key that the selector
-                                  applies to.
-                                type: string
-                              operator:
-                                description: |-
-                                  operator represents a key's relationship to a set of values.
-                                  Valid operators are In, NotIn, Exists and DoesNotExist.
-                                type: string
-                              values:
-                                description: |-
-                                  values is an array of string values. If the operator is In or NotIn,
-                                  the values array must be non-empty. If the operator is Exists or DoesNotExist,
-                                  the values array must be empty. This array is replaced during a strategic
-                                  merge patch.
-                                items:
-                                  type: string
-                                type: array
-                                x-kubernetes-list-type: atomic
-                            required:
-                            - key
-                            - operator
-                            type: object
-                          type: array
-                          x-kubernetes-list-type: atomic
-                        matchLabels:
-                          additionalProperties:
-                            type: string
-                          description: |-
-                            matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
-                            map is equivalent to an element of matchExpressions, whose key field is "key", the
-                            operator is "In", and the values array contains only "value". The requirements are ANDed.
-                          type: object
-                      type: object
-                      x-kubernetes-map-type: atomic
-                    type: array
-                  names:
-                    description: |-
-                      A resource is selected if its metadata.name matches any of the provided names.
-                      In case when the list is empty, resource is matched too.
-                    items:
-                      type: string
-                    type: array
-                    x-kubernetes-list-type: set
-                type: object
-                x-kubernetes-validations:
-                - message: either names or a labelSelector must be specified
-                  rule: (has(self.labelSelectors) || has(self.names)) && (size(self.labelSelectors)
-                    > 0 || size(self.names) > 0)
-              deletionPolicy:
-                default: Orphan
-                description: DeletionPolicy is the policy for the backup.
-                enum:
-                - Orphan
-                - Delete
-                type: string
-              excludedResources:
-                description: |-
-                  ExcludedResources is a slice of resource names that are not
-                  included in the backup. Used to filter the included extra resources.
-                items:
-                  type: string
-                type: array
-                x-kubernetes-list-type: set
-              schedule:
-                description: Schedule in Cron format, see https://en.wikipedia.org/wiki/Cron.
-                minLength: 1
-                type: string
-              suspend:
-                description: |-
-                  Suspend specifies whether the schedule is suspended. If true, no
-                  Backups will be created, but running backups will be allowed to
-                  complete.
-                type: boolean
-              ttl:
-                description: |-
-                  TTL is the time to live for the backup. After this time, the backup
-                  will be eligible for garbage collection. If not set, the backup will
-                  not be garbage collected.
-                type: string
-              useOwnerReferencesInBackup:
-                description: |-
-                  UseOwnerReferencesBackup specifies whether an ownership chain should be
-                  established between this resource and the Backup it creates.
-                  If set to true, the Backup will be garbage collected when this resource
-                  is deleted.
-                type: boolean
-            required:
-            - configRef
-            - controlPlaneSelector
-            - schedule
-            type: object
-          status:
-            description: SharedBackupScheduleStatus represents the observed state
-              of a SharedBackupSchedule.
-            properties:
-              conditions:
-                description: Conditions of the resource.
-                items:
-                  description: A Condition that may apply to a resource.
-                  properties:
-                    lastTransitionTime:
-                      description: |-
-                        LastTransitionTime is the last time this condition transitioned from one
-                        status to another.
-                      format: date-time
-                      type: string
-                    message:
-                      description: |-
-                        A Message containing details about this condition's last transition from
-                        one status to another, if any.
-                      type: string
-                    observedGeneration:
-                      description: |-
-                        ObservedGeneration represents the .metadata.generation that the condition was set based upon.
-                        For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
-                        with respect to the current state of the instance.
-                      format: int64
-                      type: integer
-                    reason:
-                      description: A Reason for this condition's last transition from
-                        one status to another.
-                      type: string
-                    status:
-                      description: Status of this condition; is it currently True,
-                        False, or Unknown?
-                      type: string
-                    type:
-                      description: |-
-                        Type of this condition. At most one of each condition type may apply to
-                        a resource at any point in time.
-                      type: string
-                  required:
-                  - lastTransitionTime
-                  - reason
-                  - status
-                  - type
-                  type: object
-                type: array
-                x-kubernetes-list-map-keys:
-                - type
-                x-kubernetes-list-type: map
-              observedGeneration:
-                description: |-
-                  ObservedGeneration is the latest metadata.generation
-                  which resulted in either a ready state, or stalled due to error
-                  it can not recover from without human intervention.
-                format: int64
-                type: integer
-              selectedControlPlanes:
-                description: |-
-                  SelectedControlPlanes is the list of ControlPlanes that are selected
-                  for backup.
-                items:
-                  type: string
-                type: array
-                x-kubernetes-list-type: set
-            type: object
-        required:
-        - spec
-        type: object
-    served: true
-    storage: true
-    subresources:
-      status: {}
diff --git a/static/crds/space/v1.10/spaces.upbound.io_sharedexternalsecrets.yaml b/static/crds/space/v1.10/spaces.upbound.io_sharedexternalsecrets.yaml
deleted file mode 100644
index 00c2dd3ab..000000000
--- a/static/crds/space/v1.10/spaces.upbound.io_sharedexternalsecrets.yaml
+++ /dev/null
@@ -1,745 +0,0 @@
----
-apiVersion: apiextensions.k8s.io/v1
-kind: CustomResourceDefinition
-metadata:
-  annotations:
-    controller-gen.kubebuilder.io/version: v0.16.3
-  name: sharedexternalsecrets.spaces.upbound.io
-spec:
-  group: spaces.upbound.io
-  names:
-    categories:
-    - externalsecrets
-    kind: SharedExternalSecret
-    listKind: SharedExternalSecretList
-    plural: sharedexternalsecrets
-    shortNames:
-    - ses
-    singular: sharedexternalsecret
-  scope: Namespaced
-  versions:
-  - additionalPrinterColumns:
-    - jsonPath: .metadata.annotations.sharedexternalsecrets\.internal\.spaces\.upbound\.io/provisioned-total
-      name: Provisioned
-      type: string
-    - jsonPath: .metadata.creationTimestamp
-      name: AGE
-      type: date
-    name: v1alpha1
-    schema:
-      openAPIV3Schema:
-        description: |-
-          SharedExternalSecret specifies a shared ExternalSecret projected into the specified
-          ControlPlanes of the same namespace as ClusterExternalSecret and with that
-          propagated into the specified namespaces.
-        properties:
-          apiVersion:
-            description: |-
-              APIVersion defines the versioned schema of this representation of an object.
-              Servers should convert recognized schemas to the latest internal value, and
-              may reject unrecognized values.
-              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
-            type: string
-          kind:
-            description: |-
-              Kind is a string value representing the REST resource this object represents.
-              Servers may infer this from the endpoint the client submits requests to.
-              Cannot be updated.
-              In CamelCase.
-              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
-            type: string
-          metadata:
-            type: object
-          spec:
-            description: SharedExternalSecretSpec defines the desired state of SharedExternalSecret.
-            properties:
-              controlPlaneSelector:
-                description: |-
-                  The secret is projected only to control planes
-                  matching the provided selector. Either names or a labelSelector must be specified.
-                properties:
-                  labelSelectors:
-                    description: |-
-                      A resource is matched if any of the label selector matches.
-                      In case when the list is empty, resource is matched too.
-                    items:
-                      description: |-
-                        A label selector is a label query over a set of resources. The result of matchLabels and
-                        matchExpressions are ANDed. An empty label selector matches all objects. A null
-                        label selector matches no objects.
-                      properties:
-                        matchExpressions:
-                          description: matchExpressions is a list of label selector
-                            requirements. The requirements are ANDed.
-                          items:
-                            description: |-
-                              A label selector requirement is a selector that contains values, a key, and an operator that
-                              relates the key and values.
-                            properties:
-                              key:
-                                description: key is the label key that the selector
-                                  applies to.
-                                type: string
-                              operator:
-                                description: |-
-                                  operator represents a key's relationship to a set of values.
-                                  Valid operators are In, NotIn, Exists and DoesNotExist.
-                                type: string
-                              values:
-                                description: |-
-                                  values is an array of string values. If the operator is In or NotIn,
-                                  the values array must be non-empty. If the operator is Exists or DoesNotExist,
-                                  the values array must be empty. This array is replaced during a strategic
-                                  merge patch.
-                                items:
-                                  type: string
-                                type: array
-                                x-kubernetes-list-type: atomic
-                            required:
-                            - key
-                            - operator
-                            type: object
-                          type: array
-                          x-kubernetes-list-type: atomic
-                        matchLabels:
-                          additionalProperties:
-                            type: string
-                          description: |-
-                            matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
-                            map is equivalent to an element of matchExpressions, whose key field is "key", the
-                            operator is "In", and the values array contains only "value". The requirements are ANDed.
-                          type: object
-                      type: object
-                      x-kubernetes-map-type: atomic
-                    type: array
-                  names:
-                    description: |-
-                      A resource is selected if its metadata.name matches any of the provided names.
-                      In case when the list is empty, resource is matched too.
-                    items:
-                      type: string
-                    type: array
-                    x-kubernetes-list-type: set
-                type: object
-                x-kubernetes-validations:
-                - message: either names or a labelSelector must be specified
-                  rule: (has(self.labelSelectors) || has(self.names)) && (size(self.labelSelectors)
-                    > 0 || size(self.names) > 0)
-              externalSecretMetadata:
-                description: The metadata of the secret store to be created.
-                properties:
-                  annotations:
-                    additionalProperties:
-                      type: string
-                    description: Annotations that are set on projected resource.
-                    type: object
-                  labels:
-                    additionalProperties:
-                      type: string
-                    description: Labels that are set on projected resource.
-                    type: object
-                type: object
-              externalSecretName:
-                description: |-
-                  ExternalSecretName is the name to use when creating external secret within a control plane.
-                  optional, if not set, SharedExternalSecret name will be used.
-                  When set, it is immutable.
-                maxLength: 253
-                minLength: 1
-                type: string
-                x-kubernetes-validations:
-                - message: externalSecretName is immutable
-                  rule: self == oldSelf
-              externalSecretSpec:
-                description: The spec for the ExternalSecrets to be created.
-                properties:
-                  data:
-                    description: Data defines the connection between the Kubernetes
-                      Secret keys and the Provider data
-                    items:
-                      description: ExternalSecretData defines the connection between
-                        the Kubernetes Secret key (spec.data.) and the Provider
-                        data.
-                      properties:
-                        remoteRef:
-                          description: |-
-                            RemoteRef points to the remote secret and defines
-                            which secret (version/property/..) to fetch.
-                          properties:
-                            conversionStrategy:
-                              default: Default
-                              description: Used to define a conversion Strategy
-                              enum:
-                              - Default
-                              - Unicode
-                              type: string
-                            decodingStrategy:
-                              default: None
-                              description: Used to define a decoding Strategy
-                              enum:
-                              - Auto
-                              - Base64
-                              - Base64URL
-                              - None
-                              type: string
-                            key:
-                              description: Key is the key used in the Provider, mandatory
-                              type: string
-                            metadataPolicy:
-                              default: None
-                              description: Policy for fetching tags/labels from provider
-                                secrets, possible options are Fetch, None. Defaults
-                                to None
-                              enum:
-                              - None
-                              - Fetch
-                              type: string
-                            property:
-                              description: Used to select a specific property of the
-                                Provider value (if a map), if supported
-                              type: string
-                            version:
-                              description: Used to select a specific version of the
-                                Provider value, if supported
-                              type: string
-                          required:
-                          - key
-                          type: object
-                        secretKey:
-                          description: |-
-                            SecretKey defines the key in which the controller stores
-                            the value. This is the key in the Kind=Secret
-                          type: string
-                        sourceRef:
-                          description: |-
-                            SourceRef allows you to override the source
-                            from which the value will pulled from.
-                          maxProperties: 1
-                          properties:
-                            generatorRef:
-                              description: |-
-                                GeneratorRef points to a generator custom resource.
-
-                                Deprecated: The generatorRef is not implemented in .data[].
-                                this will be removed with v1.
-                              properties:
-                                apiVersion:
-                                  default: generators.external-secrets.io/v1alpha1
-                                  description: Specify the apiVersion of the generator
-                                    resource
-                                  type: string
-                                kind:
-                                  description: Specify the Kind of the resource, e.g.
-                                    Password, ACRAccessToken etc.
-                                  type: string
-                                name:
-                                  description: Specify the name of the generator resource
-                                  type: string
-                              required:
-                              - kind
-                              - name
-                              type: object
-                            storeRef:
-                              description: SecretStoreRef defines which SecretStore
-                                to fetch the ExternalSecret data.
-                              properties:
-                                kind:
-                                  description: |-
-                                    Kind of the SecretStore resource (SecretStore or ClusterSecretStore)
-                                    Defaults to `SecretStore`
-                                  type: string
-                                name:
-                                  description: Name of the SecretStore resource
-                                  type: string
-                              required:
-                              - name
-                              type: object
-                          type: object
-                      required:
-                      - remoteRef
-                      - secretKey
-                      type: object
-                    type: array
-                  dataFrom:
-                    description: |-
-                      DataFrom is used to fetch all properties from a specific Provider data
-                      If multiple entries are specified, the Secret keys are merged in the specified order
-                    items:
-                      properties:
-                        extract:
-                          description: |-
-                            Used to extract multiple key/value pairs from one secret
-                            Note: Extract does not support sourceRef.Generator or sourceRef.GeneratorRef.
-                          properties:
-                            conversionStrategy:
-                              default: Default
-                              description: Used to define a conversion Strategy
-                              enum:
-                              - Default
-                              - Unicode
-                              type: string
-                            decodingStrategy:
-                              default: None
-                              description: Used to define a decoding Strategy
-                              enum:
-                              - Auto
-                              - Base64
-                              - Base64URL
-                              - None
-                              type: string
-                            key:
-                              description: Key is the key used in the Provider, mandatory
-                              type: string
-                            metadataPolicy:
-                              default: None
-                              description: Policy for fetching tags/labels from provider
-                                secrets, possible options are Fetch, None. Defaults
-                                to None
-                              enum:
-                              - None
-                              - Fetch
-                              type: string
-                            property:
-                              description: Used to select a specific property of the
-                                Provider value (if a map), if supported
-                              type: string
-                            version:
-                              description: Used to select a specific version of the
-                                Provider value, if supported
-                              type: string
-                          required:
-                          - key
-                          type: object
-                        find:
-                          description: |-
-                            Used to find secrets based on tags or regular expressions
-                            Note: Find does not support sourceRef.Generator or sourceRef.GeneratorRef.
-                          properties:
-                            conversionStrategy:
-                              default: Default
-                              description: Used to define a conversion Strategy
-                              enum:
-                              - Default
-                              - Unicode
-                              type: string
-                            decodingStrategy:
-                              default: None
-                              description: Used to define a decoding Strategy
-                              enum:
-                              - Auto
-                              - Base64
-                              - Base64URL
-                              - None
-                              type: string
-                            name:
-                              description: Finds secrets based on the name.
-                              properties:
-                                regexp:
-                                  description: Finds secrets base
-                                  type: string
-                              type: object
-                            path:
-                              description: A root path to start the find operations.
-                              type: string
-                            tags:
-                              additionalProperties:
-                                type: string
-                              description: Find secrets based on tags.
-                              type: object
-                          type: object
-                        rewrite:
-                          description: |-
-                            Used to rewrite secret Keys after getting them from the secret Provider
-                            Multiple Rewrite operations can be provided. They are applied in a layered order (first to last)
-                          items:
-                            properties:
-                              regexp:
-                                description: |-
-                                  Used to rewrite with regular expressions.
-                                  The resulting key will be the output of a regexp.ReplaceAll operation.
-                                properties:
-                                  source:
-                                    description: Used to define the regular expression
-                                      of a re.Compiler.
-                                    type: string
-                                  target:
-                                    description: Used to define the target pattern
-                                      of a ReplaceAll operation.
-                                    type: string
-                                required:
-                                - source
-                                - target
-                                type: object
-                              transform:
-                                description: |-
-                                  Used to apply string transformation on the secrets.
-                                  The resulting key will be the output of the template applied by the operation.
-                                properties:
-                                  template:
-                                    description: |-
-                                      Used to define the template to apply on the secret name.
-                                      `.value ` will specify the secret name in the template.
-                                    type: string
-                                required:
-                                - template
-                                type: object
-                            type: object
-                          type: array
-                        sourceRef:
-                          description: |-
-                            SourceRef points to a store or generator
-                            which contains secret values ready to use.
-                            Use this in combination with Extract or Find pull values out of
-                            a specific SecretStore.
-                            When sourceRef points to a generator Extract or Find is not supported.
-                            The generator returns a static map of values
-                          maxProperties: 1
-                          properties:
-                            generatorRef:
-                              description: GeneratorRef points to a generator custom
-                                resource.
-                              properties:
-                                apiVersion:
-                                  default: generators.external-secrets.io/v1alpha1
-                                  description: Specify the apiVersion of the generator
-                                    resource
-                                  type: string
-                                kind:
-                                  description: Specify the Kind of the resource, e.g.
-                                    Password, ACRAccessToken etc.
-                                  type: string
-                                name:
-                                  description: Specify the name of the generator resource
-                                  type: string
-                              required:
-                              - kind
-                              - name
-                              type: object
-                            storeRef:
-                              description: SecretStoreRef defines which SecretStore
-                                to fetch the ExternalSecret data.
-                              properties:
-                                kind:
-                                  description: |-
-                                    Kind of the SecretStore resource (SecretStore or ClusterSecretStore)
-                                    Defaults to `SecretStore`
-                                  type: string
-                                name:
-                                  description: Name of the SecretStore resource
-                                  type: string
-                              required:
-                              - name
-                              type: object
-                          type: object
-                      type: object
-                    type: array
-                  refreshInterval:
-                    default: 1h
-                    description: |-
-                      RefreshInterval is the amount of time before the values are read again from the SecretStore provider
-                      Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h"
-                      May be set to zero to fetch and create it once. Defaults to 1h.
-                    type: string
-                  secretStoreRef:
-                    description: SecretStoreRef defines which SecretStore to fetch
-                      the ExternalSecret data.
-                    properties:
-                      kind:
-                        description: |-
-                          Kind of the SecretStore resource (SecretStore or ClusterSecretStore)
-                          Defaults to `SecretStore`
-                        type: string
-                      name:
-                        description: Name of the SecretStore resource
-                        type: string
-                    required:
-                    - name
-                    type: object
-                  target:
-                    default:
-                      creationPolicy: Owner
-                      deletionPolicy: Retain
-                    description: |-
-                      ExternalSecretTarget defines the Kubernetes Secret to be created
-                      There can be only one target per ExternalSecret.
-                    properties:
-                      creationPolicy:
-                        default: Owner
-                        description: |-
-                          CreationPolicy defines rules on how to create the resulting Secret
-                          Defaults to 'Owner'
-                        enum:
-                        - Owner
-                        - Orphan
-                        - Merge
-                        - None
-                        type: string
-                      deletionPolicy:
-                        default: Retain
-                        description: |-
-                          DeletionPolicy defines rules on how to delete the resulting Secret
-                          Defaults to 'Retain'
-                        enum:
-                        - Delete
-                        - Merge
-                        - Retain
-                        type: string
-                      immutable:
-                        description: Immutable defines if the final secret will be
-                          immutable
-                        type: boolean
-                      name:
-                        description: |-
-                          Name defines the name of the Secret resource to be managed
-                          This field is immutable
-                          Defaults to the .metadata.name of the ExternalSecret resource
-                        type: string
-                      template:
-                        description: Template defines a blueprint for the created
-                          Secret resource.
-                        properties:
-                          data:
-                            additionalProperties:
-                              type: string
-                            type: object
-                          engineVersion:
-                            default: v2
-                            description: |-
-                              EngineVersion specifies the template engine version
-                              that should be used to compile/execute the
-                              template specified in .data and .templateFrom[].
-                            enum:
-                            - v1
-                            - v2
-                            type: string
-                          mergePolicy:
-                            default: Replace
-                            enum:
-                            - Replace
-                            - Merge
-                            type: string
-                          metadata:
-                            description: ExternalSecretTemplateMetadata defines metadata
-                              fields for the Secret blueprint.
-                            properties:
-                              annotations:
-                                additionalProperties:
-                                  type: string
-                                type: object
-                              labels:
-                                additionalProperties:
-                                  type: string
-                                type: object
-                            type: object
-                          templateFrom:
-                            items:
-                              properties:
-                                configMap:
-                                  properties:
-                                    items:
-                                      items:
-                                        properties:
-                                          key:
-                                            type: string
-                                          templateAs:
-                                            default: Values
-                                            enum:
-                                            - Values
-                                            - KeysAndValues
-                                            type: string
-                                        required:
-                                        - key
-                                        type: object
-                                      type: array
-                                    name:
-                                      type: string
-                                  required:
-                                  - items
-                                  - name
-                                  type: object
-                                literal:
-                                  type: string
-                                secret:
-                                  properties:
-                                    items:
-                                      items:
-                                        properties:
-                                          key:
-                                            type: string
-                                          templateAs:
-                                            default: Values
-                                            enum:
-                                            - Values
-                                            - KeysAndValues
-                                            type: string
-                                        required:
-                                        - key
-                                        type: object
-                                      type: array
-                                    name:
-                                      type: string
-                                  required:
-                                  - items
-                                  - name
-                                  type: object
-                                target:
-                                  default: Data
-                                  enum:
-                                  - Data
-                                  - Annotations
-                                  - Labels
-                                  type: string
-                              type: object
-                            type: array
-                          type:
-                            type: string
-                        type: object
-                    type: object
-                type: object
-              namespaceSelector:
-                description: |-
-                  The projected secret can be consumed
-                  only within namespaces matching the provided selector.
-                  Either names or a labelSelector must be specified.
-                properties:
-                  labelSelectors:
-                    description: |-
-                      A resource is matched if any of the label selector matches.
-                      In case when the list is empty, resource is matched too.
-                    items:
-                      description: |-
-                        A label selector is a label query over a set of resources. The result of matchLabels and
-                        matchExpressions are ANDed. An empty label selector matches all objects. A null
-                        label selector matches no objects.
-                      properties:
-                        matchExpressions:
-                          description: matchExpressions is a list of label selector
-                            requirements. The requirements are ANDed.
-                          items:
-                            description: |-
-                              A label selector requirement is a selector that contains values, a key, and an operator that
-                              relates the key and values.
-                            properties:
-                              key:
-                                description: key is the label key that the selector
-                                  applies to.
-                                type: string
-                              operator:
-                                description: |-
-                                  operator represents a key's relationship to a set of values.
-                                  Valid operators are In, NotIn, Exists and DoesNotExist.
-                                type: string
-                              values:
-                                description: |-
-                                  values is an array of string values. If the operator is In or NotIn,
-                                  the values array must be non-empty. If the operator is Exists or DoesNotExist,
-                                  the values array must be empty. This array is replaced during a strategic
-                                  merge patch.
-                                items:
-                                  type: string
-                                type: array
-                                x-kubernetes-list-type: atomic
-                            required:
-                            - key
-                            - operator
-                            type: object
-                          type: array
-                          x-kubernetes-list-type: atomic
-                        matchLabels:
-                          additionalProperties:
-                            type: string
-                          description: |-
-                            matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
-                            map is equivalent to an element of matchExpressions, whose key field is "key", the
-                            operator is "In", and the values array contains only "value". The requirements are ANDed.
-                          type: object
-                      type: object
-                      x-kubernetes-map-type: atomic
-                    type: array
-                  names:
-                    description: |-
-                      A resource is selected if its metadata.name matches any of the provided names.
-                      In case when the list is empty, resource is matched too.
-                    items:
-                      type: string
-                    type: array
-                    x-kubernetes-list-type: set
-                type: object
-                x-kubernetes-validations:
-                - message: either names or a labelSelector must be specified
-                  rule: (has(self.labelSelectors) || has(self.names)) && (size(self.labelSelectors)
-                    > 0 || size(self.names) > 0)
-              refreshTime:
-                description: Used to configure secret refresh interval in seconds.
-                type: string
-            required:
-            - controlPlaneSelector
-            - externalSecretSpec
-            - namespaceSelector
-            type: object
-            x-kubernetes-validations:
-            - message: externalSecretName is immutable
-              rule: has(self.externalSecretName) == has(oldSelf.externalSecretName)
-          status:
-            description: SharedExternalSecretStatus defines the observed state of
-              the ExternalSecret.
-            properties:
-              failed:
-                description: list of provisioning failures.
-                items:
-                  description: |-
-                    SharedExternalSecretProvisioningFailure describes a external secret provisioning
-                    failure in a specific control plane.
-                  properties:
-                    conditions:
-                      description: List of conditions.
-                      items:
-                        properties:
-                          message:
-                            type: string
-                          status:
-                            type: string
-                          type:
-                            type: string
-                        required:
-                        - status
-                        - type
-                        type: object
-                      type: array
-                    controlPlane:
-                      description: ControlPlane name where the failure occurred.
-                      type: string
-                  required:
-                  - controlPlane
-                  type: object
-                type: array
-                x-kubernetes-list-map-keys:
-                - controlPlane
-                x-kubernetes-list-type: map
-              observedGeneration:
-                description: observed resource generation.
-                format: int64
-                type: integer
-              provisioned:
-                description: List of successfully provisioned targets.
-                items:
-                  description: SharedExternalSecretProvisioningSuccess defines external
-                    secret provisioning success.
-                  properties:
-                    controlPlane:
-                      description: ControlPlane name where the external secret got
-                        successfully projected.
-                      type: string
-                  required:
-                  - controlPlane
-                  type: object
-                type: array
-                x-kubernetes-list-map-keys:
-                - controlPlane
-                x-kubernetes-list-type: map
-            type: object
-        type: object
-    served: true
-    storage: true
-    subresources:
-      status: {}
diff --git a/static/crds/space/v1.10/spaces.upbound.io_sharedsecretstores.yaml b/static/crds/space/v1.10/spaces.upbound.io_sharedsecretstores.yaml
deleted file mode 100644
index 499a2208f..000000000
--- a/static/crds/space/v1.10/spaces.upbound.io_sharedsecretstores.yaml
+++ /dev/null
@@ -1,2702 +0,0 @@
----
-apiVersion: apiextensions.k8s.io/v1
-kind: CustomResourceDefinition
-metadata:
-  annotations:
-    controller-gen.kubebuilder.io/version: v0.16.3
-  name: sharedsecretstores.spaces.upbound.io
-spec:
-  group: spaces.upbound.io
-  names:
-    categories:
-    - externalsecrets
-    kind: SharedSecretStore
-    listKind: SharedSecretStoreList
-    plural: sharedsecretstores
-    shortNames:
-    - sss
-    singular: sharedsecretstore
-  scope: Namespaced
-  versions:
-  - additionalPrinterColumns:
-    - jsonPath: .metadata.annotations.sharedsecretstores\.internal\.spaces\.upbound\.io/provisioned-total
-      name: Provisioned
-      type: string
-    - jsonPath: .metadata.creationTimestamp
-      name: AGE
-      type: date
-    name: v1alpha1
-    schema:
-      openAPIV3Schema:
-        description: |-
-          SharedSecretStore represents a shared SecretStore projected as ClusterSecretStore
-          into matching ControlPlanes in the same namespace. Once projected into a ControlPlane,
-          it can be referenced from ExternalSecret instances, as part of `storeRef` fields.
-          The secret store configuration including referenced credential are not leaked into the
-          ControlPlanes and in that sense can be called secure as they are invisible to the
-          ControlPlane workloads.
-        properties:
-          apiVersion:
-            description: |-
-              APIVersion defines the versioned schema of this representation of an object.
-              Servers should convert recognized schemas to the latest internal value, and
-              may reject unrecognized values.
-              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
-            type: string
-          kind:
-            description: |-
-              Kind is a string value representing the REST resource this object represents.
-              Servers may infer this from the endpoint the client submits requests to.
-              Cannot be updated.
-              In CamelCase.
-              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
-            type: string
-          metadata:
-            type: object
-          spec:
-            description: SharedSecretStoreSpec defines the desired state of SecretStore.
-            properties:
-              controlPlaneSelector:
-                description: |-
-                  The store is projected only to control planes
-                  matching the provided selector. Either names or a labelSelector must be specified.
-                properties:
-                  labelSelectors:
-                    description: |-
-                      A resource is matched if any of the label selector matches.
-                      In case when the list is empty, resource is matched too.
-                    items:
-                      description: |-
-                        A label selector is a label query over a set of resources. The result of matchLabels and
-                        matchExpressions are ANDed. An empty label selector matches all objects. A null
-                        label selector matches no objects.
-                      properties:
-                        matchExpressions:
-                          description: matchExpressions is a list of label selector
-                            requirements. The requirements are ANDed.
-                          items:
-                            description: |-
-                              A label selector requirement is a selector that contains values, a key, and an operator that
-                              relates the key and values.
-                            properties:
-                              key:
-                                description: key is the label key that the selector
-                                  applies to.
-                                type: string
-                              operator:
-                                description: |-
-                                  operator represents a key's relationship to a set of values.
-                                  Valid operators are In, NotIn, Exists and DoesNotExist.
-                                type: string
-                              values:
-                                description: |-
-                                  values is an array of string values. If the operator is In or NotIn,
-                                  the values array must be non-empty. If the operator is Exists or DoesNotExist,
-                                  the values array must be empty. This array is replaced during a strategic
-                                  merge patch.
-                                items:
-                                  type: string
-                                type: array
-                                x-kubernetes-list-type: atomic
-                            required:
-                            - key
-                            - operator
-                            type: object
-                          type: array
-                          x-kubernetes-list-type: atomic
-                        matchLabels:
-                          additionalProperties:
-                            type: string
-                          description: |-
-                            matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
-                            map is equivalent to an element of matchExpressions, whose key field is "key", the
-                            operator is "In", and the values array contains only "value". The requirements are ANDed.
-                          type: object
-                      type: object
-                      x-kubernetes-map-type: atomic
-                    type: array
-                  names:
-                    description: |-
-                      A resource is selected if its metadata.name matches any of the provided names.
-                      In case when the list is empty, resource is matched too.
-                    items:
-                      type: string
-                    type: array
-                    x-kubernetes-list-type: set
-                type: object
-                x-kubernetes-validations:
-                - message: either names or a labelSelector must be specified
-                  rule: (has(self.labelSelectors) || has(self.names)) && (size(self.labelSelectors)
-                    > 0 || size(self.names) > 0)
-              namespaceSelector:
-                description: |-
-                  The projected secret store can be consumed
-                  only within namespaces matching the provided selector.
-                  Either names or a labelSelector must be specified.
-                properties:
-                  labelSelectors:
-                    description: |-
-                      A resource is matched if any of the label selector matches.
-                      In case when the list is empty, resource is matched too.
-                    items:
-                      description: |-
-                        A label selector is a label query over a set of resources. The result of matchLabels and
-                        matchExpressions are ANDed. An empty label selector matches all objects. A null
-                        label selector matches no objects.
-                      properties:
-                        matchExpressions:
-                          description: matchExpressions is a list of label selector
-                            requirements. The requirements are ANDed.
-                          items:
-                            description: |-
-                              A label selector requirement is a selector that contains values, a key, and an operator that
-                              relates the key and values.
-                            properties:
-                              key:
-                                description: key is the label key that the selector
-                                  applies to.
-                                type: string
-                              operator:
-                                description: |-
-                                  operator represents a key's relationship to a set of values.
-                                  Valid operators are In, NotIn, Exists and DoesNotExist.
-                                type: string
-                              values:
-                                description: |-
-                                  values is an array of string values. If the operator is In or NotIn,
-                                  the values array must be non-empty. If the operator is Exists or DoesNotExist,
-                                  the values array must be empty. This array is replaced during a strategic
-                                  merge patch.
-                                items:
-                                  type: string
-                                type: array
-                                x-kubernetes-list-type: atomic
-                            required:
-                            - key
-                            - operator
-                            type: object
-                          type: array
-                          x-kubernetes-list-type: atomic
-                        matchLabels:
-                          additionalProperties:
-                            type: string
-                          description: |-
-                            matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
-                            map is equivalent to an element of matchExpressions, whose key field is "key", the
-                            operator is "In", and the values array contains only "value". The requirements are ANDed.
-                          type: object
-                      type: object
-                      x-kubernetes-map-type: atomic
-                    type: array
-                  names:
-                    description: |-
-                      A resource is selected if its metadata.name matches any of the provided names.
-                      In case when the list is empty, resource is matched too.
-                    items:
-                      type: string
-                    type: array
-                    x-kubernetes-list-type: set
-                type: object
-                x-kubernetes-validations:
-                - message: either names or a labelSelector must be specified
-                  rule: (has(self.labelSelectors) || has(self.names)) && (size(self.labelSelectors)
-                    > 0 || size(self.names) > 0)
-              provider:
-                description: Used to configure the provider. Only one provider may
-                  be set.
-                maxProperties: 1
-                minProperties: 1
-                properties:
-                  akeyless:
-                    description: Akeyless configures this store to sync secrets using
-                      Akeyless Vault provider
-                    properties:
-                      akeylessGWApiURL:
-                        description: Akeyless GW API Url from which the secrets to
-                          be fetched from.
-                        type: string
-                      authSecretRef:
-                        description: Auth configures how the operator authenticates
-                          with Akeyless.
-                        properties:
-                          kubernetesAuth:
-                            description: |-
-                              Kubernetes authenticates with Akeyless by passing the ServiceAccount
-                              token stored in the named Secret resource.
-                            properties:
-                              accessID:
-                                description: the Akeyless Kubernetes auth-method access-id
-                                type: string
-                              k8sConfName:
-                                description: Kubernetes-auth configuration name in
-                                  Akeyless-Gateway
-                                type: string
-                              secretRef:
-                                description: |-
-                                  Optional secret field containing a Kubernetes ServiceAccount JWT used
-                                  for authenticating with Akeyless. If a name is specified without a key,
-                                  `token` is the default. If one is not specified, the one bound to
-                                  the controller will be used.
-                                properties:
-                                  key:
-                                    description: |-
-                                      The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be
-                                      defaulted, in others it may be required.
-                                    type: string
-                                  name:
-                                    description: The name of the Secret resource being
-                                      referred to.
-                                    type: string
-                                  namespace:
-                                    description: |-
-                                      Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults
-                                      to the namespace of the referent.
-                                    type: string
-                                type: object
-                              serviceAccountRef:
-                                description: |-
-                                  Optional service account field containing the name of a kubernetes ServiceAccount.
-                                  If the service account is specified, the service account secret token JWT will be used
-                                  for authenticating with Akeyless. If the service account selector is not supplied,
-                                  the secretRef will be used instead.
-                                properties:
-                                  audiences:
-                                    description: |-
-                                      Audience specifies the `aud` claim for the service account token
-                                      If the service account uses a well-known annotation for e.g. IRSA or GCP Workload Identity
-                                      then this audiences will be appended to the list
-                                    items:
-                                      type: string
-                                    type: array
-                                  name:
-                                    description: The name of the ServiceAccount resource
-                                      being referred to.
-                                    type: string
-                                  namespace:
-                                    description: |-
-                                      Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults
-                                      to the namespace of the referent.
-                                    type: string
-                                required:
-                                - name
-                                type: object
-                            required:
-                            - accessID
-                            - k8sConfName
-                            type: object
-                          secretRef:
-                            description: |-
-                              Reference to a Secret that contains the details
-                              to authenticate with Akeyless.
-                            properties:
-                              accessID:
-                                description: The SecretAccessID is used for authentication
-                                properties:
-                                  key:
-                                    description: |-
-                                      The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be
-                                      defaulted, in others it may be required.
-                                    type: string
-                                  name:
-                                    description: The name of the Secret resource being
-                                      referred to.
-                                    type: string
-                                  namespace:
-                                    description: |-
-                                      Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults
-                                      to the namespace of the referent.
-                                    type: string
-                                type: object
-                              accessType:
-                                description: |-
-                                  A reference to a specific 'key' within a Secret resource,
-                                  In some instances, `key` is a required field.
-                                properties:
-                                  key:
-                                    description: |-
-                                      The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be
-                                      defaulted, in others it may be required.
-                                    type: string
-                                  name:
-                                    description: The name of the Secret resource being
-                                      referred to.
-                                    type: string
-                                  namespace:
-                                    description: |-
-                                      Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults
-                                      to the namespace of the referent.
-                                    type: string
-                                type: object
-                              accessTypeParam:
-                                description: |-
-                                  A reference to a specific 'key' within a Secret resource,
-                                  In some instances, `key` is a required field.
-                                properties:
-                                  key:
-                                    description: |-
-                                      The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be
-                                      defaulted, in others it may be required.
-                                    type: string
-                                  name:
-                                    description: The name of the Secret resource being
-                                      referred to.
-                                    type: string
-                                  namespace:
-                                    description: |-
-                                      Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults
-                                      to the namespace of the referent.
-                                    type: string
-                                type: object
-                            type: object
-                        type: object
-                      caBundle:
-                        description: |-
-                          PEM/base64 encoded CA bundle used to validate Akeyless Gateway certificate. Only used
-                          if the AkeylessGWApiURL URL is using HTTPS protocol. If not set the system root certificates
-                          are used to validate the TLS connection.
-                        format: byte
-                        type: string
-                      caProvider:
-                        description: The provider for the CA bundle to use to validate
-                          Akeyless Gateway certificate.
-                        properties:
-                          key:
-                            description: The key where the CA certificate can be found
-                              in the Secret or ConfigMap.
-                            type: string
-                          name:
-                            description: The name of the object located at the provider
-                              type.
-                            type: string
-                          namespace:
-                            description: |-
-                              The namespace the Provider type is in.
-                              Can only be defined when used in a ClusterSecretStore.
-                            type: string
-                          type:
-                            description: The type of provider to use such as "Secret",
-                              or "ConfigMap".
-                            enum:
-                            - Secret
-                            - ConfigMap
-                            type: string
-                        required:
-                        - name
-                        - type
-                        type: object
-                    required:
-                    - akeylessGWApiURL
-                    - authSecretRef
-                    type: object
-                  alibaba:
-                    description: Alibaba configures this store to sync secrets using
-                      Alibaba Cloud provider
-                    properties:
-                      auth:
-                        description: AlibabaAuth contains a secretRef for credentials.
-                        properties:
-                          rrsa:
-                            description: Authenticate against Alibaba using RRSA.
-                            properties:
-                              oidcProviderArn:
-                                type: string
-                              oidcTokenFilePath:
-                                type: string
-                              roleArn:
-                                type: string
-                              sessionName:
-                                type: string
-                            required:
-                            - oidcProviderArn
-                            - oidcTokenFilePath
-                            - roleArn
-                            - sessionName
-                            type: object
-                          secretRef:
-                            description: AlibabaAuthSecretRef holds secret references
-                              for Alibaba credentials.
-                            properties:
-                              accessKeyIDSecretRef:
-                                description: The AccessKeyID is used for authentication
-                                properties:
-                                  key:
-                                    description: |-
-                                      The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be
-                                      defaulted, in others it may be required.
-                                    type: string
-                                  name:
-                                    description: The name of the Secret resource being
-                                      referred to.
-                                    type: string
-                                  namespace:
-                                    description: |-
-                                      Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults
-                                      to the namespace of the referent.
-                                    type: string
-                                type: object
-                              accessKeySecretSecretRef:
-                                description: The AccessKeySecret is used for authentication
-                                properties:
-                                  key:
-                                    description: |-
-                                      The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be
-                                      defaulted, in others it may be required.
-                                    type: string
-                                  name:
-                                    description: The name of the Secret resource being
-                                      referred to.
-                                    type: string
-                                  namespace:
-                                    description: |-
-                                      Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults
-                                      to the namespace of the referent.
-                                    type: string
-                                type: object
-                            required:
-                            - accessKeyIDSecretRef
-                            - accessKeySecretSecretRef
-                            type: object
-                        type: object
-                      regionID:
-                        description: Alibaba Region to be used for the provider
-                        type: string
-                    required:
-                    - auth
-                    - regionID
-                    type: object
-                  aws:
-                    description: AWS configures this store to sync secrets using AWS
-                      Secret Manager provider
-                    properties:
-                      additionalRoles:
-                        description: AdditionalRoles is a chained list of Role ARNs
-                          which the provider will sequentially assume before assuming
-                          the Role
-                        items:
-                          type: string
-                        type: array
-                      auth:
-                        description: |-
-                          Auth defines the information necessary to authenticate against AWS
-                          if not set aws sdk will infer credentials from your environment
-                          see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials
-                        properties:
-                          jwt:
-                            description: Authenticate against AWS using service account
-                              tokens.
-                            properties:
-                              serviceAccountRef:
-                                description: A reference to a ServiceAccount resource.
-                                properties:
-                                  audiences:
-                                    description: |-
-                                      Audience specifies the `aud` claim for the service account token
-                                      If the service account uses a well-known annotation for e.g. IRSA or GCP Workload Identity
-                                      then this audiences will be appended to the list
-                                    items:
-                                      type: string
-                                    type: array
-                                  name:
-                                    description: The name of the ServiceAccount resource
-                                      being referred to.
-                                    type: string
-                                  namespace:
-                                    description: |-
-                                      Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults
-                                      to the namespace of the referent.
-                                    type: string
-                                required:
-                                - name
-                                type: object
-                            type: object
-                          secretRef:
-                            description: |-
-                              AWSAuthSecretRef holds secret references for AWS credentials
-                              both AccessKeyID and SecretAccessKey must be defined in order to properly authenticate.
-                            properties:
-                              accessKeyIDSecretRef:
-                                description: The AccessKeyID is used for authentication
-                                properties:
-                                  key:
-                                    description: |-
-                                      The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be
-                                      defaulted, in others it may be required.
-                                    type: string
-                                  name:
-                                    description: The name of the Secret resource being
-                                      referred to.
-                                    type: string
-                                  namespace:
-                                    description: |-
-                                      Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults
-                                      to the namespace of the referent.
- type: string - type: object - secretAccessKeySecretRef: - description: The SecretAccessKey is used for authentication - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - sessionTokenSecretRef: - description: |- - The SessionToken used for authentication - This must be defined if AccessKeyID and SecretAccessKey are temporary credentials - see: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - type: object - type: object - externalID: - description: AWS External ID set on assumed IAM roles - type: string - region: - description: AWS Region to be used for the provider - type: string - role: - description: Role is a Role ARN which the provider will assume - type: string - secretsManager: - description: SecretsManager defines how the provider behaves - when interacting with AWS SecretsManager - properties: - forceDeleteWithoutRecovery: - description: |- - Specifies whether to delete the secret without any recovery window. You - can't use both this parameter and RecoveryWindowInDays in the same call. - If you don't use either, then by default Secrets Manager uses a 30 day - recovery window. - see: https://docs.aws.amazon.com/secretsmanager/latest/apireference/API_DeleteSecret.html#SecretsManager-DeleteSecret-request-ForceDeleteWithoutRecovery - type: boolean - recoveryWindowInDays: - description: |- - The number of days from 7 to 30 that Secrets Manager waits before - permanently deleting the secret. You can't use both this parameter and - ForceDeleteWithoutRecovery in the same call. If you don't use either, - then by default Secrets Manager uses a 30 day recovery window. - see: https://docs.aws.amazon.com/secretsmanager/latest/apireference/API_DeleteSecret.html#SecretsManager-DeleteSecret-request-RecoveryWindowInDays - format: int64 - type: integer - type: object - service: - description: Service defines which service should be used - to fetch the secrets - enum: - - SecretsManager - - ParameterStore - type: string - sessionTags: - description: AWS STS assume role session tags - items: - properties: - key: - type: string - value: - type: string - required: - - key - - value - type: object - type: array - transitiveTagKeys: - description: AWS STS assume role transitive session tags. 
- Required when multiple rules are used with the provider - items: - type: string - type: array - required: - - region - - service - type: object - azurekv: - description: AzureKV configures this store to sync secrets using - Azure Key Vault provider - properties: - authSecretRef: - description: Auth configures how the operator authenticates - with Azure. Required for ServicePrincipal auth type. - properties: - clientId: - description: The Azure clientId of the service principle - used for authentication. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - clientSecret: - description: The Azure ClientSecret of the service principle - used for authentication. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - type: object - authType: - default: ServicePrincipal - description: |- - Auth type defines how to authenticate to the keyvault service. - Valid values are: - - "ServicePrincipal" (default): Using a service principal (tenantId, clientId, clientSecret) - - "ManagedIdentity": Using Managed Identity assigned to the pod (see aad-pod-identity) - enum: - - ServicePrincipal - - ManagedIdentity - - WorkloadIdentity - type: string - environmentType: - default: PublicCloud - description: |- - EnvironmentType specifies the Azure cloud environment endpoints to use for - connecting and authenticating with Azure. By default it points to the public cloud AAD endpoint. - The following endpoints are available, also see here: https://github.com/Azure/go-autorest/blob/main/autorest/azure/environments.go#L152 - PublicCloud, USGovernmentCloud, ChinaCloud, GermanCloud - enum: - - PublicCloud - - USGovernmentCloud - - ChinaCloud - - GermanCloud - type: string - identityId: - description: If multiple Managed Identity is assigned to the - pod, you can select the one to be used - type: string - serviceAccountRef: - description: |- - ServiceAccountRef specified the service account - that should be used when authenticating with WorkloadIdentity. - properties: - audiences: - description: |- - Audience specifies the `aud` claim for the service account token - If the service account uses a well-known annotation for e.g. IRSA or GCP Workload Identity - then this audiences will be appended to the list - items: - type: string - type: array - name: - description: The name of the ServiceAccount resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. 
- type: string - required: - - name - type: object - tenantId: - description: TenantID configures the Azure Tenant to send - requests to. Required for ServicePrincipal auth type. - type: string - vaultUrl: - description: Vault Url from which the secrets to be fetched - from. - type: string - required: - - vaultUrl - type: object - conjur: - description: Conjur configures this store to sync secrets using - conjur provider - properties: - auth: - properties: - apikey: - properties: - account: - type: string - apiKeyRef: - description: |- - A reference to a specific 'key' within a Secret resource, - In some instances, `key` is a required field. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - userRef: - description: |- - A reference to a specific 'key' within a Secret resource, - In some instances, `key` is a required field. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - required: - - account - - apiKeyRef - - userRef - type: object - jwt: - properties: - account: - type: string - secretRef: - description: |- - Optional SecretRef that refers to a key in a Secret resource containing JWT token to - authenticate with Conjur using the JWT authentication method. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - serviceAccountRef: - description: |- - Optional ServiceAccountRef specifies the Kubernetes service account for which to request - a token for with the `TokenRequest` API. - properties: - audiences: - description: |- - Audience specifies the `aud` claim for the service account token - If the service account uses a well-known annotation for e.g. IRSA or GCP Workload Identity - then this audiences will be appended to the list - items: - type: string - type: array - name: - description: The name of the ServiceAccount resource - being referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. 
- type: string - required: - - name - type: object - serviceID: - description: The conjur authn jwt webservice id - type: string - required: - - account - - serviceID - type: object - type: object - caBundle: - type: string - caProvider: - description: |- - Used to provide custom certificate authority (CA) certificates - for a secret store. The CAProvider points to a Secret or ConfigMap resource - that contains a PEM-encoded certificate. - properties: - key: - description: The key where the CA certificate can be found - in the Secret or ConfigMap. - type: string - name: - description: The name of the object located at the provider - type. - type: string - namespace: - description: |- - The namespace the Provider type is in. - Can only be defined when used in a ClusterSecretStore. - type: string - type: - description: The type of provider to use such as "Secret", - or "ConfigMap". - enum: - - Secret - - ConfigMap - type: string - required: - - name - - type - type: object - url: - type: string - required: - - auth - - url - type: object - delinea: - description: |- - Delinea DevOps Secrets Vault - https://docs.delinea.com/online-help/products/devops-secrets-vault/current - properties: - clientId: - description: ClientID is the non-secret part of the credential. - properties: - secretRef: - description: SecretRef references a key in a secret that - will be used as value. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - value: - description: Value can be specified directly to set a - value without using a secret. - type: string - type: object - clientSecret: - description: ClientSecret is the secret part of the credential. - properties: - secretRef: - description: SecretRef references a key in a secret that - will be used as value. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - value: - description: Value can be specified directly to set a - value without using a secret. - type: string - type: object - tenant: - description: Tenant is the chosen hostname / site name. - type: string - tld: - description: |- - TLD is based on the server location that was chosen during provisioning. - If unset, defaults to "com". - type: string - urlTemplate: - description: |- - URLTemplate - If unset, defaults to "https://%s.secretsvaultcloud.%s/v1/%s%s". 
- type: string - required: - - clientId - - clientSecret - - tenant - type: object - doppler: - description: Doppler configures this store to sync secrets using - the Doppler provider - properties: - auth: - description: Auth configures how the Operator authenticates - with the Doppler API - properties: - secretRef: - properties: - dopplerToken: - description: |- - The DopplerToken is used for authentication. - See https://docs.doppler.com/reference/api#authentication for auth token types. - The Key attribute defaults to dopplerToken if not specified. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - required: - - dopplerToken - type: object - required: - - secretRef - type: object - config: - description: Doppler config (required if not using a Service - Token) - type: string - format: - description: Format enables the downloading of secrets as - a file (string) - enum: - - json - - dotnet-json - - env - - yaml - - docker - type: string - nameTransformer: - description: Environment variable compatible name transforms - that change secret names to a different format - enum: - - upper-camel - - camel - - lower-snake - - tf-var - - dotnet-env - - lower-kebab - type: string - project: - description: Doppler project (required if not using a Service - Token) - type: string - required: - - auth - type: object - fake: - description: Fake configures a store with static key/value pairs - properties: - data: - items: - properties: - key: - type: string - value: - type: string - valueMap: - additionalProperties: - type: string - description: 'Deprecated: ValueMap is deprecated and - is intended to be removed in the future, use the `value` - field instead.' - type: object - version: - type: string - required: - - key - type: object - type: array - required: - - data - type: object - gcpsm: - description: GCPSM configures this store to sync secrets using - Google Cloud Platform Secret Manager provider - properties: - auth: - description: Auth defines the information necessary to authenticate - against GCP - properties: - secretRef: - properties: - secretAccessKeySecretRef: - description: The SecretAccessKey is used for authentication - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - type: object - workloadIdentity: - properties: - clusterLocation: - type: string - clusterName: - type: string - clusterProjectID: - type: string - serviceAccountRef: - description: A reference to a ServiceAccount resource. - properties: - audiences: - description: |- - Audience specifies the `aud` claim for the service account token - If the service account uses a well-known annotation for e.g. 
IRSA or GCP Workload Identity - then this audiences will be appended to the list - items: - type: string - type: array - name: - description: The name of the ServiceAccount resource - being referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - required: - - name - type: object - required: - - clusterLocation - - clusterName - - serviceAccountRef - type: object - type: object - projectID: - description: ProjectID project where secret is located - type: string - type: object - gitlab: - description: GitLab configures this store to sync secrets using - GitLab Variables provider - properties: - auth: - description: Auth configures how secret-manager authenticates - with a GitLab instance. - properties: - SecretRef: - properties: - accessToken: - description: AccessToken is used for authentication. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - type: object - required: - - SecretRef - type: object - environment: - description: Environment environment_scope of gitlab CI/CD - variables (Please see https://docs.gitlab.com/ee/ci/environments/#create-a-static-environment - on how to create environments) - type: string - groupIDs: - description: GroupIDs specify, which gitlab groups to pull - secrets from. Group secrets are read from left to right - followed by the project variables. - items: - type: string - type: array - inheritFromGroups: - description: InheritFromGroups specifies whether parent groups - should be discovered and checked for secrets. - type: boolean - projectID: - description: ProjectID specifies a project where secrets are - located. - type: string - url: - description: URL configures the GitLab instance URL. Defaults - to https://gitlab.com/. - type: string - required: - - auth - type: object - ibm: - description: IBM configures this store to sync secrets using IBM - Cloud provider - properties: - auth: - description: Auth configures how secret-manager authenticates - with the IBM secrets manager. - maxProperties: 1 - minProperties: 1 - properties: - containerAuth: - description: IBM Container-based auth with IAM Trusted - Profile. - properties: - iamEndpoint: - type: string - profile: - description: the IBM Trusted Profile - type: string - tokenLocation: - description: Location the token is mounted on the - pod - type: string - required: - - profile - type: object - secretRef: - properties: - secretApiKeySecretRef: - description: The SecretAccessKey is used for authentication - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. 
- type: string - type: object - type: object - type: object - serviceUrl: - description: ServiceURL is the Endpoint URL that is specific - to the Secrets Manager service instance - type: string - required: - - auth - type: object - keepersecurity: - description: KeeperSecurity configures this store to sync secrets - using the KeeperSecurity provider - properties: - authRef: - description: |- - A reference to a specific 'key' within a Secret resource, - In some instances, `key` is a required field. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being referred - to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - folderID: - type: string - required: - - authRef - - folderID - type: object - kubernetes: - description: Kubernetes configures this store to sync secrets - using a Kubernetes cluster provider - properties: - auth: - description: Auth configures how secret-manager authenticates - with a Kubernetes instance. - maxProperties: 1 - minProperties: 1 - properties: - cert: - description: has both clientCert and clientKey as secretKeySelector - properties: - clientCert: - description: |- - A reference to a specific 'key' within a Secret resource, - In some instances, `key` is a required field. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - clientKey: - description: |- - A reference to a specific 'key' within a Secret resource, - In some instances, `key` is a required field. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - type: object - serviceAccount: - description: points to a service account that should be - used for authentication - properties: - audiences: - description: |- - Audience specifies the `aud` claim for the service account token - If the service account uses a well-known annotation for e.g. IRSA or GCP Workload Identity - then this audiences will be appended to the list - items: - type: string - type: array - name: - description: The name of the ServiceAccount resource - being referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. 
- type: string - required: - - name - type: object - token: - description: use static token to authenticate with - properties: - bearerToken: - description: |- - A reference to a specific 'key' within a Secret resource, - In some instances, `key` is a required field. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - type: object - type: object - remoteNamespace: - default: default - description: Remote namespace to fetch the secrets from - type: string - server: - description: configures the Kubernetes server Address. - properties: - caBundle: - description: CABundle is a base64-encoded CA certificate - format: byte - type: string - caProvider: - description: 'see: https://external-secrets.io/v0.4.1/spec/#external-secrets.io/v1alpha1.CAProvider' - properties: - key: - description: The key where the CA certificate can - be found in the Secret or ConfigMap. - type: string - name: - description: The name of the object located at the - provider type. - type: string - namespace: - description: |- - The namespace the Provider type is in. - Can only be defined when used in a ClusterSecretStore. - type: string - type: - description: The type of provider to use such as "Secret", - or "ConfigMap". - enum: - - Secret - - ConfigMap - type: string - required: - - name - - type - type: object - url: - default: kubernetes.default - description: configures the Kubernetes server Address. - type: string - type: object - required: - - auth - type: object - onepassword: - description: OnePassword configures this store to sync secrets - using the 1Password Cloud provider - properties: - auth: - description: Auth defines the information necessary to authenticate - against OnePassword Connect Server - properties: - secretRef: - description: OnePasswordAuthSecretRef holds secret references - for 1Password credentials. - properties: - connectTokenSecretRef: - description: The ConnectToken is used for authentication - to a 1Password Connect Server. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - required: - - connectTokenSecretRef - type: object - required: - - secretRef - type: object - connectHost: - description: ConnectHost defines the OnePassword Connect Server - to connect to - type: string - vaults: - additionalProperties: - type: integer - description: Vaults defines which OnePassword vaults to search - in which order - type: object - required: - - auth - - connectHost - - vaults - type: object - oracle: - description: Oracle configures this store to sync secrets using - Oracle Vault provider - properties: - auth: - description: |- - Auth configures how secret-manager authenticates with the Oracle Vault. 
- If empty, use the instance principal, otherwise the user credentials specified in Auth. - properties: - secretRef: - description: SecretRef to pass through sensitive information. - properties: - fingerprint: - description: Fingerprint is the fingerprint of the - API private key. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - privatekey: - description: PrivateKey is the user's API Signing - Key in PEM format, used for authentication. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - required: - - fingerprint - - privatekey - type: object - tenancy: - description: Tenancy is the tenancy OCID where user is - located. - type: string - user: - description: User is an access OCID specific to the account. - type: string - required: - - secretRef - - tenancy - - user - type: object - compartment: - description: |- - Compartment is the vault compartment OCID. - Required for PushSecret - type: string - encryptionKey: - description: |- - EncryptionKey is the OCID of the encryption key within the vault. - Required for PushSecret - type: string - principalType: - description: |- - The type of principal to use for authentication. If left blank, the Auth struct will - determine the principal type. This optional field must be specified if using - workload identity. - enum: - - "" - - UserPrincipal - - InstancePrincipal - - Workload - type: string - region: - description: Region is the region where vault is located. - type: string - serviceAccountRef: - description: |- - ServiceAccountRef specified the service account - that should be used when authenticating with WorkloadIdentity. - properties: - audiences: - description: |- - Audience specifies the `aud` claim for the service account token - If the service account uses a well-known annotation for e.g. IRSA or GCP Workload Identity - then this audiences will be appended to the list - items: - type: string - type: array - name: - description: The name of the ServiceAccount resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - required: - - name - type: object - vault: - description: Vault is the vault's OCID of the specific vault - where secret is located. - type: string - required: - - region - - vault - type: object - scaleway: - description: Scaleway - properties: - accessKey: - description: AccessKey is the non-secret part of the api key. - properties: - secretRef: - description: SecretRef references a key in a secret that - will be used as value. 
- properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - value: - description: Value can be specified directly to set a - value without using a secret. - type: string - type: object - apiUrl: - description: APIURL is the url of the api to use. Defaults - to https://api.scaleway.com - type: string - projectId: - description: 'ProjectID is the id of your project, which you - can find in the console: https://console.scaleway.com/project/settings' - type: string - region: - description: 'Region where your secrets are located: https://developers.scaleway.com/en/quickstart/#region-and-zone' - type: string - secretKey: - description: SecretKey is the non-secret part of the api key. - properties: - secretRef: - description: SecretRef references a key in a secret that - will be used as value. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - value: - description: Value can be specified directly to set a - value without using a secret. - type: string - type: object - required: - - accessKey - - projectId - - region - - secretKey - type: object - senhasegura: - description: Senhasegura configures this store to sync secrets - using senhasegura provider - properties: - auth: - description: Auth defines parameters to authenticate in senhasegura - properties: - clientId: - type: string - clientSecretSecretRef: - description: |- - A reference to a specific 'key' within a Secret resource, - In some instances, `key` is a required field. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - required: - - clientId - - clientSecretSecretRef - type: object - ignoreSslCertificate: - default: false - description: IgnoreSslCertificate defines if SSL certificate - must be ignored - type: boolean - module: - description: Module defines which senhasegura module should - be used to get secrets - type: string - url: - description: URL of senhasegura - type: string - required: - - auth - - module - - url - type: object - upboundspaces: - description: UpboundProvider configures a store to sync secrets - with Upbound Spaces. 
- properties: - storeRef: - description: StoreRef holds ref to Upbound Spaces secret store - properties: - name: - description: Name of the secret store on Upbound Spaces - type: string - required: - - name - type: object - required: - - storeRef - type: object - vault: - description: Vault configures this store to sync secrets using - Hashi provider - properties: - auth: - description: Auth configures how secret-manager authenticates - with the Vault server. - properties: - appRole: - description: |- - AppRole authenticates with Vault using the App Role auth mechanism, - with the role and secret stored in a Kubernetes Secret resource. - properties: - path: - default: approle - description: |- - Path where the App Role authentication backend is mounted - in Vault, e.g: "approle" - type: string - roleId: - description: |- - RoleID configured in the App Role authentication backend when setting - up the authentication backend in Vault. - type: string - roleRef: - description: |- - Reference to a key in a Secret that contains the App Role ID used - to authenticate with Vault. - The `key` field must be specified and denotes which entry within the Secret - resource is used as the app role id. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - secretRef: - description: |- - Reference to a key in a Secret that contains the App Role secret used - to authenticate with Vault. - The `key` field must be specified and denotes which entry within the Secret - resource is used as the app role secret. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - required: - - path - - secretRef - type: object - cert: - description: |- - Cert authenticates with TLS Certificates by passing client certificate, private key and ca certificate - Cert authentication method - properties: - clientCert: - description: |- - ClientCert is a certificate to authenticate using the Cert Vault - authentication method - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. 
- type: string - type: object - secretRef: - description: |- - SecretRef to a key in a Secret resource containing client private key to - authenticate with Vault using the Cert authentication method - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - type: object - iam: - description: |- - Iam authenticates with vault by passing a special AWS request signed with AWS IAM credentials - AWS IAM authentication method - properties: - externalID: - description: AWS External ID set on assumed IAM roles - type: string - jwt: - description: Specify a service account with IRSA enabled - properties: - serviceAccountRef: - description: A reference to a ServiceAccount resource. - properties: - audiences: - description: |- - Audience specifies the `aud` claim for the service account token - If the service account uses a well-known annotation for e.g. IRSA or GCP Workload Identity - then this audiences will be appended to the list - items: - type: string - type: array - name: - description: The name of the ServiceAccount - resource being referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - required: - - name - type: object - type: object - path: - description: 'Path where the AWS auth method is enabled - in Vault, e.g: "aws"' - type: string - region: - description: AWS region - type: string - role: - description: This is the AWS role to be assumed before - talking to vault - type: string - secretRef: - description: Specify credentials in a Secret object - properties: - accessKeyIDSecretRef: - description: The AccessKeyID is used for authentication - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource - being referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - secretAccessKeySecretRef: - description: The SecretAccessKey is used for authentication - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource - being referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. 
- type: string - type: object - sessionTokenSecretRef: - description: |- - The SessionToken used for authentication - This must be defined if AccessKeyID and SecretAccessKey are temporary credentials - see: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource - being referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - type: object - vaultAwsIamServerID: - description: 'X-Vault-AWS-IAM-Server-ID is an additional - header used by Vault IAM auth method to mitigate - against different types of replay attacks. More - details here: https://developer.hashicorp.com/vault/docs/auth/aws' - type: string - vaultRole: - description: Vault Role. In vault, a role describes - an identity with a set of permissions, groups, or - policies you want to attach a user of the secrets - engine - type: string - required: - - vaultRole - type: object - jwt: - description: |- - Jwt authenticates with Vault by passing role and JWT token using the - JWT/OIDC authentication method - properties: - kubernetesServiceAccountToken: - description: |- - Optional ServiceAccountToken specifies the Kubernetes service account for which to request - a token for with the `TokenRequest` API. - properties: - audiences: - description: |- - Optional audiences field that will be used to request a temporary Kubernetes service - account token for the service account referenced by `serviceAccountRef`. - Defaults to a single audience `vault` it not specified. - Deprecated: use serviceAccountRef.Audiences instead - items: - type: string - type: array - expirationSeconds: - description: |- - Optional expiration time in seconds that will be used to request a temporary - Kubernetes service account token for the service account referenced by - `serviceAccountRef`. - Deprecated: this will be removed in the future. - Defaults to 10 minutes. - format: int64 - type: integer - serviceAccountRef: - description: Service account field containing - the name of a kubernetes ServiceAccount. - properties: - audiences: - description: |- - Audience specifies the `aud` claim for the service account token - If the service account uses a well-known annotation for e.g. IRSA or GCP Workload Identity - then this audiences will be appended to the list - items: - type: string - type: array - name: - description: The name of the ServiceAccount - resource being referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. 
- type: string - required: - - name - type: object - required: - - serviceAccountRef - type: object - path: - default: jwt - description: |- - Path where the JWT authentication backend is mounted - in Vault, e.g: "jwt" - type: string - role: - description: |- - Role is a JWT role to authenticate using the JWT/OIDC Vault - authentication method - type: string - secretRef: - description: |- - Optional SecretRef that refers to a key in a Secret resource containing JWT token to - authenticate with Vault using the JWT/OIDC authentication method. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - required: - - path - type: object - kubernetes: - description: |- - Kubernetes authenticates with Vault by passing the ServiceAccount - token stored in the named Secret resource to the Vault server. - properties: - mountPath: - default: kubernetes - description: |- - Path where the Kubernetes authentication backend is mounted in Vault, e.g: - "kubernetes" - type: string - role: - description: |- - A required field containing the Vault Role to assume. A Role binds a - Kubernetes ServiceAccount with a set of Vault policies. - type: string - secretRef: - description: |- - Optional secret field containing a Kubernetes ServiceAccount JWT used - for authenticating with Vault. If a name is specified without a key, - `token` is the default. If one is not specified, the one bound to - the controller will be used. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - serviceAccountRef: - description: |- - Optional service account field containing the name of a kubernetes ServiceAccount. - If the service account is specified, the service account secret token JWT will be used - for authenticating with Vault. If the service account selector is not supplied, - the secretRef will be used instead. - properties: - audiences: - description: |- - Audience specifies the `aud` claim for the service account token - If the service account uses a well-known annotation for e.g. IRSA or GCP Workload Identity - then this audiences will be appended to the list - items: - type: string - type: array - name: - description: The name of the ServiceAccount resource - being referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. 
- type: string - required: - - name - type: object - required: - - mountPath - - role - type: object - ldap: - description: |- - Ldap authenticates with Vault by passing username/password pair using - the LDAP authentication method - properties: - path: - default: ldap - description: |- - Path where the LDAP authentication backend is mounted - in Vault, e.g: "ldap" - type: string - secretRef: - description: |- - SecretRef to a key in a Secret resource containing password for the LDAP - user used to authenticate with Vault using the LDAP authentication - method - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - username: - description: |- - Username is a LDAP user name used to authenticate using the LDAP Vault - authentication method - type: string - required: - - path - - username - type: object - tokenSecretRef: - description: TokenSecretRef authenticates with Vault by - presenting a token. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - userPass: - description: UserPass authenticates with Vault by passing - username/password pair - properties: - path: - default: user - description: |- - Path where the UserPassword authentication backend is mounted - in Vault, e.g: "user" - type: string - secretRef: - description: |- - SecretRef to a key in a Secret resource containing password for the - user used to authenticate with Vault using the UserPass authentication - method - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - username: - description: |- - Username is a user name used to authenticate using the UserPass Vault - authentication method - type: string - required: - - path - - username - type: object - type: object - caBundle: - description: |- - PEM encoded CA bundle used to validate Vault server certificate. Only used - if the Server URL is using HTTPS protocol. This parameter is ignored for - plain HTTP protocol connection. If not set the system root certificates - are used to validate the TLS connection. - format: byte - type: string - caProvider: - description: The provider for the CA bundle to use to validate - Vault server certificate. 
- properties: - key: - description: The key where the CA certificate can be found - in the Secret or ConfigMap. - type: string - name: - description: The name of the object located at the provider - type. - type: string - namespace: - description: |- - The namespace the Provider type is in. - Can only be defined when used in a ClusterSecretStore. - type: string - type: - description: The type of provider to use such as "Secret", - or "ConfigMap". - enum: - - Secret - - ConfigMap - type: string - required: - - name - - type - type: object - forwardInconsistent: - description: |- - ForwardInconsistent tells Vault to forward read-after-write requests to the Vault - leader instead of simply retrying within a loop. This can increase performance if - the option is enabled serverside. - https://www.vaultproject.io/docs/configuration/replication#allow_forwarding_via_header - type: boolean - namespace: - description: |- - Name of the vault namespace. Namespaces is a set of features within Vault Enterprise that allows - Vault environments to support Secure Multi-tenancy. e.g: "ns1". - More about namespaces can be found here https://www.vaultproject.io/docs/enterprise/namespaces - type: string - path: - description: |- - Path is the mount path of the Vault KV backend endpoint, e.g: - "secret". The v2 KV secret engine version specific "/data" path suffix - for fetching secrets from Vault is optional and will be appended - if not present in specified path. - type: string - readYourWrites: - description: |- - ReadYourWrites ensures isolated read-after-write semantics by - providing discovered cluster replication states in each request. - More information about eventual consistency in Vault can be found here - https://www.vaultproject.io/docs/enterprise/consistency - type: boolean - server: - description: 'Server is the connection address for the Vault - server, e.g: "https://vault.example.com:8200".' - type: string - version: - default: v2 - description: |- - Version is the Vault KV secret engine version. This can be either "v1" or - "v2". Version defaults to "v2". - enum: - - v1 - - v2 - type: string - required: - - auth - - server - type: object - webhook: - description: Webhook configures this store to sync secrets using - a generic templated webhook - properties: - body: - description: Body - type: string - caBundle: - description: |- - PEM encoded CA bundle used to validate webhook server certificate. Only used - if the Server URL is using HTTPS protocol. This parameter is ignored for - plain HTTP protocol connection. If not set the system root certificates - are used to validate the TLS connection. - format: byte - type: string - caProvider: - description: The provider for the CA bundle to use to validate - webhook server certificate. - properties: - key: - description: The key the value inside of the provider - type to use, only used with "Secret" type - type: string - name: - description: The name of the object located at the provider - type. - type: string - namespace: - description: The namespace the Provider type is in. - type: string - type: - description: The type of provider to use such as "Secret", - or "ConfigMap". 
- enum: - - Secret - - ConfigMap - type: string - required: - - name - - type - type: object - headers: - additionalProperties: - type: string - description: Headers - type: object - method: - description: Webhook Method - type: string - result: - description: Result formatting - properties: - jsonPath: - description: Json path of return value - type: string - type: object - secrets: - description: |- - Secrets to fill in templates - These secrets will be passed to the templating function as key value pairs under the given name - items: - properties: - name: - description: Name of this secret in templates - type: string - secretRef: - description: Secret ref to fill in credentials - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - required: - - name - - secretRef - type: object - type: array - timeout: - description: Timeout - type: string - url: - description: Webhook url to call - type: string - required: - - result - - url - type: object - yandexcertificatemanager: - description: YandexCertificateManager configures this store to - sync secrets using Yandex Certificate Manager provider - properties: - apiEndpoint: - description: Yandex.Cloud API endpoint (e.g. 'api.cloud.yandex.net:443') - type: string - auth: - description: Auth defines the information necessary to authenticate - against Yandex Certificate Manager - properties: - authorizedKeySecretRef: - description: The authorized key used for authentication - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - type: object - caProvider: - description: The provider for the CA bundle to use to validate - Yandex.Cloud server certificate. - properties: - certSecretRef: - description: |- - A reference to a specific 'key' within a Secret resource, - In some instances, `key` is a required field. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - type: object - required: - - auth - type: object - yandexlockbox: - description: YandexLockbox configures this store to sync secrets - using Yandex Lockbox provider - properties: - apiEndpoint: - description: Yandex.Cloud API endpoint (e.g. 
'api.cloud.yandex.net:443') - type: string - auth: - description: Auth defines the information necessary to authenticate - against Yandex Lockbox - properties: - authorizedKeySecretRef: - description: The authorized key used for authentication - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - type: object - caProvider: - description: The provider for the CA bundle to use to validate - Yandex.Cloud server certificate. - properties: - certSecretRef: - description: |- - A reference to a specific 'key' within a Secret resource, - In some instances, `key` is a required field. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - type: object - required: - - auth - type: object - type: object - refreshInterval: - description: Used to configure store refresh interval in seconds. - type: integer - retrySettings: - description: Used to configure http retries if failed. - properties: - maxRetries: - format: int32 - type: integer - retryInterval: - type: string - type: object - secretStoreMetadata: - description: The metadata of the secret store to be created. - properties: - annotations: - additionalProperties: - type: string - description: Annotations that are set on projected resource. - type: object - labels: - additionalProperties: - type: string - description: Labels that are set on projected resource. - type: object - type: object - secretStoreName: - description: |- - SecretStoreName is the name to use when creating secret stores within a control plane. - optional, if not set, SharedSecretStore name will be used. - When set, it is immutable. - maxLength: 253 - minLength: 1 - type: string - x-kubernetes-validations: - - message: value is immutable - rule: self == oldSelf - required: - - controlPlaneSelector - - namespaceSelector - - provider - type: object - x-kubernetes-validations: - - message: secretStoreName is immutable - rule: has(self.secretStoreName) == has(oldSelf.secretStoreName) - status: - description: SharedSecretStoreStatus defines the observed state of the - SecretStore. - properties: - conditions: - description: Conditions of the resource. - items: - description: A Condition that may apply to a resource. - properties: - lastTransitionTime: - description: |- - LastTransitionTime is the last time this condition transitioned from one - status to another. - format: date-time - type: string - message: - description: |- - A Message containing details about this condition's last transition from - one status to another, if any. - type: string - observedGeneration: - description: |- - ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
- For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - type: integer - reason: - description: A Reason for this condition's last transition from - one status to another. - type: string - status: - description: Status of this condition; is it currently True, - False, or Unknown? - type: string - type: - description: |- - Type of this condition. At most one of each condition type may apply to - a resource at any point in time. - type: string - required: - - lastTransitionTime - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - failed: - description: List of provisioning failures. - items: - description: SecretStoreProvisioningFailure defines secret store - provisioning failure. - properties: - conditions: - description: List of occurred conditions. - items: - properties: - lastTransitionTime: - format: date-time - type: string - message: - type: string - reason: - type: string - status: - type: string - type: - type: string - required: - - status - - type - type: object - type: array - controlPlane: - description: ControlPlane name where the failure occurred. - type: string - required: - - controlPlane - type: object - type: array - x-kubernetes-list-map-keys: - - controlPlane - x-kubernetes-list-type: map - observedGeneration: - description: |- - ObservedGeneration is the latest metadata.generation - which resulted in either a ready state, or stalled due to error - it can not recover from without human intervention. - format: int64 - type: integer - provisioned: - description: List of successfully provisioned targets. - items: - description: SecretStoreProvisioningSuccess defines secret store - provision success. - properties: - controlPlane: - description: ControlPlane name where the secret store got projected - type: string - required: - - controlPlane - type: object - type: array - x-kubernetes-list-map-keys: - - controlPlane - x-kubernetes-list-type: map - type: object - type: object - served: true - storage: true - subresources: - status: {} diff --git a/static/crds/space/v1.10/spaces.upbound.io_simulations.yaml b/static/crds/space/v1.10/spaces.upbound.io_simulations.yaml deleted file mode 100644 index 856d1a82b..000000000 --- a/static/crds/space/v1.10/spaces.upbound.io_simulations.yaml +++ /dev/null @@ -1,243 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.3 - name: simulations.spaces.upbound.io -spec: - group: spaces.upbound.io - names: - categories: - - spaces - kind: Simulation - listKind: SimulationList - plural: simulations - singular: simulation - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .spec.controlPlaneName - name: SOURCE - type: string - - jsonPath: .status.simulatedControlPlaneName - name: SIMULATED - type: string - - jsonPath: .status.conditions[?(@.type=='AcceptingChanges')].status - name: ACCEPTING-CHANGES - type: string - - jsonPath: .status.conditions[?(@.type=='AcceptingChanges')].reason - name: STATE - type: string - - jsonPath: .metadata.creationTimestamp - name: AGE - type: date - name: v1alpha1 - schema: - openAPIV3Schema: - description: |- - A Simulation creates a simulation of a source ControlPlane. You can apply a - change set to the simulated control plane. 
When the Simulation is complete it - will detect the changes and report the difference compared to the source - control plane. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: SimulationSpec specifies how to run the simulation. - properties: - completionCriteria: - description: |- - CompletionCriteria specify how Spaces should determine when the - simulation is complete. If any of the criteria are met, Spaces will set - the Simulation's desired state to complete. Omit the criteria if you want - to manually mark the Simulation complete. - items: - description: A CompletionCriterion specifies when a simulation is - complete. - properties: - duration: - description: Duration after which the simulation is complete. - type: string - type: - description: Type of criterion. - enum: - - Duration - type: string - required: - - duration - - type - type: object - type: array - controlPlaneName: - description: |- - ControlPlaneName is the name of the ControlPlane to simulate a change to. - This control plane is known as the Simulation's 'source' control plane. - minLength: 1 - type: string - x-kubernetes-validations: - - message: The source controlplane can't be changed - rule: self == oldSelf - desiredState: - default: AcceptingChanges - description: DesiredState of the simulation. - enum: - - AcceptingChanges - - Complete - - Terminated - type: string - x-kubernetes-validations: - - message: A complete Simulation can only be terminated - rule: oldSelf != 'Complete' || self == 'Complete' || self == 'Terminated' - - message: A Simulation can't be un-terminated - rule: oldSelf != 'Terminated' || self == oldSelf - required: - - controlPlaneName - - desiredState - type: object - status: - description: SimulationStatus represents the observed state of a Simulation. - properties: - changes: - description: |- - Changes detected by the simulation. Only changes that happen while the - simulation is in the AcceptingChanges state are included. - items: - description: |- - A SimulationChange represents an object that changed while the simulation was - in the AcceptingChanges state. - properties: - change: - description: Change type. - enum: - - Unknown - - Create - - Update - - Delete - type: string - objectRef: - description: ObjectReference to the changed object. - properties: - apiVersion: - description: APIVersion of the changed resource. - type: string - kind: - description: Kind of the changed resource. - type: string - name: - description: Name of the changed resource. - type: string - namespace: - description: Namespace of the changed resource. - type: string - required: - - apiVersion - - kind - - name - type: object - required: - - change - - objectRef - type: object - type: array - conditions: - description: Conditions of the resource. - items: - description: A Condition that may apply to a resource. 
- properties: - lastTransitionTime: - description: |- - LastTransitionTime is the last time this condition transitioned from one - status to another. - format: date-time - type: string - message: - description: |- - A Message containing details about this condition's last transition from - one status to another, if any. - type: string - observedGeneration: - description: |- - ObservedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - type: integer - reason: - description: A Reason for this condition's last transition from - one status to another. - type: string - status: - description: Status of this condition; is it currently True, - False, or Unknown? - type: string - type: - description: |- - Type of this condition. At most one of each condition type may apply to - a resource at any point in time. - type: string - required: - - lastTransitionTime - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - controlPlaneData: - description: |- - ControlPlaneData exported from the source control plane and imported to - the simulated control plane. - properties: - exportTimestamp: - description: |- - ExportTimestamp is the time at which the source control plane's resources - were exported. Resources are exported to temporary storage before they're - imported to the simulated control plane. - format: date-time - type: string - importTimestamp: - description: |- - ImportTimestamp is the time at which the source control plane's resources - were imported to the simulated control plane. - format: date-time - type: string - type: object - observedGeneration: - description: |- - ObservedGeneration is the latest metadata.generation - which resulted in either a ready state, or stalled due to error - it can not recover from without human intervention. - format: int64 - type: integer - simulatedControlPlaneName: - description: |- - SimulatedControlPlaneName is the name of the control plane used to run - the simulation.
- minLength: 1 - type: string - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/static/crds/space/v1.11/admin.spaces.upbound.io_spacebackupconfigs.yaml b/static/crds/space/v1.11/admin.spaces.upbound.io_spacebackupconfigs.yaml deleted file mode 100644 index 9feedd34f..000000000 --- a/static/crds/space/v1.11/admin.spaces.upbound.io_spacebackupconfigs.yaml +++ /dev/null @@ -1,177 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.3 - name: spacebackupconfigs.admin.spaces.upbound.io -spec: - group: admin.spaces.upbound.io - names: - categories: - - spaces - kind: SpaceBackupConfig - listKind: SpaceBackupConfigList - plural: spacebackupconfigs - singular: spacebackupconfig - scope: Cluster - versions: - - additionalPrinterColumns: - - jsonPath: .spec.objectStorage.provider - name: Provider - type: string - - jsonPath: .spec.objectStorage.bucket - name: Bucket - type: string - - jsonPath: .spec.objectStorage.credentials.source - name: Auth - type: string - - jsonPath: .metadata.annotations.spacebackupconfig\.admin\.internal\.spaces\.upbound\.io/secret - name: Secret - type: string - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - name: v1alpha1 - schema: - openAPIV3Schema: - description: SpaceBackupConfig defines the configuration to backup a Space. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: |- - A SpaceBackupConfigSpec represents the configuration to backup or restore - a Space. - properties: - objectStorage: - description: ObjectStorage specifies the object storage configuration - for the given provider. - properties: - bucket: - description: Bucket is the name of the bucket to store backups - in. - minLength: 1 - type: string - config: - description: |- - Config is a free-form map of configuration options for the object storage provider. - See https://github.com/thanos-io/objstore?tab=readme-ov-file for more - information on the formats for each supported cloud provider. Bucket and - Provider will override the required values in the config. - type: object - x-kubernetes-preserve-unknown-fields: true - credentials: - description: Credentials specifies the credentials to access the - object storage. - properties: - env: - description: |- - Env is a reference to an environment variable that contains credentials - that must be used to connect to the provider. - properties: - name: - description: Name is the name of an environment variable. - type: string - required: - - name - type: object - fs: - description: |- - Fs is a reference to a filesystem location that contains credentials that - must be used to connect to the provider. - properties: - path: - description: Path is a filesystem path. 
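Assembled from the Simulation CRD above, a minimal manifest could look like the following; the group, version, kind, and field names come from that schema, while the concrete values are illustrative.

---
# Illustrative values; field names per the Simulation CRD above.
apiVersion: spaces.upbound.io/v1alpha1
kind: Simulation
metadata:
  name: simulate-upgrade
  namespace: default
spec:
  controlPlaneName: prod-ctp     # immutable: the source can't be changed
  # AcceptingChanges (default) -> Complete -> Terminated. A complete
  # Simulation can only be terminated, and termination is one-way.
  desiredState: AcceptingChanges
  completionCriteria:
    - type: Duration
      duration: 30m              # auto-complete after 30 minutes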
- type: string - required: - - path - type: object - secretRef: - description: |- - A SecretRef is a reference to a secret key that contains the credentials - that must be used to connect to the provider. - properties: - key: - default: credentials - description: The key to select. - type: string - name: - description: Name of the secret. - type: string - namespace: - description: Namespace of the secret. - type: string - required: - - key - - name - - namespace - type: object - source: - allOf: - - enum: - - Secret - - InjectedIdentity - - enum: - - Secret - - InjectedIdentity - description: |- - Source of the credentials. - Source "Secret" requires "get" permissions on the referenced Secret. - type: string - required: - - source - type: object - x-kubernetes-validations: - - message: secretRef.name and namespace must be set when source - is Secret - rule: self.source != 'Secret' || (has(self.secretRef) && has(self.secretRef.name) - && has(self.secretRef.__namespace__)) - prefix: - description: |- - Prefix is the prefix to use for all backups using this - SharedBackupConfig, e.g. "prod/cluster1", resulting in backups for - controlplane "ctp1" in namespace "ns1" being stored in - "prod/cluster1/ns1/ctp1". - type: string - provider: - description: Provider is the name of the object storage provider. - enum: - - AWS - - Azure - - GCP - type: string - required: - - bucket - - credentials - - provider - type: object - x-kubernetes-validations: - - message: credentials.secretRef.name must be set when source is Secret - rule: self.credentials.source != 'Secret' || (has(self.credentials.secretRef) - && has(self.credentials.secretRef.name)) - required: - - objectStorage - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/static/crds/space/v1.11/admin.spaces.upbound.io_spacebackups.yaml b/static/crds/space/v1.11/admin.spaces.upbound.io_spacebackups.yaml deleted file mode 100644 index 2e0824038..000000000 --- a/static/crds/space/v1.11/admin.spaces.upbound.io_spacebackups.yaml +++ /dev/null @@ -1,787 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.3 - name: spacebackups.admin.spaces.upbound.io -spec: - group: admin.spaces.upbound.io - names: - categories: - - spaces - kind: SpaceBackup - listKind: SpaceBackupList - plural: spacebackups - singular: spacebackup - scope: Cluster - versions: - - additionalPrinterColumns: - - jsonPath: .status.phase - name: Phase - type: string - - jsonPath: .status.retries - name: Retries - type: integer - - jsonPath: .spec.ttl - name: TTL - type: string - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - name: v1alpha1 - schema: - openAPIV3Schema: - description: SpaceBackup represents a backup of a Space. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. 
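The SpaceBackupConfig schema above reduces to a fairly small manifest in practice. A sketch with illustrative values; the field names and the CEL constraints in the comments come from the CRD above.

---
# Illustrative values; field names per the SpaceBackupConfig CRD above.
apiVersion: admin.spaces.upbound.io/v1alpha1
kind: SpaceBackupConfig
metadata:
  name: prod-backup-config       # cluster-scoped, so no namespace
spec:
  objectStorage:
    provider: AWS                # AWS, Azure, or GCP
    bucket: spaces-backups
    prefix: prod/cluster1        # backups land under prod/cluster1/<group>/<ctp>
    credentials:
      source: Secret             # or InjectedIdentity
      secretRef:                 # name and namespace are required when
        name: aws-creds          # source is Secret (see the CEL rules above)
        namespace: upbound-system
        key: credentials         # the default key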
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: SpaceBackupSpec defines a backup over a set of Match - properties: - configRef: - description: |- - ConfigRef is a reference to the space backup configuration. - ApiGroup is optional and defaults to "spaces.upbound.io". - Kind is required, and the only supported value is "SpaceBackupConfig" at - the moment. - Name is required. - properties: - apiGroup: - description: APIGroup is the group for the resource being referenced. - type: string - kind: - description: Kind is the type of resource being referenced. - minLength: 1 - type: string - name: - description: Name is the name of resource being referenced. - minLength: 1 - type: string - type: object - x-kubernetes-map-type: atomic - x-kubernetes-validations: - - message: backup config ref must be a reference to a SpaceBackupConfig - rule: (!has(self.apiGroup) || self.apiGroup == 'admin.spaces.upbound.io') - && self.kind == 'SpaceBackupConfig' - - message: backup config ref must have a name - rule: size(self.name) > 0 - controlPlaneBackups: - description: ControlPlaneBackups is the definition of the control - plane backups, - properties: - excludedResources: - description: |- - ExcludedResources is a slice of resource names that are not - included in the backup. Used to filter the included extra resources. - items: - type: string - type: array - x-kubernetes-list-type: set - type: object - deletionPolicy: - default: Orphan - description: DeletionPolicy is the policy for the backup. - enum: - - Orphan - - Delete - type: string - exclude: - description: |- - Exclude is the selector for resources that should be excluded from the backup. - If both Match and Exclude are specified, the Exclude selector will be applied - after the Match selector. - By default, only SpaceBackups are excluded. - properties: - controlPlanes: - description: |- - ControlPlanes specifies the control planes selected. - A control plane is matched if any of the control plane selectors matches, if not specified - any control plane in the selected groups is matched. - properties: - labelSelectors: - description: |- - A resource is matched if any of the label selector matches. - In case when the list is empty, resource is matched too. - items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. 
- items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - names: - description: |- - A resource is selected if its metadata.name matches any of the provided names. - In case when the list is empty, resource is matched too. - items: - type: string - type: array - x-kubernetes-list-type: set - type: object - extras: - description: Extras specifies the extra resources selected. - items: - description: GenericSpaceBackupResourceSelector represents a - generic resource selector. - properties: - apiGroup: - description: APIGroup is the group of the resource. - type: string - kind: - description: Kind is the kind of the resource. - type: string - labelSelectors: - description: |- - A resource is matched if any of the label selector matches. - In case when the list is empty, resource is matched too. - items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - names: - description: |- - A resource is selected if its metadata.name matches any of the provided names. - In case when the list is empty, resource is matched too. - items: - type: string - type: array - x-kubernetes-list-type: set - required: - - apiGroup - - kind - type: object - type: array - groups: - description: |- - Groups specifies the groups selected. - A group is matched if any of the group selectors matches, if not specified - any group is matched. 
Group selector is ANDed with all other selectors, so no resource in - a group not matching the group selector will be included in the backup. - properties: - labelSelectors: - description: |- - A resource is matched if any of the label selector matches. - In case when the list is empty, resource is matched too. - items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - names: - description: |- - A resource is selected if its metadata.name matches any of the provided names. - In case when the list is empty, resource is matched too. - items: - type: string - type: array - x-kubernetes-list-type: set - type: object - secrets: - description: Secrets specifies the secrets selected. - properties: - labelSelectors: - description: |- - A resource is matched if any of the label selector matches. - In case when the list is empty, resource is matched too. - items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch.
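The selector semantics spelled out in the descriptions above are easy to lose in the schema: entries in a labelSelectors list are ORed against each other, matchLabels and matchExpressions inside a single entry are ANDed, and an empty list matches everything. A hypothetical controlPlanes selector, as it would appear under match or exclude; the labels are illustrative.

controlPlanes:
  labelSelectors:
    # Entry 1: env=prod AND tier in (critical) ...
    - matchLabels:
        env: prod
      matchExpressions:
        - key: tier
          operator: In           # In, NotIn, Exists, DoesNotExist
          values:
            - critical
    # ... OR entry 2: backup=true is enough on its own.
    - matchLabels:
        backup: "true"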
- items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - names: - description: |- - A resource is selected if its metadata.name matches any of the provided names. - In case when the list is empty, resource is matched too. - items: - type: string - type: array - x-kubernetes-list-type: set - type: object - type: object - match: - description: |- - Match is the selector for resources that should be included in the backup. - By default, we'll back up all Groups and for each Group: - - All ControlPlanes. - - All Secrets. - - All other Space API resources, e.g. SharedBackupConfigs, SharedUpboundPolicies, Backups, etc... - properties: - controlPlanes: - description: |- - ControlPlanes specifies the control planes selected. - A control plane is matched if any of the control plane selectors matches, if not specified - any control plane in the selected groups is matched. - properties: - labelSelectors: - description: |- - A resource is matched if any of the label selector matches. - In case when the list is empty, resource is matched too. - items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - names: - description: |- - A resource is selected if its metadata.name matches any of the provided names. - In case when the list is empty, resource is matched too. - items: - type: string - type: array - x-kubernetes-list-type: set - type: object - extras: - description: Extras specifies the extra resources selected. 
- items: - description: GenericSpaceBackupResourceSelector represents a - generic resource selector. - properties: - apiGroup: - description: APIGroup is the group of the resource. - type: string - kind: - description: Kind is the kind of the resource. - type: string - labelSelectors: - description: |- - A resource is matched if any of the label selector matches. - In case when the list is empty, resource is matched too. - items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - names: - description: |- - A resource is selected if its metadata.name matches any of the provided names. - In case when the list is empty, resource is matched too. - items: - type: string - type: array - x-kubernetes-list-type: set - required: - - apiGroup - - kind - type: object - type: array - groups: - description: |- - Groups specifies the groups selected. - A group is matched if any of the group selectors matches, if not specified - any group is matched. Group selector is ANDed with all other selectors, so no resource in - a group not matching the group selector will be included in the backup. - properties: - labelSelectors: - description: |- - A resource is matched if any of the label selector matches. - In case when the list is empty, resource is matched too. - items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. 
- type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - names: - description: |- - A resource is selected if its metadata.name matches any of the provided names. - In case when the list is empty, resource is matched too. - items: - type: string - type: array - x-kubernetes-list-type: set - type: object - secrets: - description: Secrets specifies the secrets selected. - properties: - labelSelectors: - description: |- - A resource is matched if any of the label selector matches. - In case when the list is empty, resource is matched too. - items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - names: - description: |- - A resource is selected if its metadata.name matches any of the provided names. - In case when the list is empty, resource is matched too. - items: - type: string - type: array - x-kubernetes-list-type: set - type: object - type: object - ttl: - description: |- - TTL is the time to live for the backup.
After this time, the backup - will be eligible for garbage collection. If not set, the backup will - not be garbage collected. - type: string - required: - - configRef - type: object - x-kubernetes-validations: - - message: spec.configRef can't be changed or set after creation - rule: '!has(self.configRef) && !has(oldSelf.configRef) || (self.configRef - == oldSelf.configRef) ' - - message: spec.match can't be changed or set after creation - rule: '!has(self.match) && !has(oldSelf.match) || (self.match == oldSelf.match) ' - - message: spec.exclude can't be changed or set after creation - rule: '!has(self.exclude) && !has(oldSelf.exclude) || (self.exclude - == oldSelf.exclude) ' - - message: spec.controlPlaneBackups can't be changed or set after creation - rule: '!has(self.controlPlaneBackups) && !has(oldSelf.controlPlaneBackups) - || (self.controlPlaneBackups == oldSelf.controlPlaneBackups) ' - status: - description: SpaceBackupStatus represents the observed state of a SpaceBackup. - properties: - conditions: - description: Conditions of the resource. - items: - description: A Condition that may apply to a resource. - properties: - lastTransitionTime: - description: |- - LastTransitionTime is the last time this condition transitioned from one - status to another. - format: date-time - type: string - message: - description: |- - A Message containing details about this condition's last transition from - one status to another, if any. - type: string - observedGeneration: - description: |- - ObservedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - type: integer - reason: - description: A Reason for this condition's last transition from - one status to another. - type: string - status: - description: Status of this condition; is it currently True, - False, or Unknown? - type: string - type: - description: |- - Type of this condition. At most one of each condition type may apply to - a resource at any point in time. - type: string - required: - - lastTransitionTime - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - observedGeneration: - description: |- - ObservedGeneration is the latest metadata.generation - which resulted in either a ready state, or stalled due to error - it can not recover from without human intervention. - format: int64 - type: integer - phase: - default: Pending - description: Phase is the current phase of the backup. - enum: - - Pending - - InProgress - - Failed - - Completed - - Deleted - type: string - retries: - description: Retries is the number of times the backup has been retried. 
- format: int32 - type: integer - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/static/crds/space/v1.11/admin.spaces.upbound.io_spacebackupschedules.yaml b/static/crds/space/v1.11/admin.spaces.upbound.io_spacebackupschedules.yaml deleted file mode 100644 index bf66b7856..000000000 --- a/static/crds/space/v1.11/admin.spaces.upbound.io_spacebackupschedules.yaml +++ /dev/null @@ -1,789 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.3 - name: spacebackupschedules.admin.spaces.upbound.io -spec: - group: admin.spaces.upbound.io - names: - categories: - - spaces - kind: SpaceBackupSchedule - listKind: SpaceBackupScheduleList - plural: spacebackupschedules - singular: spacebackupschedule - scope: Cluster - versions: - - additionalPrinterColumns: - - jsonPath: .spec.schedule - name: Schedule - type: string - - jsonPath: .status.lastBackup - name: LastBackup - type: date - - jsonPath: .spec.ttl - name: TTL - type: string - - jsonPath: .spec.suspend - name: Suspended - type: boolean - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - name: v1alpha1 - schema: - openAPIV3Schema: - description: SpaceBackupSchedule represents a schedule to backup a Space. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: SpaceBackupScheduleSpec defines a space backup schedule. - properties: - configRef: - description: |- - ConfigRef is a reference to the space backup configuration. - ApiGroup is optional and defaults to "spaces.upbound.io". - Kind is required, and the only supported value is "SpaceBackupConfig" at - the moment. - Name is required. - properties: - apiGroup: - description: APIGroup is the group for the resource being referenced. - type: string - kind: - description: Kind is the type of resource being referenced. - minLength: 1 - type: string - name: - description: Name is the name of resource being referenced. - minLength: 1 - type: string - type: object - x-kubernetes-map-type: atomic - x-kubernetes-validations: - - message: backup config ref must be a reference to a SpaceBackupConfig - rule: (!has(self.apiGroup) || self.apiGroup == 'admin.spaces.upbound.io') - && self.kind == 'SpaceBackupConfig' - - message: backup config ref must have a name - rule: size(self.name) > 0 - controlPlaneBackups: - description: ControlPlaneBackups is the definition of the control - plane backups, - properties: - excludedResources: - description: |- - ExcludedResources is a slice of resource names that are not - included in the backup. Used to filter the included extra resources. 
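Pulling the SpaceBackup CRD above together, a sketch with illustrative values; the ttl duration format is assumed, everything else follows the schema and its CEL rules.

---
# Illustrative values; field names per the SpaceBackup CRD above. Note
# that configRef, match, exclude, and controlPlaneBackups are all
# immutable after creation (see the spec-level CEL rules).
apiVersion: admin.spaces.upbound.io/v1alpha1
kind: SpaceBackup
metadata:
  name: space-backup-example
spec:
  configRef:
    kind: SpaceBackupConfig      # the only supported kind at the moment
    name: prod-backup-config
  deletionPolicy: Orphan         # or Delete
  ttl: 720h                      # assumed duration format; eligible for GC afterwards
  match:
    groups:
      names:
        - ns1                    # restrict the backup to one group
  exclude:
    controlPlanes:
      labelSelectors:
        - matchLabels:
            backup: "false"      # skip opted-out control planes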
- items: - type: string - type: array - x-kubernetes-list-type: set - type: object - deletionPolicy: - default: Orphan - description: DeletionPolicy is the policy for the backup. - enum: - - Orphan - - Delete - type: string - exclude: - description: |- - Exclude is the selector for resources that should be excluded from the backup. - If both Match and Exclude are specified, the Exclude selector will be applied - after the Match selector. - By default, only SpaceBackups are excluded. - properties: - controlPlanes: - description: |- - ControlPlanes specifies the control planes selected. - A control plane is matched if any of the control plane selectors matches, if not specified - any control plane in the selected groups is matched. - properties: - labelSelectors: - description: |- - A resource is matched if any of the label selector matches. - In case when the list is empty, resource is matched too. - items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - names: - description: |- - A resource is selected if its metadata.name matches any of the provided names. - In case when the list is empty, resource is matched too. - items: - type: string - type: array - x-kubernetes-list-type: set - type: object - extras: - description: Extras specifies the extra resources selected. - items: - description: GenericSpaceBackupResourceSelector represents a - generic resource selector. - properties: - apiGroup: - description: APIGroup is the group of the resource. - type: string - kind: - description: Kind is the kind of the resource. - type: string - labelSelectors: - description: |- - A resource is matched if any of the label selector matches. - In case when the list is empty, resource is matched too. - items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. 
- properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - names: - description: |- - A resource is selected if its metadata.name matches any of the provided names. - In case when the list is empty, resource is matched too. - items: - type: string - type: array - x-kubernetes-list-type: set - required: - - apiGroup - - kind - type: object - type: array - groups: - description: |- - Groups specifies the groups selected. - A group is matched if any of the group selectors matches, if not specified - any group is matched. Group selector is ANDed with all other selectors, so no resource in - a group not matching the group selector will be included in the backup. - properties: - labelSelectors: - description: |- - A resource is matched if any of the label selector matches. - In case when the list is empty, resource is matched too. - items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - names: - description: |- - A resource is selected if its metadata.name matches any of the provided names. - In case when the list is empty, resource is matched too. - items: - type: string - type: array - x-kubernetes-list-type: set - type: object - secrets: - description: Secrets specifies the secrets selected. - properties: - labelSelectors: - description: |- - A resource is matched if any of the label selector matches. - In case when the list is empty, resource is matched too. - items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - names: - description: |- - A resource is selected if its metadata.name matches any of the provided names. - In case when the list is empty, resource is matched too. - items: - type: string - type: array - x-kubernetes-list-type: set - type: object - type: object - match: - description: |- - Match is the selector for resources that should be included in the backup. - By default, we'll back up all Groups and for each Group: - - All ControlPlanes. - - All Secrets. - - All other Space API resources, e.g. SharedBackupConfigs, SharedUpboundPolicies, Backups, etc... - properties: - controlPlanes: - description: |- - ControlPlanes specifies the control planes selected. - A control plane is matched if any of the control plane selectors matches, if not specified - any control plane in the selected groups is matched. - properties: - labelSelectors: - description: |- - A resource is matched if any of the label selector matches. - In case when the list is empty, resource is matched too. - items: - description: |- - A label selector is a label query over a set of resources.
The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - names: - description: |- - A resource is selected if its metadata.name matches any of the provided names. - In case when the list is empty, resource is matched too. - items: - type: string - type: array - x-kubernetes-list-type: set - type: object - extras: - description: Extras specifies the extra resources selected. - items: - description: GenericSpaceBackupResourceSelector represents a - generic resource selector. - properties: - apiGroup: - description: APIGroup is the group of the resource. - type: string - kind: - description: Kind is the kind of the resource. - type: string - labelSelectors: - description: |- - A resource is matched if any of the label selector matches. - In case when the list is empty, resource is matched too. - items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. 
-                                      items:
-                                        type: string
-                                      type: array
-                                      x-kubernetes-list-type: atomic
-                                  required:
-                                  - key
-                                  - operator
-                                  type: object
-                                type: array
-                                x-kubernetes-list-type: atomic
-                              matchLabels:
-                                additionalProperties:
-                                  type: string
-                                description: |-
-                                  matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
-                                  map is equivalent to an element of matchExpressions, whose key field is "key", the
-                                  operator is "In", and the values array contains only "value". The requirements are ANDed.
-                                type: object
-                            type: object
-                            x-kubernetes-map-type: atomic
-                          type: array
-                        names:
-                          description: |-
-                            A resource is selected if its metadata.name matches any of the provided names.
-                            In case when the list is empty, resource is matched too.
-                          items:
-                            type: string
-                          type: array
-                          x-kubernetes-list-type: set
-                      required:
-                      - apiGroup
-                      - kind
-                      type: object
-                    type: array
-                  groups:
-                    description: |-
-                      Groups specifies the groups selected.
-                      A group is matched if any of the group selectors matches, if not specified
-                      any group is matched. Group selector is ANDed with all other selectors, so no resource in
-                      a group not matching the group selector will be included in the backup.
-                    properties:
-                      labelSelectors:
-                        description: |-
-                          A resource is matched if any of the label selector matches.
-                          In case when the list is empty, resource is matched too.
-                        items:
-                          description: |-
-                            A label selector is a label query over a set of resources. The result of matchLabels and
-                            matchExpressions are ANDed. An empty label selector matches all objects. A null
-                            label selector matches no objects.
-                          properties:
-                            matchExpressions:
-                              description: matchExpressions is a list of label selector
-                                requirements. The requirements are ANDed.
-                              items:
-                                description: |-
-                                  A label selector requirement is a selector that contains values, a key, and an operator that
-                                  relates the key and values.
-                                properties:
-                                  key:
-                                    description: key is the label key that the selector
-                                      applies to.
-                                    type: string
-                                  operator:
-                                    description: |-
-                                      operator represents a key's relationship to a set of values.
-                                      Valid operators are In, NotIn, Exists and DoesNotExist.
-                                    type: string
-                                  values:
-                                    description: |-
-                                      values is an array of string values. If the operator is In or NotIn,
-                                      the values array must be non-empty. If the operator is Exists or DoesNotExist,
-                                      the values array must be empty. This array is replaced during a strategic
-                                      merge patch.
-                                    items:
-                                      type: string
-                                    type: array
-                                    x-kubernetes-list-type: atomic
-                                required:
-                                - key
-                                - operator
-                                type: object
-                              type: array
-                              x-kubernetes-list-type: atomic
-                            matchLabels:
-                              additionalProperties:
-                                type: string
-                              description: |-
-                                matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
-                                map is equivalent to an element of matchExpressions, whose key field is "key", the
-                                operator is "In", and the values array contains only "value". The requirements are ANDed.
-                              type: object
-                          type: object
-                          x-kubernetes-map-type: atomic
-                        type: array
-                      names:
-                        description: |-
-                          A resource is selected if its metadata.name matches any of the provided names.
-                          In case when the list is empty, resource is matched too.
-                        items:
-                          type: string
-                        type: array
-                        x-kubernetes-list-type: set
-                    type: object
-                  secrets:
-                    description: Secrets specifies the secrets selected.
-                    properties:
-                      labelSelectors:
-                        description: |-
-                          A resource is matched if any of the label selector matches.
-                          In case when the list is empty, resource is matched too.
- items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - names: - description: |- - A resource is selected if its metadata.name matches any of the provided names. - In case when the list is empty, resource is matched too. - items: - type: string - type: array - x-kubernetes-list-type: set - type: object - type: object - schedule: - description: Schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. - minLength: 1 - type: string - suspend: - description: |- - Suspend specifies whether the schedule is suspended. If true, no - SpaceBackups will be created, but running backups will be allowed to - complete. - type: boolean - ttl: - description: |- - TTL is the time to live for the backup. After this time, the backup - will be eligible for garbage collection. If not set, the backup will - not be garbage collected. - type: string - useOwnerReferencesInBackup: - description: |- - UseOwnerReferencesBackup specifies whether an ownership chain should be - established between this resource and the Backup it creates. - If set to true, the Backup will be garbage collected when this resource - is deleted. - type: boolean - required: - - configRef - - schedule - type: object - status: - description: SpaceBackupScheduleStatus represents the observed state of - a SpaceBackupSchedule. - properties: - conditions: - description: Conditions of the resource. - items: - description: A Condition that may apply to a resource. - properties: - lastTransitionTime: - description: |- - LastTransitionTime is the last time this condition transitioned from one - status to another. - format: date-time - type: string - message: - description: |- - A Message containing details about this condition's last transition from - one status to another, if any. - type: string - observedGeneration: - description: |- - ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
-                        For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
-                        with respect to the current state of the instance.
-                      format: int64
-                      type: integer
-                    reason:
-                      description: A Reason for this condition's last transition from
-                        one status to another.
-                      type: string
-                    status:
-                      description: Status of this condition; is it currently True,
-                        False, or Unknown?
-                      type: string
-                    type:
-                      description: |-
-                        Type of this condition. At most one of each condition type may apply to
-                        a resource at any point in time.
-                      type: string
-                  required:
-                  - lastTransitionTime
-                  - reason
-                  - status
-                  - type
-                  type: object
-                type: array
-                x-kubernetes-list-map-keys:
-                - type
-                x-kubernetes-list-type: map
-              lastBackup:
-                description: |-
-                  LastBackup is the last time a Backup was run for this
-                  Schedule.
-                format: date-time
-                type: string
-              observedGeneration:
-                description: |-
-                  ObservedGeneration is the latest metadata.generation
-                  which resulted in either a ready state, or stalled due to error
-                  it can not recover from without human intervention.
-                format: int64
-                type: integer
-            type: object
-        required:
-        - spec
-        type: object
-    served: true
-    storage: true
-    subresources:
-      status: {}
diff --git a/static/crds/space/v1.11/authorization.spaces.upbound.io_objectrolebindings.yaml b/static/crds/space/v1.11/authorization.spaces.upbound.io_objectrolebindings.yaml
deleted file mode 100644
index dcca4418f..000000000
--- a/static/crds/space/v1.11/authorization.spaces.upbound.io_objectrolebindings.yaml
+++ /dev/null
@@ -1,153 +0,0 @@
----
-apiVersion: apiextensions.k8s.io/v1
-kind: CustomResourceDefinition
-metadata:
-  annotations:
-    controller-gen.kubebuilder.io/version: v0.16.3
-  name: objectrolebindings.authorization.spaces.upbound.io
-spec:
-  group: authorization.spaces.upbound.io
-  names:
-    categories:
-    - iam
-    kind: ObjectRoleBinding
-    listKind: ObjectRoleBindingList
-    plural: objectrolebindings
-    singular: objectrolebinding
-  scope: Namespaced
-  versions:
-  - name: v1alpha1
-    schema:
-      openAPIV3Schema:
-        description: |-
-          An ObjectRoleBinding binds a namespaced API object to a set of subjects, at varying access levels.
-          For now, there can be at most one ObjectRoleBinding pointing to each API object.
-        properties:
-          apiVersion:
-            description: |-
-              APIVersion defines the versioned schema of this representation of an object.
-              Servers should convert recognized schemas to the latest internal value, and
-              may reject unrecognized values.
-              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
-            type: string
-          kind:
-            description: |-
-              Kind is a string value representing the REST resource this object represents.
-              Servers may infer this from the endpoint the client submits requests to.
-              Cannot be updated.
-              In CamelCase.
-              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
-            type: string
-          metadata:
-            type: object
-          spec:
-            description: ObjectRoleBindingSpec is ObjectRoleBinding's spec.
-            properties:
-              object:
-                description: |-
-                  Object references the object to which the listed subjects should have access at varying levels.
-                  The object value is immutable after creation.
-                properties:
-                  apiGroup:
-                    description: |-
-                      APIGroup defines the apiGroup of the object being pointed to.
-                      With some minor differences, this is essentially matched as a DNS subdomain, like how Kubernetes validates it.
- The Kubernetes legacy core group is denoted as "core". - maxLength: 64 - pattern: ^[a-z][a-z0-9-]{0,61}[a-z0-9](\.[a-z][a-z0-9-]{0,61}[a-z0-9])*$ - type: string - x-kubernetes-validations: - - message: apiGroup is immutable - rule: self == oldSelf - - message: apiGroup must be 'core' for now. This will change in - the future. - rule: self == 'core' - name: - description: |- - Name points to the .metadata.name of the object targeted. - Kubernetes validates this as a DNS 1123 subdomain. - maxLength: 253 - pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ - type: string - x-kubernetes-validations: - - message: name is immutable - rule: self == oldSelf - resource: - description: |- - Resource defines the resource type (often kind in plural, e.g. - controlplanes) being pointed to. - With some minor differences, this is essentially matched as a DNS label, like how Kubernetes validates it. - maxLength: 63 - pattern: ^[a-z][a-z0-9-]{1,61}[a-z0-9]$ - type: string - x-kubernetes-validations: - - message: resource is immutable - rule: self == oldSelf - - message: resource must be 'namespaces' for now. This will change - in the future. - rule: self == 'namespaces' - required: - - apiGroup - - name - - resource - type: object - subjects: - description: Subjects should be a map type with both kind+name as - a key - items: - description: |- - SubjectBinding contains a reference to the object or user identities a role - binding applies to. - properties: - kind: - description: |- - Kind of subject being referenced. Values defined by this API group are - for now only "UpboundTeam". - enum: - - UpboundTeam - type: string - x-kubernetes-validations: - - message: kind must be 'UpboundTeam' for now. This will change - in the future. - rule: self == 'UpboundTeam' - name: - description: |- - Name (identifier) of the subject (of the specified kind) being referenced. - The identifier must be 2-100 chars, [a-zA-Z0-9-], no repeating dashes, can't start/end with a dash. - Notably, a UUID fits that format. - maxLength: 100 - pattern: ^([a-zA-Z0-9]+-?)+[a-zA-Z0-9]$ - type: string - role: - description: |- - Role this subject has on the associated Object. - The list of valid roles is defined for each target API resource separately. - For namespaces, valid values are "viewer", "editor", and "admin". - The format of this is essentially a RFC 1035 label with underscores instead of dashes, minimum three characters long. - maxLength: 63 - pattern: ^[a-z][a-z0-9_]{1,62}[a-z0-9]$ - type: string - required: - - kind - - name - - role - type: object - type: array - x-kubernetes-list-map-keys: - - kind - - name - x-kubernetes-list-type: map - required: - - object - - subjects - type: object - status: - description: ObjectRoleBindingStatus is RoleBindings' status. 
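The two CRDs removed above read more clearly against concrete manifests. The sketches below are an editor's illustration, not part of the deleted files: resource names and label values are invented, and the SpaceBackupSchedule apiVersion and configRef shape sit above this hunk, so they are assumptions.

```yaml
# Illustrative sketch: a minimal SpaceBackupSchedule built from the schema
# above. The apiVersion and the configRef field shape are not visible in
# this hunk and are assumed here.
apiVersion: admin.spaces.upbound.io/v1alpha1  # assumed group/version
kind: SpaceBackupSchedule
metadata:
  name: nightly
spec:
  configRef:                  # assumed shape: reference to a backup config
    kind: SpaceBackupConfig
    name: default
  schedule: "0 2 * * *"       # Cron format, per spec.schedule
  ttl: 720h                   # eligible for garbage collection after 30 days
  useOwnerReferencesInBackup: true
  match:
    controlPlanes:
      labelSelectors:
        - matchLabels:
            tier: production  # only control planes labeled tier=production
```

```yaml
# Illustrative sketch: an ObjectRoleBinding granting a team read access to a
# group namespace. Values respect the CEL rules quoted above: apiGroup must
# be "core", resource must be "namespaces", and subjects must be
# UpboundTeams. The team ID is a made-up UUID.
apiVersion: authorization.spaces.upbound.io/v1alpha1
kind: ObjectRoleBinding
metadata:
  name: platform-team-access
  namespace: my-group
spec:
  object:
    apiGroup: core
    resource: namespaces
    name: my-group
  subjects:
    - kind: UpboundTeam
      name: 4f6c1d2e-8a3b-4c5d-9e0f-1a2b3c4d5e6f
      role: viewer            # valid namespace roles: viewer, editor, admin
```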
- type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/static/crds/space/v1.11/observability.spaces.upbound.io_sharedtelemetryconfigs.yaml b/static/crds/space/v1.11/observability.spaces.upbound.io_sharedtelemetryconfigs.yaml deleted file mode 100644 index e119b699c..000000000 --- a/static/crds/space/v1.11/observability.spaces.upbound.io_sharedtelemetryconfigs.yaml +++ /dev/null @@ -1,401 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.3 - name: sharedtelemetryconfigs.observability.spaces.upbound.io -spec: - group: observability.spaces.upbound.io - names: - categories: - - observability - kind: SharedTelemetryConfig - listKind: SharedTelemetryConfigList - plural: sharedtelemetryconfigs - shortNames: - - stc - singular: sharedtelemetryconfig - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .metadata.annotations.sharedtelemetryconfig\.internal\.spaces\.upbound\.io/selected - name: Selected - type: string - - jsonPath: .metadata.annotations.sharedtelemetryconfig\.internal\.spaces\.upbound\.io/failed - name: Failed - type: string - - jsonPath: .metadata.annotations.sharedtelemetryconfig\.internal\.spaces\.upbound\.io/provisioned - name: Provisioned - type: string - - jsonPath: .status.conditions[?(@.type=='Validated')].status - name: Validated - type: string - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - name: v1alpha1 - schema: - openAPIV3Schema: - description: SharedTelemetryConfig defines a telemetry configuration over - a set of ControlPlanes. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: SharedTelemetryConfigSpec defines a telemetry configuration - over a set of ControlPlanes. - properties: - configPatchSecretRefs: - description: |- - ConfigPatchSecretRefs allows defining patches sourced from secrets to be - applied to the telemetry configuration. - items: - description: |- - ConfigPatchSecretRef defines a config patch sourced from a secret to be - applied to the telemetry configuration. - properties: - key: - description: Key in the secret from which to source the patch. - type: string - name: - description: Name of the secret. - type: string - path: - description: |- - Path to the field in the telemetry configuration to patch. - Currently, we only support patching exporters, so the path - needs to start with "exporters". - type: string - x-kubernetes-validations: - - message: Only 'exporters' patching is supported, path must - start with 'exporters.' 
- rule: self.startsWith('exporters.') - required: - - key - - name - - path - type: object - type: array - controlPlaneSelector: - description: |- - ControlPlaneSelector defines the selector for ControlPlanes on which to - configure telemetry. - properties: - labelSelectors: - description: |- - A resource is matched if any of the label selector matches. - In case when the list is empty, resource is matched too. - items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - names: - description: |- - A resource is selected if its metadata.name matches any of the provided names. - In case when the list is empty, resource is matched too. - items: - type: string - type: array - x-kubernetes-list-type: set - type: object - x-kubernetes-validations: - - message: either names or a labelSelector must be specified - rule: (has(self.labelSelectors) || has(self.names)) && (size(self.labelSelectors) - > 0 || size(self.names) > 0) - exportPipeline: - description: |- - ExportPipeline defines the telemetry exporter pipeline to configure on - the selected ControlPlanes. - properties: - logs: - description: |- - Logs defines the logs exporter pipeline to configure on the - selected ControlPlanes. The value has to be present in the - spec.exporters field. - items: - type: string - maxItems: 10 - type: array - metrics: - description: |- - Metrics defines the metrics exporter pipeline to configure on the - selected ControlPlanes. The value has to be present in the - spec.exporters field. - items: - type: string - maxItems: 10 - type: array - traces: - description: |- - Traces defines the traces exporter pipeline to configure on the - selected ControlPlanes. The value has to be present in the - spec.exporters field. - items: - type: string - maxItems: 10 - type: array - type: object - exporters: - description: |- - Exporters defines the exporters to configure on the selected ControlPlanes. 
-                Untyped as we use the underlying OpenTelemetryOperator to configure the
-                OpenTelemetry collector's exporters. Use the OpenTelemetry Collector
-                documentation to configure the exporters.
-                Currently, the only supported exporters are push-based exporters.
-              type: object
-              x-kubernetes-preserve-unknown-fields: true
-            processorPipeline:
-              description: |-
-                ProcessorPipeline defines the telemetry processor pipeline to configure on
-                the selected ControlPlanes.
-              properties:
-                logs:
-                  description: |-
-                    Logs defines the logs processor pipeline to configure on the
-                    selected ControlPlanes. The value has to be present in the
-                    spec.processors field.
-                  items:
-                    type: string
-                  maxItems: 10
-                  type: array
-                metrics:
-                  description: |-
-                    Metrics defines the metrics processor pipeline to configure on the
-                    selected ControlPlanes. The value has to be present in the
-                    spec.processors field.
-                  items:
-                    type: string
-                  maxItems: 10
-                  type: array
-                traces:
-                  description: |-
-                    Traces defines the traces processor pipeline to configure on the
-                    selected ControlPlanes. The value has to be present in the
-                    spec.processors field.
-                  items:
-                    type: string
-                  maxItems: 10
-                  type: array
-              type: object
-            processors:
-              description: |-
-                Processors defines the processors to configure on the selected ControlPlanes.
-                Untyped as we use the underlying OpenTelemetryOperator to configure the
-                OpenTelemetry collector's processors. Use the OpenTelemetry Collector
-                documentation to configure the processors.
-              type: object
-              x-kubernetes-preserve-unknown-fields: true
-          required:
-          - controlPlaneSelector
-          - exportPipeline
-          - exporters
-          type: object
-        status:
-          description: SharedTelemetryConfigStatus represents the observed state
-            of a SharedTelemetryConfig.
-          properties:
-            conditions:
-              description: Conditions of the resource.
-              items:
-                description: A Condition that may apply to a resource.
-                properties:
-                  lastTransitionTime:
-                    description: |-
-                      LastTransitionTime is the last time this condition transitioned from one
-                      status to another.
-                    format: date-time
-                    type: string
-                  message:
-                    description: |-
-                      A Message containing details about this condition's last transition from
-                      one status to another, if any.
-                    type: string
-                  observedGeneration:
-                    description: |-
-                      ObservedGeneration represents the .metadata.generation that the condition was set based upon.
-                      For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
-                      with respect to the current state of the instance.
-                    format: int64
-                    type: integer
-                  reason:
-                    description: A Reason for this condition's last transition from
-                      one status to another.
-                    type: string
-                  status:
-                    description: Status of this condition; is it currently True,
-                      False, or Unknown?
-                    type: string
-                  type:
-                    description: |-
-                      Type of this condition. At most one of each condition type may apply to
-                      a resource at any point in time.
-                    type: string
-                required:
-                - lastTransitionTime
-                - reason
-                - status
-                - type
-                type: object
-              type: array
-              x-kubernetes-list-map-keys:
-              - type
-              x-kubernetes-list-type: map
-            failed:
-              description: List of provisioning failures.
-              items:
-                description: SharedTelemetryConfigProvisioningFailure defines configuration
-                  provisioning failure.
-                properties:
-                  conditions:
-                    description: Conditions of the resource.
-                    items:
-                      description: A Condition that may apply to a resource.
- properties: - lastTransitionTime: - description: |- - LastTransitionTime is the last time this condition transitioned from one - status to another. - format: date-time - type: string - message: - description: |- - A Message containing details about this condition's last transition from - one status to another, if any. - type: string - observedGeneration: - description: |- - ObservedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - type: integer - reason: - description: A Reason for this condition's last transition - from one status to another. - type: string - status: - description: Status of this condition; is it currently - True, False, or Unknown? - type: string - type: - description: |- - Type of this condition. At most one of each condition type may apply to - a resource at any point in time. - type: string - required: - - lastTransitionTime - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - controlPlane: - description: ControlPlane name where the failure occurred. - type: string - observedGeneration: - description: |- - ObservedGeneration is the latest metadata.generation - which resulted in either a ready state, or stalled due to error - it can not recover from without human intervention. - format: int64 - type: integer - required: - - controlPlane - type: object - type: array - x-kubernetes-list-map-keys: - - controlPlane - x-kubernetes-list-type: map - observedGeneration: - description: |- - ObservedGeneration is the latest metadata.generation - which resulted in either a ready state, or stalled due to error - it can not recover from without human intervention. - format: int64 - type: integer - provisioned: - description: List of successfully provisioned targets. - items: - type: string - type: array - x-kubernetes-list-type: set - selectedControlPlanes: - description: SelectedControlPlanes represents the names of the selected - ControlPlanes. 
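Before the next deleted file, a quick illustration of the SharedTelemetryConfig schema above. This sketch is an editor's addition: the endpoint, names, and namespace are invented, and the exporter body is passed through untyped to the OpenTelemetry collector, so its keys follow the upstream exporter documentation rather than this CRD.

```yaml
# Illustrative sketch: ship metrics and traces from two control planes to a
# hypothetical OTLP endpoint. Every pipeline entry must name a key under
# spec.exporters, per the schema above.
apiVersion: observability.spaces.upbound.io/v1alpha1
kind: SharedTelemetryConfig
metadata:
  name: otlp-export
  namespace: my-group
spec:
  controlPlaneSelector:
    names:                    # either names or labelSelectors is required
      - ctp-dev
      - ctp-prod
  exporters:
    otlphttp:                 # untyped pass-through to the OTel collector
      endpoint: https://otlp.example.com:4318   # hypothetical endpoint
  exportPipeline:
    metrics: [otlphttp]
    traces: [otlphttp]
```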
-              items:
-                type: string
-              type: array
-              x-kubernetes-list-type: set
-          type: object
-        required:
-        - spec
-        type: object
-    served: true
-    storage: true
-    subresources:
-      status: {}
diff --git a/static/crds/space/v1.11/policy.spaces.upbound.io_sharedupboundpolicies.yaml b/static/crds/space/v1.11/policy.spaces.upbound.io_sharedupboundpolicies.yaml
deleted file mode 100644
index 30f732e75..000000000
--- a/static/crds/space/v1.11/policy.spaces.upbound.io_sharedupboundpolicies.yaml
+++ /dev/null
@@ -1,4303 +0,0 @@
----
-apiVersion: apiextensions.k8s.io/v1
-kind: CustomResourceDefinition
-metadata:
-  annotations:
-    controller-gen.kubebuilder.io/version: v0.16.3
-  name: sharedupboundpolicies.policy.spaces.upbound.io
-spec:
-  group: policy.spaces.upbound.io
-  names:
-    categories:
-    - policies
-    kind: SharedUpboundPolicy
-    listKind: SharedUpboundPolicyList
-    plural: sharedupboundpolicies
-    shortNames:
-    - sup
-    singular: sharedupboundpolicy
-  scope: Namespaced
-  versions:
-  - additionalPrinterColumns:
-    - jsonPath: .metadata.annotations.sharedupboundpolicies\.internal\.spaces\.upbound\.io/provisioned-total
-      name: Provisioned
-      type: string
-    - jsonPath: .metadata.creationTimestamp
-      name: AGE
-      type: date
-    name: v1alpha1
-    schema:
-      openAPIV3Schema:
-        description: |-
-          SharedUpboundPolicy specifies a shared Kyverno policy projected into the specified
-          ControlPlanes of the same namespace as SharedUpboundPolicy.
-        properties:
-          apiVersion:
-            description: |-
-              APIVersion defines the versioned schema of this representation of an object.
-              Servers should convert recognized schemas to the latest internal value, and
-              may reject unrecognized values.
-              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
-            type: string
-          kind:
-            description: |-
-              Kind is a string value representing the REST resource this object represents.
-              Servers may infer this from the endpoint the client submits requests to.
-              Cannot be updated.
-              In CamelCase.
-              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
-            type: string
-          metadata:
-            type: object
-          spec:
-            description: SharedUpboundPolicySpec defines the desired state of SharedUpboundPolicy.
-            properties:
-              admission:
-                default: true
-                description: |-
-                  Admission controls if rules are applied during admission.
-                  Optional. Default value is "true".
-                type: boolean
-              applyRules:
-                description: |-
-                  ApplyRules controls how rules in a policy are applied. Rules are processed in
-                  the order of declaration. When set to `One` processing stops after a rule has
-                  been applied i.e. the rule matches and results in a pass, fail, or error. When
-                  set to `All` all rules in the policy are processed. The default is `All`.
-                enum:
-                - All
-                - One
-                type: string
-              background:
-                default: true
-                description: |-
-                  Background controls if rules are applied to existing resources during a background scan.
-                  Optional. Default value is "true". The value must be set to "false" if the policy rule
-                  uses variables that are only available in the admission review request (e.g. user name).
-                type: boolean
-              controlPlaneSelector:
-                description: |-
-                  The policy is projected only to control planes
-                  matching the provided selector. Either names or a labelSelector must be specified.
-                properties:
-                  labelSelectors:
-                    description: |-
-                      A resource is matched if any of the label selector matches.
-                      In case when the list is empty, resource is matched too.
- items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - names: - description: |- - A resource is selected if its metadata.name matches any of the provided names. - In case when the list is empty, resource is matched too. - items: - type: string - type: array - x-kubernetes-list-type: set - type: object - x-kubernetes-validations: - - message: either names or a labelSelector must be specified - rule: (has(self.labelSelectors) || has(self.names)) && (size(self.labelSelectors) - > 0 || size(self.names) > 0) - failurePolicy: - description: |- - FailurePolicy defines how unexpected policy errors and webhook response timeout errors are handled. - Rules within the same policy share the same failure behavior. - This field should not be accessed directly, instead `GetFailurePolicy()` should be used. - Allowed values are Ignore or Fail. Defaults to Fail. - enum: - - Ignore - - Fail - type: string - generateExisting: - description: |- - GenerateExisting controls whether to trigger generate rule in existing resources - If is set to "true" generate rule will be triggered and applied to existing matched resources. - Defaults to "false" if not specified. - type: boolean - generateExistingOnPolicyUpdate: - description: Deprecated, use generateExisting instead - type: boolean - mutateExistingOnPolicyUpdate: - description: |- - MutateExistingOnPolicyUpdate controls if a mutateExisting policy is applied on policy events. - Default value is "false". - type: boolean - policyMetadata: - description: The metadata of the policy to be created. - properties: - annotations: - additionalProperties: - type: string - description: Annotations that are set on projected resource. - type: object - labels: - additionalProperties: - type: string - description: Labels that are set on projected resource. - type: object - type: object - policyName: - description: |- - PolicyName is the name to use when creating policy within a control plane. 
- optional, if not set, SharedUpboundPolicy name will be used. - When set, it is immutable. - maxLength: 253 - minLength: 1 - type: string - x-kubernetes-validations: - - message: policyName is immutable - rule: self == oldSelf - rules: - description: |- - Rules is a list of Rule instances. A Policy contains multiple rules and - each rule can validate, mutate, or generate resources. - items: - description: |- - Rule defines a validation, mutation, or generation control for matching resources. - Each rules contains a match declaration to select resources, and an optional exclude - declaration to specify which resources to exclude. - properties: - celPreconditions: - description: |- - CELPreconditions are used to determine if a policy rule should be applied by evaluating a - set of CEL conditions. It can only be used with the validate.cel subrule - items: - description: MatchCondition represents a condition which must - by fulfilled for a request to be sent to a webhook. - properties: - expression: - description: |- - Expression represents the expression which will be evaluated by CEL. Must evaluate to bool. - CEL expressions have access to the contents of the AdmissionRequest and Authorizer, organized into CEL variables: - - 'object' - The object from the incoming request. The value is null for DELETE requests. - 'oldObject' - The existing object. The value is null for CREATE requests. - 'request' - Attributes of the admission request(/pkg/apis/admission/types.go#AdmissionRequest). - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. - See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the - request resource. - Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ - - Required. - type: string - name: - description: |- - Name is an identifier for this match condition, used for strategic merging of MatchConditions, - as well as providing an identifier for logging purposes. A good name should be descriptive of - the associated expression. - Name must be a qualified name consisting of alphanumeric characters, '-', '_' or '.', and - must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or - '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an - optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName') - - Required. - type: string - required: - - expression - - name - type: object - type: array - context: - description: Context defines variables and data sources that - can be used during rule execution. - items: - description: |- - ContextEntry adds variables and data sources to a rule Context. Either a - ConfigMap reference or a APILookup must be provided. - properties: - apiCall: - description: |- - APICall is an HTTP request to the Kubernetes API server, or other JSON web service. - The data returned is stored in the context with the name for the context entry. - properties: - data: - description: Data specifies the POST data sent to - the server. 
- items: - description: RequestData contains the HTTP POST - data - properties: - key: - description: Key is a unique identifier for - the data value - type: string - value: - description: Value is the data value - x-kubernetes-preserve-unknown-fields: true - required: - - key - - value - type: object - type: array - jmesPath: - description: |- - JMESPath is an optional JSON Match Expression that can be used to - transform the JSON response returned from the server. For example - a JMESPath of "items | length(@)" applied to the API server response - for the URLPath "/apis/apps/v1/deployments" will return the total count - of deployments across all namespaces. - type: string - method: - default: GET - description: Method is the HTTP request type (GET - or POST). - enum: - - GET - - POST - type: string - service: - description: Service is an API call to a JSON web - service - properties: - caBundle: - description: |- - CABundle is a PEM encoded CA bundle which will be used to validate - the server certificate. - type: string - url: - description: |- - URL is the JSON web service URL. A typical form is - `https://{service}.{namespace}:{port}/{path}`. - type: string - required: - - url - type: object - urlPath: - description: |- - URLPath is the URL path to be used in the HTTP GET or POST request to the - Kubernetes API server (e.g. "/api/v1/namespaces" or "/apis/apps/v1/deployments"). - The format required is the same format used by the `kubectl get --raw` command. - See https://kyverno.io/docs/writing-policies/external-data-sources/#variables-from-kubernetes-api-server-calls - for details. - type: string - type: object - configMap: - description: ConfigMap is the ConfigMap reference. - properties: - name: - description: Name is the ConfigMap name. - type: string - namespace: - description: Namespace is the ConfigMap namespace. - type: string - required: - - name - type: object - imageRegistry: - description: |- - ImageRegistry defines requests to an OCI/Docker V2 registry to fetch image - details. - properties: - imageRegistryCredentials: - description: ImageRegistryCredentials provides credentials - that will be used for authentication with registry - properties: - allowInsecureRegistry: - description: AllowInsecureRegistry allows insecure - access to a registry. - type: boolean - providers: - description: |- - Providers specifies a list of OCI Registry names, whose authentication providers are provided. - It can be of one of these values: default,google,azure,amazon,github. - items: - description: ImageRegistryCredentialsProvidersType - provides the list of credential providers - required. - enum: - - default - - amazon - - azure - - google - - github - type: string - type: array - secrets: - description: |- - Secrets specifies a list of secrets that are provided for credentials. - Secrets must live in the Kyverno namespace. - items: - type: string - type: array - type: object - jmesPath: - description: |- - JMESPath is an optional JSON Match Expression that can be used to - transform the ImageData struct returned as a result of processing - the image reference. - type: string - reference: - description: |- - Reference is image reference to a container image in the registry. - Example: ghcr.io/kyverno/kyverno:latest - type: string - required: - - reference - type: object - name: - description: Name is the variable name. - type: string - variable: - description: Variable defines an arbitrary JMESPath context - variable that can be defined inline. 
- properties: - default: - description: |- - Default is an optional arbitrary JSON object that the variable may take if the JMESPath - expression evaluates to nil - x-kubernetes-preserve-unknown-fields: true - jmesPath: - description: |- - JMESPath is an optional JMESPath Expression that can be used to - transform the variable. - type: string - value: - description: Value is any arbitrary JSON object representable - in YAML or JSON form. - x-kubernetes-preserve-unknown-fields: true - type: object - type: object - type: array - exclude: - description: |- - ExcludeResources defines when this policy rule should not be applied. The exclude - criteria can include resource information (e.g. kind, name, namespace, labels) - and admission review request information like the name or role. - properties: - all: - description: All allows specifying resources which will - be ANDed - items: - description: ResourceFilter allow users to "AND" or "OR" - between resources - properties: - clusterRoles: - description: ClusterRoles is the list of cluster-wide - role names for the user. - items: - type: string - type: array - resources: - description: ResourceDescription contains information - about the resource being created or modified. - properties: - annotations: - additionalProperties: - type: string - description: |- - Annotations is a map of annotations (key-value pairs of type string). Annotation keys - and values support the wildcard characters "*" (matches zero or many characters) and - "?" (matches at least one character). - type: object - kinds: - description: Kinds is a list of resource kinds. - items: - type: string - type: array - name: - description: |- - Name is the name of the resource. The name supports wildcard characters - "*" (matches zero or many characters) and "?" (at least one character). - NOTE: "Name" is being deprecated in favor of "Names". - type: string - names: - description: |- - Names are the names of the resources. Each name supports wildcard characters - "*" (matches zero or many characters) and "?" (at least one character). - items: - type: string - type: array - namespaceSelector: - description: |- - NamespaceSelector is a label selector for the resource namespace. Label keys and values - in `matchLabels` support the wildcard characters `*` (matches zero or many characters) - and `?` (matches one character).Wildcards allows writing label selectors like - ["storage.k8s.io/*": "*"]. Note that using ["*" : "*"] matches any key and value but - does not match an empty label set. - properties: - matchExpressions: - description: matchExpressions is a list of - label selector requirements. The requirements - are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that - the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. 
- items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - namespaces: - description: |- - Namespaces is a list of namespaces names. Each name supports wildcard characters - "*" (matches zero or many characters) and "?" (at least one character). - items: - type: string - type: array - operations: - description: Operations can contain values ["CREATE, - "UPDATE", "CONNECT", "DELETE"], which are used - to match a specific action. - items: - description: AdmissionOperation can have one - of the values CREATE, UPDATE, CONNECT, DELETE, - which are used to match a specific action. - enum: - - CREATE - - CONNECT - - UPDATE - - DELETE - type: string - type: array - selector: - description: |- - Selector is a label selector. Label keys and values in `matchLabels` support the wildcard - characters `*` (matches zero or many characters) and `?` (matches one character). - Wildcards allows writing label selectors like ["storage.k8s.io/*": "*"]. Note that - using ["*" : "*"] matches any key and value but does not match an empty label set. - properties: - matchExpressions: - description: matchExpressions is a list of - label selector requirements. The requirements - are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that - the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: object - roles: - description: Roles is the list of namespaced role - names for the user. - items: - type: string - type: array - subjects: - description: Subjects is the list of subject names - like users, user groups, and service accounts. - items: - description: |- - Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference, - or a value for non-objects such as user and group names. - properties: - apiGroup: - description: |- - APIGroup holds the API group of the referenced subject. 
- Defaults to "" for ServiceAccount subjects. - Defaults to "rbac.authorization.k8s.io" for User and Group subjects. - type: string - kind: - description: |- - Kind of object being referenced. Values defined by this API group are "User", "Group", and "ServiceAccount". - If the Authorizer does not recognized the kind value, the Authorizer should report an error. - type: string - name: - description: Name of the object being referenced. - type: string - namespace: - description: |- - Namespace of the referenced object. If the object kind is non-namespace, such as "User" or "Group", and this value is not empty - the Authorizer should report an error. - type: string - required: - - kind - - name - type: object - x-kubernetes-map-type: atomic - type: array - type: object - type: array - any: - description: Any allows specifying resources which will - be ORed - items: - description: ResourceFilter allow users to "AND" or "OR" - between resources - properties: - clusterRoles: - description: ClusterRoles is the list of cluster-wide - role names for the user. - items: - type: string - type: array - resources: - description: ResourceDescription contains information - about the resource being created or modified. - properties: - annotations: - additionalProperties: - type: string - description: |- - Annotations is a map of annotations (key-value pairs of type string). Annotation keys - and values support the wildcard characters "*" (matches zero or many characters) and - "?" (matches at least one character). - type: object - kinds: - description: Kinds is a list of resource kinds. - items: - type: string - type: array - name: - description: |- - Name is the name of the resource. The name supports wildcard characters - "*" (matches zero or many characters) and "?" (at least one character). - NOTE: "Name" is being deprecated in favor of "Names". - type: string - names: - description: |- - Names are the names of the resources. Each name supports wildcard characters - "*" (matches zero or many characters) and "?" (at least one character). - items: - type: string - type: array - namespaceSelector: - description: |- - NamespaceSelector is a label selector for the resource namespace. Label keys and values - in `matchLabels` support the wildcard characters `*` (matches zero or many characters) - and `?` (matches one character).Wildcards allows writing label selectors like - ["storage.k8s.io/*": "*"]. Note that using ["*" : "*"] matches any key and value but - does not match an empty label set. - properties: - matchExpressions: - description: matchExpressions is a list of - label selector requirements. The requirements - are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that - the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. 
[Elided for readability: the deleted hunk continues the inlined Kyverno ClusterPolicy CRD schema. These lines finish the rule's `exclude` block: the standard label-selector schema (`matchExpressions` with the In, NotIn, Exists, and DoesNotExist operators; `matchLabels`), wildcard-capable `namespaces` lists, the `operations` enum (CREATE, UPDATE, CONNECT, DELETE), a resource `selector`, the per-filter `roles`, `clusterRoles`, and `subjects` fields, and the deprecated rule-level ResourceDescription (direct use under match/exclude is deprecated in favor of `any`/`all`, and `name` in favor of `names`).]
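To make the elided `exclude` schema concrete, here is a minimal sketch of a policy that carves out namespaces and a service account from a rule's scope. The policy name, wildcard pattern, and subject are illustrative assumptions, not values from this patch.

```yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: example-exclusions            # hypothetical policy name
spec:
  rules:
    - name: require-app-label         # hypothetical rule name
      match:
        any:
          - resources:
              kinds: [Pod]
      exclude:
        any:
          # Skip namespaces matching the wildcard pattern.
          - resources:
              namespaces: ["kube-*"]
          # Also skip requests made by this CI service account.
          - subjects:
              - kind: ServiceAccount
                name: build-bot       # illustrative subject
                namespace: ci
      validate:
        message: "Pods must carry an app label."
        pattern:
          metadata:
            labels:
              app: "?*"               # wildcard: at least one character
```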
[Elided: the end of the `exclude` subjects schema (apiGroup defaults to the empty string for ServiceAccounts and to rbac.authorization.k8s.io for Users and Groups; kind is one of User, Group, or ServiceAccount and is required along with name), the rule's `generate` block (apiVersion, kind, name, namespace, uid, `synchronize`, and the mutually exclusive `clone`, `cloneList`, and `data` sources), the `imageExtractors` map (jmesPath, key, name, path, value; valid only for verifyImages rules), and the opening of the `match` block, whose `all` list ANDs resource filters.]
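A hedged sketch of the `generate` fields just elided: cloning a source resource into each newly created namespace and keeping it in sync. The source namespace and ConfigMap name are assumptions for illustration.

```yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: sync-default-config                 # hypothetical policy name
spec:
  rules:
    - name: clone-config
      match:
        any:
          - resources:
              kinds: [Namespace]
              operations: [CREATE]
      generate:
        apiVersion: v1
        kind: ConfigMap
        name: default-config                             # generated resource name
        namespace: "{{ request.object.metadata.name }}"  # the new namespace
        synchronize: true     # overwrite drift with data from the clone source
        clone:
          namespace: platform        # illustrative source namespace
          name: default-config
```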
[Elided: the body of the `match` block. Both the `all` (ANDed) and `any` (ORed) lists take ResourceFilters, each combining a ResourceDescription (`annotations`, `kinds`, wildcard-capable `name`/`names` and `namespaces`, `namespaceSelector`, `operations`, `selector`) with `roles`, `clusterRoles`, and `subjects`; a deprecated direct ResourceDescription at the match level follows. The selector fields repeat the standard label-selector schema shown earlier.]
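A short sketch combining the `match` fields described above; with `all`, every filter must match for the rule to apply. The label key, values, and cluster role name are illustrative.

```yaml
match:
  all:
    - resources:
        kinds: [Deployment, StatefulSet]
        operations: [CREATE, UPDATE]
        namespaceSelector:
          matchExpressions:
            - key: environment            # illustrative label key
              operator: In
              values: [dev, staging]
    # AND the requester must be bound to this cluster role.
    - clusterRoles: ["ci-deployer"]       # illustrative role name
```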
[Elided: the tail of the match-level selector and subjects schema, then the `mutate` block's `foreach` items: per-element `context` entries (`apiCall` with data, jmesPath, method, service, and urlPath; `configMap`; `imageRegistry` with credential providers default, amazon, azure, google, and github; and inline `variable` definitions with default, jmesPath, and value), a nested `foreach`, the JMESPath `list` to iterate, the iteration `order` (Ascending or Descending), `patchStrategicMerge`, `patchesJson6902`, and per-element `preconditions` whose Condition operators range from Equals through DurationLessThan.]
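A hedged sketch of the elided `foreach` mutation schema: iterate a JMESPath-selected list and patch each element, guarded by a per-element precondition. Defaulting allowPrivilegeEscalation is an assumed policy goal, not something this patch prescribes.

```yaml
mutate:
  foreach:
    - list: "request.object.spec.containers"   # JMESPath list to iterate
      order: Ascending
      preconditions:
        all:
          # Only patch containers that set no securityContext at all.
          - key: "{{ element.securityContext || '' }}"
            operator: Equals
            value: ""
      patchStrategicMerge:
        spec:
          containers:
            - name: "{{ element.name }}"
              securityContext:
                allowPrivilegeEscalation: false
```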
[Elided: the remainder of the `any` precondition schema, the mutate-level `patchStrategicMerge` and `patchesJson6902` fields, and the start of `targets` for mutating existing resources: each TargetResourceSpec carries apiVersion, kind, and name plus its own `context` entries with the same apiCall, configMap, imageRegistry, and variable schema as above.]
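The `targets` schema elided here drives mutations of existing resources rather than the admitted one. A minimal sketch, assuming a hypothetical ConfigMap that records the most recently admitted namespace.

```yaml
mutate:
  targets:
    - apiVersion: v1
      kind: ConfigMap
      name: admission-log            # hypothetical existing resource
      namespace: platform            # illustrative namespace
  patchStrategicMerge:
    data:
      last-admitted-namespace: "{{ request.object.metadata.name }}"
```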
[Elided: the end of `targets` (namespace, per-target `preconditions`, uid), the rule `name` (must be unique within the policy, at most 63 characters), rule-level `preconditions` (nested `any`/`all`; a bare condition list remains supported for backwards compatibility but is deprecated), `skipBackgroundRequests` (defaults to true; set to false to apply generate and mutate-existing rules to background-controller requests), and the opening of `validate`: `anyPattern` and the CEL `auditAnnotations` schema, whose unique qualified `key` is combined with the policy name and whose `valueExpression` must evaluate to a string or null.]
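Rule-level `preconditions`, as defined in the elided schema, gate whether the rule runs at all. A small sketch; the label key and value are assumptions.

```yaml
preconditions:
  all:
    - key: "{{ request.operation }}"
      operator: In
      value: [CREATE, UPDATE]
    - key: "{{ request.object.metadata.labels.tier || '' }}"
      operator: NotEquals
      value: system                  # illustrative label value
```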
[Elided: the rest of the CEL validation schema: the `auditAnnotations` limits (a `valueExpression` of at most 5 KB whose result is truncated at 10 KB and comma-joined across matching bindings), the `expressions` items (the CEL `expression` with its documented variables object, oldObject, request, params, namespaceObject, variables, and authorizer, the property-name escaping rules, and set/map list semantics, plus `message`, `messageExpression`, and `reason` taking Unauthorized, Forbidden, Invalid, or RequestEntityTooLarge), `paramKind` (apiVersion and kind), `paramRef` (mutually exclusive `name` and `selector`, `namespace`, and `parameterNotFoundAction` set to Allow or Deny), and the start of `variables`.]
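Pulling the elided CEL fields together: a hedged sketch of a `validate.cel` rule that composes a variable and checks it in an expression. The replica ceiling is an arbitrary illustrative value.

```yaml
validate:
  cel:
    variables:
      - name: replicas               # accessed as variables.replicas
        expression: "has(object.spec.replicas) ? object.spec.replicas : 1"
    expressions:
      - expression: "variables.replicas <= 5"
        message: "Deployments may not request more than 5 replicas."
        reason: Invalid
```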
- properties: - expression: - description: |- - Expression is the expression that will be evaluated as the value of the variable. - The CEL expression has access to the same identifiers as the CEL expressions in Validation. - type: string - name: - description: |- - Name is the name of the variable. The name must be a valid CEL identifier and unique among all variables. - The variable can be accessed in other expressions through `variables` - For example, if name is "foo", the variable will be available as `variables.foo` - type: string - required: - - expression - - name - type: object - type: array - type: object - deny: - description: Deny defines conditions used to pass or fail - a validation rule. - properties: - conditions: - description: |- - Multiple conditions can be declared under an `any` or `all` statement. A direct list - of conditions (without `any` or `all` statements) is also supported for backwards compatibility - but will be deprecated in the next major release. - See: https://kyverno.io/docs/writing-policies/validate/#deny-rules - x-kubernetes-preserve-unknown-fields: true - type: object - foreach: - description: ForEach applies validate rules to a list of - sub-elements by creating a context for each entry in the - list and looping over it to apply the specified logic. - items: - description: ForEachValidation applies validate rules - to a list of sub-elements by creating a context for - each entry in the list and looping over it to apply - the specified logic. - properties: - anyPattern: - description: |- - AnyPattern specifies list of validation patterns. At least one of the patterns - must be satisfied for the validation rule to succeed. - x-kubernetes-preserve-unknown-fields: true - context: - description: Context defines variables and data sources - that can be used during rule execution. - items: - description: |- - ContextEntry adds variables and data sources to a rule Context. Either a - ConfigMap reference or a APILookup must be provided. - properties: - apiCall: - description: |- - APICall is an HTTP request to the Kubernetes API server, or other JSON web service. - The data returned is stored in the context with the name for the context entry. - properties: - data: - description: Data specifies the POST data - sent to the server. - items: - description: RequestData contains the - HTTP POST data - properties: - key: - description: Key is a unique identifier - for the data value - type: string - value: - description: Value is the data value - x-kubernetes-preserve-unknown-fields: true - required: - - key - - value - type: object - type: array - jmesPath: - description: |- - JMESPath is an optional JSON Match Expression that can be used to - transform the JSON response returned from the server. For example - a JMESPath of "items | length(@)" applied to the API server response - for the URLPath "/apis/apps/v1/deployments" will return the total count - of deployments across all namespaces. - type: string - method: - default: GET - description: Method is the HTTP request - type (GET or POST). - enum: - - GET - - POST - type: string - service: - description: Service is an API call to a - JSON web service - properties: - caBundle: - description: |- - CABundle is a PEM encoded CA bundle which will be used to validate - the server certificate. - type: string - url: - description: |- - URL is the JSON web service URL. A typical form is - `https://{service}.{namespace}:{port}/{path}`. 
- type: string - required: - - url - type: object - urlPath: - description: |- - URLPath is the URL path to be used in the HTTP GET or POST request to the - Kubernetes API server (e.g. "/api/v1/namespaces" or "/apis/apps/v1/deployments"). - The format required is the same format used by the `kubectl get --raw` command. - See https://kyverno.io/docs/writing-policies/external-data-sources/#variables-from-kubernetes-api-server-calls - for details. - type: string - type: object - configMap: - description: ConfigMap is the ConfigMap reference. - properties: - name: - description: Name is the ConfigMap name. - type: string - namespace: - description: Namespace is the ConfigMap - namespace. - type: string - required: - - name - type: object - imageRegistry: - description: |- - ImageRegistry defines requests to an OCI/Docker V2 registry to fetch image - details. - properties: - imageRegistryCredentials: - description: ImageRegistryCredentials provides - credentials that will be used for authentication - with registry - properties: - allowInsecureRegistry: - description: AllowInsecureRegistry allows - insecure access to a registry. - type: boolean - providers: - description: |- - Providers specifies a list of OCI Registry names, whose authentication providers are provided. - It can be of one of these values: default,google,azure,amazon,github. - items: - description: ImageRegistryCredentialsProvidersType - provides the list of credential - providers required. - enum: - - default - - amazon - - azure - - google - - github - type: string - type: array - secrets: - description: |- - Secrets specifies a list of secrets that are provided for credentials. - Secrets must live in the Kyverno namespace. - items: - type: string - type: array - type: object - jmesPath: - description: |- - JMESPath is an optional JSON Match Expression that can be used to - transform the ImageData struct returned as a result of processing - the image reference. - type: string - reference: - description: |- - Reference is image reference to a container image in the registry. - Example: ghcr.io/kyverno/kyverno:latest - type: string - required: - - reference - type: object - name: - description: Name is the variable name. - type: string - variable: - description: Variable defines an arbitrary JMESPath - context variable that can be defined inline. - properties: - default: - description: |- - Default is an optional arbitrary JSON object that the variable may take if the JMESPath - expression evaluates to nil - x-kubernetes-preserve-unknown-fields: true - jmesPath: - description: |- - JMESPath is an optional JMESPath Expression that can be used to - transform the variable. - type: string - value: - description: Value is any arbitrary JSON - object representable in YAML or JSON form. - x-kubernetes-preserve-unknown-fields: true - type: object - type: object - type: array - deny: - description: Deny defines conditions used to pass - or fail a validation rule. - properties: - conditions: - description: |- - Multiple conditions can be declared under an `any` or `all` statement. A direct list - of conditions (without `any` or `all` statements) is also supported for backwards compatibility - but will be deprecated in the next major release. - See: https://kyverno.io/docs/writing-policies/validate/#deny-rules - x-kubernetes-preserve-unknown-fields: true - type: object - elementScope: - description: |- - ElementScope specifies whether to use the current list element as the scope for validation. Defaults to "true" if not specified. 
- When set to "false", "request.object" is used as the validation scope within the foreach - block to allow referencing other elements in the subtree. - type: boolean - foreach: - description: Foreach declares a nested foreach iterator - x-kubernetes-preserve-unknown-fields: true - list: - description: |- - List specifies a JMESPath expression that results in one or more elements - to which the validation logic is applied. - type: string - pattern: - description: Pattern specifies an overlay-style pattern - used to check resources. - x-kubernetes-preserve-unknown-fields: true - preconditions: - description: |- - AnyAllConditions are used to determine if a policy rule should be applied by evaluating a - set of conditions. The declaration can contain nested `any` or `all` statements. - See: https://kyverno.io/docs/writing-policies/preconditions/ - properties: - all: - description: |- - AllConditions enable variable-based conditional rule execution. This is useful for - finer control of when an rule is applied. A condition can reference object data - using JMESPath notation. - Here, all of the conditions need to pass - items: - description: Condition defines variable-based - conditional criteria for rule execution. - properties: - key: - description: Key is the context entry (using - JMESPath) for conditional rule evaluation. - x-kubernetes-preserve-unknown-fields: true - message: - description: Message is an optional display - message - type: string - operator: - description: |- - Operator is the conditional operation to perform. Valid operators are: - Equals, NotEquals, In, AnyIn, AllIn, NotIn, AnyNotIn, AllNotIn, GreaterThanOrEquals, - GreaterThan, LessThanOrEquals, LessThan, DurationGreaterThanOrEquals, DurationGreaterThan, - DurationLessThanOrEquals, DurationLessThan - enum: - - Equals - - NotEquals - - In - - AnyIn - - AllIn - - NotIn - - AnyNotIn - - AllNotIn - - GreaterThanOrEquals - - GreaterThan - - LessThanOrEquals - - LessThan - - DurationGreaterThanOrEquals - - DurationGreaterThan - - DurationLessThanOrEquals - - DurationLessThan - type: string - value: - description: |- - Value is the conditional value, or set of values. The values can be fixed set - or can be variables declared using JMESPath. - x-kubernetes-preserve-unknown-fields: true - type: object - type: array - any: - description: |- - AnyConditions enable variable-based conditional rule execution. This is useful for - finer control of when an rule is applied. A condition can reference object data - using JMESPath notation. - Here, at least one of the conditions need to pass - items: - description: Condition defines variable-based - conditional criteria for rule execution. - properties: - key: - description: Key is the context entry (using - JMESPath) for conditional rule evaluation. - x-kubernetes-preserve-unknown-fields: true - message: - description: Message is an optional display - message - type: string - operator: - description: |- - Operator is the conditional operation to perform. 
Valid operators are: - Equals, NotEquals, In, AnyIn, AllIn, NotIn, AnyNotIn, AllNotIn, GreaterThanOrEquals, - GreaterThan, LessThanOrEquals, LessThan, DurationGreaterThanOrEquals, DurationGreaterThan, - DurationLessThanOrEquals, DurationLessThan - enum: - - Equals - - NotEquals - - In - - AnyIn - - AllIn - - NotIn - - AnyNotIn - - AllNotIn - - GreaterThanOrEquals - - GreaterThan - - LessThanOrEquals - - LessThan - - DurationGreaterThanOrEquals - - DurationGreaterThan - - DurationLessThanOrEquals - - DurationLessThan - type: string - value: - description: |- - Value is the conditional value, or set of values. The values can be fixed set - or can be variables declared using JMESPath. - x-kubernetes-preserve-unknown-fields: true - type: object - type: array - type: object - x-kubernetes-preserve-unknown-fields: true - type: object - type: array - manifests: - description: Manifest specifies conditions for manifest - verification - properties: - annotationDomain: - description: AnnotationDomain is custom domain of annotation - for message and signature. Default is "cosign.sigstore.dev". - type: string - attestors: - description: Attestors specified the required attestors - (i.e. authorities) - items: - properties: - count: - description: |- - Count specifies the required number of entries that must match. If the count is null, all entries must match - (a logical AND). If the count is 1, at least one entry must match (a logical OR). If the count contains a - value N, then N must be less than or equal to the size of entries, and at least N entries must match. - minimum: 1 - type: integer - entries: - description: |- - Entries contains the available attestors. An attestor can be a static key, - attributes for keyless verification, or a nested attestor declaration. - items: - properties: - annotations: - additionalProperties: - type: string - description: |- - Annotations are used for image verification. - Every specified key-value pair must exist and match in the verified payload. - The payload may contain other key-value pairs. - type: object - attestor: - description: Attestor is a nested set of - Attestor used to specify a more complex - set of match authorities. - x-kubernetes-preserve-unknown-fields: true - certificates: - description: Certificates specifies one - or more certificates. - properties: - cert: - description: Cert is an optional PEM-encoded - public certificate. - type: string - certChain: - description: CertChain is an optional - PEM encoded set of certificates used - to verify. - type: string - ctlog: - description: |- - CTLog (certificate timestamp log) provides a configuration for validation of Signed Certificate - Timestamps (SCTs). If the value is unset, the default behavior by Cosign is used. - properties: - ignoreSCT: - description: |- - IgnoreSCT defines whether to use the Signed Certificate Timestamp (SCT) log to check for a certificate - timestamp. Default is false. Set to true if this was opted out during signing. - type: boolean - pubkey: - description: PubKey, if set, is - used to validate SCTs against - a custom source. - type: string - type: object - rekor: - description: |- - Rekor provides configuration for the Rekor transparency log service. If an empty object - is provided the public instance of Rekor (https://rekor.sigstore.dev) is used. - properties: - ignoreTlog: - description: IgnoreTlog skips transparency - log verification. - type: boolean - pubkey: - description: |- - RekorPubKey is an optional PEM-encoded public key to use for a custom Rekor. 
- If set, this will be used to validate transparency log signatures from a custom Rekor. - type: string - url: - description: URL is the address - of the transparency log. Defaults - to the public Rekor log instance - https://rekor.sigstore.dev. - type: string - required: - - url - type: object - type: object - keyless: - description: |- - Keyless is a set of attribute used to verify a Sigstore keyless attestor. - See https://github.com/sigstore/cosign/blob/main/KEYLESS.md. - properties: - additionalExtensions: - additionalProperties: - type: string - description: AdditionalExtensions are - certificate-extensions used for keyless - signing. - type: object - ctlog: - description: |- - CTLog (certificate timestamp log) provides a configuration for validation of Signed Certificate - Timestamps (SCTs). If the value is unset, the default behavior by Cosign is used. - properties: - ignoreSCT: - description: |- - IgnoreSCT defines whether to use the Signed Certificate Timestamp (SCT) log to check for a certificate - timestamp. Default is false. Set to true if this was opted out during signing. - type: boolean - pubkey: - description: PubKey, if set, is - used to validate SCTs against - a custom source. - type: string - type: object - issuer: - description: Issuer is the certificate - issuer used for keyless signing. - type: string - rekor: - description: |- - Rekor provides configuration for the Rekor transparency log service. If an empty object - is provided the public instance of Rekor (https://rekor.sigstore.dev) is used. - properties: - ignoreTlog: - description: IgnoreTlog skips transparency - log verification. - type: boolean - pubkey: - description: |- - RekorPubKey is an optional PEM-encoded public key to use for a custom Rekor. - If set, this will be used to validate transparency log signatures from a custom Rekor. - type: string - url: - description: URL is the address - of the transparency log. Defaults - to the public Rekor log instance - https://rekor.sigstore.dev. - type: string - required: - - url - type: object - roots: - description: |- - Roots is an optional set of PEM encoded trusted root certificates. - If not provided, the system roots are used. - type: string - subject: - description: Subject is the verified - identity used for keyless signing, - for example the email address. - type: string - type: object - keys: - description: Keys specifies one or more - public keys. - properties: - ctlog: - description: |- - CTLog (certificate timestamp log) provides a configuration for validation of Signed Certificate - Timestamps (SCTs). If the value is unset, the default behavior by Cosign is used. - properties: - ignoreSCT: - description: |- - IgnoreSCT defines whether to use the Signed Certificate Timestamp (SCT) log to check for a certificate - timestamp. Default is false. Set to true if this was opted out during signing. - type: boolean - pubkey: - description: PubKey, if set, is - used to validate SCTs against - a custom source. - type: string - type: object - kms: - description: |- - KMS provides the URI to the public key stored in a Key Management System. See: - https://github.com/sigstore/cosign/blob/main/KMS.md - type: string - publicKeys: - description: |- - Keys is a set of X.509 public keys used to verify image signatures. 
The keys can be directly - specified or can be a variable reference to a key specified in a ConfigMap (see - https://kyverno.io/docs/writing-policies/variables/), or reference a standard Kubernetes Secret - elsewhere in the cluster by specifying it in the format "k8s:///". - The named Secret must specify a key `cosign.pub` containing the public key used for - verification, (see https://github.com/sigstore/cosign/blob/main/KMS.md#kubernetes-secret). - When multiple keys are specified each key is processed as a separate staticKey entry - (.attestors[*].entries.keys) within the set of attestors and the count is applied across the keys. - type: string - rekor: - description: |- - Rekor provides configuration for the Rekor transparency log service. If an empty object - is provided the public instance of Rekor (https://rekor.sigstore.dev) is used. - properties: - ignoreTlog: - description: IgnoreTlog skips transparency - log verification. - type: boolean - pubkey: - description: |- - RekorPubKey is an optional PEM-encoded public key to use for a custom Rekor. - If set, this will be used to validate transparency log signatures from a custom Rekor. - type: string - url: - description: URL is the address - of the transparency log. Defaults - to the public Rekor log instance - https://rekor.sigstore.dev. - type: string - required: - - url - type: object - secret: - description: Reference to a Secret resource - that contains a public key - properties: - name: - description: Name of the secret. - The provided secret must contain - a key named cosign.pub. - type: string - namespace: - description: Namespace name where - the Secret exists. - type: string - required: - - name - - namespace - type: object - signatureAlgorithm: - default: sha256 - description: Specify signature algorithm - for public keys. Supported values - are sha224, sha256, sha384 and sha512. - type: string - type: object - repository: - description: |- - Repository is an optional alternate OCI repository to use for signatures and attestations that match this rule. - If specified Repository will override other OCI image repository locations for this Attestor. - type: string - type: object - type: array - type: object - type: array - dryRun: - description: DryRun configuration - properties: - enable: - type: boolean - namespace: - type: string - type: object - ignoreFields: - description: Fields which will be ignored while comparing - manifests. - items: - properties: - fields: - items: - type: string - type: array - objects: - items: - properties: - group: - type: string - kind: - type: string - name: - type: string - namespace: - type: string - version: - type: string - type: object - type: array - type: object - type: array - repository: - description: |- - Repository is an optional alternate OCI repository to use for resource bundle reference. - The repository can be overridden per Attestor or Attestation. - type: string - type: object - message: - description: Message specifies a custom message to be displayed - on failure. - type: string - pattern: - description: Pattern specifies an overlay-style pattern - used to check resources. - x-kubernetes-preserve-unknown-fields: true - podSecurity: - description: |- - PodSecurity applies exemptions for Kubernetes Pod Security admission - by specifying exclusions for Pod Security Standards controls. - properties: - exclude: - description: Exclude specifies the Pod Security Standard - controls to be excluded. 
- items: - description: PodSecurityStandard specifies the Pod - Security Standard controls to be excluded. - properties: - controlName: - description: |- - ControlName specifies the name of the Pod Security Standard control. - See: https://kubernetes.io/docs/concepts/security/pod-security-standards/ - enum: - - HostProcess - - Host Namespaces - - Privileged Containers - - Capabilities - - HostPath Volumes - - Host Ports - - AppArmor - - SELinux - - /proc Mount Type - - Seccomp - - Sysctls - - Volume Types - - Privilege Escalation - - Running as Non-root - - Running as Non-root user - type: string - images: - description: |- - Images selects matching containers and applies the container level PSS. - Each image is the image name consisting of the registry address, repository, image, and tag. - Empty list matches no containers, PSS checks are applied at the pod level only. - Wildcards ('*' and '?') are allowed. See: https://kubernetes.io/docs/concepts/containers/images. - items: - type: string - type: array - required: - - controlName - type: object - type: array - level: - description: |- - Level defines the Pod Security Standard level to be applied to workloads. - Allowed values are privileged, baseline, and restricted. - enum: - - privileged - - baseline - - restricted - type: string - version: - description: |- - Version defines the Pod Security Standard versions that Kubernetes supports. - Allowed values are v1.19, v1.20, v1.21, v1.22, v1.23, v1.24, v1.25, v1.26, latest. Defaults to latest. - enum: - - v1.19 - - v1.20 - - v1.21 - - v1.22 - - v1.23 - - v1.24 - - v1.25 - - v1.26 - - latest - type: string - type: object - type: object - verifyImages: - description: VerifyImages is used to verify image signatures - and mutate them to add a digest - items: - description: |- - ImageVerification validates that images that match the specified pattern - are signed with the supplied public key. Once the image is verified it is - mutated to include the SHA digest retrieved during the registration. - properties: - additionalExtensions: - additionalProperties: - type: string - description: Deprecated. - type: object - annotations: - additionalProperties: - type: string - description: Deprecated. Use annotations per Attestor - instead. - type: object - attestations: - description: |- - Attestations are optional checks for signed in-toto Statements used to verify the image. - See https://github.com/in-toto/attestation. Kyverno fetches signed attestations from the - OCI registry and decodes them into a list of Statement declarations. - items: - description: |- - Attestation are checks for signed in-toto Statements that are used to verify the image. - See https://github.com/in-toto/attestation. Kyverno fetches signed attestations from the - OCI registry and decodes them into a list of Statements. - properties: - attestors: - description: Attestors specify the required attestors - (i.e. authorities). - items: - properties: - count: - description: |- - Count specifies the required number of entries that must match. If the count is null, all entries must match - (a logical AND). If the count is 1, at least one entry must match (a logical OR). If the count contains a - value N, then N must be less than or equal to the size of entries, and at least N entries must match. - minimum: 1 - type: integer - entries: - description: |- - Entries contains the available attestors. An attestor can be a static key, - attributes for keyless verification, or a nested attestor declaration. 
- items: - properties: - annotations: - additionalProperties: - type: string - description: |- - Annotations are used for image verification. - Every specified key-value pair must exist and match in the verified payload. - The payload may contain other key-value pairs. - type: object - attestor: - description: Attestor is a nested set - of Attestor used to specify a more - complex set of match authorities. - x-kubernetes-preserve-unknown-fields: true - certificates: - description: Certificates specifies - one or more certificates. - properties: - cert: - description: Cert is an optional - PEM-encoded public certificate. - type: string - certChain: - description: CertChain is an optional - PEM encoded set of certificates - used to verify. - type: string - ctlog: - description: |- - CTLog (certificate timestamp log) provides a configuration for validation of Signed Certificate - Timestamps (SCTs). If the value is unset, the default behavior by Cosign is used. - properties: - ignoreSCT: - description: |- - IgnoreSCT defines whether to use the Signed Certificate Timestamp (SCT) log to check for a certificate - timestamp. Default is false. Set to true if this was opted out during signing. - type: boolean - pubkey: - description: PubKey, if set, - is used to validate SCTs against - a custom source. - type: string - type: object - rekor: - description: |- - Rekor provides configuration for the Rekor transparency log service. If an empty object - is provided the public instance of Rekor (https://rekor.sigstore.dev) is used. - properties: - ignoreTlog: - description: IgnoreTlog skips - transparency log verification. - type: boolean - pubkey: - description: |- - RekorPubKey is an optional PEM-encoded public key to use for a custom Rekor. - If set, this will be used to validate transparency log signatures from a custom Rekor. - type: string - url: - description: URL is the address - of the transparency log. Defaults - to the public Rekor log instance - https://rekor.sigstore.dev. - type: string - required: - - url - type: object - type: object - keyless: - description: |- - Keyless is a set of attribute used to verify a Sigstore keyless attestor. - See https://github.com/sigstore/cosign/blob/main/KEYLESS.md. - properties: - additionalExtensions: - additionalProperties: - type: string - description: AdditionalExtensions - are certificate-extensions used - for keyless signing. - type: object - ctlog: - description: |- - CTLog (certificate timestamp log) provides a configuration for validation of Signed Certificate - Timestamps (SCTs). If the value is unset, the default behavior by Cosign is used. - properties: - ignoreSCT: - description: |- - IgnoreSCT defines whether to use the Signed Certificate Timestamp (SCT) log to check for a certificate - timestamp. Default is false. Set to true if this was opted out during signing. - type: boolean - pubkey: - description: PubKey, if set, - is used to validate SCTs against - a custom source. - type: string - type: object - issuer: - description: Issuer is the certificate - issuer used for keyless signing. - type: string - rekor: - description: |- - Rekor provides configuration for the Rekor transparency log service. If an empty object - is provided the public instance of Rekor (https://rekor.sigstore.dev) is used. - properties: - ignoreTlog: - description: IgnoreTlog skips - transparency log verification. - type: boolean - pubkey: - description: |- - RekorPubKey is an optional PEM-encoded public key to use for a custom Rekor. 
- If set, this will be used to validate transparency log signatures from a custom Rekor. - type: string - url: - description: URL is the address - of the transparency log. Defaults - to the public Rekor log instance - https://rekor.sigstore.dev. - type: string - required: - - url - type: object - roots: - description: |- - Roots is an optional set of PEM encoded trusted root certificates. - If not provided, the system roots are used. - type: string - subject: - description: Subject is the verified - identity used for keyless signing, - for example the email address. - type: string - type: object - keys: - description: Keys specifies one or more - public keys. - properties: - ctlog: - description: |- - CTLog (certificate timestamp log) provides a configuration for validation of Signed Certificate - Timestamps (SCTs). If the value is unset, the default behavior by Cosign is used. - properties: - ignoreSCT: - description: |- - IgnoreSCT defines whether to use the Signed Certificate Timestamp (SCT) log to check for a certificate - timestamp. Default is false. Set to true if this was opted out during signing. - type: boolean - pubkey: - description: PubKey, if set, - is used to validate SCTs against - a custom source. - type: string - type: object - kms: - description: |- - KMS provides the URI to the public key stored in a Key Management System. See: - https://github.com/sigstore/cosign/blob/main/KMS.md - type: string - publicKeys: - description: |- - Keys is a set of X.509 public keys used to verify image signatures. The keys can be directly - specified or can be a variable reference to a key specified in a ConfigMap (see - https://kyverno.io/docs/writing-policies/variables/), or reference a standard Kubernetes Secret - elsewhere in the cluster by specifying it in the format "k8s:///". - The named Secret must specify a key `cosign.pub` containing the public key used for - verification, (see https://github.com/sigstore/cosign/blob/main/KMS.md#kubernetes-secret). - When multiple keys are specified each key is processed as a separate staticKey entry - (.attestors[*].entries.keys) within the set of attestors and the count is applied across the keys. - type: string - rekor: - description: |- - Rekor provides configuration for the Rekor transparency log service. If an empty object - is provided the public instance of Rekor (https://rekor.sigstore.dev) is used. - properties: - ignoreTlog: - description: IgnoreTlog skips - transparency log verification. - type: boolean - pubkey: - description: |- - RekorPubKey is an optional PEM-encoded public key to use for a custom Rekor. - If set, this will be used to validate transparency log signatures from a custom Rekor. - type: string - url: - description: URL is the address - of the transparency log. Defaults - to the public Rekor log instance - https://rekor.sigstore.dev. - type: string - required: - - url - type: object - secret: - description: Reference to a Secret - resource that contains a public - key - properties: - name: - description: Name of the secret. - The provided secret must contain - a key named cosign.pub. - type: string - namespace: - description: Namespace name - where the Secret exists. - type: string - required: - - name - - namespace - type: object - signatureAlgorithm: - default: sha256 - description: Specify signature algorithm - for public keys. Supported values - are sha224, sha256, sha384 and - sha512. 
- type: string - type: object - repository: - description: |- - Repository is an optional alternate OCI repository to use for signatures and attestations that match this rule. - If specified Repository will override other OCI image repository locations for this Attestor. - type: string - type: object - type: array - type: object - type: array - conditions: - description: |- - Conditions are used to verify attributes within a Predicate. If no Conditions are specified - the attestation check is satisfied as long there are predicates that match the predicate type. - items: - description: |- - AnyAllConditions consists of conditions wrapped denoting a logical criteria to be fulfilled. - AnyConditions get fulfilled when at least one of its sub-conditions passes. - AllConditions get fulfilled only when all of its sub-conditions pass. - properties: - all: - description: |- - AllConditions enable variable-based conditional rule execution. This is useful for - finer control of when an rule is applied. A condition can reference object data - using JMESPath notation. - Here, all of the conditions need to pass - items: - description: Condition defines variable-based - conditional criteria for rule execution. - properties: - key: - description: Key is the context entry - (using JMESPath) for conditional rule - evaluation. - x-kubernetes-preserve-unknown-fields: true - message: - description: Message is an optional - display message - type: string - operator: - description: |- - Operator is the conditional operation to perform. Valid operators are: - Equals, NotEquals, In, AnyIn, AllIn, NotIn, AnyNotIn, AllNotIn, GreaterThanOrEquals, - GreaterThan, LessThanOrEquals, LessThan, DurationGreaterThanOrEquals, DurationGreaterThan, - DurationLessThanOrEquals, DurationLessThan - enum: - - Equals - - NotEquals - - In - - AnyIn - - AllIn - - NotIn - - AnyNotIn - - AllNotIn - - GreaterThanOrEquals - - GreaterThan - - LessThanOrEquals - - LessThan - - DurationGreaterThanOrEquals - - DurationGreaterThan - - DurationLessThanOrEquals - - DurationLessThan - type: string - value: - description: |- - Value is the conditional value, or set of values. The values can be fixed set - or can be variables declared using JMESPath. - x-kubernetes-preserve-unknown-fields: true - type: object - type: array - any: - description: |- - AnyConditions enable variable-based conditional rule execution. This is useful for - finer control of when an rule is applied. A condition can reference object data - using JMESPath notation. - Here, at least one of the conditions need to pass - items: - description: Condition defines variable-based - conditional criteria for rule execution. - properties: - key: - description: Key is the context entry - (using JMESPath) for conditional rule - evaluation. - x-kubernetes-preserve-unknown-fields: true - message: - description: Message is an optional - display message - type: string - operator: - description: |- - Operator is the conditional operation to perform. 
Valid operators are: - Equals, NotEquals, In, AnyIn, AllIn, NotIn, AnyNotIn, AllNotIn, GreaterThanOrEquals, - GreaterThan, LessThanOrEquals, LessThan, DurationGreaterThanOrEquals, DurationGreaterThan, - DurationLessThanOrEquals, DurationLessThan - enum: - - Equals - - NotEquals - - In - - AnyIn - - AllIn - - NotIn - - AnyNotIn - - AllNotIn - - GreaterThanOrEquals - - GreaterThan - - LessThanOrEquals - - LessThan - - DurationGreaterThanOrEquals - - DurationGreaterThan - - DurationLessThanOrEquals - - DurationLessThan - type: string - value: - description: |- - Value is the conditional value, or set of values. The values can be fixed set - or can be variables declared using JMESPath. - x-kubernetes-preserve-unknown-fields: true - type: object - type: array - type: object - type: array - predicateType: - description: Deprecated in favour of 'Type', to - be removed soon - type: string - type: - description: Type defines the type of attestation - contained within the Statement. - type: string - type: object - type: array - attestors: - description: Attestors specified the required attestors - (i.e. authorities) - items: - properties: - count: - description: |- - Count specifies the required number of entries that must match. If the count is null, all entries must match - (a logical AND). If the count is 1, at least one entry must match (a logical OR). If the count contains a - value N, then N must be less than or equal to the size of entries, and at least N entries must match. - minimum: 1 - type: integer - entries: - description: |- - Entries contains the available attestors. An attestor can be a static key, - attributes for keyless verification, or a nested attestor declaration. - items: - properties: - annotations: - additionalProperties: - type: string - description: |- - Annotations are used for image verification. - Every specified key-value pair must exist and match in the verified payload. - The payload may contain other key-value pairs. - type: object - attestor: - description: Attestor is a nested set of Attestor - used to specify a more complex set of match - authorities. - x-kubernetes-preserve-unknown-fields: true - certificates: - description: Certificates specifies one or - more certificates. - properties: - cert: - description: Cert is an optional PEM-encoded - public certificate. - type: string - certChain: - description: CertChain is an optional - PEM encoded set of certificates used - to verify. - type: string - ctlog: - description: |- - CTLog (certificate timestamp log) provides a configuration for validation of Signed Certificate - Timestamps (SCTs). If the value is unset, the default behavior by Cosign is used. - properties: - ignoreSCT: - description: |- - IgnoreSCT defines whether to use the Signed Certificate Timestamp (SCT) log to check for a certificate - timestamp. Default is false. Set to true if this was opted out during signing. - type: boolean - pubkey: - description: PubKey, if set, is used - to validate SCTs against a custom - source. - type: string - type: object - rekor: - description: |- - Rekor provides configuration for the Rekor transparency log service. If an empty object - is provided the public instance of Rekor (https://rekor.sigstore.dev) is used. - properties: - ignoreTlog: - description: IgnoreTlog skips transparency - log verification. - type: boolean - pubkey: - description: |- - RekorPubKey is an optional PEM-encoded public key to use for a custom Rekor. - If set, this will be used to validate transparency log signatures from a custom Rekor. 
- type: string - url: - description: URL is the address of - the transparency log. Defaults to - the public Rekor log instance https://rekor.sigstore.dev. - type: string - required: - - url - type: object - type: object - keyless: - description: |- - Keyless is a set of attribute used to verify a Sigstore keyless attestor. - See https://github.com/sigstore/cosign/blob/main/KEYLESS.md. - properties: - additionalExtensions: - additionalProperties: - type: string - description: AdditionalExtensions are - certificate-extensions used for keyless - signing. - type: object - ctlog: - description: |- - CTLog (certificate timestamp log) provides a configuration for validation of Signed Certificate - Timestamps (SCTs). If the value is unset, the default behavior by Cosign is used. - properties: - ignoreSCT: - description: |- - IgnoreSCT defines whether to use the Signed Certificate Timestamp (SCT) log to check for a certificate - timestamp. Default is false. Set to true if this was opted out during signing. - type: boolean - pubkey: - description: PubKey, if set, is used - to validate SCTs against a custom - source. - type: string - type: object - issuer: - description: Issuer is the certificate - issuer used for keyless signing. - type: string - rekor: - description: |- - Rekor provides configuration for the Rekor transparency log service. If an empty object - is provided the public instance of Rekor (https://rekor.sigstore.dev) is used. - properties: - ignoreTlog: - description: IgnoreTlog skips transparency - log verification. - type: boolean - pubkey: - description: |- - RekorPubKey is an optional PEM-encoded public key to use for a custom Rekor. - If set, this will be used to validate transparency log signatures from a custom Rekor. - type: string - url: - description: URL is the address of - the transparency log. Defaults to - the public Rekor log instance https://rekor.sigstore.dev. - type: string - required: - - url - type: object - roots: - description: |- - Roots is an optional set of PEM encoded trusted root certificates. - If not provided, the system roots are used. - type: string - subject: - description: Subject is the verified identity - used for keyless signing, for example - the email address. - type: string - type: object - keys: - description: Keys specifies one or more public - keys. - properties: - ctlog: - description: |- - CTLog (certificate timestamp log) provides a configuration for validation of Signed Certificate - Timestamps (SCTs). If the value is unset, the default behavior by Cosign is used. - properties: - ignoreSCT: - description: |- - IgnoreSCT defines whether to use the Signed Certificate Timestamp (SCT) log to check for a certificate - timestamp. Default is false. Set to true if this was opted out during signing. - type: boolean - pubkey: - description: PubKey, if set, is used - to validate SCTs against a custom - source. - type: string - type: object - kms: - description: |- - KMS provides the URI to the public key stored in a Key Management System. See: - https://github.com/sigstore/cosign/blob/main/KMS.md - type: string - publicKeys: - description: |- - Keys is a set of X.509 public keys used to verify image signatures. The keys can be directly - specified or can be a variable reference to a key specified in a ConfigMap (see - https://kyverno.io/docs/writing-policies/variables/), or reference a standard Kubernetes Secret - elsewhere in the cluster by specifying it in the format "k8s:///". 
- The named Secret must specify a key `cosign.pub` containing the public key used for - verification, (see https://github.com/sigstore/cosign/blob/main/KMS.md#kubernetes-secret). - When multiple keys are specified each key is processed as a separate staticKey entry - (.attestors[*].entries.keys) within the set of attestors and the count is applied across the keys. - type: string - rekor: - description: |- - Rekor provides configuration for the Rekor transparency log service. If an empty object - is provided the public instance of Rekor (https://rekor.sigstore.dev) is used. - properties: - ignoreTlog: - description: IgnoreTlog skips transparency - log verification. - type: boolean - pubkey: - description: |- - RekorPubKey is an optional PEM-encoded public key to use for a custom Rekor. - If set, this will be used to validate transparency log signatures from a custom Rekor. - type: string - url: - description: URL is the address of - the transparency log. Defaults to - the public Rekor log instance https://rekor.sigstore.dev. - type: string - required: - - url - type: object - secret: - description: Reference to a Secret resource - that contains a public key - properties: - name: - description: Name of the secret. The - provided secret must contain a key - named cosign.pub. - type: string - namespace: - description: Namespace name where - the Secret exists. - type: string - required: - - name - - namespace - type: object - signatureAlgorithm: - default: sha256 - description: Specify signature algorithm - for public keys. Supported values are - sha224, sha256, sha384 and sha512. - type: string - type: object - repository: - description: |- - Repository is an optional alternate OCI repository to use for signatures and attestations that match this rule. - If specified Repository will override other OCI image repository locations for this Attestor. - type: string - type: object - type: array - type: object - type: array - image: - description: Deprecated. Use ImageReferences instead. - type: string - imageReferences: - description: |- - ImageReferences is a list of matching image reference patterns. At least one pattern in the - list must match the image for the rule to apply. Each image reference consists of a registry - address (defaults to docker.io), repository, image, and tag (defaults to latest). - Wildcards ('*' and '?') are allowed. See: https://kubernetes.io/docs/concepts/containers/images. - items: - type: string - type: array - imageRegistryCredentials: - description: ImageRegistryCredentials provides credentials - that will be used for authentication with registry. - properties: - allowInsecureRegistry: - description: AllowInsecureRegistry allows insecure - access to a registry. - type: boolean - providers: - description: |- - Providers specifies a list of OCI Registry names, whose authentication providers are provided. - It can be of one of these values: default,google,azure,amazon,github. - items: - description: ImageRegistryCredentialsProvidersType - provides the list of credential providers required. - enum: - - default - - amazon - - azure - - google - - github - type: string - type: array - secrets: - description: |- - Secrets specifies a list of secrets that are provided for credentials. - Secrets must live in the Kyverno namespace. - items: - type: string - type: array - type: object - issuer: - description: Deprecated. Use KeylessAttestor instead. - type: string - key: - description: Deprecated. Use StaticKeyAttestor instead. 
- type: string - mutateDigest: - default: true - description: |- - MutateDigest enables replacement of image tags with digests. - Defaults to true. - type: boolean - repository: - description: |- - Repository is an optional alternate OCI repository to use for image signatures and attestations that match this rule. - If specified Repository will override the default OCI image repository configured for the installation. - The repository can also be overridden per Attestor or Attestation. - type: string - required: - default: true - description: Required validates that images are verified, - i.e. have passed a signature or attestation - check. - type: boolean - roots: - description: Deprecated. Use KeylessAttestor instead. - type: string - subject: - description: Deprecated. Use KeylessAttestor instead. - type: string - type: - description: |- - Type specifies the method of signature validation. The allowed options - are Cosign and Notary. By default Cosign is used if a type is not specified. - enum: - - Cosign - - Notary - type: string - useCache: - default: true - description: UseCache enables caching of image verify - responses for this rule. - type: boolean - verifyDigest: - default: true - description: VerifyDigest validates that images have a - digest. - type: boolean - type: object - type: array - required: - - name - type: object - type: array - schemaValidation: - description: |- - SchemaValidation skips validation checks for policies as well as patched resources. - Optional. The default value is set to "true"; it must be set to "false" to disable the validation checks. - type: boolean - useServerSideApply: - description: |- - UseServerSideApply controls whether to use server-side apply for generate rules. - If set to "true", create & update for generate rules will use apply instead of create/update. - Defaults to "false" if not specified. - type: boolean - validationFailureAction: - default: Audit - description: |- - ValidationFailureAction defines if a validation policy rule violation should block - the admission review request (enforce), or allow (audit) the admission review request - and report an error in a policy report. Optional. - Allowed values are audit or enforce. The default value is "Audit". - enum: - - audit - - enforce - - Audit - - Enforce - type: string - validationFailureActionOverrides: - description: |- - ValidationFailureActionOverrides is a Cluster Policy attribute that specifies ValidationFailureAction - namespace-wise. It overrides ValidationFailureAction for the specified namespaces. - items: - properties: - action: - description: ValidationFailureAction defines the policy validation - failure action - enum: - - audit - - enforce - - Audit - - Enforce - type: string - namespaceSelector: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist.
- type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - namespaces: - items: - type: string - type: array - type: object - type: array - webhookTimeoutSeconds: - description: |- - WebhookTimeoutSeconds specifies the maximum time in seconds allowed to apply this policy. - After the configured time expires, the admission request may fail, or may simply ignore the policy results, - based on the failure policy. The default timeout is 10s, the value must be between 1 and 30 seconds. - format: int32 - type: integer - required: - - controlPlaneSelector - type: object - x-kubernetes-validations: - - message: policyName is immutable - rule: has(self.policyName) == has(oldSelf.policyName) - status: - description: SharedUpboundPolicyStatus defines the observed state of the - projected policies. - properties: - failed: - description: list of provisioning failures. - items: - description: SharedUpboundPolicyProvisioningFailure defines policy - provisioning failure. - properties: - conditions: - description: List of conditions. - items: - description: Condition contains details for one aspect of - the current state of this API Resource. - properties: - lastTransitionTime: - description: |- - lastTransitionTime is the last time the condition transitioned from one status to another. - This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. - format: date-time - type: string - message: - description: |- - message is a human readable message indicating details about the transition. - This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: |- - observedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: |- - reason contains a programmatic identifier indicating the reason for the condition's last transition. - Producers of specific condition types may define expected values and meanings for this field, - and whether the values are considered a guaranteed API. - The value should be a CamelCase string. - This field may not be empty. - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, - Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase.
- maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - controlPlane: - description: ControlPlane name where the failure occurred. - type: string - required: - - controlPlane - type: object - type: array - x-kubernetes-list-map-keys: - - controlPlane - x-kubernetes-list-type: map - observedGeneration: - description: observed resource generation. - format: int64 - type: integer - provisioned: - description: List of successfully provisioned targets. - items: - description: SharedUpboundPolicyProvisioningSuccess defines policy - provisioning success. - properties: - controlPlane: - description: ControlPlane name where the external secret got - successfully projected. - type: string - required: - - controlPlane - type: object - type: array - x-kubernetes-list-map-keys: - - controlPlane - x-kubernetes-list-type: map - type: object - type: object - served: true - storage: true - subresources: - status: {} diff --git a/static/crds/space/v1.11/spaces.upbound.io_backups.yaml b/static/crds/space/v1.11/spaces.upbound.io_backups.yaml deleted file mode 100644 index 9f338e5c9..000000000 --- a/static/crds/space/v1.11/spaces.upbound.io_backups.yaml +++ /dev/null @@ -1,200 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.3 - name: backups.spaces.upbound.io -spec: - group: spaces.upbound.io - names: - categories: - - spaces - kind: Backup - listKind: BackupList - plural: backups - singular: backup - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .status.phase - name: Phase - type: string - - jsonPath: .status.retries - name: Retries - type: integer - - jsonPath: .spec.ttl - name: TTL - type: string - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - name: v1alpha1 - schema: - openAPIV3Schema: - description: Backup represents a single backup of a ControlPlane. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: BackupSpec defines a backup over a set of ControlPlanes. - properties: - configRef: - description: |- - ConfigRef is a reference to the backup configuration. - ApiGroup is optional and defaults to "spaces.upbound.io". - Kind is required, and the only supported value is "SharedBackupConfig" at - the moment. - Name is required. - properties: - apiGroup: - description: APIGroup is the group for the resource being referenced. - type: string - kind: - description: Kind is the type of resource being referenced. - minLength: 1 - type: string - name: - description: Name is the name of resource being referenced. 
- minLength: 1 - type: string - type: object - x-kubernetes-map-type: atomic - x-kubernetes-validations: - - message: backup config ref must be a reference to a SharedBackupConfig - rule: (!has(self.apiGroup) || self.apiGroup == 'spaces.upbound.io') - && self.kind == 'SharedBackupConfig' - - message: backup config ref must have a name - rule: size(self.name) > 0 - controlPlane: - description: |- - ControlPlane is the name of the ControlPlane to backup. - Requires "backup" permission on the referenced ControlPlane. - minLength: 1 - type: string - deletionPolicy: - default: Orphan - description: DeletionPolicy is the policy for the backup. - enum: - - Orphan - - Delete - type: string - excludedResources: - description: |- - ExcludedResources is a slice of resource names that are not - included in the backup. Used to filter the included extra resources. - items: - type: string - type: array - x-kubernetes-list-type: set - ttl: - description: |- - TTL is the time to live for the backup. After this time, the backup - will be eligible for garbage collection. If not set, the backup will - not be garbage collected. - type: string - required: - - configRef - - controlPlane - type: object - x-kubernetes-validations: - - message: backup target controlplane can not be changed after creation - rule: self.controlPlane == oldSelf.controlPlane - - message: backup excluded resources can not be changed after creation - rule: (!has(self.excludedResources) && !has(oldSelf.excludedResources)) - || self.excludedResources == oldSelf.excludedResources - - message: backup config ref can not be changed after creation - rule: self.configRef == oldSelf.configRef - status: - description: BackupStatus represents the observed state of a Backup. - properties: - conditions: - description: Conditions of the resource. - items: - description: A Condition that may apply to a resource. - properties: - lastTransitionTime: - description: |- - LastTransitionTime is the last time this condition transitioned from one - status to another. - format: date-time - type: string - message: - description: |- - A Message containing details about this condition's last transition from - one status to another, if any. - type: string - observedGeneration: - description: |- - ObservedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - type: integer - reason: - description: A Reason for this condition's last transition from - one status to another. - type: string - status: - description: Status of this condition; is it currently True, - False, or Unknown? - type: string - type: - description: |- - Type of this condition. At most one of each condition type may apply to - a resource at any point in time. - type: string - required: - - lastTransitionTime - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - observedGeneration: - description: |- - ObservedGeneration is the latest metadata.generation - which resulted in either a ready state, or stalled due to error - it can not recover from without human intervention. - format: int64 - type: integer - phase: - default: Pending - description: Phase is the current phase of the backup. 
- enum: - - Pending - - InProgress - - Failed - - Completed - - Deleted - type: string - retries: - description: Retries is the number of times the backup has been retried. - format: int32 - type: integer - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/static/crds/space/v1.11/spaces.upbound.io_backupschedules.yaml b/static/crds/space/v1.11/spaces.upbound.io_backupschedules.yaml deleted file mode 100644 index e3dd879ee..000000000 --- a/static/crds/space/v1.11/spaces.upbound.io_backupschedules.yaml +++ /dev/null @@ -1,213 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.3 - name: backupschedules.spaces.upbound.io -spec: - group: spaces.upbound.io - names: - categories: - - spaces - kind: BackupSchedule - listKind: BackupScheduleList - plural: backupschedules - singular: backupschedule - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .spec.schedule - name: Schedule - type: string - - jsonPath: .status.lastBackup - name: LastBackup - type: date - - jsonPath: .spec.ttl - name: TTL - type: string - - jsonPath: .spec.controlPlane - name: ControlPlane - type: string - - jsonPath: .spec.suspend - name: Suspended - type: boolean - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - name: v1alpha1 - schema: - openAPIV3Schema: - description: BackupSchedule represents a single ControlPlane schedule for - Backups. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: BackupScheduleSpec defines a backup schedule over a set of - ControlPlanes. - properties: - configRef: - description: |- - ConfigRef is a reference to the backup configuration. - ApiGroup is optional and defaults to "spaces.upbound.io". - Kind is required, and the only supported value is "SharedBackupConfig" at - the moment. - Name is required. - properties: - apiGroup: - description: APIGroup is the group for the resource being referenced. - type: string - kind: - description: Kind is the type of resource being referenced. - minLength: 1 - type: string - name: - description: Name is the name of resource being referenced. - minLength: 1 - type: string - type: object - x-kubernetes-map-type: atomic - x-kubernetes-validations: - - message: backup config ref must be a reference to a SharedBackupConfig - rule: (!has(self.apiGroup) || self.apiGroup == 'spaces.upbound.io') - && self.kind == 'SharedBackupConfig' - - message: backup config ref must have a name - rule: size(self.name) > 0 - controlPlane: - description: |- - ControlPlane is the name of the ControlPlane to which the schedule - applies. - Requires "get" permission on the referenced ControlPlane. 
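
For reference while reading the Backup schema removed above: a minimal v1alpha1 Backup manifest satisfying its required fields (configRef, controlPlane) might look like the sketch below. Every resource name and the namespace are hypothetical placeholders.

---
apiVersion: spaces.upbound.io/v1alpha1
kind: Backup
metadata:
  name: ctp1-backup              # hypothetical
  namespace: default             # Backup is namespace-scoped
spec:
  configRef:
    kind: SharedBackupConfig     # the only supported kind
    name: my-backup-config       # hypothetical SharedBackupConfig
  controlPlane: ctp1             # requires "backup" permission on this ControlPlane
  deletionPolicy: Orphan         # Orphan (default) | Delete
  ttl: 720h                      # optional; backup becomes eligible for GC after this

Note that per the validation rules in the schema, controlPlane, excludedResources, and configRef are all immutable once the Backup is created.
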
- type: string - x-kubernetes-validations: - - message: target can not be changed after creation - rule: self == oldSelf - deletionPolicy: - default: Orphan - description: DeletionPolicy is the policy for the backup. - enum: - - Orphan - - Delete - type: string - excludedResources: - description: |- - ExcludedResources is a slice of resource names that are not - included in the backup. Used to filter the included extra resources. - items: - type: string - type: array - x-kubernetes-list-type: set - schedule: - description: Schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. - minLength: 1 - type: string - suspend: - description: |- - Suspend specifies whether the schedule is suspended. If true, no - Backups will be created, but running backups will be allowed to - complete. - type: boolean - ttl: - description: |- - TTL is the time to live for the backup. After this time, the backup - will be eligible for garbage collection. If not set, the backup will - not be garbage collected. - type: string - useOwnerReferencesInBackup: - description: |- - UseOwnerReferencesBackup specifies whether an ownership chain should be - established between this resource and the Backup it creates. - If set to true, the Backup will be garbage collected when this resource - is deleted. - type: boolean - required: - - configRef - - controlPlane - - schedule - type: object - status: - description: BackupScheduleStatus represents the observed state of a BackupSchedule. - properties: - conditions: - description: Conditions of the resource. - items: - description: A Condition that may apply to a resource. - properties: - lastTransitionTime: - description: |- - LastTransitionTime is the last time this condition transitioned from one - status to another. - format: date-time - type: string - message: - description: |- - A Message containing details about this condition's last transition from - one status to another, if any. - type: string - observedGeneration: - description: |- - ObservedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - type: integer - reason: - description: A Reason for this condition's last transition from - one status to another. - type: string - status: - description: Status of this condition; is it currently True, - False, or Unknown? - type: string - type: - description: |- - Type of this condition. At most one of each condition type may apply to - a resource at any point in time. - type: string - required: - - lastTransitionTime - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - lastBackup: - description: |- - LastBackup is the last time a Backup was run for this - Schedule schedule - format: date-time - type: string - observedGeneration: - description: |- - ObservedGeneration is the latest metadata.generation - which resulted in either a ready state, or stalled due to error - it can not recover from without human intervention. 
- format: int64 - type: integer - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/static/crds/space/v1.11/spaces.upbound.io_controlplanes.yaml b/static/crds/space/v1.11/spaces.upbound.io_controlplanes.yaml deleted file mode 100644 index 6285bf83d..000000000 --- a/static/crds/space/v1.11/spaces.upbound.io_controlplanes.yaml +++ /dev/null @@ -1,279 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.3 - name: controlplanes.spaces.upbound.io -spec: - group: spaces.upbound.io - names: - categories: - - spaces - kind: ControlPlane - listKind: ControlPlaneList - plural: controlplanes - shortNames: - - ctp - - ctps - singular: controlplane - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .spec.crossplane.version - name: Crossplane - type: string - - jsonPath: .status.conditions[?(@.type=='Ready')].status - name: Ready - type: string - - jsonPath: .status.message - name: Message - type: string - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - name: v1beta1 - schema: - openAPIV3Schema: - description: ControlPlane defines a managed Crossplane instance. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: A ControlPlaneSpec represents the desired state of the ControlPlane. - properties: - class: - default: default - description: |- - [[GATE:EnableControlPlaneClasses]] - Class specifies the class of the control plane. This affects the - control plane sizing, including component replicas and resource - requirements. There are multiple predefined classes, with "default" - being the standard Spaces control plane without any additional class - configuration. Check the Upbound Cloud documentation for a list of all - available classes. Defaults to "default". - type: string - x-kubernetes-validations: - - message: class is immutable - rule: self == oldSelf - crossplane: - description: Crossplane defines the configuration for Crossplane. - properties: - autoUpgrade: - default: - channel: Stable - description: AutoUpgrades defines the auto upgrade configuration - for Crossplane. - properties: - channel: - default: Stable - description: |- - Channel defines the upgrade channels for Crossplane. We support the following channels where 'Stable' is the - default: - - None: disables auto-upgrades and keeps the control plane at its current version of Crossplane. - - Patch: automatically upgrades the control plane to the latest supported patch version when it - becomes available while keeping the minor version the same. - - Stable: automatically upgrades the control plane to the latest supported patch release on minor - version N-1, where N is the latest supported minor version. 
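
The BackupSchedule schema deleted above pairs the same configRef/controlPlane fields with a Cron expression. A sketch of a conforming manifest, with hypothetical names throughout:

---
apiVersion: spaces.upbound.io/v1alpha1
kind: BackupSchedule
metadata:
  name: ctp1-nightly             # hypothetical
  namespace: default
spec:
  configRef:
    kind: SharedBackupConfig
    name: my-backup-config       # hypothetical
  controlPlane: ctp1             # immutable after creation
  schedule: "0 2 * * *"          # Cron format: daily at 02:00
  suspend: false                 # optional; true stops new Backups, running ones finish
  ttl: 168h                      # optional GC window for the produced Backups
  useOwnerReferencesInBackup: true   # produced Backups are GC'd with this schedule
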
- - Rapid: automatically upgrades the cluster to the latest supported patch release on the latest - supported minor version. - enum: - - None - - Patch - - Stable - - Rapid - type: string - type: object - state: - default: Running - description: |- - State defines the state for crossplane and provider workloads. We support - the following states where 'Running' is the default: - - Running: Starts/Scales up all crossplane and provider workloads in the ControlPlane - - Paused: Pauses/Scales down all crossplane and provider workloads in the ControlPlane - enum: - - Running - - Paused - type: string - version: - description: Version is the version of Universal Crossplane to - install. - type: string - x-kubernetes-validations: - - message: The version must not start with a leading 'v' - rule: (self.matches('^[^v].*')) - type: object - restore: - description: |- - [[GATE:EnableSharedBackup]] THIS IS AN ALPHA FIELD. Do not use it in production. - Restore specifies details about the control planes restore configuration. - properties: - finishedAt: - description: |- - FinishedAt is the time at which the control plane was restored, it's not - meant to be set by the user, but rather by the system when the control - plane is restored. - format: date-time - type: string - source: - description: |- - Source of the Backup or BackupSchedule to restore from. - Require "restore" permission on the referenced Backup or BackupSchedule. - ApiGroup is optional and defaults to "spaces.upbound.io". - Kind is required, and the only supported kinds are Backup and - BackupSchedule at the moment. - Name is required. - properties: - apiGroup: - description: APIGroup is the group for the resource being - referenced. - type: string - kind: - description: Kind is the type of resource being referenced. - minLength: 1 - type: string - name: - description: Name is the name of resource being referenced. - minLength: 1 - type: string - type: object - x-kubernetes-map-type: atomic - x-kubernetes-validations: - - message: source must be a reference to a Backup or BackupSchedule - (v1alpha1) - rule: (!has(self.apiGroup) || self.apiGroup == 'spaces.upbound.io') - && (self.kind == 'Backup' || self.kind == 'BackupSchedule') - - message: source is immutable - rule: oldSelf == self - required: - - source - type: object - x-kubernetes-validations: - - message: finishedAt is immutable once set - rule: '!has(oldSelf.finishedAt) || oldSelf.finishedAt == self.finishedAt' - writeConnectionSecretToRef: - description: |- - WriteConnectionSecretToReference specifies the namespace and name of a - Secret to which any connection details for this managed resource should - be written. Connection details frequently include the endpoint, username, - and password required to connect to the managed resource. - This field is planned to be replaced in a future release in favor of - PublishConnectionDetailsTo. Currently, both could be set independently - and connection details would be published to both without affecting - each other. - - If omitted, it is defaulted to the namespace of the ControlPlane. - Deprecated: Use Hub or Upbound identities instead. - properties: - name: - description: Name of the secret. - type: string - namespace: - description: |- - Namespace of the secret. If omitted, it is equal to - the namespace of the resource containing this reference as a field. 
- type: string - required: - - name - type: object - type: object - x-kubernetes-validations: - - message: '[[GATE:EnableSharedBackup]] restore source can not be unset' - rule: '!has(oldSelf.restore) || has(self.restore)' - - message: '[[GATE:EnableSharedBackup]] restore source can not be set - after creation' - rule: has(oldSelf.restore) || !has(self.restore) - - message: '"version" cannot be empty when upgrade channel is "None"' - rule: '!has(self.crossplane.autoUpgrade) || self.crossplane.autoUpgrade.channel - != "None" || self.crossplane.version != ""' - status: - description: A ControlPlaneStatus represents the observed state of a ControlPlane. - properties: - conditions: - description: Conditions of the resource. - items: - description: A Condition that may apply to a resource. - properties: - lastTransitionTime: - description: |- - LastTransitionTime is the last time this condition transitioned from one - status to another. - format: date-time - type: string - message: - description: |- - A Message containing details about this condition's last transition from - one status to another, if any. - type: string - observedGeneration: - description: |- - ObservedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - type: integer - reason: - description: A Reason for this condition's last transition from - one status to another. - type: string - status: - description: Status of this condition; is it currently True, - False, or Unknown? - type: string - type: - description: |- - Type of this condition. At most one of each condition type may apply to - a resource at any point in time. - type: string - required: - - lastTransitionTime - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - controlPlaneID: - type: string - firstAvailableAt: - description: FirstAvailableAt is the time at which the control plane - was available for the first time. - format: date-time - type: string - message: - description: |- - Message is a human-readable message indicating details about why the - ControlPlane is in this condition. - type: string - observedGeneration: - description: |- - ObservedGeneration is the latest metadata.generation - which resulted in either a ready state, or stalled due to error - it can not recover from without human intervention. 
- format: int64 - type: integer - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/static/crds/space/v1.11/spaces.upbound.io_incontrolplaneoverrides.yaml b/static/crds/space/v1.11/spaces.upbound.io_incontrolplaneoverrides.yaml deleted file mode 100644 index 295fc05d0..000000000 --- a/static/crds/space/v1.11/spaces.upbound.io_incontrolplaneoverrides.yaml +++ /dev/null @@ -1,256 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.3 - name: incontrolplaneoverrides.spaces.upbound.io -spec: - group: spaces.upbound.io - names: - categories: - - spaces - kind: InControlPlaneOverride - listKind: InControlPlaneOverrideList - plural: incontrolplaneoverrides - singular: incontrolplaneoverride - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .status.conditions[?(@.type=='Synced')].status - name: SYNCED - type: string - - jsonPath: .status.conditions[?(@.type=='Ready')].status - name: READY - type: string - name: v1alpha1 - schema: - openAPIV3Schema: - description: |- - InControlPlaneOverride represents resource configuration overrides in - a ControlPlane. The specified override can be applied on single objects - as well as claim/XR object hierarchies. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: |- - InControlPlaneOverrideSpec defines a configuration override - on a target object hierarchy in a target ControlPlane with the - given name. - properties: - controlPlaneName: - description: |- - ControlPlaneName is the name of the target ControlPlane where - the resource configuration overrides will be applied. - minLength: 1 - type: string - x-kubernetes-validations: - - message: controlPlaneName is immutable - rule: self == oldSelf - deletionPolicy: - default: RollBack - description: |- - DeletionPolicy specifies whether when the InControlPlaneOverride object - is deleted, the configuration override should be kept (Keep) or - rolled back (RollBack). - enum: - - RollBack - - Keep - type: string - override: - description: |- - Override denotes the configuration override to be applied on the target - object hierarchy. The fully specified intent is obtained by serializing - the Override. - properties: - metadata: - description: Metadata specifies the patch metadata. - properties: - annotations: - additionalProperties: - type: string - description: |- - Annotations represents the Kube object annotations. 
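
For the ControlPlane schema above (v1beta1), the spec is almost entirely optional; a small manifest pinning the auto-upgrade channel and workload state might look like this sketch, names hypothetical:

---
apiVersion: spaces.upbound.io/v1beta1
kind: ControlPlane
metadata:
  name: ctp1                     # hypothetical
  namespace: default
spec:
  crossplane:
    autoUpgrade:
      channel: Stable            # None | Patch | Stable | Rapid
    state: Running               # Running | Paused

Per the CEL validations shown, spec.crossplane.version must not start with a leading "v", and it cannot be empty when the upgrade channel is None.
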
- Only the following annotations are allowed to be patched: - - crossplane.io/paused - - spaces.upbound.io/force-reconcile-at - type: object - x-kubernetes-validations: - - message: Only the crossplane.io/paused and spaces.upbound.io/force-reconcile-at - annotations are allowed - rule: self.all(k, k == 'crossplane.io/paused' || k == 'spaces.upbound.io/force-reconcile-at') - type: object - type: object - propagationPolicy: - default: None - description: |- - PropagationPolicy specifies whether the configuration override will be - applied only to the object referenced in TargetRef (None), after an - ascending or descending hierarchy traversal will be done starting with - the target object. - enum: - - None - - Ascending - - Descending - type: string - targetRef: - description: |- - TargetRef is the object reference to a Kubernetes API object where the - configuration override will start. The controller will traverse the - target object's hierarchy depending on the PropagationPolicy. If - PropagationPolicy is None, then only the target object will be updated. - properties: - apiVersion: - description: APIVersion of the referenced object. - minLength: 1 - type: string - kind: - description: Kind of the referenced object. - minLength: 1 - type: string - name: - description: Name of the referenced object. - minLength: 1 - type: string - namespace: - description: Namespace of the referenced object. - type: string - required: - - apiVersion - - kind - - name - type: object - required: - - controlPlaneName - - override - - targetRef - type: object - status: - description: |- - InControlPlaneOverrideStatus defines the status of an InControlPlaneOverride - object. - properties: - conditions: - description: Conditions of the resource. - items: - description: A Condition that may apply to a resource. - properties: - lastTransitionTime: - description: |- - LastTransitionTime is the last time this condition transitioned from one - status to another. - format: date-time - type: string - message: - description: |- - A Message containing details about this condition's last transition from - one status to another, if any. - type: string - observedGeneration: - description: |- - ObservedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - type: integer - reason: - description: A Reason for this condition's last transition from - one status to another. - type: string - status: - description: Status of this condition; is it currently True, - False, or Unknown? - type: string - type: - description: |- - Type of this condition. At most one of each condition type may apply to - a resource at any point in time. - type: string - required: - - lastTransitionTime - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - objectRefs: - items: - description: |- - PatchedObjectStatus represents the state of an applied patch to an object - in the target hierarchy. - properties: - apiVersion: - description: APIVersion of the referenced object. - minLength: 1 - type: string - kind: - description: Kind of the referenced object. - minLength: 1 - type: string - message: - description: Message holds an optional detail message detailing - the observed state. 
- type: string - name: - description: Name of the referenced object. - minLength: 1 - type: string - namespace: - description: Namespace of the referenced object. - type: string - reason: - description: Reason is the reason for the target objects override - Status. - type: string - status: - description: Status of the configuration override. - enum: - - Success - - Skipped - - Error - type: string - uid: - description: Metadata UID of the patch target object. - type: string - required: - - apiVersion - - kind - - name - - reason - - status - type: object - type: array - observedGeneration: - description: |- - ObservedGeneration is the latest metadata.generation - which resulted in either a ready state, or stalled due to error - it can not recover from without human intervention. - format: int64 - type: integer - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/static/crds/space/v1.11/spaces.upbound.io_sharedbackupconfigs.yaml b/static/crds/space/v1.11/spaces.upbound.io_sharedbackupconfigs.yaml deleted file mode 100644 index d716be334..000000000 --- a/static/crds/space/v1.11/spaces.upbound.io_sharedbackupconfigs.yaml +++ /dev/null @@ -1,143 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.3 - name: sharedbackupconfigs.spaces.upbound.io -spec: - group: spaces.upbound.io - names: - categories: - - spaces - kind: SharedBackupConfig - listKind: SharedBackupConfigList - plural: sharedbackupconfigs - singular: sharedbackupconfig - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .spec.objectStorage.provider - name: Provider - type: string - - jsonPath: .spec.objectStorage.bucket - name: Bucket - type: string - - jsonPath: .spec.objectStorage.credentials.source - name: Auth - type: string - - jsonPath: .spec.objectStorage.credentials.secretRef.name - name: Secret - type: string - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - name: v1alpha1 - schema: - openAPIV3Schema: - description: SharedBackupConfig defines the configuration to backup and restore - ControlPlanes. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: |- - A SharedBackupConfigSpec represents the configuration to backup or restore - ControlPlanes. - properties: - objectStorage: - description: ObjectStorage specifies the object storage configuration - for the given provider. - properties: - bucket: - description: Bucket is the name of the bucket to store backups - in. - minLength: 1 - type: string - config: - description: |- - Config is a free-form map of configuration options for the object storage provider. 
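
The InControlPlaneOverride schema above only permits patching two annotations. A sketch that pauses a claim and its descending hierarchy; the target API group, kind, and all names are hypothetical:

---
apiVersion: spaces.upbound.io/v1alpha1
kind: InControlPlaneOverride
metadata:
  name: pause-claim              # hypothetical
  namespace: default
spec:
  controlPlaneName: ctp1         # immutable; the target ControlPlane
  deletionPolicy: RollBack       # RollBack (default) | Keep
  propagationPolicy: Descending  # None | Ascending | Descending
  targetRef:
    apiVersion: example.org/v1alpha1   # hypothetical claim API
    kind: ExampleClaim                 # hypothetical kind
    name: my-claim                     # hypothetical
    namespace: team-a                  # hypothetical
  override:
    metadata:
      annotations:
        crossplane.io/paused: "true"   # only this and spaces.upbound.io/force-reconcile-at are allowed
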
- See https://github.com/thanos-io/objstore?tab=readme-ov-file for more - information on the formats for each supported cloud provider. Bucket and - Provider will override the required values in the config. - type: object - x-kubernetes-preserve-unknown-fields: true - credentials: - description: Credentials specifies the credentials to access the - object storage. - properties: - secretRef: - description: |- - A SecretRef is a reference to a secret key that contains the credentials - that must be used to connect to the provider. - properties: - key: - default: credentials - description: The key to select. - type: string - name: - description: Name of the secret. - type: string - required: - - key - - name - type: object - source: - description: |- - Source of the credentials. - Source "Secret" requires "get" permissions on the referenced Secret. - enum: - - Secret - - InjectedIdentity - type: string - required: - - source - type: object - prefix: - description: |- - Prefix is the prefix to use for all backups using this - SharedBackupConfig, e.g. "prod/cluster1", resulting in backups for - controlplane "ctp1" in namespace "ns1" being stored in - "prod/cluster1/ns1/ctp1". - type: string - provider: - description: Provider is the name of the object storage provider. - enum: - - AWS - - Azure - - GCP - type: string - required: - - bucket - - credentials - - provider - type: object - x-kubernetes-validations: - - message: credentials.secretRef.name must be set when source is Secret - rule: self.credentials.source != 'Secret' || (has(self.credentials.secretRef) - && has(self.credentials.secretRef.name)) - required: - - objectStorage - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/static/crds/space/v1.11/spaces.upbound.io_sharedbackups.yaml b/static/crds/space/v1.11/spaces.upbound.io_sharedbackups.yaml deleted file mode 100644 index ffa7b41c5..000000000 --- a/static/crds/space/v1.11/spaces.upbound.io_sharedbackups.yaml +++ /dev/null @@ -1,291 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.3 - name: sharedbackups.spaces.upbound.io -spec: - group: spaces.upbound.io - names: - categories: - - spaces - kind: SharedBackup - listKind: SharedBackupList - plural: sharedbackups - singular: sharedbackup - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .metadata.annotations.sharedbackup\.internal\.spaces\.upbound\.io/completed - name: Completed - type: string - - jsonPath: .metadata.annotations.sharedbackup\.internal\.spaces\.upbound\.io/failed - name: Failed - type: string - - jsonPath: .metadata.annotations.sharedbackup\.internal\.spaces\.upbound\.io/provisioned - name: Provisioned - type: string - - jsonPath: .status.phase - name: Phase - type: string - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - name: v1alpha1 - schema: - openAPIV3Schema: - description: SharedBackup defines a backup over a set of ControlPlanes. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. 
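
A SharedBackupConfig conforming to the schema above needs a provider, a bucket, and a credentials source; the CEL rule additionally requires secretRef.name whenever the source is Secret. A sketch with hypothetical bucket and Secret names:

---
apiVersion: spaces.upbound.io/v1alpha1
kind: SharedBackupConfig
metadata:
  name: my-backup-config         # hypothetical
  namespace: default
spec:
  objectStorage:
    provider: AWS                # AWS | Azure | GCP
    bucket: my-backups-bucket    # hypothetical bucket
    prefix: prod/cluster1        # optional; backups land under <prefix>/<namespace>/<ctp>
    credentials:
      source: Secret             # Secret | InjectedIdentity
      secretRef:                 # required when source is Secret
        name: backup-creds       # hypothetical Secret
        key: credentials         # default key
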
- Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: SharedBackupSpec defines a backup over a set of ControlPlanes. - properties: - configRef: - description: |- - ConfigRef is a reference to the backup configuration. - ApiGroup is optional and defaults to "spaces.upbound.io". - Kind is required, and the only supported value is "SharedBackupConfig" at - the moment. - Name is required. - properties: - apiGroup: - description: APIGroup is the group for the resource being referenced. - type: string - kind: - description: Kind is the type of resource being referenced. - minLength: 1 - type: string - name: - description: Name is the name of resource being referenced. - minLength: 1 - type: string - type: object - x-kubernetes-map-type: atomic - x-kubernetes-validations: - - message: backup config ref must be a reference to a SharedBackupConfig - rule: (!has(self.apiGroup) || self.apiGroup == 'spaces.upbound.io') - && self.kind == 'SharedBackupConfig' - - message: backup config ref must have a name - rule: size(self.name) > 0 - controlPlaneSelector: - description: |- - ControlPlaneSelector defines the selector for ControlPlanes to backup. - Requires "backup" permission on all ControlPlanes in the same namespace. - properties: - labelSelectors: - description: |- - A resource is matched if any of the label selector matches. - In case when the list is empty, resource is matched too. - items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - names: - description: |- - A resource is selected if its metadata.name matches any of the provided names. - In case when the list is empty, resource is matched too. 
- items: - type: string - type: array - x-kubernetes-list-type: set - type: object - x-kubernetes-validations: - - message: either names or a labelSelector must be specified - rule: (has(self.labelSelectors) || has(self.names)) && (size(self.labelSelectors) - > 0 || size(self.names) > 0) - deletionPolicy: - default: Orphan - description: DeletionPolicy is the policy for the backup. - enum: - - Orphan - - Delete - type: string - excludedResources: - description: |- - ExcludedResources is a slice of resource names that are not - included in the backup. Used to filter the included extra resources. - items: - type: string - type: array - x-kubernetes-list-type: set - ttl: - description: |- - TTL is the time to live for the backup. After this time, the backup - will be eligible for garbage collection. If not set, the backup will - not be garbage collected. - type: string - useOwnerReferencesInBackup: - description: |- - UseOwnerReferencesBackup specifies whether an ownership chain should be - established between this resource and the Backup it creates. - If set to true, the Backup will be garbage collected when this resource - is deleted. - type: boolean - required: - - configRef - - controlPlaneSelector - type: object - x-kubernetes-validations: - - message: shared backup ControlPlane selectors can not be changed after - creation - rule: self.controlPlaneSelector == oldSelf.controlPlaneSelector - - message: shared backup excluded resources can not be changed after creation - rule: (!has(self.excludedResources) && !has(oldSelf.excludedResources)) - || self.excludedResources == oldSelf.excludedResources - status: - description: SharedBackupStatus represents the observed state of a SharedBackup. - properties: - completed: - description: Completed is the list of ControlPlanes for which the - backup completed successfully. - items: - type: string - type: array - x-kubernetes-list-type: set - conditions: - description: Conditions of the resource. - items: - description: A Condition that may apply to a resource. - properties: - lastTransitionTime: - description: |- - LastTransitionTime is the last time this condition transitioned from one - status to another. - format: date-time - type: string - message: - description: |- - A Message containing details about this condition's last transition from - one status to another, if any. - type: string - observedGeneration: - description: |- - ObservedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - type: integer - reason: - description: A Reason for this condition's last transition from - one status to another. - type: string - status: - description: Status of this condition; is it currently True, - False, or Unknown? - type: string - type: - description: |- - Type of this condition. At most one of each condition type may apply to - a resource at any point in time. - type: string - required: - - lastTransitionTime - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - failed: - description: Failed is the list of ControlPlanes for which the backup - failed. 
- items: - type: string - type: array - x-kubernetes-list-type: set - observedGeneration: - description: |- - ObservedGeneration is the latest metadata.generation - which resulted in either a ready state, or stalled due to error - it can not recover from without human intervention. - format: int64 - type: integer - phase: - default: Pending - description: Phase represents the current phase of the SharedBackup. - enum: - - Pending - - InProgress - - Failed - - Completed - type: string - selectedControlPlanes: - description: SelectedControlPlanes represents the names of the selected - ControlPlanes. - items: - type: string - type: array - x-kubernetes-list-type: set - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/static/crds/space/v1.11/spaces.upbound.io_sharedbackupschedules.yaml b/static/crds/space/v1.11/spaces.upbound.io_sharedbackupschedules.yaml deleted file mode 100644 index 1c173c0a8..000000000 --- a/static/crds/space/v1.11/spaces.upbound.io_sharedbackupschedules.yaml +++ /dev/null @@ -1,273 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.3 - name: sharedbackupschedules.spaces.upbound.io -spec: - group: spaces.upbound.io - names: - categories: - - spaces - kind: SharedBackupSchedule - listKind: SharedBackupScheduleList - plural: sharedbackupschedules - singular: sharedbackupschedule - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .spec.schedule - name: Schedule - type: string - - jsonPath: .spec.suspend - name: Suspended - type: boolean - - jsonPath: .metadata.annotations.sharedbackupschedule\.internal\.spaces\.upbound\.io/provisioned-total - name: Provisioned - type: string - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - name: v1alpha1 - schema: - openAPIV3Schema: - description: |- - SharedBackupSchedule defines a schedule for SharedBackup on a set of - ControlPlanes. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: SharedBackupScheduleSpec defines the desired state of a SharedBackupSchedule. - properties: - configRef: - description: |- - ConfigRef is a reference to the backup configuration. - ApiGroup is optional and defaults to "spaces.upbound.io". - Kind is required, and the only supported value is "SharedBackupConfig" at - the moment. - Name is required. - properties: - apiGroup: - description: APIGroup is the group for the resource being referenced. - type: string - kind: - description: Kind is the type of resource being referenced. - minLength: 1 - type: string - name: - description: Name is the name of resource being referenced. 
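
Where a Backup targets one ControlPlane, the SharedBackup schema above selects a set of them by names and/or label selectors (at least one must be given, and the selector is immutable). A sketch using a hypothetical team label:

---
apiVersion: spaces.upbound.io/v1alpha1
kind: SharedBackup
metadata:
  name: team-a-backup            # hypothetical
  namespace: default
spec:
  configRef:
    kind: SharedBackupConfig
    name: my-backup-config       # hypothetical
  controlPlaneSelector:          # immutable after creation
    labelSelectors:
      - matchLabels:
          team: a                # hypothetical label
  deletionPolicy: Delete         # Orphan (default) | Delete
  ttl: 336h                      # optional
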
- minLength: 1 - type: string - type: object - x-kubernetes-map-type: atomic - x-kubernetes-validations: - - message: backup config ref must be a reference to a SharedBackupConfig - rule: (!has(self.apiGroup) || self.apiGroup == 'spaces.upbound.io') - && self.kind == 'SharedBackupConfig' - - message: backup config ref must have a name - rule: size(self.name) > 0 - controlPlaneSelector: - description: |- - ControlPlaneSelector defines the selector for ControlPlanes to backup. - Requires "backup" permission on all ControlPlanes in the same namespace. - properties: - labelSelectors: - description: |- - A resource is matched if any of the label selector matches. - In case when the list is empty, resource is matched too. - items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - names: - description: |- - A resource is selected if its metadata.name matches any of the provided names. - In case when the list is empty, resource is matched too. - items: - type: string - type: array - x-kubernetes-list-type: set - type: object - x-kubernetes-validations: - - message: either names or a labelSelector must be specified - rule: (has(self.labelSelectors) || has(self.names)) && (size(self.labelSelectors) - > 0 || size(self.names) > 0) - deletionPolicy: - default: Orphan - description: DeletionPolicy is the policy for the backup. - enum: - - Orphan - - Delete - type: string - excludedResources: - description: |- - ExcludedResources is a slice of resource names that are not - included in the backup. Used to filter the included extra resources. - items: - type: string - type: array - x-kubernetes-list-type: set - schedule: - description: Schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. - minLength: 1 - type: string - suspend: - description: |- - Suspend specifies whether the schedule is suspended. If true, no - Backups will be created, but running backups will be allowed to - complete. 
- type: boolean - ttl: - description: |- - TTL is the time to live for the backup. After this time, the backup - will be eligible for garbage collection. If not set, the backup will - not be garbage collected. - type: string - useOwnerReferencesInBackup: - description: |- - UseOwnerReferencesBackup specifies whether an ownership chain should be - established between this resource and the Backup it creates. - If set to true, the Backup will be garbage collected when this resource - is deleted. - type: boolean - required: - - configRef - - controlPlaneSelector - - schedule - type: object - status: - description: SharedBackupScheduleStatus represents the observed state - of a SharedBackupSchedule. - properties: - conditions: - description: Conditions of the resource. - items: - description: A Condition that may apply to a resource. - properties: - lastTransitionTime: - description: |- - LastTransitionTime is the last time this condition transitioned from one - status to another. - format: date-time - type: string - message: - description: |- - A Message containing details about this condition's last transition from - one status to another, if any. - type: string - observedGeneration: - description: |- - ObservedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - type: integer - reason: - description: A Reason for this condition's last transition from - one status to another. - type: string - status: - description: Status of this condition; is it currently True, - False, or Unknown? - type: string - type: - description: |- - Type of this condition. At most one of each condition type may apply to - a resource at any point in time. - type: string - required: - - lastTransitionTime - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - observedGeneration: - description: |- - ObservedGeneration is the latest metadata.generation - which resulted in either a ready state, or stalled due to error - it can not recover from without human intervention. - format: int64 - type: integer - selectedControlPlanes: - description: |- - SelectedControlPlanes is the list of ControlPlanes that are selected - for backup. 
- items: - type: string - type: array - x-kubernetes-list-type: set - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/static/crds/space/v1.11/spaces.upbound.io_sharedexternalsecrets.yaml b/static/crds/space/v1.11/spaces.upbound.io_sharedexternalsecrets.yaml deleted file mode 100644 index 00c2dd3ab..000000000 --- a/static/crds/space/v1.11/spaces.upbound.io_sharedexternalsecrets.yaml +++ /dev/null @@ -1,745 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.3 - name: sharedexternalsecrets.spaces.upbound.io -spec: - group: spaces.upbound.io - names: - categories: - - externalsecrets - kind: SharedExternalSecret - listKind: SharedExternalSecretList - plural: sharedexternalsecrets - shortNames: - - ses - singular: sharedexternalsecret - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .metadata.annotations.sharedexternalsecrets\.internal\.spaces\.upbound\.io/provisioned-total - name: Provisioned - type: string - - jsonPath: .metadata.creationTimestamp - name: AGE - type: date - name: v1alpha1 - schema: - openAPIV3Schema: - description: |- - SharedExternalSecret specifies a shared ExternalSecret projected into the specified - ControlPlanes of the same namespace as ClusterExternalSecret and with that - propagated into the specified namespaces. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: SharedExternalSecretSpec defines the desired state of SharedExternalSecret. - properties: - controlPlaneSelector: - description: |- - The secret is projected only to control planes - matching the provided selector. Either names or a labelSelector must be specified. - properties: - labelSelectors: - description: |- - A resource is matched if any of the label selector matches. - In case when the list is empty, resource is matched too. - items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. 
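
The SharedBackupSchedule schema above combines the SharedBackup selector with the BackupSchedule Cron fields. A conforming sketch, names hypothetical:

---
apiVersion: spaces.upbound.io/v1alpha1
kind: SharedBackupSchedule
metadata:
  name: weekly-team-backups      # hypothetical
  namespace: default
spec:
  configRef:
    kind: SharedBackupConfig
    name: my-backup-config       # hypothetical
  controlPlaneSelector:
    names:
      - ctp1                     # hypothetical ControlPlane
      - ctp2                     # hypothetical ControlPlane
  schedule: "30 1 * * 0"         # Cron format: weekly, Sundays at 01:30
  suspend: false                 # optional
  ttl: 720h                      # optional
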
If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - names: - description: |- - A resource is selected if its metadata.name matches any of the provided names. - In case when the list is empty, resource is matched too. - items: - type: string - type: array - x-kubernetes-list-type: set - type: object - x-kubernetes-validations: - - message: either names or a labelSelector must be specified - rule: (has(self.labelSelectors) || has(self.names)) && (size(self.labelSelectors) - > 0 || size(self.names) > 0) - externalSecretMetadata: - description: The metadata of the secret store to be created. - properties: - annotations: - additionalProperties: - type: string - description: Annotations that are set on projected resource. - type: object - labels: - additionalProperties: - type: string - description: Labels that are set on projected resource. - type: object - type: object - externalSecretName: - description: |- - ExternalSecretName is the name to use when creating external secret within a control plane. - optional, if not set, SharedExternalSecret name will be used. - When set, it is immutable. - maxLength: 253 - minLength: 1 - type: string - x-kubernetes-validations: - - message: externalSecretName is immutable - rule: self == oldSelf - externalSecretSpec: - description: The spec for the ExternalSecrets to be created. - properties: - data: - description: Data defines the connection between the Kubernetes - Secret keys and the Provider data - items: - description: ExternalSecretData defines the connection between - the Kubernetes Secret key (spec.data.) and the Provider - data. - properties: - remoteRef: - description: |- - RemoteRef points to the remote secret and defines - which secret (version/property/..) to fetch. - properties: - conversionStrategy: - default: Default - description: Used to define a conversion Strategy - enum: - - Default - - Unicode - type: string - decodingStrategy: - default: None - description: Used to define a decoding Strategy - enum: - - Auto - - Base64 - - Base64URL - - None - type: string - key: - description: Key is the key used in the Provider, mandatory - type: string - metadataPolicy: - default: None - description: Policy for fetching tags/labels from provider - secrets, possible options are Fetch, None. Defaults - to None - enum: - - None - - Fetch - type: string - property: - description: Used to select a specific property of the - Provider value (if a map), if supported - type: string - version: - description: Used to select a specific version of the - Provider value, if supported - type: string - required: - - key - type: object - secretKey: - description: |- - SecretKey defines the key in which the controller stores - the value. 
This is the key in the Kind=Secret - type: string - sourceRef: - description: |- - SourceRef allows you to override the source - from which the value will pulled from. - maxProperties: 1 - properties: - generatorRef: - description: |- - GeneratorRef points to a generator custom resource. - - Deprecated: The generatorRef is not implemented in .data[]. - this will be removed with v1. - properties: - apiVersion: - default: generators.external-secrets.io/v1alpha1 - description: Specify the apiVersion of the generator - resource - type: string - kind: - description: Specify the Kind of the resource, e.g. - Password, ACRAccessToken etc. - type: string - name: - description: Specify the name of the generator resource - type: string - required: - - kind - - name - type: object - storeRef: - description: SecretStoreRef defines which SecretStore - to fetch the ExternalSecret data. - properties: - kind: - description: |- - Kind of the SecretStore resource (SecretStore or ClusterSecretStore) - Defaults to `SecretStore` - type: string - name: - description: Name of the SecretStore resource - type: string - required: - - name - type: object - type: object - required: - - remoteRef - - secretKey - type: object - type: array - dataFrom: - description: |- - DataFrom is used to fetch all properties from a specific Provider data - If multiple entries are specified, the Secret keys are merged in the specified order - items: - properties: - extract: - description: |- - Used to extract multiple key/value pairs from one secret - Note: Extract does not support sourceRef.Generator or sourceRef.GeneratorRef. - properties: - conversionStrategy: - default: Default - description: Used to define a conversion Strategy - enum: - - Default - - Unicode - type: string - decodingStrategy: - default: None - description: Used to define a decoding Strategy - enum: - - Auto - - Base64 - - Base64URL - - None - type: string - key: - description: Key is the key used in the Provider, mandatory - type: string - metadataPolicy: - default: None - description: Policy for fetching tags/labels from provider - secrets, possible options are Fetch, None. Defaults - to None - enum: - - None - - Fetch - type: string - property: - description: Used to select a specific property of the - Provider value (if a map), if supported - type: string - version: - description: Used to select a specific version of the - Provider value, if supported - type: string - required: - - key - type: object - find: - description: |- - Used to find secrets based on tags or regular expressions - Note: Find does not support sourceRef.Generator or sourceRef.GeneratorRef. - properties: - conversionStrategy: - default: Default - description: Used to define a conversion Strategy - enum: - - Default - - Unicode - type: string - decodingStrategy: - default: None - description: Used to define a decoding Strategy - enum: - - Auto - - Base64 - - Base64URL - - None - type: string - name: - description: Finds secrets based on the name. - properties: - regexp: - description: Finds secrets base - type: string - type: object - path: - description: A root path to start the find operations. - type: string - tags: - additionalProperties: - type: string - description: Find secrets based on tags. - type: object - type: object - rewrite: - description: |- - Used to rewrite secret Keys after getting them from the secret Provider - Multiple Rewrite operations can be provided. 
They are applied in a layered order (first to last) - items: - properties: - regexp: - description: |- - Used to rewrite with regular expressions. - The resulting key will be the output of a regexp.ReplaceAll operation. - properties: - source: - description: Used to define the regular expression - of a re.Compiler. - type: string - target: - description: Used to define the target pattern - of a ReplaceAll operation. - type: string - required: - - source - - target - type: object - transform: - description: |- - Used to apply string transformation on the secrets. - The resulting key will be the output of the template applied by the operation. - properties: - template: - description: |- - Used to define the template to apply on the secret name. - `.value ` will specify the secret name in the template. - type: string - required: - - template - type: object - type: object - type: array - sourceRef: - description: |- - SourceRef points to a store or generator - which contains secret values ready to use. - Use this in combination with Extract or Find pull values out of - a specific SecretStore. - When sourceRef points to a generator Extract or Find is not supported. - The generator returns a static map of values - maxProperties: 1 - properties: - generatorRef: - description: GeneratorRef points to a generator custom - resource. - properties: - apiVersion: - default: generators.external-secrets.io/v1alpha1 - description: Specify the apiVersion of the generator - resource - type: string - kind: - description: Specify the Kind of the resource, e.g. - Password, ACRAccessToken etc. - type: string - name: - description: Specify the name of the generator resource - type: string - required: - - kind - - name - type: object - storeRef: - description: SecretStoreRef defines which SecretStore - to fetch the ExternalSecret data. - properties: - kind: - description: |- - Kind of the SecretStore resource (SecretStore or ClusterSecretStore) - Defaults to `SecretStore` - type: string - name: - description: Name of the SecretStore resource - type: string - required: - - name - type: object - type: object - type: object - type: array - refreshInterval: - default: 1h - description: |- - RefreshInterval is the amount of time before the values are read again from the SecretStore provider - Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h" - May be set to zero to fetch and create it once. Defaults to 1h. - type: string - secretStoreRef: - description: SecretStoreRef defines which SecretStore to fetch - the ExternalSecret data. - properties: - kind: - description: |- - Kind of the SecretStore resource (SecretStore or ClusterSecretStore) - Defaults to `SecretStore` - type: string - name: - description: Name of the SecretStore resource - type: string - required: - - name - type: object - target: - default: - creationPolicy: Owner - deletionPolicy: Retain - description: |- - ExternalSecretTarget defines the Kubernetes Secret to be created - There can be only one target per ExternalSecret. 
- properties: - creationPolicy: - default: Owner - description: |- - CreationPolicy defines rules on how to create the resulting Secret - Defaults to 'Owner' - enum: - - Owner - - Orphan - - Merge - - None - type: string - deletionPolicy: - default: Retain - description: |- - DeletionPolicy defines rules on how to delete the resulting Secret - Defaults to 'Retain' - enum: - - Delete - - Merge - - Retain - type: string - immutable: - description: Immutable defines if the final secret will be - immutable - type: boolean - name: - description: |- - Name defines the name of the Secret resource to be managed - This field is immutable - Defaults to the .metadata.name of the ExternalSecret resource - type: string - template: - description: Template defines a blueprint for the created - Secret resource. - properties: - data: - additionalProperties: - type: string - type: object - engineVersion: - default: v2 - description: |- - EngineVersion specifies the template engine version - that should be used to compile/execute the - template specified in .data and .templateFrom[]. - enum: - - v1 - - v2 - type: string - mergePolicy: - default: Replace - enum: - - Replace - - Merge - type: string - metadata: - description: ExternalSecretTemplateMetadata defines metadata - fields for the Secret blueprint. - properties: - annotations: - additionalProperties: - type: string - type: object - labels: - additionalProperties: - type: string - type: object - type: object - templateFrom: - items: - properties: - configMap: - properties: - items: - items: - properties: - key: - type: string - templateAs: - default: Values - enum: - - Values - - KeysAndValues - type: string - required: - - key - type: object - type: array - name: - type: string - required: - - items - - name - type: object - literal: - type: string - secret: - properties: - items: - items: - properties: - key: - type: string - templateAs: - default: Values - enum: - - Values - - KeysAndValues - type: string - required: - - key - type: object - type: array - name: - type: string - required: - - items - - name - type: object - target: - default: Data - enum: - - Data - - Annotations - - Labels - type: string - type: object - type: array - type: - type: string - type: object - type: object - type: object - namespaceSelector: - description: |- - The projected secret can be consumed - only within namespaces matching the provided selector. - Either names or a labelSelector must be specified. - properties: - labelSelectors: - description: |- - A resource is matched if any of the label selector matches. - In case when the list is empty, resource is matched too. - items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. 
If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - names: - description: |- - A resource is selected if its metadata.name matches any of the provided names. - In case when the list is empty, resource is matched too. - items: - type: string - type: array - x-kubernetes-list-type: set - type: object - x-kubernetes-validations: - - message: either names or a labelSelector must be specified - rule: (has(self.labelSelectors) || has(self.names)) && (size(self.labelSelectors) - > 0 || size(self.names) > 0) - refreshTime: - description: Used to configure secret refresh interval in seconds. - type: string - required: - - controlPlaneSelector - - externalSecretSpec - - namespaceSelector - type: object - x-kubernetes-validations: - - message: externalSecretName is immutable - rule: has(self.externalSecretName) == has(oldSelf.externalSecretName) - status: - description: SharedExternalSecretStatus defines the observed state of - the ExternalSecret. - properties: - failed: - description: list of provisioning failures. - items: - description: |- - SharedExternalSecretProvisioningFailure describes a external secret provisioning - failure in a specific control plane. - properties: - conditions: - description: List of conditions. - items: - properties: - message: - type: string - status: - type: string - type: - type: string - required: - - status - - type - type: object - type: array - controlPlane: - description: ControlPlane name where the failure occurred. - type: string - required: - - controlPlane - type: object - type: array - x-kubernetes-list-map-keys: - - controlPlane - x-kubernetes-list-type: map - observedGeneration: - description: observed resource generation. - format: int64 - type: integer - provisioned: - description: List of successfully provisioned targets. - items: - description: SharedExternalSecretProvisioningSuccess defines external - secret provisioning success. - properties: - controlPlane: - description: ControlPlane name where the external secret got - successfully projected. 
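The x-kubernetes-validations rules above encode that a selector must either name control planes explicitly or carry at least one label selector. A hedged sketch of the label-based form, as a fragment of a SharedExternalSecret spec with hypothetical labels:

```yaml
# Fragment of a SharedExternalSecret spec; labels are hypothetical.
controlPlaneSelector:
  labelSelectors:
    - matchLabels:
        env: dev
    - matchExpressions:        # ORed with the selector above
        - key: team
          operator: In
          values: ["platform", "infra"]
namespaceSelector:
  names: ["default"]
```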
- type: string - required: - - controlPlane - type: object - type: array - x-kubernetes-list-map-keys: - - controlPlane - x-kubernetes-list-type: map - type: object - type: object - served: true - storage: true - subresources: - status: {} diff --git a/static/crds/space/v1.11/spaces.upbound.io_sharedsecretstores.yaml b/static/crds/space/v1.11/spaces.upbound.io_sharedsecretstores.yaml deleted file mode 100644 index 499a2208f..000000000 --- a/static/crds/space/v1.11/spaces.upbound.io_sharedsecretstores.yaml +++ /dev/null @@ -1,2702 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.3 - name: sharedsecretstores.spaces.upbound.io -spec: - group: spaces.upbound.io - names: - categories: - - externalsecrets - kind: SharedSecretStore - listKind: SharedSecretStoreList - plural: sharedsecretstores - shortNames: - - sss - singular: sharedsecretstore - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .metadata.annotations.sharedsecretstores\.internal\.spaces\.upbound\.io/provisioned-total - name: Provisioned - type: string - - jsonPath: .metadata.creationTimestamp - name: AGE - type: date - name: v1alpha1 - schema: - openAPIV3Schema: - description: |- - SharedSecretStore represents a shared SecretStore projected as ClusterSecretStore - into matching ControlPlanes in the same namespace. Once projected into a ControlPlane, - it can be referenced from ExternalSecret instances, as part of `storeRef` fields. - The secret store configuration including referenced credential are not leaked into the - ControlPlanes and in that sense can be called secure as they are invisible to the - ControlPlane workloads. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: SharedSecretStoreSpec defines the desired state of SecretStore. - properties: - controlPlaneSelector: - description: |- - The store is projected only to control planes - matching the provided selector. Either names or a labelSelector must be specified. - properties: - labelSelectors: - description: |- - A resource is matched if any of the label selector matches. - In case when the list is empty, resource is matched too. - items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. 
- type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - names: - description: |- - A resource is selected if its metadata.name matches any of the provided names. - In case when the list is empty, resource is matched too. - items: - type: string - type: array - x-kubernetes-list-type: set - type: object - x-kubernetes-validations: - - message: either names or a labelSelector must be specified - rule: (has(self.labelSelectors) || has(self.names)) && (size(self.labelSelectors) - > 0 || size(self.names) > 0) - namespaceSelector: - description: |- - The projected secret store can be consumed - only within namespaces matching the provided selector. - Either names or a labelSelector must be specified. - properties: - labelSelectors: - description: |- - A resource is matched if any of the label selector matches. - In case when the list is empty, resource is matched too. - items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. 
- type: object - type: object - x-kubernetes-map-type: atomic - type: array - names: - description: |- - A resource is selected if its metadata.name matches any of the provided names. - In case when the list is empty, resource is matched too. - items: - type: string - type: array - x-kubernetes-list-type: set - type: object - x-kubernetes-validations: - - message: either names or a labelSelector must be specified - rule: (has(self.labelSelectors) || has(self.names)) && (size(self.labelSelectors) - > 0 || size(self.names) > 0) - provider: - description: Used to configure the provider. Only one provider may - be set. - maxProperties: 1 - minProperties: 1 - properties: - akeyless: - description: Akeyless configures this store to sync secrets using - Akeyless Vault provider - properties: - akeylessGWApiURL: - description: Akeyless GW API Url from which the secrets to - be fetched from. - type: string - authSecretRef: - description: Auth configures how the operator authenticates - with Akeyless. - properties: - kubernetesAuth: - description: |- - Kubernetes authenticates with Akeyless by passing the ServiceAccount - token stored in the named Secret resource. - properties: - accessID: - description: the Akeyless Kubernetes auth-method access-id - type: string - k8sConfName: - description: Kubernetes-auth configuration name in - Akeyless-Gateway - type: string - secretRef: - description: |- - Optional secret field containing a Kubernetes ServiceAccount JWT used - for authenticating with Akeyless. If a name is specified without a key, - `token` is the default. If one is not specified, the one bound to - the controller will be used. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - serviceAccountRef: - description: |- - Optional service account field containing the name of a kubernetes ServiceAccount. - If the service account is specified, the service account secret token JWT will be used - for authenticating with Akeyless. If the service account selector is not supplied, - the secretRef will be used instead. - properties: - audiences: - description: |- - Audience specifies the `aud` claim for the service account token - If the service account uses a well-known annotation for e.g. IRSA or GCP Workload Identity - then this audiences will be appended to the list - items: - type: string - type: array - name: - description: The name of the ServiceAccount resource - being referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - required: - - name - type: object - required: - - accessID - - k8sConfName - type: object - secretRef: - description: |- - Reference to a Secret that contains the details - to authenticate with Akeyless. - properties: - accessID: - description: The SecretAccessID is used for authentication - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. 
Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - accessType: - description: |- - A reference to a specific 'key' within a Secret resource, - In some instances, `key` is a required field. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - accessTypeParam: - description: |- - A reference to a specific 'key' within a Secret resource, - In some instances, `key` is a required field. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - type: object - type: object - caBundle: - description: |- - PEM/base64 encoded CA bundle used to validate Akeyless Gateway certificate. Only used - if the AkeylessGWApiURL URL is using HTTPS protocol. If not set the system root certificates - are used to validate the TLS connection. - format: byte - type: string - caProvider: - description: The provider for the CA bundle to use to validate - Akeyless Gateway certificate. - properties: - key: - description: The key where the CA certificate can be found - in the Secret or ConfigMap. - type: string - name: - description: The name of the object located at the provider - type. - type: string - namespace: - description: |- - The namespace the Provider type is in. - Can only be defined when used in a ClusterSecretStore. - type: string - type: - description: The type of provider to use such as "Secret", - or "ConfigMap". - enum: - - Secret - - ConfigMap - type: string - required: - - name - - type - type: object - required: - - akeylessGWApiURL - - authSecretRef - type: object - alibaba: - description: Alibaba configures this store to sync secrets using - Alibaba Cloud provider - properties: - auth: - description: AlibabaAuth contains a secretRef for credentials. - properties: - rrsa: - description: Authenticate against Alibaba using RRSA. - properties: - oidcProviderArn: - type: string - oidcTokenFilePath: - type: string - roleArn: - type: string - sessionName: - type: string - required: - - oidcProviderArn - - oidcTokenFilePath - - roleArn - - sessionName - type: object - secretRef: - description: AlibabaAuthSecretRef holds secret references - for Alibaba credentials. - properties: - accessKeyIDSecretRef: - description: The AccessKeyID is used for authentication - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. 
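For orientation among the provider blocks, here is a minimal sketch of the Akeyless provider enumerated above, using its Kubernetes auth method; the gateway URL, access ID, config name, and ServiceAccount are all hypothetical:

```yaml
apiVersion: spaces.upbound.io/v1alpha1
kind: SharedSecretStore
metadata:
  name: akeyless-store          # hypothetical
  namespace: default
spec:
  controlPlaneSelector:
    names: ["ctp-dev"]
  namespaceSelector:
    names: ["default"]
  provider:
    akeyless:
      akeylessGWApiURL: https://gw.example.com:8000  # hypothetical gateway
      authSecretRef:
        kubernetesAuth:
          accessID: p-example            # the auth-method access-id
          k8sConfName: my-k8s-conf       # gateway-side Kubernetes-auth config
          serviceAccountRef:
            name: external-secrets       # hypothetical ServiceAccount
```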
Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - accessKeySecretSecretRef: - description: The AccessKeySecret is used for authentication - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - required: - - accessKeyIDSecretRef - - accessKeySecretSecretRef - type: object - type: object - regionID: - description: Alibaba Region to be used for the provider - type: string - required: - - auth - - regionID - type: object - aws: - description: AWS configures this store to sync secrets using AWS - Secret Manager provider - properties: - additionalRoles: - description: AdditionalRoles is a chained list of Role ARNs - which the provider will sequentially assume before assuming - the Role - items: - type: string - type: array - auth: - description: |- - Auth defines the information necessary to authenticate against AWS - if not set aws sdk will infer credentials from your environment - see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials - properties: - jwt: - description: Authenticate against AWS using service account - tokens. - properties: - serviceAccountRef: - description: A reference to a ServiceAccount resource. - properties: - audiences: - description: |- - Audience specifies the `aud` claim for the service account token - If the service account uses a well-known annotation for e.g. IRSA or GCP Workload Identity - then this audiences will be appended to the list - items: - type: string - type: array - name: - description: The name of the ServiceAccount resource - being referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - required: - - name - type: object - type: object - secretRef: - description: |- - AWSAuthSecretRef holds secret references for AWS credentials - both AccessKeyID and SecretAccessKey must be defined in order to properly authenticate. - properties: - accessKeyIDSecretRef: - description: The AccessKeyID is used for authentication - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. 
- type: string - type: object - secretAccessKeySecretRef: - description: The SecretAccessKey is used for authentication - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - sessionTokenSecretRef: - description: |- - The SessionToken used for authentication - This must be defined if AccessKeyID and SecretAccessKey are temporary credentials - see: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - type: object - type: object - externalID: - description: AWS External ID set on assumed IAM roles - type: string - region: - description: AWS Region to be used for the provider - type: string - role: - description: Role is a Role ARN which the provider will assume - type: string - secretsManager: - description: SecretsManager defines how the provider behaves - when interacting with AWS SecretsManager - properties: - forceDeleteWithoutRecovery: - description: |- - Specifies whether to delete the secret without any recovery window. You - can't use both this parameter and RecoveryWindowInDays in the same call. - If you don't use either, then by default Secrets Manager uses a 30 day - recovery window. - see: https://docs.aws.amazon.com/secretsmanager/latest/apireference/API_DeleteSecret.html#SecretsManager-DeleteSecret-request-ForceDeleteWithoutRecovery - type: boolean - recoveryWindowInDays: - description: |- - The number of days from 7 to 30 that Secrets Manager waits before - permanently deleting the secret. You can't use both this parameter and - ForceDeleteWithoutRecovery in the same call. If you don't use either, - then by default Secrets Manager uses a 30 day recovery window. - see: https://docs.aws.amazon.com/secretsmanager/latest/apireference/API_DeleteSecret.html#SecretsManager-DeleteSecret-request-RecoveryWindowInDays - format: int64 - type: integer - type: object - service: - description: Service defines which service should be used - to fetch the secrets - enum: - - SecretsManager - - ParameterStore - type: string - sessionTags: - description: AWS STS assume role session tags - items: - properties: - key: - type: string - value: - type: string - required: - - key - - value - type: object - type: array - transitiveTagKeys: - description: AWS STS assume role transitive session tags. 
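In the common IRSA-style setup, the AWS block above reduces to a few fields. A hedged sketch, with the account ID, role, and ServiceAccount all hypothetical:

```yaml
# Provider fragment for a SharedSecretStore spec.
provider:
  aws:
    service: SecretsManager
    region: us-east-1
    role: arn:aws:iam::123456789012:role/eso-reader  # hypothetical role to assume
    auth:
      jwt:
        serviceAccountRef:
          name: external-secrets   # hypothetical IRSA-annotated ServiceAccount
    secretsManager:
      recoveryWindowInDays: 7      # shortest window Secrets Manager allows
```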
- Required when multiple rules are used with the provider - items: - type: string - type: array - required: - region - service - type: object - azurekv: - description: AzureKV configures this store to sync secrets using - Azure Key Vault provider - properties: - authSecretRef: - description: Auth configures how the operator authenticates - with Azure. Required for ServicePrincipal auth type. - properties: - clientId: - description: The Azure clientId of the service principal - used for authentication. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - clientSecret: - description: The Azure ClientSecret of the service principal - used for authentication. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - type: object - authType: - default: ServicePrincipal - description: |- - Auth type defines how to authenticate to the keyvault service. - Valid values are: - - "ServicePrincipal" (default): Using a service principal (tenantId, clientId, clientSecret) - - "ManagedIdentity": Using Managed Identity assigned to the pod (see aad-pod-identity) - enum: - - ServicePrincipal - - ManagedIdentity - - WorkloadIdentity - type: string - environmentType: - default: PublicCloud - description: |- - EnvironmentType specifies the Azure cloud environment endpoints to use for - connecting and authenticating with Azure. By default it points to the public cloud AAD endpoint. - The following endpoints are available, also see here: https://github.com/Azure/go-autorest/blob/main/autorest/azure/environments.go#L152 - PublicCloud, USGovernmentCloud, ChinaCloud, GermanCloud - enum: - - PublicCloud - - USGovernmentCloud - - ChinaCloud - - GermanCloud - type: string - identityId: - description: If multiple Managed Identities are assigned to the - pod, you can select the one to be used - type: string - serviceAccountRef: - description: |- - ServiceAccountRef specifies the service account - that should be used when authenticating with WorkloadIdentity. - properties: - audiences: - description: |- - Audience specifies the `aud` claim for the service account token - If the service account uses a well-known annotation for e.g. IRSA or GCP Workload Identity - then this audiences will be appended to the list - items: - type: string - type: array - name: - description: The name of the ServiceAccount resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent.
- type: string - required: - - name - type: object - tenantId: - description: TenantID configures the Azure Tenant to send - requests to. Required for ServicePrincipal auth type. - type: string - vaultUrl: - description: Vault Url from which the secrets to be fetched - from. - type: string - required: - - vaultUrl - type: object - conjur: - description: Conjur configures this store to sync secrets using - conjur provider - properties: - auth: - properties: - apikey: - properties: - account: - type: string - apiKeyRef: - description: |- - A reference to a specific 'key' within a Secret resource, - In some instances, `key` is a required field. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - userRef: - description: |- - A reference to a specific 'key' within a Secret resource, - In some instances, `key` is a required field. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - required: - - account - - apiKeyRef - - userRef - type: object - jwt: - properties: - account: - type: string - secretRef: - description: |- - Optional SecretRef that refers to a key in a Secret resource containing JWT token to - authenticate with Conjur using the JWT authentication method. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - serviceAccountRef: - description: |- - Optional ServiceAccountRef specifies the Kubernetes service account for which to request - a token for with the `TokenRequest` API. - properties: - audiences: - description: |- - Audience specifies the `aud` claim for the service account token - If the service account uses a well-known annotation for e.g. IRSA or GCP Workload Identity - then this audiences will be appended to the list - items: - type: string - type: array - name: - description: The name of the ServiceAccount resource - being referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. 
- type: string - required: - - name - type: object - serviceID: - description: The conjur authn jwt webservice id - type: string - required: - - account - - serviceID - type: object - type: object - caBundle: - type: string - caProvider: - description: |- - Used to provide custom certificate authority (CA) certificates - for a secret store. The CAProvider points to a Secret or ConfigMap resource - that contains a PEM-encoded certificate. - properties: - key: - description: The key where the CA certificate can be found - in the Secret or ConfigMap. - type: string - name: - description: The name of the object located at the provider - type. - type: string - namespace: - description: |- - The namespace the Provider type is in. - Can only be defined when used in a ClusterSecretStore. - type: string - type: - description: The type of provider to use such as "Secret", - or "ConfigMap". - enum: - - Secret - - ConfigMap - type: string - required: - - name - - type - type: object - url: - type: string - required: - - auth - - url - type: object - delinea: - description: |- - Delinea DevOps Secrets Vault - https://docs.delinea.com/online-help/products/devops-secrets-vault/current - properties: - clientId: - description: ClientID is the non-secret part of the credential. - properties: - secretRef: - description: SecretRef references a key in a secret that - will be used as value. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - value: - description: Value can be specified directly to set a - value without using a secret. - type: string - type: object - clientSecret: - description: ClientSecret is the secret part of the credential. - properties: - secretRef: - description: SecretRef references a key in a secret that - will be used as value. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - value: - description: Value can be specified directly to set a - value without using a secret. - type: string - type: object - tenant: - description: Tenant is the chosen hostname / site name. - type: string - tld: - description: |- - TLD is based on the server location that was chosen during provisioning. - If unset, defaults to "com". - type: string - urlTemplate: - description: |- - URLTemplate - If unset, defaults to "https://%s.secretsvaultcloud.%s/v1/%s%s". 
- type: string - required: - - clientId - - clientSecret - - tenant - type: object - doppler: - description: Doppler configures this store to sync secrets using - the Doppler provider - properties: - auth: - description: Auth configures how the Operator authenticates - with the Doppler API - properties: - secretRef: - properties: - dopplerToken: - description: |- - The DopplerToken is used for authentication. - See https://docs.doppler.com/reference/api#authentication for auth token types. - The Key attribute defaults to dopplerToken if not specified. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - required: - - dopplerToken - type: object - required: - - secretRef - type: object - config: - description: Doppler config (required if not using a Service - Token) - type: string - format: - description: Format enables the downloading of secrets as - a file (string) - enum: - - json - - dotnet-json - - env - - yaml - - docker - type: string - nameTransformer: - description: Environment variable compatible name transforms - that change secret names to a different format - enum: - - upper-camel - - camel - - lower-snake - - tf-var - - dotnet-env - - lower-kebab - type: string - project: - description: Doppler project (required if not using a Service - Token) - type: string - required: - - auth - type: object - fake: - description: Fake configures a store with static key/value pairs - properties: - data: - items: - properties: - key: - type: string - value: - type: string - valueMap: - additionalProperties: - type: string - description: 'Deprecated: ValueMap is deprecated and - is intended to be removed in the future, use the `value` - field instead.' - type: object - version: - type: string - required: - - key - type: object - type: array - required: - - data - type: object - gcpsm: - description: GCPSM configures this store to sync secrets using - Google Cloud Platform Secret Manager provider - properties: - auth: - description: Auth defines the information necessary to authenticate - against GCP - properties: - secretRef: - properties: - secretAccessKeySecretRef: - description: The SecretAccessKey is used for authentication - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - type: object - workloadIdentity: - properties: - clusterLocation: - type: string - clusterName: - type: string - clusterProjectID: - type: string - serviceAccountRef: - description: A reference to a ServiceAccount resource. - properties: - audiences: - description: |- - Audience specifies the `aud` claim for the service account token - If the service account uses a well-known annotation for e.g. 
IRSA or GCP Workload Identity - then this audiences will be appended to the list - items: - type: string - type: array - name: - description: The name of the ServiceAccount resource - being referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - required: - - name - type: object - required: - - clusterLocation - - clusterName - - serviceAccountRef - type: object - type: object - projectID: - description: ProjectID project where secret is located - type: string - type: object - gitlab: - description: GitLab configures this store to sync secrets using - GitLab Variables provider - properties: - auth: - description: Auth configures how secret-manager authenticates - with a GitLab instance. - properties: - SecretRef: - properties: - accessToken: - description: AccessToken is used for authentication. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - type: object - required: - - SecretRef - type: object - environment: - description: Environment environment_scope of gitlab CI/CD - variables (Please see https://docs.gitlab.com/ee/ci/environments/#create-a-static-environment - on how to create environments) - type: string - groupIDs: - description: GroupIDs specify, which gitlab groups to pull - secrets from. Group secrets are read from left to right - followed by the project variables. - items: - type: string - type: array - inheritFromGroups: - description: InheritFromGroups specifies whether parent groups - should be discovered and checked for secrets. - type: boolean - projectID: - description: ProjectID specifies a project where secrets are - located. - type: string - url: - description: URL configures the GitLab instance URL. Defaults - to https://gitlab.com/. - type: string - required: - - auth - type: object - ibm: - description: IBM configures this store to sync secrets using IBM - Cloud provider - properties: - auth: - description: Auth configures how secret-manager authenticates - with the IBM secrets manager. - maxProperties: 1 - minProperties: 1 - properties: - containerAuth: - description: IBM Container-based auth with IAM Trusted - Profile. - properties: - iamEndpoint: - type: string - profile: - description: the IBM Trusted Profile - type: string - tokenLocation: - description: Location the token is mounted on the - pod - type: string - required: - - profile - type: object - secretRef: - properties: - secretApiKeySecretRef: - description: The SecretAccessKey is used for authentication - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. 
- type: string - type: object - type: object - type: object - serviceUrl: - description: ServiceURL is the Endpoint URL that is specific - to the Secrets Manager service instance - type: string - required: - - auth - type: object - keepersecurity: - description: KeeperSecurity configures this store to sync secrets - using the KeeperSecurity provider - properties: - authRef: - description: |- - A reference to a specific 'key' within a Secret resource, - In some instances, `key` is a required field. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being referred - to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - folderID: - type: string - required: - - authRef - - folderID - type: object - kubernetes: - description: Kubernetes configures this store to sync secrets - using a Kubernetes cluster provider - properties: - auth: - description: Auth configures how secret-manager authenticates - with a Kubernetes instance. - maxProperties: 1 - minProperties: 1 - properties: - cert: - description: has both clientCert and clientKey as secretKeySelector - properties: - clientCert: - description: |- - A reference to a specific 'key' within a Secret resource, - In some instances, `key` is a required field. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - clientKey: - description: |- - A reference to a specific 'key' within a Secret resource, - In some instances, `key` is a required field. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - type: object - serviceAccount: - description: points to a service account that should be - used for authentication - properties: - audiences: - description: |- - Audience specifies the `aud` claim for the service account token - If the service account uses a well-known annotation for e.g. IRSA or GCP Workload Identity - then this audiences will be appended to the list - items: - type: string - type: array - name: - description: The name of the ServiceAccount resource - being referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. 
- type: string - required: - - name - type: object - token: - description: use static token to authenticate with - properties: - bearerToken: - description: |- - A reference to a specific 'key' within a Secret resource, - In some instances, `key` is a required field. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - type: object - type: object - remoteNamespace: - default: default - description: Remote namespace to fetch the secrets from - type: string - server: - description: configures the Kubernetes server Address. - properties: - caBundle: - description: CABundle is a base64-encoded CA certificate - format: byte - type: string - caProvider: - description: 'see: https://external-secrets.io/v0.4.1/spec/#external-secrets.io/v1alpha1.CAProvider' - properties: - key: - description: The key where the CA certificate can - be found in the Secret or ConfigMap. - type: string - name: - description: The name of the object located at the - provider type. - type: string - namespace: - description: |- - The namespace the Provider type is in. - Can only be defined when used in a ClusterSecretStore. - type: string - type: - description: The type of provider to use such as "Secret", - or "ConfigMap". - enum: - - Secret - - ConfigMap - type: string - required: - - name - - type - type: object - url: - default: kubernetes.default - description: configures the Kubernetes server Address. - type: string - type: object - required: - - auth - type: object - onepassword: - description: OnePassword configures this store to sync secrets - using the 1Password Cloud provider - properties: - auth: - description: Auth defines the information necessary to authenticate - against OnePassword Connect Server - properties: - secretRef: - description: OnePasswordAuthSecretRef holds secret references - for 1Password credentials. - properties: - connectTokenSecretRef: - description: The ConnectToken is used for authentication - to a 1Password Connect Server. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - required: - - connectTokenSecretRef - type: object - required: - - secretRef - type: object - connectHost: - description: ConnectHost defines the OnePassword Connect Server - to connect to - type: string - vaults: - additionalProperties: - type: integer - description: Vaults defines which OnePassword vaults to search - in which order - type: object - required: - - auth - - connectHost - - vaults - type: object - oracle: - description: Oracle configures this store to sync secrets using - Oracle Vault provider - properties: - auth: - description: |- - Auth configures how secret-manager authenticates with the Oracle Vault. 
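The Kubernetes cluster provider above can mirror secrets out of another cluster. A hedged provider fragment for a SharedSecretStore spec, with the server URL, CA ConfigMap, and ServiceAccount all hypothetical:

```yaml
# Provider fragment for a SharedSecretStore spec.
provider:
  kubernetes:
    server:
      url: https://other-cluster.example.com:6443  # hypothetical API server
      caProvider:
        type: ConfigMap
        name: other-cluster-ca    # hypothetical ConfigMap holding the CA
        key: ca.crt
    remoteNamespace: team-secrets # namespace to read from; defaults to "default"
    auth:
      serviceAccount:
        name: eso-reader          # hypothetical ServiceAccount
```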
- If empty, use the instance principal, otherwise the user credentials specified in Auth. - properties: - secretRef: - description: SecretRef to pass through sensitive information. - properties: - fingerprint: - description: Fingerprint is the fingerprint of the - API private key. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - privatekey: - description: PrivateKey is the user's API Signing - Key in PEM format, used for authentication. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - required: - - fingerprint - - privatekey - type: object - tenancy: - description: Tenancy is the tenancy OCID where user is - located. - type: string - user: - description: User is an access OCID specific to the account. - type: string - required: - - secretRef - - tenancy - - user - type: object - compartment: - description: |- - Compartment is the vault compartment OCID. - Required for PushSecret - type: string - encryptionKey: - description: |- - EncryptionKey is the OCID of the encryption key within the vault. - Required for PushSecret - type: string - principalType: - description: |- - The type of principal to use for authentication. If left blank, the Auth struct will - determine the principal type. This optional field must be specified if using - workload identity. - enum: - - "" - - UserPrincipal - - InstancePrincipal - - Workload - type: string - region: - description: Region is the region where vault is located. - type: string - serviceAccountRef: - description: |- - ServiceAccountRef specified the service account - that should be used when authenticating with WorkloadIdentity. - properties: - audiences: - description: |- - Audience specifies the `aud` claim for the service account token - If the service account uses a well-known annotation for e.g. IRSA or GCP Workload Identity - then this audiences will be appended to the list - items: - type: string - type: array - name: - description: The name of the ServiceAccount resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - required: - - name - type: object - vault: - description: Vault is the vault's OCID of the specific vault - where secret is located. - type: string - required: - - region - - vault - type: object - scaleway: - description: Scaleway - properties: - accessKey: - description: AccessKey is the non-secret part of the api key. - properties: - secretRef: - description: SecretRef references a key in a secret that - will be used as value. 
- properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - value: - description: Value can be specified directly to set a - value without using a secret. - type: string - type: object - apiUrl: - description: APIURL is the url of the api to use. Defaults - to https://api.scaleway.com - type: string - projectId: - description: 'ProjectID is the id of your project, which you - can find in the console: https://console.scaleway.com/project/settings' - type: string - region: - description: 'Region where your secrets are located: https://developers.scaleway.com/en/quickstart/#region-and-zone' - type: string - secretKey: - description: SecretKey is the secret part of the api key. - properties: - secretRef: - description: SecretRef references a key in a secret that - will be used as value. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - value: - description: Value can be specified directly to set a - value without using a secret. - type: string - type: object - required: - - accessKey - - projectId - - region - - secretKey - type: object - senhasegura: - description: Senhasegura configures this store to sync secrets - using senhasegura provider - properties: - auth: - description: Auth defines parameters to authenticate in senhasegura - properties: - clientId: - type: string - clientSecretSecretRef: - description: |- - A reference to a specific 'key' within a Secret resource, - In some instances, `key` is a required field. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - required: - - clientId - - clientSecretSecretRef - type: object - ignoreSslCertificate: - default: false - description: IgnoreSslCertificate defines if SSL certificate - must be ignored - type: boolean - module: - description: Module defines which senhasegura module should - be used to get secrets - type: string - url: - description: URL of senhasegura - type: string - required: - - auth - - module - - url - type: object - upboundspaces: - description: UpboundProvider configures a store to sync secrets - with Upbound Spaces.
- properties: - storeRef: - description: StoreRef holds ref to Upbound Spaces secret store - properties: - name: - description: Name of the secret store on Upbound Spaces - type: string - required: - - name - type: object - required: - - storeRef - type: object - vault: - description: Vault configures this store to sync secrets using - Hashi provider - properties: - auth: - description: Auth configures how secret-manager authenticates - with the Vault server. - properties: - appRole: - description: |- - AppRole authenticates with Vault using the App Role auth mechanism, - with the role and secret stored in a Kubernetes Secret resource. - properties: - path: - default: approle - description: |- - Path where the App Role authentication backend is mounted - in Vault, e.g: "approle" - type: string - roleId: - description: |- - RoleID configured in the App Role authentication backend when setting - up the authentication backend in Vault. - type: string - roleRef: - description: |- - Reference to a key in a Secret that contains the App Role ID used - to authenticate with Vault. - The `key` field must be specified and denotes which entry within the Secret - resource is used as the app role id. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - secretRef: - description: |- - Reference to a key in a Secret that contains the App Role secret used - to authenticate with Vault. - The `key` field must be specified and denotes which entry within the Secret - resource is used as the app role secret. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - required: - - path - - secretRef - type: object - cert: - description: |- - Cert authenticates with TLS Certificates by passing client certificate, private key and ca certificate - Cert authentication method - properties: - clientCert: - description: |- - ClientCert is a certificate to authenticate using the Cert Vault - authentication method - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. 
- type: string - type: object - secretRef: - description: |- - SecretRef to a key in a Secret resource containing client private key to - authenticate with Vault using the Cert authentication method - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - type: object - iam: - description: |- - Iam authenticates with vault by passing a special AWS request signed with AWS IAM credentials - AWS IAM authentication method - properties: - externalID: - description: AWS External ID set on assumed IAM roles - type: string - jwt: - description: Specify a service account with IRSA enabled - properties: - serviceAccountRef: - description: A reference to a ServiceAccount resource. - properties: - audiences: - description: |- - Audience specifies the `aud` claim for the service account token - If the service account uses a well-known annotation for e.g. IRSA or GCP Workload Identity - then this audiences will be appended to the list - items: - type: string - type: array - name: - description: The name of the ServiceAccount - resource being referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - required: - - name - type: object - type: object - path: - description: 'Path where the AWS auth method is enabled - in Vault, e.g: "aws"' - type: string - region: - description: AWS region - type: string - role: - description: This is the AWS role to be assumed before - talking to vault - type: string - secretRef: - description: Specify credentials in a Secret object - properties: - accessKeyIDSecretRef: - description: The AccessKeyID is used for authentication - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource - being referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - secretAccessKeySecretRef: - description: The SecretAccessKey is used for authentication - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource - being referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. 
- type: string - type: object - sessionTokenSecretRef: - description: |- - The SessionToken used for authentication - This must be defined if AccessKeyID and SecretAccessKey are temporary credentials - see: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource - being referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - type: object - vaultAwsIamServerID: - description: 'X-Vault-AWS-IAM-Server-ID is an additional - header used by Vault IAM auth method to mitigate - against different types of replay attacks. More - details here: https://developer.hashicorp.com/vault/docs/auth/aws' - type: string - vaultRole: - description: Vault Role. In vault, a role describes - an identity with a set of permissions, groups, or - policies you want to attach a user of the secrets - engine - type: string - required: - - vaultRole - type: object - jwt: - description: |- - Jwt authenticates with Vault by passing role and JWT token using the - JWT/OIDC authentication method - properties: - kubernetesServiceAccountToken: - description: |- - Optional ServiceAccountToken specifies the Kubernetes service account for which to request - a token for with the `TokenRequest` API. - properties: - audiences: - description: |- - Optional audiences field that will be used to request a temporary Kubernetes service - account token for the service account referenced by `serviceAccountRef`. - Defaults to a single audience `vault` if not specified. - Deprecated: use serviceAccountRef.Audiences instead - items: - type: string - type: array - expirationSeconds: - description: |- - Optional expiration time in seconds that will be used to request a temporary - Kubernetes service account token for the service account referenced by - `serviceAccountRef`. - Deprecated: this will be removed in the future. - Defaults to 10 minutes. - format: int64 - type: integer - serviceAccountRef: - description: Service account field containing - the name of a kubernetes ServiceAccount. - properties: - audiences: - description: |- - Audience specifies the `aud` claim for the service account token - If the service account uses a well-known annotation for e.g. IRSA or GCP Workload Identity - then this audiences will be appended to the list - items: - type: string - type: array - name: - description: The name of the ServiceAccount - resource being referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent.
- type: string - required: - - name - type: object - required: - - serviceAccountRef - type: object - path: - default: jwt - description: |- - Path where the JWT authentication backend is mounted - in Vault, e.g: "jwt" - type: string - role: - description: |- - Role is a JWT role to authenticate using the JWT/OIDC Vault - authentication method - type: string - secretRef: - description: |- - Optional SecretRef that refers to a key in a Secret resource containing JWT token to - authenticate with Vault using the JWT/OIDC authentication method. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - required: - - path - type: object - kubernetes: - description: |- - Kubernetes authenticates with Vault by passing the ServiceAccount - token stored in the named Secret resource to the Vault server. - properties: - mountPath: - default: kubernetes - description: |- - Path where the Kubernetes authentication backend is mounted in Vault, e.g: - "kubernetes" - type: string - role: - description: |- - A required field containing the Vault Role to assume. A Role binds a - Kubernetes ServiceAccount with a set of Vault policies. - type: string - secretRef: - description: |- - Optional secret field containing a Kubernetes ServiceAccount JWT used - for authenticating with Vault. If a name is specified without a key, - `token` is the default. If one is not specified, the one bound to - the controller will be used. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - serviceAccountRef: - description: |- - Optional service account field containing the name of a kubernetes ServiceAccount. - If the service account is specified, the service account secret token JWT will be used - for authenticating with Vault. If the service account selector is not supplied, - the secretRef will be used instead. - properties: - audiences: - description: |- - Audience specifies the `aud` claim for the service account token - If the service account uses a well-known annotation for e.g. IRSA or GCP Workload Identity - then this audiences will be appended to the list - items: - type: string - type: array - name: - description: The name of the ServiceAccount resource - being referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. 
- type: string - required: - - name - type: object - required: - - mountPath - - role - type: object - ldap: - description: |- - Ldap authenticates with Vault by passing username/password pair using - the LDAP authentication method - properties: - path: - default: ldap - description: |- - Path where the LDAP authentication backend is mounted - in Vault, e.g: "ldap" - type: string - secretRef: - description: |- - SecretRef to a key in a Secret resource containing password for the LDAP - user used to authenticate with Vault using the LDAP authentication - method - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - username: - description: |- - Username is a LDAP user name used to authenticate using the LDAP Vault - authentication method - type: string - required: - - path - - username - type: object - tokenSecretRef: - description: TokenSecretRef authenticates with Vault by - presenting a token. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - userPass: - description: UserPass authenticates with Vault by passing - username/password pair - properties: - path: - default: user - description: |- - Path where the UserPassword authentication backend is mounted - in Vault, e.g: "user" - type: string - secretRef: - description: |- - SecretRef to a key in a Secret resource containing password for the - user used to authenticate with Vault using the UserPass authentication - method - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - username: - description: |- - Username is a user name used to authenticate using the UserPass Vault - authentication method - type: string - required: - - path - - username - type: object - type: object - caBundle: - description: |- - PEM encoded CA bundle used to validate Vault server certificate. Only used - if the Server URL is using HTTPS protocol. This parameter is ignored for - plain HTTP protocol connection. If not set the system root certificates - are used to validate the TLS connection. - format: byte - type: string - caProvider: - description: The provider for the CA bundle to use to validate - Vault server certificate. 
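For orientation, the Vault auth methods enumerated above (appRole, cert, iam, jwt, kubernetes, ldap, tokenSecretRef, userPass) are mutually alternative ways for the store to authenticate. A minimal sketch of a SharedSecretStore using the kubernetes method follows; the group/version, names, label, Vault address, and role are illustrative assumptions, not values taken from this schema dump:

```yaml
# Minimal illustrative sketch -- all names, labels, and endpoints are hypothetical.
apiVersion: spaces.upbound.io/v1alpha1   # group/version assumed
kind: SharedSecretStore
metadata:
  name: vault-store
  namespace: default
spec:
  controlPlaneSelector:
    labelSelectors:
      - matchLabels:
          env: dev                       # hypothetical control plane label
  namespaceSelector:                     # shape assumed to mirror controlPlaneSelector
    names:
      - default
  provider:
    vault:
      server: https://vault.example.com:8200  # hypothetical Vault address
      path: secret                       # KV mount path; "/data" is appended for v2 if omitted
      version: v2
      auth:
        kubernetes:
          mountPath: kubernetes          # where the Kubernetes auth backend is mounted
          role: spaces-role              # hypothetical Vault role bound to a ServiceAccount
```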
- properties: - key: - description: The key where the CA certificate can be found - in the Secret or ConfigMap. - type: string - name: - description: The name of the object located at the provider - type. - type: string - namespace: - description: |- - The namespace the Provider type is in. - Can only be defined when used in a ClusterSecretStore. - type: string - type: - description: The type of provider to use such as "Secret", - or "ConfigMap". - enum: - - Secret - - ConfigMap - type: string - required: - - name - - type - type: object - forwardInconsistent: - description: |- - ForwardInconsistent tells Vault to forward read-after-write requests to the Vault - leader instead of simply retrying within a loop. This can increase performance if - the option is enabled serverside. - https://www.vaultproject.io/docs/configuration/replication#allow_forwarding_via_header - type: boolean - namespace: - description: |- - Name of the vault namespace. Namespaces is a set of features within Vault Enterprise that allows - Vault environments to support Secure Multi-tenancy. e.g: "ns1". - More about namespaces can be found here https://www.vaultproject.io/docs/enterprise/namespaces - type: string - path: - description: |- - Path is the mount path of the Vault KV backend endpoint, e.g: - "secret". The v2 KV secret engine version specific "/data" path suffix - for fetching secrets from Vault is optional and will be appended - if not present in specified path. - type: string - readYourWrites: - description: |- - ReadYourWrites ensures isolated read-after-write semantics by - providing discovered cluster replication states in each request. - More information about eventual consistency in Vault can be found here - https://www.vaultproject.io/docs/enterprise/consistency - type: boolean - server: - description: 'Server is the connection address for the Vault - server, e.g: "https://vault.example.com:8200".' - type: string - version: - default: v2 - description: |- - Version is the Vault KV secret engine version. This can be either "v1" or - "v2". Version defaults to "v2". - enum: - - v1 - - v2 - type: string - required: - - auth - - server - type: object - webhook: - description: Webhook configures this store to sync secrets using - a generic templated webhook - properties: - body: - description: Body - type: string - caBundle: - description: |- - PEM encoded CA bundle used to validate webhook server certificate. Only used - if the Server URL is using HTTPS protocol. This parameter is ignored for - plain HTTP protocol connection. If not set the system root certificates - are used to validate the TLS connection. - format: byte - type: string - caProvider: - description: The provider for the CA bundle to use to validate - webhook server certificate. - properties: - key: - description: The key the value inside of the provider - type to use, only used with "Secret" type - type: string - name: - description: The name of the object located at the provider - type. - type: string - namespace: - description: The namespace the Provider type is in. - type: string - type: - description: The type of provider to use such as "Secret", - or "ConfigMap". 
- enum: - - Secret - - ConfigMap - type: string - required: - - name - - type - type: object - headers: - additionalProperties: - type: string - description: Headers - type: object - method: - description: Webhook Method - type: string - result: - description: Result formatting - properties: - jsonPath: - description: Json path of return value - type: string - type: object - secrets: - description: |- - Secrets to fill in templates - These secrets will be passed to the templating function as key value pairs under the given name - items: - properties: - name: - description: Name of this secret in templates - type: string - secretRef: - description: Secret ref to fill in credentials - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - required: - - name - - secretRef - type: object - type: array - timeout: - description: Timeout - type: string - url: - description: Webhook url to call - type: string - required: - - result - - url - type: object - yandexcertificatemanager: - description: YandexCertificateManager configures this store to - sync secrets using Yandex Certificate Manager provider - properties: - apiEndpoint: - description: Yandex.Cloud API endpoint (e.g. 'api.cloud.yandex.net:443') - type: string - auth: - description: Auth defines the information necessary to authenticate - against Yandex Certificate Manager - properties: - authorizedKeySecretRef: - description: The authorized key used for authentication - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - type: object - caProvider: - description: The provider for the CA bundle to use to validate - Yandex.Cloud server certificate. - properties: - certSecretRef: - description: |- - A reference to a specific 'key' within a Secret resource, - In some instances, `key` is a required field. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - type: object - required: - - auth - type: object - yandexlockbox: - description: YandexLockbox configures this store to sync secrets - using Yandex Lockbox provider - properties: - apiEndpoint: - description: Yandex.Cloud API endpoint (e.g. 
'api.cloud.yandex.net:443') - type: string - auth: - description: Auth defines the information necessary to authenticate - against Yandex Lockbox - properties: - authorizedKeySecretRef: - description: The authorized key used for authentication - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - type: object - caProvider: - description: The provider for the CA bundle to use to validate - Yandex.Cloud server certificate. - properties: - certSecretRef: - description: |- - A reference to a specific 'key' within a Secret resource, - In some instances, `key` is a required field. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - type: object - required: - - auth - type: object - type: object - refreshInterval: - description: Used to configure store refresh interval in seconds. - type: integer - retrySettings: - description: Used to configure http retries if failed. - properties: - maxRetries: - format: int32 - type: integer - retryInterval: - type: string - type: object - secretStoreMetadata: - description: The metadata of the secret store to be created. - properties: - annotations: - additionalProperties: - type: string - description: Annotations that are set on projected resource. - type: object - labels: - additionalProperties: - type: string - description: Labels that are set on projected resource. - type: object - type: object - secretStoreName: - description: |- - SecretStoreName is the name to use when creating secret stores within a control plane. - optional, if not set, SharedSecretStore name will be used. - When set, it is immutable. - maxLength: 253 - minLength: 1 - type: string - x-kubernetes-validations: - - message: value is immutable - rule: self == oldSelf - required: - - controlPlaneSelector - - namespaceSelector - - provider - type: object - x-kubernetes-validations: - - message: secretStoreName is immutable - rule: has(self.secretStoreName) == has(oldSelf.secretStoreName) - status: - description: SharedSecretStoreStatus defines the observed state of the - SecretStore. - properties: - conditions: - description: Conditions of the resource. - items: - description: A Condition that may apply to a resource. - properties: - lastTransitionTime: - description: |- - LastTransitionTime is the last time this condition transitioned from one - status to another. - format: date-time - type: string - message: - description: |- - A Message containing details about this condition's last transition from - one status to another, if any. - type: string - observedGeneration: - description: |- - ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
- For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - type: integer - reason: - description: A Reason for this condition's last transition from - one status to another. - type: string - status: - description: Status of this condition; is it currently True, - False, or Unknown? - type: string - type: - description: |- - Type of this condition. At most one of each condition type may apply to - a resource at any point in time. - type: string - required: - - lastTransitionTime - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - failed: - description: List of provisioning failures. - items: - description: SecretStoreProvisioningFailure defines secret store - provisioning failure. - properties: - conditions: - description: List of occurred conditions. - items: - properties: - lastTransitionTime: - format: date-time - type: string - message: - type: string - reason: - type: string - status: - type: string - type: - type: string - required: - - status - - type - type: object - type: array - controlPlane: - description: ControlPlane name where the failure occurred. - type: string - required: - - controlPlane - type: object - type: array - x-kubernetes-list-map-keys: - - controlPlane - x-kubernetes-list-type: map - observedGeneration: - description: |- - ObservedGeneration is the latest metadata.generation - which resulted in either a ready state, or stalled due to error - it can not recover from without human intervention. - format: int64 - type: integer - provisioned: - description: List of successfully provisioned targets. - items: - description: SecretStoreProvisioningSuccess defines secret store - provision success. - properties: - controlPlane: - description: ControlPlane name where the secret store got projected - type: string - required: - - controlPlane - type: object - type: array - x-kubernetes-list-map-keys: - - controlPlane - x-kubernetes-list-type: map - type: object - type: object - served: true - storage: true - subresources: - status: {} diff --git a/static/crds/space/v1.11/spaces.upbound.io_simulations.yaml b/static/crds/space/v1.11/spaces.upbound.io_simulations.yaml deleted file mode 100644 index 856d1a82b..000000000 --- a/static/crds/space/v1.11/spaces.upbound.io_simulations.yaml +++ /dev/null @@ -1,243 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.3 - name: simulations.spaces.upbound.io -spec: - group: spaces.upbound.io - names: - categories: - - spaces - kind: Simulation - listKind: SimulationList - plural: simulations - singular: simulation - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .spec.controlPlaneName - name: SOURCE - type: string - - jsonPath: .status.simulatedControlPlaneName - name: SIMULATED - type: string - - jsonPath: .status.conditions[?(@.type=='AcceptingChanges')].status - name: ACCEPTING-CHANGES - type: string - - jsonPath: .status.conditions[?(@.type=='AcceptingChanges')].reason - name: STATE - type: string - - jsonPath: .metadata.creationTimestamp - name: AGE - type: date - name: v1alpha1 - schema: - openAPIV3Schema: - description: |- - A Simulation creates a simulation of a source ControlPlane. You can apply a - change set to the simulated control plane. 
When the Simulation is complete it - will detect the changes and report the difference compared to the source - control plane. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: SimulationSpec specifies how to run the simulation. - properties: - completionCriteria: - description: |- - CompletionCriteria specify how Spaces should determine when the - simulation is complete. If any of the criteria are met, Spaces will set - the Simulation's desired state to complete. Omit the criteria if you want - to manually mark the Simulation complete. - items: - description: A CompletionCriterion specifies when a simulation is - complete. - properties: - duration: - description: Duration after which the simulation is complete. - type: string - type: - description: Type of criterion. - enum: - - Duration - type: string - required: - - duration - - type - type: object - type: array - controlPlaneName: - description: |- - ControlPlaneName is the name of the ControlPlane to simulate a change to. - This control plane is known as the Simulation's 'source' control plane. - minLength: 1 - type: string - x-kubernetes-validations: - - message: The source controlplane can't be changed - rule: self == oldSelf - desiredState: - default: AcceptingChanges - description: DesiredState of the simulation. - enum: - - AcceptingChanges - - Complete - - Terminated - type: string - x-kubernetes-validations: - - message: A complete Simulation can only be terminated - rule: oldSelf != 'Complete' || self == 'Complete' || self == 'Terminated' - - message: A Simulation can't be un-terminated - rule: oldSelf != 'Terminated' || self == oldSelf - required: - - controlPlaneName - - desiredState - type: object - status: - description: SimulationStatus represents the observed state of a Simulation. - properties: - changes: - description: |- - Changes detected by the simulation. Only changes that happen while the - simulation is in the AcceptingChanges state are included. - items: - description: |- - A SimulationChange represents an object that changed while the simulation was - in the AcceptingChanges state. - properties: - change: - description: Change type. - enum: - - Unknown - - Create - - Update - - Delete - type: string - objectRef: - description: ObjectReference to the changed object. - properties: - apiVersion: - description: APIVersion of the changed resource. - type: string - kind: - description: Kind of the changed resource. - type: string - name: - description: Name of the changed resource. - type: string - namespace: - description: Namespace of the changed resource. - type: string - required: - - apiVersion - - kind - - name - type: object - required: - - change - - objectRef - type: object - type: array - conditions: - description: Conditions of the resource. - items: - description: A Condition that may apply to a resource. 
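The spec above is easier to read as a concrete manifest. A minimal sketch, with hypothetical names (the source control plane and the duration are placeholders):

```yaml
# Minimal illustrative sketch -- names and durations are hypothetical.
apiVersion: spaces.upbound.io/v1alpha1
kind: Simulation
metadata:
  name: try-change
  namespace: default
spec:
  controlPlaneName: prod          # the source control plane; immutable once set
  desiredState: AcceptingChanges  # the default starting state
  completionCriteria:
    - type: Duration
      duration: 10m               # Spaces marks the Simulation complete after 10 minutes
```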
- properties: - lastTransitionTime: - description: |- - LastTransitionTime is the last time this condition transitioned from one - status to another. - format: date-time - type: string - message: - description: |- - A Message containing details about this condition's last transition from - one status to another, if any. - type: string - observedGeneration: - description: |- - ObservedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - type: integer - reason: - description: A Reason for this condition's last transition from - one status to another. - type: string - status: - description: Status of this condition; is it currently True, - False, or Unknown? - type: string - type: - description: |- - Type of this condition. At most one of each condition type may apply to - a resource at any point in time. - type: string - required: - - lastTransitionTime - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - controlPlaneData: - description: |- - ControlPlaneData exported from the source control plane and imported to - the simulated control plane. - properties: - exportTimestamp: - description: |- - ExportTimestamp is the time at which the source control plane's resources - were exported. Resources are exported to temporary storage before they're - imported to the simulated control plane. - format: date-time - type: string - importTimestamp: - description: |- - ImportTimestamp is the time at which the source control plane's resources - were imported to the simulated control plane. - format: date-time - type: string - type: object - observedGeneration: - description: |- - ObservedGeneration is the latest metadata.generation - which resulted in either a ready state, or stalled due to error - it can not recover from without human intervention. - format: int64 - type: integer - simulatedControlPlaneName: - description: |- - SimulatedControlPlaneName is the name of the control plane used to run - the simulation. - minLength: 1 - type: string - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/static/crds/space/v1.12/index.md b/static/crds/space/v1.12/index.md deleted file mode 100644 index 1be013c1f..000000000 --- a/static/crds/space/v1.12/index.md +++ /dev/null @@ -1,83 +0,0 @@ ---- -title: Spaces API Reference v1.12 -description: Documentation for the Spaces API resources ---- -import CrdDocViewer from '@site/src/components/CrdViewer'; - -# Spaces API Reference - -This page documents the Custom Resource Definitions (CRDs) for the Spaces API.
- -## Control Planes -### Control Planes - - -### Control Plane Overrides - - -## Observability -### Shared Telemetry Configs - - -## `pkg` -### Controller Revisions - - -### Controller Runtime Configs - - -### Controllers - - -### Remote Configuration Revisions - - -### Remote Configurations - - -## Policy -### Shared Upbound Policies - - -## References -### Referenced Objects - - -## Scheduling -### Environments - - -## Secrets -### Shared External Secrets - - -### Shared Secret Stores - - -## Simulations - - -## Spaces Backups - -### Backups - - - -### Backup Schedules - - - -### Shared Backup Configs - - - -### Shared Backups - - - -### Shared Backup Schedules - - - - diff --git a/static/crds/space/v1.12/observability.spaces.upbound.io_sharedtelemetryconfigs.yaml b/static/crds/space/v1.12/observability.spaces.upbound.io_sharedtelemetryconfigs.yaml deleted file mode 100644 index e119b699c..000000000 --- a/static/crds/space/v1.12/observability.spaces.upbound.io_sharedtelemetryconfigs.yaml +++ /dev/null @@ -1,401 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.3 - name: sharedtelemetryconfigs.observability.spaces.upbound.io -spec: - group: observability.spaces.upbound.io - names: - categories: - - observability - kind: SharedTelemetryConfig - listKind: SharedTelemetryConfigList - plural: sharedtelemetryconfigs - shortNames: - - stc - singular: sharedtelemetryconfig - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .metadata.annotations.sharedtelemetryconfig\.internal\.spaces\.upbound\.io/selected - name: Selected - type: string - - jsonPath: .metadata.annotations.sharedtelemetryconfig\.internal\.spaces\.upbound\.io/failed - name: Failed - type: string - - jsonPath: .metadata.annotations.sharedtelemetryconfig\.internal\.spaces\.upbound\.io/provisioned - name: Provisioned - type: string - - jsonPath: .status.conditions[?(@.type=='Validated')].status - name: Validated - type: string - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - name: v1alpha1 - schema: - openAPIV3Schema: - description: SharedTelemetryConfig defines a telemetry configuration over - a set of ControlPlanes. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: SharedTelemetryConfigSpec defines a telemetry configuration - over a set of ControlPlanes. - properties: - configPatchSecretRefs: - description: |- - ConfigPatchSecretRefs allows defining patches sourced from secrets to be - applied to the telemetry configuration. - items: - description: |- - ConfigPatchSecretRef defines a config patch sourced from a secret to be - applied to the telemetry configuration. - properties: - key: - description: Key in the secret from which to source the patch.
- type: string - name: - description: Name of the secret. - type: string - path: - description: |- - Path to the field in the telemetry configuration to patch. - Currently, we only support patching exporters, so the path - needs to start with "exporters". - type: string - x-kubernetes-validations: - - message: Only 'exporters' patching is supported, path must - start with 'exporters.' - rule: self.startsWith('exporters.') - required: - - key - - name - - path - type: object - type: array - controlPlaneSelector: - description: |- - ControlPlaneSelector defines the selector for ControlPlanes on which to - configure telemetry. - properties: - labelSelectors: - description: |- - A resource is matched if any of the label selector matches. - In case when the list is empty, resource is matched too. - items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - names: - description: |- - A resource is selected if its metadata.name matches any of the provided names. - In case when the list is empty, resource is matched too. - items: - type: string - type: array - x-kubernetes-list-type: set - type: object - x-kubernetes-validations: - - message: either names or a labelSelector must be specified - rule: (has(self.labelSelectors) || has(self.names)) && (size(self.labelSelectors) - > 0 || size(self.names) > 0) - exportPipeline: - description: |- - ExportPipeline defines the telemetry exporter pipeline to configure on - the selected ControlPlanes. - properties: - logs: - description: |- - Logs defines the logs exporter pipeline to configure on the - selected ControlPlanes. The value has to be present in the - spec.exporters field. - items: - type: string - maxItems: 10 - type: array - metrics: - description: |- - Metrics defines the metrics exporter pipeline to configure on the - selected ControlPlanes. The value has to be present in the - spec.exporters field. 
- items: - type: string - maxItems: 10 - type: array - traces: - description: |- - Traces defines the traces exporter pipeline to configure on the - selected ControlPlanes. The value has to be present in the - spec.exporters field. - items: - type: string - maxItems: 10 - type: array - type: object - exporters: - description: |- - Exporters defines the exporters to configure on the selected ControlPlanes. - Untyped as we use the underlying OpenTelemetryOperator to configure the - OpenTelemetry collector's exporters. Use the OpenTelemetry Collector - documentation to configure the exporters. - Currently the only supported exporters are push-based exporters. - type: object - x-kubernetes-preserve-unknown-fields: true - processorPipeline: - description: |- - ProcessorPipeline defines the telemetry processor pipeline to configure on - the selected ControlPlanes. - properties: - logs: - description: |- - Logs defines the logs processor pipeline to configure on the - selected ControlPlanes. The value has to be present in the - spec.processors field. - items: - type: string - maxItems: 10 - type: array - metrics: - description: |- - Metrics defines the metrics processor pipeline to configure on the - selected ControlPlanes. The value has to be present in the - spec.processors field. - items: - type: string - maxItems: 10 - type: array - traces: - description: |- - Traces defines the traces processor pipeline to configure on the - selected ControlPlanes. The value has to be present in the - spec.processors field. - items: - type: string - maxItems: 10 - type: array - type: object - processors: - description: |- - Processors defines the processors to configure on the selected ControlPlanes. - Untyped as we use the underlying OpenTelemetryOperator to configure the - OpenTelemetry collector's processors. Use the OpenTelemetry Collector - documentation to configure the processors. - type: object - x-kubernetes-preserve-unknown-fields: true - required: - - controlPlaneSelector - - exportPipeline - - exporters - type: object - status: - description: SharedTelemetryConfigStatus represents the observed state - of a SharedTelemetryConfig. - properties: - conditions: - description: Conditions of the resource. - items: - description: A Condition that may apply to a resource.
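Since spec requires controlPlaneSelector, exportPipeline, and exporters, and pipeline entries must name keys under spec.exporters, a minimal manifest looks roughly like the sketch below; the control plane name and collector endpoint are hypothetical:

```yaml
# Minimal illustrative sketch -- the control plane name and endpoint are hypothetical.
apiVersion: observability.spaces.upbound.io/v1alpha1
kind: SharedTelemetryConfig
metadata:
  name: otlp-telemetry
  namespace: default
spec:
  controlPlaneSelector:
    names:
      - ctp-dev                # hypothetical control plane name
  exporters:                   # raw OpenTelemetry Collector exporter config (push-based only)
    otlphttp:
      endpoint: https://otel.example.com:4318
  exportPipeline:
    metrics:
      - otlphttp               # must match a key under spec.exporters
    logs:
      - otlphttp
```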
- type: string - required: - - lastTransitionTime - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - failed: - description: list of provisioning failures. - items: - description: SharedTelemetryConfigProvisioningFailure defines configuration - provisioning failure. - properties: - conditions: - description: Conditions of the resource. - items: - description: A Condition that may apply to a resource. - properties: - lastTransitionTime: - description: |- - LastTransitionTime is the last time this condition transitioned from one - status to another. - format: date-time - type: string - message: - description: |- - A Message containing details about this condition's last transition from - one status to another, if any. - type: string - observedGeneration: - description: |- - ObservedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - type: integer - reason: - description: A Reason for this condition's last transition - from one status to another. - type: string - status: - description: Status of this condition; is it currently - True, False, or Unknown? - type: string - type: - description: |- - Type of this condition. At most one of each condition type may apply to - a resource at any point in time. - type: string - required: - - lastTransitionTime - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - controlPlane: - description: ControlPlane name where the failure occurred. - type: string - observedGeneration: - description: |- - ObservedGeneration is the latest metadata.generation - which resulted in either a ready state, or stalled due to error - it can not recover from without human intervention. - format: int64 - type: integer - required: - - controlPlane - type: object - type: array - x-kubernetes-list-map-keys: - - controlPlane - x-kubernetes-list-type: map - observedGeneration: - description: |- - ObservedGeneration is the latest metadata.generation - which resulted in either a ready state, or stalled due to error - it can not recover from without human intervention. - format: int64 - type: integer - provisioned: - description: List of successfully provisioned targets. - items: - type: string - type: array - x-kubernetes-list-type: set - selectedControlPlanes: - description: SelectedControlPlanes represents the names of the selected - ControlPlanes. 
- items: - type: string - type: array - x-kubernetes-list-type: set - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/static/crds/space/v1.12/pkg.upbound.io_controllerrevisions.yaml b/static/crds/space/v1.12/pkg.upbound.io_controllerrevisions.yaml deleted file mode 100644 index 3eb893d66..000000000 --- a/static/crds/space/v1.12/pkg.upbound.io_controllerrevisions.yaml +++ /dev/null @@ -1,331 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.3 - name: controllerrevisions.pkg.upbound.io -spec: - group: pkg.upbound.io - names: - categories: - - upbound - - pkgrev - kind: ControllerRevision - listKind: ControllerRevisionList - plural: controllerrevisions - singular: controllerrevision - scope: Cluster - versions: - - additionalPrinterColumns: - - jsonPath: .status.conditions[?(@.type=='Healthy')].status - name: HEALTHY - type: string - - jsonPath: .spec.revision - name: REVISION - type: string - - jsonPath: .spec.image - name: IMAGE - type: string - - jsonPath: .spec.desiredState - name: STATE - type: string - - jsonPath: .status.foundDependencies - name: DEP-FOUND - type: string - - jsonPath: .status.installedDependencies - name: DEP-INSTALLED - type: string - - jsonPath: .metadata.creationTimestamp - name: AGE - type: date - name: v1alpha1 - schema: - openAPIV3Schema: - description: A ControllerRevision represents a revision of a Controller. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: ControllerRevisionSpec specifies the configuration of a - ControllerRevision. - properties: - commonLabels: - additionalProperties: - type: string - description: |- - Map of string keys and values that can be used to organize and categorize - (scope and select) objects. May match selectors of replication controllers - and services. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ - type: object - desiredState: - description: DesiredState of the PackageRevision. Can be either Active - or Inactive. - type: string - helm: - description: |- - Helm-specific configuration for a controller revision. This field is - managed by the controller and should not be modified directly. - properties: - crdRefs: - description: CRDRefs is a list of CRDs that are deployed by this controller. - items: - type: string - type: array - releaseName: - description: ReleaseName is the name of the Helm release. - type: string - releaseNamespace: - description: ReleaseNamespace is the namespace of the Helm release.
- type: string - required: - - releaseName - - releaseNamespace - type: object - ignoreCrossplaneConstraints: - default: false - description: |- - IgnoreCrossplaneConstraints indicates to the package manager whether to - honor Crossplane version constraints specified by the package. - Default is false. - type: boolean - image: - description: Package image used by the install Pod to extract package - contents. - type: string - packagePullPolicy: - default: IfNotPresent - description: |- - PackagePullPolicy defines the pull policy for the package. It is also - applied to any images pulled for the package, such as a provider's - controller image. - Default is IfNotPresent. - type: string - packagePullSecrets: - description: |- - PackagePullSecrets are named secrets in the same namespace that can be - used to fetch packages from private registries. They are also applied to - any images pulled for the package, such as a provider's controller image. - items: - description: |- - LocalObjectReference contains enough information to let you locate the - referenced object inside the same namespace. - properties: - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - type: object - x-kubernetes-map-type: atomic - type: array - revision: - description: |- - Revision number. Indicates when the revision will be garbage collected - based on the parent's RevisionHistoryLimit. - format: int64 - type: integer - runtimeConfigRef: - default: - name: default - description: |- - RuntimeConfigRef references a RuntimeConfig resource that will be used - to configure the package runtime. - properties: - apiVersion: - default: pkg.upbound.io/v1alpha1 - description: API version of the referent. - type: string - kind: - default: ControllerRuntimeConfig - description: Kind of the referent. - type: string - name: - description: Name of the RuntimeConfig. - type: string - required: - - name - type: object - skipDependencyResolution: - default: false - description: |- - SkipDependencyResolution indicates to the package manager whether to skip - resolving dependencies for a package. Setting this value to true may have - unintended consequences. - Default is false. - type: boolean - required: - - desiredState - - image - - revision - type: object - x-kubernetes-validations: - - message: helm specification is immutable - rule: '!has(oldSelf.helm) || self.helm == oldSelf.helm' - status: - description: ControllerRevisionStatus represents the observed state of - a ControllerRevision. - properties: - conditions: - description: Conditions of the resource. - items: - description: A Condition that may apply to a resource. - properties: - lastTransitionTime: - description: |- - LastTransitionTime is the last time this condition transitioned from one - status to another. - format: date-time - type: string - message: - description: |- - A Message containing details about this condition's last transition from - one status to another, if any. - type: string - observedGeneration: - description: |- - ObservedGeneration represents the .metadata.generation that the condition was set based upon.
- For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - type: integer - reason: - description: A Reason for this condition's last transition from - one status to another. - type: string - status: - description: Status of this condition; is it currently True, - False, or Unknown? - type: string - type: - description: |- - Type of this condition. At most one of each condition type may apply to - a resource at any point in time. - type: string - required: - - lastTransitionTime - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - foundDependencies: - description: Dependency information. - format: int64 - type: integer - installedDependencies: - format: int64 - type: integer - invalidDependencies: - format: int64 - type: integer - objectRefs: - description: References to objects owned by PackageRevision. - items: - description: |- - A TypedReference refers to an object by Name, Kind, and APIVersion. It is - commonly used to reference cluster-scoped objects or objects where the - namespace is already known. - properties: - apiVersion: - description: APIVersion of the referenced object. - type: string - kind: - description: Kind of the referenced object. - type: string - name: - description: Name of the referenced object. - type: string - uid: - description: UID of the referenced object. - type: string - required: - - apiVersion - - kind - - name - type: object - type: array - permissionRequests: - description: |- - PermissionRequests made by this package. The package declares that its - controller needs these permissions to run. The RBAC manager is - responsible for granting them. - items: - description: |- - PolicyRule holds information that describes a policy rule, but does not contain information - about who the rule applies to or which namespace the rule applies to. - properties: - apiGroups: - description: |- - APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of - the enumerated resources in any API group will be allowed. "" represents the core API group and "*" represents all API groups. - items: - type: string - type: array - x-kubernetes-list-type: atomic - nonResourceURLs: - description: |- - NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path - Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. - Rules can either apply to API resources (such as "pods" or "secrets") or non-resource URL paths (such as "/api"), but not both. - items: - type: string - type: array - x-kubernetes-list-type: atomic - resourceNames: - description: ResourceNames is an optional white list of names - that the rule applies to. An empty set means that everything - is allowed. - items: - type: string - type: array - x-kubernetes-list-type: atomic - resources: - description: Resources is a list of resources this rule applies - to. '*' represents all resources. - items: - type: string - type: array - x-kubernetes-list-type: atomic - verbs: - description: Verbs is a list of Verbs that apply to ALL the - ResourceKinds contained in this rule. '*' represents all verbs. 
- items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - verbs - type: object - type: array - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/static/crds/space/v1.12/pkg.upbound.io_controllerruntimeconfigs.yaml b/static/crds/space/v1.12/pkg.upbound.io_controllerruntimeconfigs.yaml deleted file mode 100644 index 70c5ef3b6..000000000 --- a/static/crds/space/v1.12/pkg.upbound.io_controllerruntimeconfigs.yaml +++ /dev/null @@ -1,65 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.3 - name: controllerruntimeconfigs.pkg.upbound.io -spec: - group: pkg.upbound.io - names: - categories: - - upbound - kind: ControllerRuntimeConfig - listKind: ControllerRuntimeConfigList - plural: controllerruntimeconfigs - singular: controllerruntimeconfig - scope: Cluster - versions: - - additionalPrinterColumns: - - jsonPath: .metadata.creationTimestamp - name: AGE - type: date - name: v1alpha1 - schema: - openAPIV3Schema: - description: |- - An ControllerRuntimeConfig provides settings for configuring runtime of a - controller package. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: ControllerRuntimeConfigSpec specifies the configuration of - an ControllerRuntimeConfig. - properties: - helm: - description: HelmConfigSpec defines the Helm-specific configuration - for a controller runtime. 
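For orientation, a sketch of the shape a ControllerRevision takes under the schema removed in the first hunk above. ControllerRevisions are cut by the package manager from a parent Controller rather than authored by hand, so this is illustrative only; the name, image, and Helm release details are hypothetical:

apiVersion: pkg.upbound.io/v1alpha1
kind: ControllerRevision
metadata:
  name: example-controller-1a2b3c4   # hypothetical; revision names derive from the parent Controller
spec:
  desiredState: Active
  image: xpkg.upbound.io/example/controller:v0.1.0   # hypothetical package image
  revision: 1
  helm:
    releaseName: example-controller    # hypothetical; immutable per the CEL rule above
    releaseNamespace: example-system   # hypothetical
  runtimeConfigRef:                    # defaults shown explicitly
    apiVersion: pkg.upbound.io/v1alpha1
    kind: ControllerRuntimeConfig
    name: default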
- properties: - values: - type: object - x-kubernetes-preserve-unknown-fields: true - type: object - type: object - required: - - spec - type: object - served: true - storage: true - subresources: {} diff --git a/static/crds/space/v1.12/pkg.upbound.io_controllers.yaml b/static/crds/space/v1.12/pkg.upbound.io_controllers.yaml deleted file mode 100644 index ff7060caf..000000000 --- a/static/crds/space/v1.12/pkg.upbound.io_controllers.yaml +++ /dev/null @@ -1,224 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.3 - name: controllers.pkg.upbound.io -spec: - group: pkg.upbound.io - names: - categories: - - upbound - - pkg - kind: Controller - listKind: ControllerList - plural: controllers - singular: controller - scope: Cluster - versions: - - additionalPrinterColumns: - - jsonPath: .status.conditions[?(@.type=='Installed')].status - name: INSTALLED - type: string - - jsonPath: .status.conditions[?(@.type=='Healthy')].status - name: HEALTHY - type: string - - jsonPath: .spec.package - name: PACKAGE - type: string - - jsonPath: .metadata.creationTimestamp - name: AGE - type: date - name: v1alpha1 - schema: - openAPIV3Schema: - description: |- - A Controller installs an OCI compatible Upbound package, extending a Control - Plane with new capabilities. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: ControllerSpec specifies the configuration of a Controller. - properties: - commonLabels: - additionalProperties: - type: string - description: |- - Map of string keys and values that can be used to organize and categorize - (scope and select) objects. May match selectors of replication controllers - and services. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ - type: object - ignoreCrossplaneConstraints: - default: false - description: |- - IgnoreCrossplaneConstraints indicates to the package manager whether to - honor Crossplane version constrains specified by the package. - Default is false. - type: boolean - package: - description: Package is the name of the package that is being requested. - type: string - packagePullPolicy: - default: IfNotPresent - description: |- - PackagePullPolicy defines the pull policy for the package. - Default is IfNotPresent. - type: string - packagePullSecrets: - description: |- - PackagePullSecrets are named secrets in the same namespace that can be used - to fetch packages from private registries. - items: - description: |- - LocalObjectReference contains enough information to let you locate the - referenced object inside the same namespace. - properties: - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. 
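The ControllerRuntimeConfig CRD deleted above carries free-form Helm values (the schema marks helm.values with x-kubernetes-preserve-unknown-fields: true). A minimal sketch; the value keys are hypothetical and depend entirely on the controller package's chart:

apiVersion: pkg.upbound.io/v1alpha1
kind: ControllerRuntimeConfig
metadata:
  name: default
spec:
  helm:
    values:               # free-form; validated by the chart, not by this CRD
      replicaCount: 2     # hypothetical chart value
      resources:
        limits:
          memory: 512Mi   # hypothetical chart value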
Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - type: object - x-kubernetes-map-type: atomic - type: array - revisionActivationPolicy: - default: Automatic - description: |- - RevisionActivationPolicy specifies how the package controller should - update from one revision to the next. Options are Automatic or Manual. - Default is Automatic. - type: string - revisionHistoryLimit: - default: 1 - description: |- - RevisionHistoryLimit dictates how the package controller cleans up old - inactive package revisions. - Defaults to 1. Can be disabled by explicitly setting to 0. - format: int64 - type: integer - runtimeConfigRef: - default: - name: default - description: |- - RuntimeConfigRef references a RuntimeConfig resource that will be used - to configure the package runtime. - properties: - apiVersion: - default: pkg.upbound.io/v1alpha1 - description: API version of the referent. - type: string - kind: - default: ControllerRuntimeConfig - description: Kind of the referent. - type: string - name: - description: Name of the RuntimeConfig. - type: string - required: - - name - type: object - skipDependencyResolution: - default: false - description: |- - SkipDependencyResolution indicates to the package manager whether to skip - resolving dependencies for a package. Setting this value to true may have - unintended consequences. - Default is false. - type: boolean - required: - - package - type: object - status: - description: ControllerStatus represents the observed state of an Controller. - properties: - conditions: - description: Conditions of the resource. - items: - description: A Condition that may apply to a resource. - properties: - lastTransitionTime: - description: |- - LastTransitionTime is the last time this condition transitioned from one - status to another. - format: date-time - type: string - message: - description: |- - A Message containing details about this condition's last transition from - one status to another, if any. - type: string - observedGeneration: - description: |- - ObservedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - type: integer - reason: - description: A Reason for this condition's last transition from - one status to another. - type: string - status: - description: Status of this condition; is it currently True, - False, or Unknown? - type: string - type: - description: |- - Type of this condition. At most one of each condition type may apply to - a resource at any point in time. - type: string - required: - - lastTransitionTime - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - currentIdentifier: - description: |- - CurrentIdentifier is the most recent package source that was used to - produce a revision. The package manager uses this field to determine - whether to check for package updates for a given source when - packagePullPolicy is set to IfNotPresent. Manually removing this field - will cause the package manager to check that the current revision is - correct for the given package source. 
- type: string - currentRevision: - description: |- - CurrentRevision is the name of the current package revision. It will - reflect the most up to date revision, whether it has been activated or - not. - type: string - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/static/crds/space/v1.12/pkg.upbound.io_remoteconfigurationrevisions.yaml b/static/crds/space/v1.12/pkg.upbound.io_remoteconfigurationrevisions.yaml deleted file mode 100644 index 51e8fb230..000000000 --- a/static/crds/space/v1.12/pkg.upbound.io_remoteconfigurationrevisions.yaml +++ /dev/null @@ -1,281 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.3 - name: remoteconfigurationrevisions.pkg.upbound.io -spec: - group: pkg.upbound.io - names: - categories: - - upbound - - pkgrev - kind: RemoteConfigurationRevision - listKind: RemoteConfigurationRevisionList - plural: remoteconfigurationrevisions - singular: remoteconfigurationrevision - scope: Cluster - versions: - - additionalPrinterColumns: - - jsonPath: .status.conditions[?(@.type=='Healthy')].status - name: HEALTHY - type: string - - jsonPath: .spec.revision - name: REVISION - type: string - - jsonPath: .spec.image - name: IMAGE - type: string - - jsonPath: .spec.desiredState - name: STATE - type: string - - jsonPath: .metadata.creationTimestamp - name: AGE - type: date - name: v1alpha1 - schema: - openAPIV3Schema: - description: An RemoteConfigurationRevision represents a revision of an RemoteConfiguration. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: RemoteConfigurationRevisionSpec specifies the configuration - of an RemoteConfigurationRevision. - properties: - commonLabels: - additionalProperties: - type: string - description: |- - Map of string keys and values that can be used to organize and categorize - (scope and select) objects. May match selectors of replication controllers - and services. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ - type: object - desiredState: - description: DesiredState of the PackageRevision. Can be either Active - or Inactive. - type: string - ignoreCrossplaneConstraints: - default: false - description: |- - IgnoreCrossplaneConstraints indicates to the package manager whether to - honor Crossplane version constrains specified by the package. - Default is false. - type: boolean - image: - description: Package image used by install Pod to extract package - contents. - type: string - packagePullPolicy: - default: IfNotPresent - description: |- - PackagePullPolicy defines the pull policy for the package. It is also - applied to any images pulled for the package, such as a provider's - controller image. - Default is IfNotPresent. 
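The Controller CRD whose deletion ends above is the user-facing package type; the package manager cuts ControllerRevisions from it. A sketch using only fields from the schema, with a hypothetical package reference:

apiVersion: pkg.upbound.io/v1alpha1
kind: Controller
metadata:
  name: example-controller                             # hypothetical
spec:
  package: xpkg.upbound.io/example/controller:v0.1.0   # hypothetical OCI package
  packagePullPolicy: IfNotPresent                      # default
  revisionActivationPolicy: Automatic                  # default
  revisionHistoryLimit: 1                              # default; 0 disables cleanup
  runtimeConfigRef:
    name: default                                      # default
  packagePullSecrets:
    - name: registry-credentials                       # hypothetical secret in the same namespace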
- type: string - packagePullSecrets: - description: |- - PackagePullSecrets are named secrets in the same namespace that can be - used to fetch packages from private registries. They are also applied to - any images pulled for the package, such as a provider's controller image. - items: - description: |- - LocalObjectReference contains enough information to let you locate the - referenced object inside the same namespace. - properties: - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - type: object - x-kubernetes-map-type: atomic - type: array - revision: - description: |- - Revision number. Indicates when the revision will be garbage collected - based on the parent's RevisionHistoryLimit. - format: int64 - type: integer - skipDependencyResolution: - default: true - description: SkipDependencyResolution indicates to the package manager - whether to skip resolving dependencies for a package. This is always - true for RemoteConfigurationRevision. - type: boolean - required: - - desiredState - - image - - revision - type: object - x-kubernetes-validations: - - message: skipDependencyResolution must be true - rule: self.skipDependencyResolution - status: - description: RemoteConfigurationRevisionStatus represents the observed - state of an RemoteConfigurationRevision. - properties: - conditions: - description: Conditions of the resource. - items: - description: A Condition that may apply to a resource. - properties: - lastTransitionTime: - description: |- - LastTransitionTime is the last time this condition transitioned from one - status to another. - format: date-time - type: string - message: - description: |- - A Message containing details about this condition's last transition from - one status to another, if any. - type: string - observedGeneration: - description: |- - ObservedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - type: integer - reason: - description: A Reason for this condition's last transition from - one status to another. - type: string - status: - description: Status of this condition; is it currently True, - False, or Unknown? - type: string - type: - description: |- - Type of this condition. At most one of each condition type may apply to - a resource at any point in time. - type: string - required: - - lastTransitionTime - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - foundDependencies: - description: Dependency information. - format: int64 - type: integer - installedDependencies: - format: int64 - type: integer - invalidDependencies: - format: int64 - type: integer - objectRefs: - description: References to objects owned by PackageRevision. - items: - description: |- - A TypedReference refers to an object by Name, Kind, and APIVersion. It is - commonly used to reference cluster-scoped objects or objects where the - namespace is already known. - properties: - apiVersion: - description: APIVersion of the referenced object. 
- type: string - kind: - description: Kind of the referenced object. - type: string - name: - description: Name of the referenced object. - type: string - uid: - description: UID of the referenced object. - type: string - required: - - apiVersion - - kind - - name - type: object - type: array - permissionRequests: - description: |- - PermissionRequests made by this package. The package declares that its - controller needs these permissions to run. The RBAC manager is - responsible for granting them. - items: - description: |- - PolicyRule holds information that describes a policy rule, but does not contain information - about who the rule applies to or which namespace the rule applies to. - properties: - apiGroups: - description: |- - APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of - the enumerated resources in any API group will be allowed. "" represents the core API group and "*" represents all API groups. - items: - type: string - type: array - x-kubernetes-list-type: atomic - nonResourceURLs: - description: |- - NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path - Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. - Rules can either apply to API resources (such as "pods" or "secrets") or non-resource URL paths (such as "/api"), but not both. - items: - type: string - type: array - x-kubernetes-list-type: atomic - resourceNames: - description: ResourceNames is an optional white list of names - that the rule applies to. An empty set means that everything - is allowed. - items: - type: string - type: array - x-kubernetes-list-type: atomic - resources: - description: Resources is a list of resources this rule applies - to. '*' represents all resources. - items: - type: string - type: array - x-kubernetes-list-type: atomic - verbs: - description: Verbs is a list of Verbs that apply to ALL the - ResourceKinds contained in this rule. '*' represents all verbs. 
- items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - verbs - type: object - type: array - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/static/crds/space/v1.12/pkg.upbound.io_remoteconfigurations.yaml b/static/crds/space/v1.12/pkg.upbound.io_remoteconfigurations.yaml deleted file mode 100644 index 28100e576..000000000 --- a/static/crds/space/v1.12/pkg.upbound.io_remoteconfigurations.yaml +++ /dev/null @@ -1,205 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.3 - name: remoteconfigurations.pkg.upbound.io -spec: - group: pkg.upbound.io - names: - categories: - - upbound - - pkg - kind: RemoteConfiguration - listKind: RemoteConfigurationList - plural: remoteconfigurations - singular: remoteconfiguration - scope: Cluster - versions: - - additionalPrinterColumns: - - jsonPath: .status.conditions[?(@.type=='Installed')].status - name: INSTALLED - type: string - - jsonPath: .status.conditions[?(@.type=='Healthy')].status - name: HEALTHY - type: string - - jsonPath: .spec.package - name: PACKAGE - type: string - - jsonPath: .metadata.creationTimestamp - name: AGE - type: date - name: v1alpha1 - schema: - openAPIV3Schema: - description: |- - A RemoteConfiguration installs an OCI compatible configuration package in - remote mode by creating the claim CustomResourceDefinitions only. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: RemoteConfigurationSpec specifies the configuration of a - RemoteConfiguration.. - properties: - commonLabels: - additionalProperties: - type: string - description: |- - Map of string keys and values that can be used to organize and categorize - (scope and select) objects. May match selectors of replication controllers - and services. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ - type: object - ignoreCrossplaneConstraints: - default: false - description: |- - IgnoreCrossplaneConstraints indicates to the package manager whether to - honor Crossplane version constrains specified by the package. - Default is false. - type: boolean - package: - description: Package is the name of the package that is being requested. - type: string - packagePullPolicy: - default: IfNotPresent - description: |- - PackagePullPolicy defines the pull policy for the package. - Default is IfNotPresent. - type: string - packagePullSecrets: - description: |- - PackagePullSecrets are named secrets in the same namespace that can be used - to fetch packages from private registries. - items: - description: |- - LocalObjectReference contains enough information to let you locate the - referenced object inside the same namespace. 
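The permissionRequests field above embeds standard RBAC PolicyRule objects. A hypothetical status fragment showing how a revision's requested permissions might surface for the RBAC manager to grant:

status:
  permissionRequests:
    - apiGroups: [""]                  # "" is the core API group
      resources: ["secrets"]
      verbs: ["get", "list", "watch"]
    - apiGroups: ["apps"]
      resources: ["deployments"]
      verbs: ["*"]                     # '*' represents all verbs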
- properties: - name: - default: "" - description: |- - Name of the referent. - This field is effectively required, but due to backwards compatibility is - allowed to be empty. Instances of this type with an empty value here are - almost certainly wrong. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - type: object - x-kubernetes-map-type: atomic - type: array - revisionActivationPolicy: - default: Automatic - description: |- - RevisionActivationPolicy specifies how the package controller should - update from one revision to the next. Options are Automatic or Manual. - Default is Automatic. - type: string - revisionHistoryLimit: - default: 1 - description: |- - RevisionHistoryLimit dictates how the package controller cleans up old - inactive package revisions. - Defaults to 1. Can be disabled by explicitly setting to 0. - format: int64 - type: integer - skipDependencyResolution: - default: true - description: SkipDependencyResolution indicates to the package manager - whether to skip resolving dependencies for a package. This is always - true for RemoteConfiguration. - type: boolean - required: - - package - type: object - x-kubernetes-validations: - - message: skipDependencyResolution must be true - rule: self.skipDependencyResolution - status: - description: RemoteConfigurationStatus represents the observed state of - a RemoteConfiguration. - properties: - conditions: - description: Conditions of the resource. - items: - description: A Condition that may apply to a resource. - properties: - lastTransitionTime: - description: |- - LastTransitionTime is the last time this condition transitioned from one - status to another. - format: date-time - type: string - message: - description: |- - A Message containing details about this condition's last transition from - one status to another, if any. - type: string - observedGeneration: - description: |- - ObservedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - type: integer - reason: - description: A Reason for this condition's last transition from - one status to another. - type: string - status: - description: Status of this condition; is it currently True, - False, or Unknown? - type: string - type: - description: |- - Type of this condition. At most one of each condition type may apply to - a resource at any point in time. - type: string - required: - - lastTransitionTime - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - currentIdentifier: - description: |- - CurrentIdentifier is the most recent package source that was used to - produce a revision. The package manager uses this field to determine - whether to check for package updates for a given source when - packagePullPolicy is set to IfNotPresent. Manually removing this field - will cause the package manager to check that the current revision is - correct for the given package source. - type: string - currentRevision: - description: |- - CurrentRevision is the name of the current package revision. It will - reflect the most up to date revision, whether it has been activated or - not. 
- type: string - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/static/crds/space/v1.12/policy.spaces.upbound.io_sharedupboundpolicies.yaml b/static/crds/space/v1.12/policy.spaces.upbound.io_sharedupboundpolicies.yaml deleted file mode 100644 index 30f732e75..000000000 --- a/static/crds/space/v1.12/policy.spaces.upbound.io_sharedupboundpolicies.yaml +++ /dev/null @@ -1,4303 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.3 - name: sharedupboundpolicies.policy.spaces.upbound.io -spec: - group: policy.spaces.upbound.io - names: - categories: - - policies - kind: SharedUpboundPolicy - listKind: SharedUpboundPolicyList - plural: sharedupboundpolicies - shortNames: - - sup - singular: sharedupboundpolicy - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .metadata.annotations.sharedupboundpolicies\.internal\.spaces\.upbound\.io/provisioned-total - name: Provisioned - type: string - - jsonPath: .metadata.creationTimestamp - name: AGE - type: date - name: v1alpha1 - schema: - openAPIV3Schema: - description: |- - SharedUpboundPolicy specifies a shared Kyverno policy projected into the specified - ControlPlanes of the same namespace as SharedUpboundPolicy. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: SharedUpboundPolicySpec defines the desired state of SharedUpboundPolicy. - properties: - admission: - default: true - description: |- - Admission controls if rules are applied during admission. - Optional. Default value is "true". - type: boolean - applyRules: - description: |- - ApplyRules controls how rules in a policy are applied. Rule are processed in - the order of declaration. When set to `One` processing stops after a rule has - been applied i.e. the rule matches and results in a pass, fail, or error. When - set to `All` all rules in the policy are processed. The default is `All`. - enum: - - All - - One - type: string - background: - default: true - description: |- - Background controls if rules are applied to existing resources during a background scan. - Optional. Default value is "true". The value must be set to "false" if the policy rule - uses variables that are only available in the admission review request (e.g. user name). - type: boolean - controlPlaneSelector: - description: |- - The policy is projected only to control planes - matching the provided selector. Either names or a labelSelector must be specified. - properties: - labelSelectors: - description: |- - A resource is matched if any of the label selector matches. - In case when the list is empty, resource is matched too. - items: - description: |- - A label selector is a label query over a set of resources. 
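The RemoteConfiguration CRD deleted above installs a configuration package in remote mode, creating only the claim CRDs. A sketch with a hypothetical package reference; note that skipDependencyResolution defaults to true and the x-kubernetes-validations rule in the schema rejects any other value:

apiVersion: pkg.upbound.io/v1alpha1
kind: RemoteConfiguration
metadata:
  name: example-configuration                             # hypothetical
spec:
  package: xpkg.upbound.io/example/configuration:v1.0.0   # hypothetical OCI package
  revisionActivationPolicy: Automatic
  revisionHistoryLimit: 1
  skipDependencyResolution: true   # must remain true; enforced by the CEL validation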
The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - names: - description: |- - A resource is selected if its metadata.name matches any of the provided names. - In case when the list is empty, resource is matched too. - items: - type: string - type: array - x-kubernetes-list-type: set - type: object - x-kubernetes-validations: - - message: either names or a labelSelector must be specified - rule: (has(self.labelSelectors) || has(self.names)) && (size(self.labelSelectors) - > 0 || size(self.names) > 0) - failurePolicy: - description: |- - FailurePolicy defines how unexpected policy errors and webhook response timeout errors are handled. - Rules within the same policy share the same failure behavior. - This field should not be accessed directly, instead `GetFailurePolicy()` should be used. - Allowed values are Ignore or Fail. Defaults to Fail. - enum: - - Ignore - - Fail - type: string - generateExisting: - description: |- - GenerateExisting controls whether to trigger generate rule in existing resources - If is set to "true" generate rule will be triggered and applied to existing matched resources. - Defaults to "false" if not specified. - type: boolean - generateExistingOnPolicyUpdate: - description: Deprecated, use generateExisting instead - type: boolean - mutateExistingOnPolicyUpdate: - description: |- - MutateExistingOnPolicyUpdate controls if a mutateExisting policy is applied on policy events. - Default value is "false". - type: boolean - policyMetadata: - description: The metadata of the policy to be created. - properties: - annotations: - additionalProperties: - type: string - description: Annotations that are set on projected resource. - type: object - labels: - additionalProperties: - type: string - description: Labels that are set on projected resource. - type: object - type: object - policyName: - description: |- - PolicyName is the name to use when creating policy within a control plane. - optional, if not set, SharedUpboundPolicy name will be used. - When set, it is immutable. 
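The controlPlaneSelector block above requires either names or labelSelectors to be non-empty (per its x-kubernetes-validations rule). A sketch of both forms, with hypothetical labels and names:

controlPlaneSelector:
  labelSelectors:                    # a control plane matches if any selector matches
    - matchLabels:
        environment: production      # hypothetical label
# or, selecting by name instead:
# controlPlaneSelector:
#   names:
#     - ctp-prod-1                   # hypothetical control plane name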
- maxLength: 253 - minLength: 1 - type: string - x-kubernetes-validations: - - message: policyName is immutable - rule: self == oldSelf - rules: - description: |- - Rules is a list of Rule instances. A Policy contains multiple rules and - each rule can validate, mutate, or generate resources. - items: - description: |- - Rule defines a validation, mutation, or generation control for matching resources. - Each rules contains a match declaration to select resources, and an optional exclude - declaration to specify which resources to exclude. - properties: - celPreconditions: - description: |- - CELPreconditions are used to determine if a policy rule should be applied by evaluating a - set of CEL conditions. It can only be used with the validate.cel subrule - items: - description: MatchCondition represents a condition which must - by fulfilled for a request to be sent to a webhook. - properties: - expression: - description: |- - Expression represents the expression which will be evaluated by CEL. Must evaluate to bool. - CEL expressions have access to the contents of the AdmissionRequest and Authorizer, organized into CEL variables: - - 'object' - The object from the incoming request. The value is null for DELETE requests. - 'oldObject' - The existing object. The value is null for CREATE requests. - 'request' - Attributes of the admission request(/pkg/apis/admission/types.go#AdmissionRequest). - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. - See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the - request resource. - Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ - - Required. - type: string - name: - description: |- - Name is an identifier for this match condition, used for strategic merging of MatchConditions, - as well as providing an identifier for logging purposes. A good name should be descriptive of - the associated expression. - Name must be a qualified name consisting of alphanumeric characters, '-', '_' or '.', and - must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or - '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an - optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName') - - Required. - type: string - required: - - expression - - name - type: object - type: array - context: - description: Context defines variables and data sources that - can be used during rule execution. - items: - description: |- - ContextEntry adds variables and data sources to a rule Context. Either a - ConfigMap reference or a APILookup must be provided. - properties: - apiCall: - description: |- - APICall is an HTTP request to the Kubernetes API server, or other JSON web service. - The data returned is stored in the context with the name for the context entry. - properties: - data: - description: Data specifies the POST data sent to - the server. 
- items: - description: RequestData contains the HTTP POST - data - properties: - key: - description: Key is a unique identifier for - the data value - type: string - value: - description: Value is the data value - x-kubernetes-preserve-unknown-fields: true - required: - - key - - value - type: object - type: array - jmesPath: - description: |- - JMESPath is an optional JSON Match Expression that can be used to - transform the JSON response returned from the server. For example - a JMESPath of "items | length(@)" applied to the API server response - for the URLPath "/apis/apps/v1/deployments" will return the total count - of deployments across all namespaces. - type: string - method: - default: GET - description: Method is the HTTP request type (GET - or POST). - enum: - - GET - - POST - type: string - service: - description: Service is an API call to a JSON web - service - properties: - caBundle: - description: |- - CABundle is a PEM encoded CA bundle which will be used to validate - the server certificate. - type: string - url: - description: |- - URL is the JSON web service URL. A typical form is - `https://{service}.{namespace}:{port}/{path}`. - type: string - required: - - url - type: object - urlPath: - description: |- - URLPath is the URL path to be used in the HTTP GET or POST request to the - Kubernetes API server (e.g. "/api/v1/namespaces" or "/apis/apps/v1/deployments"). - The format required is the same format used by the `kubectl get --raw` command. - See https://kyverno.io/docs/writing-policies/external-data-sources/#variables-from-kubernetes-api-server-calls - for details. - type: string - type: object - configMap: - description: ConfigMap is the ConfigMap reference. - properties: - name: - description: Name is the ConfigMap name. - type: string - namespace: - description: Namespace is the ConfigMap namespace. - type: string - required: - - name - type: object - imageRegistry: - description: |- - ImageRegistry defines requests to an OCI/Docker V2 registry to fetch image - details. - properties: - imageRegistryCredentials: - description: ImageRegistryCredentials provides credentials - that will be used for authentication with registry - properties: - allowInsecureRegistry: - description: AllowInsecureRegistry allows insecure - access to a registry. - type: boolean - providers: - description: |- - Providers specifies a list of OCI Registry names, whose authentication providers are provided. - It can be of one of these values: default,google,azure,amazon,github. - items: - description: ImageRegistryCredentialsProvidersType - provides the list of credential providers - required. - enum: - - default - - amazon - - azure - - google - - github - type: string - type: array - secrets: - description: |- - Secrets specifies a list of secrets that are provided for credentials. - Secrets must live in the Kyverno namespace. - items: - type: string - type: array - type: object - jmesPath: - description: |- - JMESPath is an optional JSON Match Expression that can be used to - transform the ImageData struct returned as a result of processing - the image reference. - type: string - reference: - description: |- - Reference is image reference to a container image in the registry. - Example: ghcr.io/kyverno/kyverno:latest - type: string - required: - - reference - type: object - name: - description: Name is the variable name. - type: string - variable: - description: Variable defines an arbitrary JMESPath context - variable that can be defined inline. 
- properties: - default: - description: |- - Default is an optional arbitrary JSON object that the variable may take if the JMESPath - expression evaluates to nil - x-kubernetes-preserve-unknown-fields: true - jmesPath: - description: |- - JMESPath is an optional JMESPath Expression that can be used to - transform the variable. - type: string - value: - description: Value is any arbitrary JSON object representable - in YAML or JSON form. - x-kubernetes-preserve-unknown-fields: true - type: object - type: object - type: array - exclude: - description: |- - ExcludeResources defines when this policy rule should not be applied. The exclude - criteria can include resource information (e.g. kind, name, namespace, labels) - and admission review request information like the name or role. - properties: - all: - description: All allows specifying resources which will - be ANDed - items: - description: ResourceFilter allow users to "AND" or "OR" - between resources - properties: - clusterRoles: - description: ClusterRoles is the list of cluster-wide - role names for the user. - items: - type: string - type: array - resources: - description: ResourceDescription contains information - about the resource being created or modified. - properties: - annotations: - additionalProperties: - type: string - description: |- - Annotations is a map of annotations (key-value pairs of type string). Annotation keys - and values support the wildcard characters "*" (matches zero or many characters) and - "?" (matches at least one character). - type: object - kinds: - description: Kinds is a list of resource kinds. - items: - type: string - type: array - name: - description: |- - Name is the name of the resource. The name supports wildcard characters - "*" (matches zero or many characters) and "?" (at least one character). - NOTE: "Name" is being deprecated in favor of "Names". - type: string - names: - description: |- - Names are the names of the resources. Each name supports wildcard characters - "*" (matches zero or many characters) and "?" (at least one character). - items: - type: string - type: array - namespaceSelector: - description: |- - NamespaceSelector is a label selector for the resource namespace. Label keys and values - in `matchLabels` support the wildcard characters `*` (matches zero or many characters) - and `?` (matches one character).Wildcards allows writing label selectors like - ["storage.k8s.io/*": "*"]. Note that using ["*" : "*"] matches any key and value but - does not match an empty label set. - properties: - matchExpressions: - description: matchExpressions is a list of - label selector requirements. The requirements - are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that - the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. 
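The context schema above mirrors Kyverno's ContextEntry type: each entry exposes data from the API server, a ConfigMap, an image registry, or an inline variable. A hypothetical rule fragment pairing an apiCall with a variable:

context:
  - name: deploymentCount                            # hypothetical variable name
    apiCall:
      urlPath: "/apis/apps/v1/deployments"
      jmesPath: "items | length(@)"                  # total deployments across namespaces
  - name: environment
    variable:
      jmesPath: request.object.metadata.labels.env   # hypothetical label lookup
      default: development                           # used when the JMESPath yields nil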
- items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - namespaces: - description: |- - Namespaces is a list of namespaces names. Each name supports wildcard characters - "*" (matches zero or many characters) and "?" (at least one character). - items: - type: string - type: array - operations: - description: Operations can contain values ["CREATE, - "UPDATE", "CONNECT", "DELETE"], which are used - to match a specific action. - items: - description: AdmissionOperation can have one - of the values CREATE, UPDATE, CONNECT, DELETE, - which are used to match a specific action. - enum: - - CREATE - - CONNECT - - UPDATE - - DELETE - type: string - type: array - selector: - description: |- - Selector is a label selector. Label keys and values in `matchLabels` support the wildcard - characters `*` (matches zero or many characters) and `?` (matches one character). - Wildcards allows writing label selectors like ["storage.k8s.io/*": "*"]. Note that - using ["*" : "*"] matches any key and value but does not match an empty label set. - properties: - matchExpressions: - description: matchExpressions is a list of - label selector requirements. The requirements - are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that - the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: object - roles: - description: Roles is the list of namespaced role - names for the user. - items: - type: string - type: array - subjects: - description: Subjects is the list of subject names - like users, user groups, and service accounts. - items: - description: |- - Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference, - or a value for non-objects such as user and group names. - properties: - apiGroup: - description: |- - APIGroup holds the API group of the referenced subject. 
- Defaults to "" for ServiceAccount subjects. - Defaults to "rbac.authorization.k8s.io" for User and Group subjects. - type: string - kind: - description: |- - Kind of object being referenced. Values defined by this API group are "User", "Group", and "ServiceAccount". - If the Authorizer does not recognized the kind value, the Authorizer should report an error. - type: string - name: - description: Name of the object being referenced. - type: string - namespace: - description: |- - Namespace of the referenced object. If the object kind is non-namespace, such as "User" or "Group", and this value is not empty - the Authorizer should report an error. - type: string - required: - - kind - - name - type: object - x-kubernetes-map-type: atomic - type: array - type: object - type: array - any: - description: Any allows specifying resources which will - be ORed - items: - description: ResourceFilter allow users to "AND" or "OR" - between resources - properties: - clusterRoles: - description: ClusterRoles is the list of cluster-wide - role names for the user. - items: - type: string - type: array - resources: - description: ResourceDescription contains information - about the resource being created or modified. - properties: - annotations: - additionalProperties: - type: string - description: |- - Annotations is a map of annotations (key-value pairs of type string). Annotation keys - and values support the wildcard characters "*" (matches zero or many characters) and - "?" (matches at least one character). - type: object - kinds: - description: Kinds is a list of resource kinds. - items: - type: string - type: array - name: - description: |- - Name is the name of the resource. The name supports wildcard characters - "*" (matches zero or many characters) and "?" (at least one character). - NOTE: "Name" is being deprecated in favor of "Names". - type: string - names: - description: |- - Names are the names of the resources. Each name supports wildcard characters - "*" (matches zero or many characters) and "?" (at least one character). - items: - type: string - type: array - namespaceSelector: - description: |- - NamespaceSelector is a label selector for the resource namespace. Label keys and values - in `matchLabels` support the wildcard characters `*` (matches zero or many characters) - and `?` (matches one character).Wildcards allows writing label selectors like - ["storage.k8s.io/*": "*"]. Note that using ["*" : "*"] matches any key and value but - does not match an empty label set. - properties: - matchExpressions: - description: matchExpressions is a list of - label selector requirements. The requirements - are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that - the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. 
- items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - namespaces: - description: |- - Namespaces is a list of namespaces names. Each name supports wildcard characters - "*" (matches zero or many characters) and "?" (at least one character). - items: - type: string - type: array - operations: - description: Operations can contain values ["CREATE, - "UPDATE", "CONNECT", "DELETE"], which are used - to match a specific action. - items: - description: AdmissionOperation can have one - of the values CREATE, UPDATE, CONNECT, DELETE, - which are used to match a specific action. - enum: - - CREATE - - CONNECT - - UPDATE - - DELETE - type: string - type: array - selector: - description: |- - Selector is a label selector. Label keys and values in `matchLabels` support the wildcard - characters `*` (matches zero or many characters) and `?` (matches one character). - Wildcards allows writing label selectors like ["storage.k8s.io/*": "*"]. Note that - using ["*" : "*"] matches any key and value but does not match an empty label set. - properties: - matchExpressions: - description: matchExpressions is a list of - label selector requirements. The requirements - are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that - the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: object - roles: - description: Roles is the list of namespaced role - names for the user. - items: - type: string - type: array - subjects: - description: Subjects is the list of subject names - like users, user groups, and service accounts. - items: - description: |- - Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference, - or a value for non-objects such as user and group names. - properties: - apiGroup: - description: |- - APIGroup holds the API group of the referenced subject. 
- Defaults to "" for ServiceAccount subjects. - Defaults to "rbac.authorization.k8s.io" for User and Group subjects. - type: string - kind: - description: |- - Kind of object being referenced. Values defined by this API group are "User", "Group", and "ServiceAccount". - If the Authorizer does not recognized the kind value, the Authorizer should report an error. - type: string - name: - description: Name of the object being referenced. - type: string - namespace: - description: |- - Namespace of the referenced object. If the object kind is non-namespace, such as "User" or "Group", and this value is not empty - the Authorizer should report an error. - type: string - required: - - kind - - name - type: object - x-kubernetes-map-type: atomic - type: array - type: object - type: array - clusterRoles: - description: ClusterRoles is the list of cluster-wide role - names for the user. - items: - type: string - type: array - resources: - description: |- - ResourceDescription contains information about the resource being created or modified. - Requires at least one tag to be specified when under MatchResources. - Specifying ResourceDescription directly under match is being deprecated. - Please specify under "any" or "all" instead. - properties: - annotations: - additionalProperties: - type: string - description: |- - Annotations is a map of annotations (key-value pairs of type string). Annotation keys - and values support the wildcard characters "*" (matches zero or many characters) and - "?" (matches at least one character). - type: object - kinds: - description: Kinds is a list of resource kinds. - items: - type: string - type: array - name: - description: |- - Name is the name of the resource. The name supports wildcard characters - "*" (matches zero or many characters) and "?" (at least one character). - NOTE: "Name" is being deprecated in favor of "Names". - type: string - names: - description: |- - Names are the names of the resources. Each name supports wildcard characters - "*" (matches zero or many characters) and "?" (at least one character). - items: - type: string - type: array - namespaceSelector: - description: |- - NamespaceSelector is a label selector for the resource namespace. Label keys and values - in `matchLabels` support the wildcard characters `*` (matches zero or many characters) - and `?` (matches one character).Wildcards allows writing label selectors like - ["storage.k8s.io/*": "*"]. Note that using ["*" : "*"] matches any key and value but - does not match an empty label set. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the - selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. 
- items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - namespaces: - description: |- - Namespaces is a list of namespaces names. Each name supports wildcard characters - "*" (matches zero or many characters) and "?" (at least one character). - items: - type: string - type: array - operations: - description: Operations can contain values ["CREATE, - "UPDATE", "CONNECT", "DELETE"], which are used to - match a specific action. - items: - description: AdmissionOperation can have one of the - values CREATE, UPDATE, CONNECT, DELETE, which are - used to match a specific action. - enum: - - CREATE - - CONNECT - - UPDATE - - DELETE - type: string - type: array - selector: - description: |- - Selector is a label selector. Label keys and values in `matchLabels` support the wildcard - characters `*` (matches zero or many characters) and `?` (matches one character). - Wildcards allows writing label selectors like ["storage.k8s.io/*": "*"]. Note that - using ["*" : "*"] matches any key and value but does not match an empty label set. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the - selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: object - roles: - description: Roles is the list of namespaced role names - for the user. - items: - type: string - type: array - subjects: - description: Subjects is the list of subject names like - users, user groups, and service accounts. - items: - description: |- - Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference, - or a value for non-objects such as user and group names. - properties: - apiGroup: - description: |- - APIGroup holds the API group of the referenced subject. 
- Defaults to "" for ServiceAccount subjects. - Defaults to "rbac.authorization.k8s.io" for User and Group subjects. - type: string - kind: - description: |- - Kind of object being referenced. Values defined by this API group are "User", "Group", and "ServiceAccount". - If the Authorizer does not recognized the kind value, the Authorizer should report an error. - type: string - name: - description: Name of the object being referenced. - type: string - namespace: - description: |- - Namespace of the referenced object. If the object kind is non-namespace, such as "User" or "Group", and this value is not empty - the Authorizer should report an error. - type: string - required: - - kind - - name - type: object - x-kubernetes-map-type: atomic - type: array - type: object - generate: - description: Generation is used to create new resources. - properties: - apiVersion: - description: APIVersion specifies resource apiVersion. - type: string - clone: - description: |- - Clone specifies the source resource used to populate each generated resource. - At most one of Data or Clone can be specified. If neither are provided, the generated - resource will be created with default data only. - properties: - name: - description: Name specifies name of the resource. - type: string - namespace: - description: Namespace specifies source resource namespace. - type: string - type: object - cloneList: - description: CloneList specifies the list of source resource - used to populate each generated resource. - properties: - kinds: - description: Kinds is a list of resource kinds. - items: - type: string - type: array - namespace: - description: Namespace specifies source resource namespace. - type: string - selector: - description: |- - Selector is a label selector. Label keys and values in `matchLabels`. - wildcard characters are not supported. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the - selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: object - data: - description: |- - Data provides the resource declaration used to populate each generated resource. - At most one of Data or Clone must be specified. If neither are provided, the generated - resource will be created with default data only. 
-                x-kubernetes-preserve-unknown-fields: true
-              kind:
-                description: Kind specifies resource kind.
-                type: string
-              name:
-                description: Name specifies the resource name.
-                type: string
-              namespace:
-                description: Namespace specifies resource namespace.
-                type: string
-              synchronize:
-                description: |-
-                  Synchronize controls if generated resources should be kept in-sync with their source resource.
-                  If Synchronize is set to "true", changes to generated resources will be overwritten with resource
-                  data from Data or the resource specified in the Clone declaration.
-                  Optional. Defaults to "false" if not specified.
-                type: boolean
-              uid:
-                description: UID specifies the resource uid.
-                type: string
-            type: object
-          imageExtractors:
-            additionalProperties:
-              items:
-                properties:
-                  jmesPath:
-                    description: |-
-                      JMESPath is an optional JMESPath expression to apply to the image value.
-                      This is useful when the extracted image begins with a prefix like 'docker://'.
-                      The 'trim_prefix' function may be used to trim the prefix: trim_prefix(@, 'docker://').
-                      Note - Image digest mutation may not be used when applying a JMESPath to an image.
-                    type: string
-                  key:
-                    description: |-
-                      Key is an optional name of the field within 'path' that will be used to uniquely identify an image.
-                      Note - this field MUST be unique.
-                    type: string
-                  name:
-                    description: |-
-                      Name is the entry the image will be available under 'images.' in the context.
-                      If this field is not defined, image entries will appear under 'images.custom'.
-                    type: string
-                  path:
-                    description: |-
-                      Path is the path to the object containing the image field in a custom resource.
-                      It should be slash-separated. Each slash-separated key must be a valid YAML key or a wildcard '*'.
-                      Wildcard keys are expanded in case of arrays or objects.
-                    type: string
-                  value:
-                    description: |-
-                      Value is an optional name of the field within 'path' that points to the image URI.
-                      This is useful when a custom 'key' is also defined.
-                    type: string
-                required:
-                - path
-                type: object
-              type: array
-            description: |-
-              ImageExtractors defines a mapping from kinds to ImageExtractorConfigs.
-              This config is only valid for verifyImages rules.
-            type: object
-          match:
-            description: |-
-              MatchResources defines when this policy rule should be applied. The match
-              criteria can include resource information (e.g. kind, name, namespace, labels)
-              and admission review request information like the user name or role.
-              At least one kind is required.
-            properties:
-              all:
-                description: All allows specifying resources which will be ANDed.
-                items:
-                  description: ResourceFilter allows users to "AND" or "OR" between resources.
-                  properties:
-                    clusterRoles:
-                      description: ClusterRoles is the list of cluster-wide role names for the user.
-                      items:
-                        type: string
-                      type: array
-                    resources:
-                      description: ResourceDescription contains information about the resource being created or modified.
-                      # [schema elided: resource description properties (annotations, kinds, name, names,
-                      #  namespaceSelector, namespaces, operations, selector) identical to the resources block above]
-                      type: object
-                    roles:
-                      description: Roles is the list of namespaced role names for the user.
-                      items:
-                        type: string
-                      type: array
-                    subjects:
-                      description: Subjects is the list of subject names like users, user groups, and service accounts.
-                      items:
-                        # [schema elided: subject schema identical to the subject block above]
-                        type: object
-                        x-kubernetes-map-type: atomic
-                      type: array
-                  type: object
-                type: array
-              any:
-                description: Any allows specifying resources which will be ORed.
-                items:
-                  description: ResourceFilter allows users to "AND" or "OR" between resources.
-                  # [schema elided: resource filter schema identical to the "all" entry above]
-                  type: object
-                type: array
-              clusterRoles:
-                description: ClusterRoles is the list of cluster-wide role names for the user.
-                items:
-                  type: string
-                type: array
-              resources:
-                description: |-
-                  ResourceDescription contains information about the resource being created or modified.
-                  Requires at least one tag to be specified when under MatchResources.
-                  Specifying ResourceDescription directly under match is being deprecated.
-                  Please specify under "any" or "all" instead.
-                # [schema elided: resource description properties identical to the resources block above]
-                type: object
-              roles:
-                description: Roles is the list of namespaced role names for the user.
-                items:
-                  type: string
-                type: array
-              subjects:
-                description: Subjects is the list of subject names like users, user groups, and service accounts.
-                items:
-                  # [schema elided: subject schema identical to the subject block above]
-                  type: object
-                  x-kubernetes-map-type: atomic
-                type: array
-            type: object
-          mutate:
-            description: Mutation is used to modify matching resources.
-            properties:
-              foreach:
-                description: |-
-                  ForEach applies mutation rules to a list of sub-elements by creating a context for each
-                  entry in the list and looping over it to apply the specified logic.
-                items:
-                  description: |-
-                    ForEachMutation applies mutation rules to a list of sub-elements by creating a context for
-                    each entry in the list and looping over it to apply the specified logic.
-                  properties:
-                    context:
-                      description: Context defines variables and data sources that can be used during rule execution.
-                      items:
-                        description: |-
-                          ContextEntry adds variables and data sources to a rule Context. Either a
-                          ConfigMap reference or an APILookup must be provided.
-                        properties:
-                          apiCall:
-                            description: |-
-                              APICall is an HTTP request to the Kubernetes API server, or other JSON web service.
-                              The data returned is stored in the context with the name for the context entry.
-                            properties:
-                              data:
-                                description: Data specifies the POST data sent to the server.
-                                items:
-                                  description: RequestData contains the HTTP POST data.
-                                  properties:
-                                    key:
-                                      description: Key is a unique identifier for the data value.
-                                      type: string
-                                    value:
-                                      description: Value is the data value.
-                                      x-kubernetes-preserve-unknown-fields: true
-                                  required:
-                                  - key
-                                  - value
-                                  type: object
-                                type: array
-                              jmesPath:
-                                description: |-
-                                  JMESPath is an optional JSON Match Expression that can be used to
-                                  transform the JSON response returned from the server. For example
-                                  a JMESPath of "items | length(@)" applied to the API server response
-                                  for the URLPath "/apis/apps/v1/deployments" will return the total count
-                                  of deployments across all namespaces.
-                                type: string
-                              method:
-                                default: GET
-                                description: Method is the HTTP request type (GET or POST).
-                                enum:
-                                - GET
-                                - POST
-                                type: string
-                              service:
-                                description: Service is an API call to a JSON web service.
-                                properties:
-                                  caBundle:
-                                    description: |-
-                                      CABundle is a PEM encoded CA bundle which will be used to validate
-                                      the server certificate.
-                                    type: string
-                                  url:
-                                    description: |-
-                                      URL is the JSON web service URL. A typical form is
-                                      `https://{service}.{namespace}:{port}/{path}`.
-                                    type: string
-                                required:
-                                - url
-                                type: object
-                              urlPath:
-                                description: |-
-                                  URLPath is the URL path to be used in the HTTP GET or POST request to the
-                                  Kubernetes API server (e.g. "/api/v1/namespaces" or "/apis/apps/v1/deployments").
-                                  The format required is the same format used by the `kubectl get --raw` command.
-                                  See https://kyverno.io/docs/writing-policies/external-data-sources/#variables-from-kubernetes-api-server-calls
-                                  for details.
-                                type: string
-                            type: object
-                          configMap:
-                            description: ConfigMap is the ConfigMap reference.
-                            properties:
-                              name:
-                                description: Name is the ConfigMap name.
-                                type: string
-                              namespace:
-                                description: Namespace is the ConfigMap namespace.
-                                type: string
-                            required:
-                            - name
-                            type: object
-                          imageRegistry:
-                            description: |-
-                              ImageRegistry defines requests to an OCI/Docker V2 registry to fetch image
-                              details.
-                            properties:
-                              imageRegistryCredentials:
-                                description: ImageRegistryCredentials provides credentials that will be used for authentication with a registry.
-                                properties:
-                                  allowInsecureRegistry:
-                                    description: AllowInsecureRegistry allows insecure access to a registry.
-                                    type: boolean
-                                  providers:
-                                    description: |-
-                                      Providers specifies a list of OCI Registry names, whose authentication providers are provided.
-                                      It can be one of these values: default, google, azure, amazon, github.
-                                    items:
-                                      description: ImageRegistryCredentialsProvidersType provides the list of credential providers required.
-                                      enum:
-                                      - default
-                                      - amazon
-                                      - azure
-                                      - google
-                                      - github
-                                      type: string
-                                    type: array
-                                  secrets:
-                                    description: |-
-                                      Secrets specifies a list of secrets that are provided for credentials.
-                                      Secrets must live in the Kyverno namespace.
-                                    items:
-                                      type: string
-                                    type: array
-                                type: object
-                              jmesPath:
-                                description: |-
-                                  JMESPath is an optional JSON Match Expression that can be used to
-                                  transform the ImageData struct returned as a result of processing
-                                  the image reference.
-                                type: string
-                              reference:
-                                description: |-
-                                  Reference is an image reference to a container image in the registry.
-                                  Example: ghcr.io/kyverno/kyverno:latest
-                                type: string
-                            required:
-                            - reference
-                            type: object
-                          name:
-                            description: Name is the variable name.
-                            type: string
-                          variable:
-                            description: Variable defines an arbitrary JMESPath context variable that can be defined inline.
-                            properties:
-                              default:
-                                description: |-
-                                  Default is an optional arbitrary JSON object that the variable may take if the JMESPath
-                                  expression evaluates to nil.
-                                x-kubernetes-preserve-unknown-fields: true
-                              jmesPath:
-                                description: |-
-                                  JMESPath is an optional JMESPath Expression that can be used to
-                                  transform the variable.
-                                type: string
-                              value:
-                                description: Value is any arbitrary JSON object representable in YAML or JSON form.
-                                x-kubernetes-preserve-unknown-fields: true
-                            type: object
-                        type: object
-                      type: array
-                    foreach:
-                      description: Foreach declares a nested foreach iterator.
-                      x-kubernetes-preserve-unknown-fields: true
-                    list:
-                      description: |-
-                        List specifies a JMESPath expression that results in one or more elements
-                        to which the validation logic is applied.
-                      type: string
-                    order:
-                      description: |-
-                        Order defines the iteration order on the list.
-                        Can be Ascending to iterate from first to last element or Descending to iterate from last to first element.
-                      enum:
-                      - Ascending
-                      - Descending
-                      type: string
-                    patchStrategicMerge:
-                      description: |-
-                        PatchStrategicMerge is a strategic merge patch used to modify resources.
-                        See https://kubernetes.io/docs/tasks/manage-kubernetes-objects/update-api-object-kubectl-patch/
-                        and https://kubectl.docs.kubernetes.io/references/kustomize/patchesstrategicmerge/.
-                      x-kubernetes-preserve-unknown-fields: true
-                    patchesJson6902:
-                      description: |-
-                        PatchesJSON6902 is a list of RFC 6902 JSON Patch declarations used to modify resources.
-                        See https://tools.ietf.org/html/rfc6902 and https://kubectl.docs.kubernetes.io/references/kustomize/patchesjson6902/.
-                      type: string
-                    preconditions:
-                      description: |-
-                        AnyAllConditions are used to determine if a policy rule should be applied by evaluating a
-                        set of conditions. The declaration can contain nested `any` or `all` statements.
-                        See: https://kyverno.io/docs/writing-policies/preconditions/
-                      properties:
-                        all:
-                          description: |-
-                            AllConditions enable variable-based conditional rule execution. This is useful for
-                            finer control of when a rule is applied. A condition can reference object data
-                            using JMESPath notation.
-                            Here, all of the conditions need to pass.
-                          items:
-                            description: Condition defines variable-based conditional criteria for rule execution.
-                            properties:
-                              key:
-                                description: Key is the context entry (using JMESPath) for conditional rule evaluation.
-                                x-kubernetes-preserve-unknown-fields: true
-                              message:
-                                description: Message is an optional display message.
-                                type: string
-                              operator:
-                                description: |-
-                                  Operator is the conditional operation to perform. Valid operators are:
-                                  Equals, NotEquals, In, AnyIn, AllIn, NotIn, AnyNotIn, AllNotIn, GreaterThanOrEquals,
-                                  GreaterThan, LessThanOrEquals, LessThan, DurationGreaterThanOrEquals, DurationGreaterThan,
-                                  DurationLessThanOrEquals, DurationLessThan
-                                enum:
-                                - Equals
-                                - NotEquals
-                                - In
-                                - AnyIn
-                                - AllIn
-                                - NotIn
-                                - AnyNotIn
-                                - AllNotIn
-                                - GreaterThanOrEquals
-                                - GreaterThan
-                                - LessThanOrEquals
-                                - LessThan
-                                - DurationGreaterThanOrEquals
-                                - DurationGreaterThan
-                                - DurationLessThanOrEquals
-                                - DurationLessThan
-                                type: string
-                              value:
-                                description: |-
-                                  Value is the conditional value, or set of values. The values can be fixed set
-                                  or can be variables declared using JMESPath.
-                                x-kubernetes-preserve-unknown-fields: true
-                            type: object
-                          type: array
-                        any:
-                          description: |-
-                            AnyConditions enable variable-based conditional rule execution. This is useful for
-                            finer control of when a rule is applied. A condition can reference object data
-                            using JMESPath notation.
-                            Here, at least one of the conditions needs to pass.
-                          items:
-                            # [schema elided: condition schema identical to the "all" entry above]
-                            type: object
-                          type: array
-                      type: object
-                      x-kubernetes-preserve-unknown-fields: true
-                  type: object
-                type: array
-              patchStrategicMerge:
-                description: |-
-                  PatchStrategicMerge is a strategic merge patch used to modify resources.
-                  See https://kubernetes.io/docs/tasks/manage-kubernetes-objects/update-api-object-kubectl-patch/
-                  and https://kubectl.docs.kubernetes.io/references/kustomize/patchesstrategicmerge/.
-                x-kubernetes-preserve-unknown-fields: true
-              patchesJson6902:
-                description: |-
-                  PatchesJSON6902 is a list of RFC 6902 JSON Patch declarations used to modify resources.
-                  See https://tools.ietf.org/html/rfc6902 and https://kubectl.docs.kubernetes.io/references/kustomize/patchesjson6902/.
-                type: string
-              targets:
-                description: Targets defines the target resources to be mutated.
-                items:
-                  description: TargetResourceSpec defines targets for mutating existing resources.
-                  properties:
-                    apiVersion:
-                      description: APIVersion specifies resource apiVersion.
-                      type: string
-                    context:
-                      description: Context defines variables and data sources that can be used during rule execution.
-                      items:
-                        # [schema elided: context entry schema identical to the context block above]
-                        type: object
-                      type: array
-                    kind:
-                      description: Kind specifies resource kind.
-                      type: string
-                    name:
-                      description: Name specifies the resource name.
-                      type: string
-                    namespace:
-                      description: Namespace specifies resource namespace.
-                      type: string
-                    preconditions:
-                      description: |-
-                        Preconditions are used to determine if a policy rule should be applied by evaluating a
-                        set of conditions. The declaration can contain nested `any` or `all` statements. A direct list
-                        of conditions (without `any` or `all` statements) is supported for backwards compatibility but
-                        will be deprecated in the next major release.
-                        See: https://kyverno.io/docs/writing-policies/preconditions/
-                      x-kubernetes-preserve-unknown-fields: true
-                    uid:
-                      description: UID specifies the resource uid.
-                      type: string
-                  type: object
-                type: array
-            type: object
-          name:
-            description: Name is a label to identify the rule. It must be unique within the policy.
-            maxLength: 63
-            type: string
-          preconditions:
-            description: |-
-              Preconditions are used to determine if a policy rule should be applied by evaluating a
-              set of conditions. The declaration can contain nested `any` or `all` statements. A direct list
-              of conditions (without `any` or `all` statements) is supported for backwards compatibility but
-              will be deprecated in the next major release.
-              See: https://kyverno.io/docs/writing-policies/preconditions/
-            x-kubernetes-preserve-unknown-fields: true
-          skipBackgroundRequests:
-            default: true
-            description: |-
-              SkipBackgroundRequests bypasses admission requests that are sent by the background controller.
-              The default value is set to "true"; it must be set to "false" to apply
-              generate and mutateExisting rules to those requests.
-            type: boolean
-          validate:
-            description: Validation is used to validate matching resources.
-            properties:
-              anyPattern:
-                description: |-
-                  AnyPattern specifies a list of validation patterns. At least one of the patterns
-                  must be satisfied for the validation rule to succeed.
-                x-kubernetes-preserve-unknown-fields: true
-              cel:
-                description: CEL allows validation checks using the Common Expression Language (https://kubernetes.io/docs/reference/using-api/cel/).
-                properties:
-                  auditAnnotations:
-                    description: AuditAnnotations contains CEL expressions which are used to produce audit annotations for the audit event of the API request.
-                    items:
-                      description: AuditAnnotation describes how to produce an audit annotation for an API request.
-                      properties:
-                        key:
-                          description: |-
-                            key specifies the audit annotation key. The audit annotation keys of
-                            a ValidatingAdmissionPolicy must be unique. The key must be a qualified
-                            name ([A-Za-z0-9][-A-Za-z0-9_.]*) no more than 63 bytes in length.
-
-                            The key is combined with the resource name of the
-                            ValidatingAdmissionPolicy to construct an audit annotation key:
-                            "{ValidatingAdmissionPolicy name}/{key}".
-
-                            If an admission webhook uses the same resource name as this ValidatingAdmissionPolicy
-                            and the same audit annotation key, the annotation key will be identical.
-                            In this case, the first annotation written with the key will be included
-                            in the audit event and all subsequent annotations with the same key
-                            will be discarded.
-
-                            Required.
-                          type: string
-                        valueExpression:
-                          description: |-
-                            valueExpression represents the expression which is evaluated by CEL to
-                            produce an audit annotation value. The expression must evaluate to either
-                            a string or null value. If the expression evaluates to a string, the
-                            audit annotation is included with the string value. If the expression
-                            evaluates to null or empty string the audit annotation will be omitted.
-                            The valueExpression may be no longer than 5kb in length.
-                            If the result of the valueExpression is more than 10kb in length, it
-                            will be truncated to 10kb.
-
-                            If multiple ValidatingAdmissionPolicyBinding resources match an
-                            API request, then the valueExpression will be evaluated for
-                            each binding. All unique values produced by the valueExpressions
-                            will be joined together in a comma-separated list.
-
-                            Required.
-                          type: string
-                      required:
-                      - key
-                      - valueExpression
-                      type: object
-                    type: array
-                  expressions:
-                    description: Expressions is a list of CELExpression types.
-                    items:
-                      description: Validation specifies the CEL expression which is used to apply the validation.
-                      properties:
-                        expression:
-                          description: |-
-                            Expression represents the expression which will be evaluated by CEL.
-                            ref: https://github.com/google/cel-spec
-                            CEL expressions have access to the contents of the API request/response,
-                            organized into CEL variables as well as some other useful variables:
-
-                            - 'object' - The object from the incoming request. The value is null for DELETE requests.
-                            - 'oldObject' - The existing object. The value is null for CREATE requests.
-                            - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)).
-                            - 'params' - Parameter resource referred to by the policy binding being evaluated.
-                              Only populated if the policy has a ParamKind.
-                            - 'namespaceObject' - The namespace object that the incoming object belongs to.
-                              The value is null for cluster-scoped resources.
-                            - 'variables' - Map of composited variables, from its name to its lazily evaluated value.
-                              For example, a variable named 'foo' can be accessed as 'variables.foo'.
-                            - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the
-                              principal (user or service account) of the request.
-                              See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz
-                            - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer'
-                              and configured with the request resource.
-
-                            The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always
-                            accessible from the root of the object. No other metadata properties are accessible.
-
-                            Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible.
-                            Accessible property names are escaped according to the following rules when accessed in the expression:
-                            - '__' escapes to '__underscores__'
-                            - '.' escapes to '__dot__'
-                            - '-' escapes to '__dash__'
-                            - '/' escapes to '__slash__'
-                            - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. The keywords are:
-                              "true", "false", "null", "in", "as", "break", "const", "continue", "else", "for", "function", "if",
-                              "import", "let", "loop", "package", "namespace", "return".
-                            Examples:
-                            - Expression accessing a property named "namespace": {"Expression": "object.__namespace__ > 0"}
-                            - Expression accessing a property named "x-prop": {"Expression": "object.x__dash__prop > 0"}
-                            - Expression accessing a property named "redact__d": {"Expression": "object.redact__underscores__d > 0"}
-
-                            Equality on arrays with list type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1].
-                            Concatenation on arrays with x-kubernetes-list-type use the semantics of the list type:
-                            - 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and
-                              non-intersecting elements in `Y` are appended, retaining their partial order.
-                            - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values
-                              are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. Elements in `Y` with
-                              non-intersecting keys are appended, retaining their partial order.
-                            Required.
-                          type: string
-                        message:
-                          description: |-
-                            Message represents the message displayed when validation fails. The message is required if the Expression contains
-                            line breaks. The message must not contain line breaks.
-                            If unset, the message is "failed rule: {Rule}".
-                            e.g. "must be a URL with the host matching spec.host"
-                            If the Expression contains line breaks, Message is required.
-                            The message must not contain line breaks.
-                            If unset, the message is "failed Expression: {Expression}".
-                          type: string
-                        messageExpression:
-                          description: |-
-                            messageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails.
-                            Since messageExpression is used as a failure message, it must evaluate to a string.
-                            If both message and messageExpression are present on a validation, then messageExpression will be used if validation fails.
-                            If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced
-                            as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string
-                            that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and
-                            the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged.
-                            messageExpression has access to all the same variables as the `expression` except for 'authorizer' and 'authorizer.requestResource'.
-                            Example:
-                            "object.x must be less than max ("+string(params.max)+")"
-                          type: string
-                        reason:
-                          description: |-
-                            Reason represents a machine-readable description of why this validation failed.
-                            If this is the first validation in the list to fail, this reason, as well as the
-                            corresponding HTTP response code, are used in the
-                            HTTP response to the client.
-                            The currently supported reasons are: "Unauthorized", "Forbidden", "Invalid", "RequestEntityTooLarge".
-                            If not set, StatusReasonInvalid is used in the response to the client.
-                          type: string
-                      required:
-                      - expression
-                      type: object
-                    type: array
-                  paramKind:
-                    description: ParamKind is a tuple of Group Kind and Version.
-                    properties:
-                      apiVersion:
-                        description: |-
-                          APIVersion is the API group version the resources belong to.
-                          In format of "group/version".
-                          Required.
-                        type: string
-                      kind:
-                        description: |-
-                          Kind is the API kind the resources belong to.
-                          Required.
-                        type: string
-                    type: object
-                    x-kubernetes-map-type: atomic
-                  paramRef:
-                    description: ParamRef references a parameter resource.
-                    properties:
-                      name:
-                        description: |-
-                          `name` is the name of the resource being referenced.
-
-                          `name` and `selector` are mutually exclusive properties. If one is set,
-                          the other must be unset.
-                        type: string
-                      namespace:
-                        description: |-
-                          namespace is the namespace of the referenced resource. Allows limiting
-                          the search for params to a specific namespace. Applies to both `name` and
-                          `selector` fields.
-
-                          A per-namespace parameter may be used by specifying a namespace-scoped
-                          `paramKind` in the policy and leaving this field empty.
-
-                          - If `paramKind` is cluster-scoped, this field MUST be unset. Setting this
-                            field results in a configuration error.
-
-                          - If `paramKind` is namespace-scoped, the namespace of the object being
-                            evaluated for admission will be used when this field is left unset. Take
-                            care that if this is left empty the binding must not match any cluster-scoped
-                            resources, which will result in an error.
-                        type: string
-                      parameterNotFoundAction:
-                        description: |-
-                          `parameterNotFoundAction` controls the behavior of the binding when the resource
-                          exists, and name or selector is valid, but there are no parameters
-                          matched by the binding. If the value is set to `Allow`, then no
-                          matched parameters will be treated as successful validation by the binding.
-                          If set to `Deny`, then no matched parameters will be subject to the
-                          `failurePolicy` of the policy.
-
-                          Allowed values are `Allow` or `Deny`.
-                          Defaults to `Deny`.
-                        type: string
-                      selector:
-                        description: |-
-                          selector can be used to match multiple param objects based on their labels.
-                          Supply selector: {} to match all resources of the ParamKind.
-
-                          If multiple params are found, they are all evaluated with the policy expressions
-                          and the results are ANDed together.
-
-                          One of `name` or `selector` must be set, but `name` and `selector` are
-                          mutually exclusive properties. If one is set, the other must be unset.
-                        # [schema elided: matchExpressions/matchLabels properties identical to the label selector block above]
-                        type: object
-                        x-kubernetes-map-type: atomic
-                    type: object
-                    x-kubernetes-map-type: atomic
-                  variables:
-                    description: |-
-                      Variables contain definitions of variables that can be used in composition of other expressions.
-                      Each variable is defined as a named CEL expression.
-                      The variables defined here will be available under `variables` in other expressions of the policy.
-                    items:
-                      description: Variable is the definition of a variable that is used for composition.
- properties: - expression: - description: |- - Expression is the expression that will be evaluated as the value of the variable. - The CEL expression has access to the same identifiers as the CEL expressions in Validation. - type: string - name: - description: |- - Name is the name of the variable. The name must be a valid CEL identifier and unique among all variables. - The variable can be accessed in other expressions through `variables` - For example, if name is "foo", the variable will be available as `variables.foo` - type: string - required: - - expression - - name - type: object - type: array - type: object - deny: - description: Deny defines conditions used to pass or fail - a validation rule. - properties: - conditions: - description: |- - Multiple conditions can be declared under an `any` or `all` statement. A direct list - of conditions (without `any` or `all` statements) is also supported for backwards compatibility - but will be deprecated in the next major release. - See: https://kyverno.io/docs/writing-policies/validate/#deny-rules - x-kubernetes-preserve-unknown-fields: true - type: object - foreach: - description: ForEach applies validate rules to a list of - sub-elements by creating a context for each entry in the - list and looping over it to apply the specified logic. - items: - description: ForEachValidation applies validate rules - to a list of sub-elements by creating a context for - each entry in the list and looping over it to apply - the specified logic. - properties: - anyPattern: - description: |- - AnyPattern specifies list of validation patterns. At least one of the patterns - must be satisfied for the validation rule to succeed. - x-kubernetes-preserve-unknown-fields: true - context: - description: Context defines variables and data sources - that can be used during rule execution. - items: - description: |- - ContextEntry adds variables and data sources to a rule Context. Either a - ConfigMap reference or a APILookup must be provided. - properties: - apiCall: - description: |- - APICall is an HTTP request to the Kubernetes API server, or other JSON web service. - The data returned is stored in the context with the name for the context entry. - properties: - data: - description: Data specifies the POST data - sent to the server. - items: - description: RequestData contains the - HTTP POST data - properties: - key: - description: Key is a unique identifier - for the data value - type: string - value: - description: Value is the data value - x-kubernetes-preserve-unknown-fields: true - required: - - key - - value - type: object - type: array - jmesPath: - description: |- - JMESPath is an optional JSON Match Expression that can be used to - transform the JSON response returned from the server. For example - a JMESPath of "items | length(@)" applied to the API server response - for the URLPath "/apis/apps/v1/deployments" will return the total count - of deployments across all namespaces. - type: string - method: - default: GET - description: Method is the HTTP request - type (GET or POST). - enum: - - GET - - POST - type: string - service: - description: Service is an API call to a - JSON web service - properties: - caBundle: - description: |- - CABundle is a PEM encoded CA bundle which will be used to validate - the server certificate. - type: string - url: - description: |- - URL is the JSON web service URL. A typical form is - `https://{service}.{namespace}:{port}/{path}`. 
- type: string - required: - - url - type: object - urlPath: - description: |- - URLPath is the URL path to be used in the HTTP GET or POST request to the - Kubernetes API server (e.g. "/api/v1/namespaces" or "/apis/apps/v1/deployments"). - The format required is the same format used by the `kubectl get --raw` command. - See https://kyverno.io/docs/writing-policies/external-data-sources/#variables-from-kubernetes-api-server-calls - for details. - type: string - type: object - configMap: - description: ConfigMap is the ConfigMap reference. - properties: - name: - description: Name is the ConfigMap name. - type: string - namespace: - description: Namespace is the ConfigMap - namespace. - type: string - required: - - name - type: object - imageRegistry: - description: |- - ImageRegistry defines requests to an OCI/Docker V2 registry to fetch image - details. - properties: - imageRegistryCredentials: - description: ImageRegistryCredentials provides - credentials that will be used for authentication - with registry - properties: - allowInsecureRegistry: - description: AllowInsecureRegistry allows - insecure access to a registry. - type: boolean - providers: - description: |- - Providers specifies a list of OCI Registry names, whose authentication providers are provided. - It can be of one of these values: default,google,azure,amazon,github. - items: - description: ImageRegistryCredentialsProvidersType - provides the list of credential - providers required. - enum: - - default - - amazon - - azure - - google - - github - type: string - type: array - secrets: - description: |- - Secrets specifies a list of secrets that are provided for credentials. - Secrets must live in the Kyverno namespace. - items: - type: string - type: array - type: object - jmesPath: - description: |- - JMESPath is an optional JSON Match Expression that can be used to - transform the ImageData struct returned as a result of processing - the image reference. - type: string - reference: - description: |- - Reference is image reference to a container image in the registry. - Example: ghcr.io/kyverno/kyverno:latest - type: string - required: - - reference - type: object - name: - description: Name is the variable name. - type: string - variable: - description: Variable defines an arbitrary JMESPath - context variable that can be defined inline. - properties: - default: - description: |- - Default is an optional arbitrary JSON object that the variable may take if the JMESPath - expression evaluates to nil - x-kubernetes-preserve-unknown-fields: true - jmesPath: - description: |- - JMESPath is an optional JMESPath Expression that can be used to - transform the variable. - type: string - value: - description: Value is any arbitrary JSON - object representable in YAML or JSON form. - x-kubernetes-preserve-unknown-fields: true - type: object - type: object - type: array - deny: - description: Deny defines conditions used to pass - or fail a validation rule. - properties: - conditions: - description: |- - Multiple conditions can be declared under an `any` or `all` statement. A direct list - of conditions (without `any` or `all` statements) is also supported for backwards compatibility - but will be deprecated in the next major release. - See: https://kyverno.io/docs/writing-policies/validate/#deny-rules - x-kubernetes-preserve-unknown-fields: true - type: object - elementScope: - description: |- - ElementScope specifies whether to use the current list element as the scope for validation. Defaults to "true" if not specified. 
- When set to "false", "request.object" is used as the validation scope within the foreach - block to allow referencing other elements in the subtree. - type: boolean - foreach: - description: Foreach declares a nested foreach iterator - x-kubernetes-preserve-unknown-fields: true - list: - description: |- - List specifies a JMESPath expression that results in one or more elements - to which the validation logic is applied. - type: string - pattern: - description: Pattern specifies an overlay-style pattern - used to check resources. - x-kubernetes-preserve-unknown-fields: true - preconditions: - description: |- - AnyAllConditions are used to determine if a policy rule should be applied by evaluating a - set of conditions. The declaration can contain nested `any` or `all` statements. - See: https://kyverno.io/docs/writing-policies/preconditions/ - properties: - all: - description: |- - AllConditions enable variable-based conditional rule execution. This is useful for - finer control of when an rule is applied. A condition can reference object data - using JMESPath notation. - Here, all of the conditions need to pass - items: - description: Condition defines variable-based - conditional criteria for rule execution. - properties: - key: - description: Key is the context entry (using - JMESPath) for conditional rule evaluation. - x-kubernetes-preserve-unknown-fields: true - message: - description: Message is an optional display - message - type: string - operator: - description: |- - Operator is the conditional operation to perform. Valid operators are: - Equals, NotEquals, In, AnyIn, AllIn, NotIn, AnyNotIn, AllNotIn, GreaterThanOrEquals, - GreaterThan, LessThanOrEquals, LessThan, DurationGreaterThanOrEquals, DurationGreaterThan, - DurationLessThanOrEquals, DurationLessThan - enum: - - Equals - - NotEquals - - In - - AnyIn - - AllIn - - NotIn - - AnyNotIn - - AllNotIn - - GreaterThanOrEquals - - GreaterThan - - LessThanOrEquals - - LessThan - - DurationGreaterThanOrEquals - - DurationGreaterThan - - DurationLessThanOrEquals - - DurationLessThan - type: string - value: - description: |- - Value is the conditional value, or set of values. The values can be fixed set - or can be variables declared using JMESPath. - x-kubernetes-preserve-unknown-fields: true - type: object - type: array - any: - description: |- - AnyConditions enable variable-based conditional rule execution. This is useful for - finer control of when an rule is applied. A condition can reference object data - using JMESPath notation. - Here, at least one of the conditions need to pass - items: - description: Condition defines variable-based - conditional criteria for rule execution. - properties: - key: - description: Key is the context entry (using - JMESPath) for conditional rule evaluation. - x-kubernetes-preserve-unknown-fields: true - message: - description: Message is an optional display - message - type: string - operator: - description: |- - Operator is the conditional operation to perform. 
Valid operators are: - Equals, NotEquals, In, AnyIn, AllIn, NotIn, AnyNotIn, AllNotIn, GreaterThanOrEquals, - GreaterThan, LessThanOrEquals, LessThan, DurationGreaterThanOrEquals, DurationGreaterThan, - DurationLessThanOrEquals, DurationLessThan - enum: - - Equals - - NotEquals - - In - - AnyIn - - AllIn - - NotIn - - AnyNotIn - - AllNotIn - - GreaterThanOrEquals - - GreaterThan - - LessThanOrEquals - - LessThan - - DurationGreaterThanOrEquals - - DurationGreaterThan - - DurationLessThanOrEquals - - DurationLessThan - type: string - value: - description: |- - Value is the conditional value, or set of values. The values can be fixed set - or can be variables declared using JMESPath. - x-kubernetes-preserve-unknown-fields: true - type: object - type: array - type: object - x-kubernetes-preserve-unknown-fields: true - type: object - type: array - manifests: - description: Manifest specifies conditions for manifest - verification - properties: - annotationDomain: - description: AnnotationDomain is custom domain of annotation - for message and signature. Default is "cosign.sigstore.dev". - type: string - attestors: - description: Attestors specified the required attestors - (i.e. authorities) - items: - properties: - count: - description: |- - Count specifies the required number of entries that must match. If the count is null, all entries must match - (a logical AND). If the count is 1, at least one entry must match (a logical OR). If the count contains a - value N, then N must be less than or equal to the size of entries, and at least N entries must match. - minimum: 1 - type: integer - entries: - description: |- - Entries contains the available attestors. An attestor can be a static key, - attributes for keyless verification, or a nested attestor declaration. - items: - properties: - annotations: - additionalProperties: - type: string - description: |- - Annotations are used for image verification. - Every specified key-value pair must exist and match in the verified payload. - The payload may contain other key-value pairs. - type: object - attestor: - description: Attestor is a nested set of - Attestor used to specify a more complex - set of match authorities. - x-kubernetes-preserve-unknown-fields: true - certificates: - description: Certificates specifies one - or more certificates. - properties: - cert: - description: Cert is an optional PEM-encoded - public certificate. - type: string - certChain: - description: CertChain is an optional - PEM encoded set of certificates used - to verify. - type: string - ctlog: - description: |- - CTLog (certificate timestamp log) provides a configuration for validation of Signed Certificate - Timestamps (SCTs). If the value is unset, the default behavior by Cosign is used. - properties: - ignoreSCT: - description: |- - IgnoreSCT defines whether to use the Signed Certificate Timestamp (SCT) log to check for a certificate - timestamp. Default is false. Set to true if this was opted out during signing. - type: boolean - pubkey: - description: PubKey, if set, is - used to validate SCTs against - a custom source. - type: string - type: object - rekor: - description: |- - Rekor provides configuration for the Rekor transparency log service. If an empty object - is provided the public instance of Rekor (https://rekor.sigstore.dev) is used. - properties: - ignoreTlog: - description: IgnoreTlog skips transparency - log verification. - type: boolean - pubkey: - description: |- - RekorPubKey is an optional PEM-encoded public key to use for a custom Rekor. 
- If set, this will be used to validate transparency log signatures from a custom Rekor. - type: string - url: - description: URL is the address - of the transparency log. Defaults - to the public Rekor log instance - https://rekor.sigstore.dev. - type: string - required: - - url - type: object - type: object - keyless: - description: |- - Keyless is a set of attribute used to verify a Sigstore keyless attestor. - See https://github.com/sigstore/cosign/blob/main/KEYLESS.md. - properties: - additionalExtensions: - additionalProperties: - type: string - description: AdditionalExtensions are - certificate-extensions used for keyless - signing. - type: object - ctlog: - description: |- - CTLog (certificate timestamp log) provides a configuration for validation of Signed Certificate - Timestamps (SCTs). If the value is unset, the default behavior by Cosign is used. - properties: - ignoreSCT: - description: |- - IgnoreSCT defines whether to use the Signed Certificate Timestamp (SCT) log to check for a certificate - timestamp. Default is false. Set to true if this was opted out during signing. - type: boolean - pubkey: - description: PubKey, if set, is - used to validate SCTs against - a custom source. - type: string - type: object - issuer: - description: Issuer is the certificate - issuer used for keyless signing. - type: string - rekor: - description: |- - Rekor provides configuration for the Rekor transparency log service. If an empty object - is provided the public instance of Rekor (https://rekor.sigstore.dev) is used. - properties: - ignoreTlog: - description: IgnoreTlog skips transparency - log verification. - type: boolean - pubkey: - description: |- - RekorPubKey is an optional PEM-encoded public key to use for a custom Rekor. - If set, this will be used to validate transparency log signatures from a custom Rekor. - type: string - url: - description: URL is the address - of the transparency log. Defaults - to the public Rekor log instance - https://rekor.sigstore.dev. - type: string - required: - - url - type: object - roots: - description: |- - Roots is an optional set of PEM encoded trusted root certificates. - If not provided, the system roots are used. - type: string - subject: - description: Subject is the verified - identity used for keyless signing, - for example the email address. - type: string - type: object - keys: - description: Keys specifies one or more - public keys. - properties: - ctlog: - description: |- - CTLog (certificate timestamp log) provides a configuration for validation of Signed Certificate - Timestamps (SCTs). If the value is unset, the default behavior by Cosign is used. - properties: - ignoreSCT: - description: |- - IgnoreSCT defines whether to use the Signed Certificate Timestamp (SCT) log to check for a certificate - timestamp. Default is false. Set to true if this was opted out during signing. - type: boolean - pubkey: - description: PubKey, if set, is - used to validate SCTs against - a custom source. - type: string - type: object - kms: - description: |- - KMS provides the URI to the public key stored in a Key Management System. See: - https://github.com/sigstore/cosign/blob/main/KMS.md - type: string - publicKeys: - description: |- - Keys is a set of X.509 public keys used to verify image signatures. 
The keys can be directly - specified or can be a variable reference to a key specified in a ConfigMap (see - https://kyverno.io/docs/writing-policies/variables/), or reference a standard Kubernetes Secret - elsewhere in the cluster by specifying it in the format "k8s:///". - The named Secret must specify a key `cosign.pub` containing the public key used for - verification, (see https://github.com/sigstore/cosign/blob/main/KMS.md#kubernetes-secret). - When multiple keys are specified each key is processed as a separate staticKey entry - (.attestors[*].entries.keys) within the set of attestors and the count is applied across the keys. - type: string - rekor: - description: |- - Rekor provides configuration for the Rekor transparency log service. If an empty object - is provided the public instance of Rekor (https://rekor.sigstore.dev) is used. - properties: - ignoreTlog: - description: IgnoreTlog skips transparency - log verification. - type: boolean - pubkey: - description: |- - RekorPubKey is an optional PEM-encoded public key to use for a custom Rekor. - If set, this will be used to validate transparency log signatures from a custom Rekor. - type: string - url: - description: URL is the address - of the transparency log. Defaults - to the public Rekor log instance - https://rekor.sigstore.dev. - type: string - required: - - url - type: object - secret: - description: Reference to a Secret resource - that contains a public key - properties: - name: - description: Name of the secret. - The provided secret must contain - a key named cosign.pub. - type: string - namespace: - description: Namespace name where - the Secret exists. - type: string - required: - - name - - namespace - type: object - signatureAlgorithm: - default: sha256 - description: Specify signature algorithm - for public keys. Supported values - are sha224, sha256, sha384 and sha512. - type: string - type: object - repository: - description: |- - Repository is an optional alternate OCI repository to use for signatures and attestations that match this rule. - If specified Repository will override other OCI image repository locations for this Attestor. - type: string - type: object - type: array - type: object - type: array - dryRun: - description: DryRun configuration - properties: - enable: - type: boolean - namespace: - type: string - type: object - ignoreFields: - description: Fields which will be ignored while comparing - manifests. - items: - properties: - fields: - items: - type: string - type: array - objects: - items: - properties: - group: - type: string - kind: - type: string - name: - type: string - namespace: - type: string - version: - type: string - type: object - type: array - type: object - type: array - repository: - description: |- - Repository is an optional alternate OCI repository to use for resource bundle reference. - The repository can be overridden per Attestor or Attestation. - type: string - type: object - message: - description: Message specifies a custom message to be displayed - on failure. - type: string - pattern: - description: Pattern specifies an overlay-style pattern - used to check resources. - x-kubernetes-preserve-unknown-fields: true - podSecurity: - description: |- - PodSecurity applies exemptions for Kubernetes Pod Security admission - by specifying exclusions for Pod Security Standards controls. - properties: - exclude: - description: Exclude specifies the Pod Security Standard - controls to be excluded. 
- items: - description: PodSecurityStandard specifies the Pod - Security Standard controls to be excluded. - properties: - controlName: - description: |- - ControlName specifies the name of the Pod Security Standard control. - See: https://kubernetes.io/docs/concepts/security/pod-security-standards/ - enum: - - HostProcess - - Host Namespaces - - Privileged Containers - - Capabilities - - HostPath Volumes - - Host Ports - - AppArmor - - SELinux - - /proc Mount Type - - Seccomp - - Sysctls - - Volume Types - - Privilege Escalation - - Running as Non-root - - Running as Non-root user - type: string - images: - description: |- - Images selects matching containers and applies the container level PSS. - Each image is the image name consisting of the registry address, repository, image, and tag. - Empty list matches no containers, PSS checks are applied at the pod level only. - Wildcards ('*' and '?') are allowed. See: https://kubernetes.io/docs/concepts/containers/images. - items: - type: string - type: array - required: - - controlName - type: object - type: array - level: - description: |- - Level defines the Pod Security Standard level to be applied to workloads. - Allowed values are privileged, baseline, and restricted. - enum: - - privileged - - baseline - - restricted - type: string - version: - description: |- - Version defines the Pod Security Standard versions that Kubernetes supports. - Allowed values are v1.19, v1.20, v1.21, v1.22, v1.23, v1.24, v1.25, v1.26, latest. Defaults to latest. - enum: - - v1.19 - - v1.20 - - v1.21 - - v1.22 - - v1.23 - - v1.24 - - v1.25 - - v1.26 - - latest - type: string - type: object - type: object - verifyImages: - description: VerifyImages is used to verify image signatures - and mutate them to add a digest - items: - description: |- - ImageVerification validates that images that match the specified pattern - are signed with the supplied public key. Once the image is verified it is - mutated to include the SHA digest retrieved during the registration. - properties: - additionalExtensions: - additionalProperties: - type: string - description: Deprecated. - type: object - annotations: - additionalProperties: - type: string - description: Deprecated. Use annotations per Attestor - instead. - type: object - attestations: - description: |- - Attestations are optional checks for signed in-toto Statements used to verify the image. - See https://github.com/in-toto/attestation. Kyverno fetches signed attestations from the - OCI registry and decodes them into a list of Statement declarations. - items: - description: |- - Attestation are checks for signed in-toto Statements that are used to verify the image. - See https://github.com/in-toto/attestation. Kyverno fetches signed attestations from the - OCI registry and decodes them into a list of Statements. - properties: - attestors: - description: Attestors specify the required attestors - (i.e. authorities). - items: - properties: - count: - description: |- - Count specifies the required number of entries that must match. If the count is null, all entries must match - (a logical AND). If the count is 1, at least one entry must match (a logical OR). If the count contains a - value N, then N must be less than or equal to the size of entries, and at least N entries must match. - minimum: 1 - type: integer - entries: - description: |- - Entries contains the available attestors. An attestor can be a static key, - attributes for keyless verification, or a nested attestor declaration. 
- items: - properties: - annotations: - additionalProperties: - type: string - description: |- - Annotations are used for image verification. - Every specified key-value pair must exist and match in the verified payload. - The payload may contain other key-value pairs. - type: object - attestor: - description: Attestor is a nested set - of Attestor used to specify a more - complex set of match authorities. - x-kubernetes-preserve-unknown-fields: true - certificates: - description: Certificates specifies - one or more certificates. - properties: - cert: - description: Cert is an optional - PEM-encoded public certificate. - type: string - certChain: - description: CertChain is an optional - PEM encoded set of certificates - used to verify. - type: string - ctlog: - description: |- - CTLog (certificate timestamp log) provides a configuration for validation of Signed Certificate - Timestamps (SCTs). If the value is unset, the default behavior by Cosign is used. - properties: - ignoreSCT: - description: |- - IgnoreSCT defines whether to use the Signed Certificate Timestamp (SCT) log to check for a certificate - timestamp. Default is false. Set to true if this was opted out during signing. - type: boolean - pubkey: - description: PubKey, if set, - is used to validate SCTs against - a custom source. - type: string - type: object - rekor: - description: |- - Rekor provides configuration for the Rekor transparency log service. If an empty object - is provided the public instance of Rekor (https://rekor.sigstore.dev) is used. - properties: - ignoreTlog: - description: IgnoreTlog skips - transparency log verification. - type: boolean - pubkey: - description: |- - RekorPubKey is an optional PEM-encoded public key to use for a custom Rekor. - If set, this will be used to validate transparency log signatures from a custom Rekor. - type: string - url: - description: URL is the address - of the transparency log. Defaults - to the public Rekor log instance - https://rekor.sigstore.dev. - type: string - required: - - url - type: object - type: object - keyless: - description: |- - Keyless is a set of attribute used to verify a Sigstore keyless attestor. - See https://github.com/sigstore/cosign/blob/main/KEYLESS.md. - properties: - additionalExtensions: - additionalProperties: - type: string - description: AdditionalExtensions - are certificate-extensions used - for keyless signing. - type: object - ctlog: - description: |- - CTLog (certificate timestamp log) provides a configuration for validation of Signed Certificate - Timestamps (SCTs). If the value is unset, the default behavior by Cosign is used. - properties: - ignoreSCT: - description: |- - IgnoreSCT defines whether to use the Signed Certificate Timestamp (SCT) log to check for a certificate - timestamp. Default is false. Set to true if this was opted out during signing. - type: boolean - pubkey: - description: PubKey, if set, - is used to validate SCTs against - a custom source. - type: string - type: object - issuer: - description: Issuer is the certificate - issuer used for keyless signing. - type: string - rekor: - description: |- - Rekor provides configuration for the Rekor transparency log service. If an empty object - is provided the public instance of Rekor (https://rekor.sigstore.dev) is used. - properties: - ignoreTlog: - description: IgnoreTlog skips - transparency log verification. - type: boolean - pubkey: - description: |- - RekorPubKey is an optional PEM-encoded public key to use for a custom Rekor. 
- If set, this will be used to validate transparency log signatures from a custom Rekor. - type: string - url: - description: URL is the address - of the transparency log. Defaults - to the public Rekor log instance - https://rekor.sigstore.dev. - type: string - required: - - url - type: object - roots: - description: |- - Roots is an optional set of PEM encoded trusted root certificates. - If not provided, the system roots are used. - type: string - subject: - description: Subject is the verified - identity used for keyless signing, - for example the email address. - type: string - type: object - keys: - description: Keys specifies one or more - public keys. - properties: - ctlog: - description: |- - CTLog (certificate timestamp log) provides a configuration for validation of Signed Certificate - Timestamps (SCTs). If the value is unset, the default behavior by Cosign is used. - properties: - ignoreSCT: - description: |- - IgnoreSCT defines whether to use the Signed Certificate Timestamp (SCT) log to check for a certificate - timestamp. Default is false. Set to true if this was opted out during signing. - type: boolean - pubkey: - description: PubKey, if set, - is used to validate SCTs against - a custom source. - type: string - type: object - kms: - description: |- - KMS provides the URI to the public key stored in a Key Management System. See: - https://github.com/sigstore/cosign/blob/main/KMS.md - type: string - publicKeys: - description: |- - Keys is a set of X.509 public keys used to verify image signatures. The keys can be directly - specified or can be a variable reference to a key specified in a ConfigMap (see - https://kyverno.io/docs/writing-policies/variables/), or reference a standard Kubernetes Secret - elsewhere in the cluster by specifying it in the format "k8s:///". - The named Secret must specify a key `cosign.pub` containing the public key used for - verification, (see https://github.com/sigstore/cosign/blob/main/KMS.md#kubernetes-secret). - When multiple keys are specified each key is processed as a separate staticKey entry - (.attestors[*].entries.keys) within the set of attestors and the count is applied across the keys. - type: string - rekor: - description: |- - Rekor provides configuration for the Rekor transparency log service. If an empty object - is provided the public instance of Rekor (https://rekor.sigstore.dev) is used. - properties: - ignoreTlog: - description: IgnoreTlog skips - transparency log verification. - type: boolean - pubkey: - description: |- - RekorPubKey is an optional PEM-encoded public key to use for a custom Rekor. - If set, this will be used to validate transparency log signatures from a custom Rekor. - type: string - url: - description: URL is the address - of the transparency log. Defaults - to the public Rekor log instance - https://rekor.sigstore.dev. - type: string - required: - - url - type: object - secret: - description: Reference to a Secret - resource that contains a public - key - properties: - name: - description: Name of the secret. - The provided secret must contain - a key named cosign.pub. - type: string - namespace: - description: Namespace name - where the Secret exists. - type: string - required: - - name - - namespace - type: object - signatureAlgorithm: - default: sha256 - description: Specify signature algorithm - for public keys. Supported values - are sha224, sha256, sha384 and - sha512. 
- type: string - type: object - repository: - description: |- - Repository is an optional alternate OCI repository to use for signatures and attestations that match this rule. - If specified Repository will override other OCI image repository locations for this Attestor. - type: string - type: object - type: array - type: object - type: array - conditions: - description: |- - Conditions are used to verify attributes within a Predicate. If no Conditions are specified - the attestation check is satisfied as long as there are predicates that match the predicate type. - items: - description: |- - AnyAllConditions consists of conditions wrapped denoting logical criteria to be fulfilled. - AnyConditions get fulfilled when at least one of its sub-conditions passes. - AllConditions get fulfilled only when all of its sub-conditions pass. - properties: - all: - description: |- - AllConditions enable variable-based conditional rule execution. This is useful for - finer control of when a rule is applied. A condition can reference object data - using JMESPath notation. - Here, all of the conditions need to pass - items: - description: Condition defines variable-based - conditional criteria for rule execution. - properties: - key: - description: Key is the context entry - (using JMESPath) for conditional rule - evaluation. - x-kubernetes-preserve-unknown-fields: true - message: - description: Message is an optional - display message - type: string - operator: - description: |- - Operator is the conditional operation to perform. Valid operators are: - Equals, NotEquals, In, AnyIn, AllIn, NotIn, AnyNotIn, AllNotIn, GreaterThanOrEquals, - GreaterThan, LessThanOrEquals, LessThan, DurationGreaterThanOrEquals, DurationGreaterThan, - DurationLessThanOrEquals, DurationLessThan - enum: - - Equals - - NotEquals - - In - - AnyIn - - AllIn - - NotIn - - AnyNotIn - - AllNotIn - - GreaterThanOrEquals - - GreaterThan - - LessThanOrEquals - - LessThan - - DurationGreaterThanOrEquals - - DurationGreaterThan - - DurationLessThanOrEquals - - DurationLessThan - type: string - value: - description: |- - Value is the conditional value, or set of values. The values can be a fixed set - or can be variables declared using JMESPath. - x-kubernetes-preserve-unknown-fields: true - type: object - type: array - any: - description: |- - AnyConditions enable variable-based conditional rule execution. This is useful for - finer control of when a rule is applied. A condition can reference object data - using JMESPath notation. - Here, at least one of the conditions needs to pass - items: - description: Condition defines variable-based - conditional criteria for rule execution. - properties: - key: - description: Key is the context entry - (using JMESPath) for conditional rule - evaluation. - x-kubernetes-preserve-unknown-fields: true - message: - description: Message is an optional - display message - type: string - operator: - description: |- - Operator is the conditional operation to perform.
Valid operators are: - Equals, NotEquals, In, AnyIn, AllIn, NotIn, AnyNotIn, AllNotIn, GreaterThanOrEquals, - GreaterThan, LessThanOrEquals, LessThan, DurationGreaterThanOrEquals, DurationGreaterThan, - DurationLessThanOrEquals, DurationLessThan - enum: - - Equals - - NotEquals - - In - - AnyIn - - AllIn - - NotIn - - AnyNotIn - - AllNotIn - - GreaterThanOrEquals - - GreaterThan - - LessThanOrEquals - - LessThan - - DurationGreaterThanOrEquals - - DurationGreaterThan - - DurationLessThanOrEquals - - DurationLessThan - type: string - value: - description: |- - Value is the conditional value, or set of values. The values can be fixed set - or can be variables declared using JMESPath. - x-kubernetes-preserve-unknown-fields: true - type: object - type: array - type: object - type: array - predicateType: - description: Deprecated in favour of 'Type', to - be removed soon - type: string - type: - description: Type defines the type of attestation - contained within the Statement. - type: string - type: object - type: array - attestors: - description: Attestors specified the required attestors - (i.e. authorities) - items: - properties: - count: - description: |- - Count specifies the required number of entries that must match. If the count is null, all entries must match - (a logical AND). If the count is 1, at least one entry must match (a logical OR). If the count contains a - value N, then N must be less than or equal to the size of entries, and at least N entries must match. - minimum: 1 - type: integer - entries: - description: |- - Entries contains the available attestors. An attestor can be a static key, - attributes for keyless verification, or a nested attestor declaration. - items: - properties: - annotations: - additionalProperties: - type: string - description: |- - Annotations are used for image verification. - Every specified key-value pair must exist and match in the verified payload. - The payload may contain other key-value pairs. - type: object - attestor: - description: Attestor is a nested set of Attestor - used to specify a more complex set of match - authorities. - x-kubernetes-preserve-unknown-fields: true - certificates: - description: Certificates specifies one or - more certificates. - properties: - cert: - description: Cert is an optional PEM-encoded - public certificate. - type: string - certChain: - description: CertChain is an optional - PEM encoded set of certificates used - to verify. - type: string - ctlog: - description: |- - CTLog (certificate timestamp log) provides a configuration for validation of Signed Certificate - Timestamps (SCTs). If the value is unset, the default behavior by Cosign is used. - properties: - ignoreSCT: - description: |- - IgnoreSCT defines whether to use the Signed Certificate Timestamp (SCT) log to check for a certificate - timestamp. Default is false. Set to true if this was opted out during signing. - type: boolean - pubkey: - description: PubKey, if set, is used - to validate SCTs against a custom - source. - type: string - type: object - rekor: - description: |- - Rekor provides configuration for the Rekor transparency log service. If an empty object - is provided the public instance of Rekor (https://rekor.sigstore.dev) is used. - properties: - ignoreTlog: - description: IgnoreTlog skips transparency - log verification. - type: boolean - pubkey: - description: |- - RekorPubKey is an optional PEM-encoded public key to use for a custom Rekor. - If set, this will be used to validate transparency log signatures from a custom Rekor. 
- type: string - url: - description: URL is the address of - the transparency log. Defaults to - the public Rekor log instance https://rekor.sigstore.dev. - type: string - required: - - url - type: object - type: object - keyless: - description: |- - Keyless is a set of attribute used to verify a Sigstore keyless attestor. - See https://github.com/sigstore/cosign/blob/main/KEYLESS.md. - properties: - additionalExtensions: - additionalProperties: - type: string - description: AdditionalExtensions are - certificate-extensions used for keyless - signing. - type: object - ctlog: - description: |- - CTLog (certificate timestamp log) provides a configuration for validation of Signed Certificate - Timestamps (SCTs). If the value is unset, the default behavior by Cosign is used. - properties: - ignoreSCT: - description: |- - IgnoreSCT defines whether to use the Signed Certificate Timestamp (SCT) log to check for a certificate - timestamp. Default is false. Set to true if this was opted out during signing. - type: boolean - pubkey: - description: PubKey, if set, is used - to validate SCTs against a custom - source. - type: string - type: object - issuer: - description: Issuer is the certificate - issuer used for keyless signing. - type: string - rekor: - description: |- - Rekor provides configuration for the Rekor transparency log service. If an empty object - is provided the public instance of Rekor (https://rekor.sigstore.dev) is used. - properties: - ignoreTlog: - description: IgnoreTlog skips transparency - log verification. - type: boolean - pubkey: - description: |- - RekorPubKey is an optional PEM-encoded public key to use for a custom Rekor. - If set, this will be used to validate transparency log signatures from a custom Rekor. - type: string - url: - description: URL is the address of - the transparency log. Defaults to - the public Rekor log instance https://rekor.sigstore.dev. - type: string - required: - - url - type: object - roots: - description: |- - Roots is an optional set of PEM encoded trusted root certificates. - If not provided, the system roots are used. - type: string - subject: - description: Subject is the verified identity - used for keyless signing, for example - the email address. - type: string - type: object - keys: - description: Keys specifies one or more public - keys. - properties: - ctlog: - description: |- - CTLog (certificate timestamp log) provides a configuration for validation of Signed Certificate - Timestamps (SCTs). If the value is unset, the default behavior by Cosign is used. - properties: - ignoreSCT: - description: |- - IgnoreSCT defines whether to use the Signed Certificate Timestamp (SCT) log to check for a certificate - timestamp. Default is false. Set to true if this was opted out during signing. - type: boolean - pubkey: - description: PubKey, if set, is used - to validate SCTs against a custom - source. - type: string - type: object - kms: - description: |- - KMS provides the URI to the public key stored in a Key Management System. See: - https://github.com/sigstore/cosign/blob/main/KMS.md - type: string - publicKeys: - description: |- - Keys is a set of X.509 public keys used to verify image signatures. The keys can be directly - specified or can be a variable reference to a key specified in a ConfigMap (see - https://kyverno.io/docs/writing-policies/variables/), or reference a standard Kubernetes Secret - elsewhere in the cluster by specifying it in the format "k8s:///". 
- The named Secret must specify a key `cosign.pub` containing the public key used for - verification, (see https://github.com/sigstore/cosign/blob/main/KMS.md#kubernetes-secret). - When multiple keys are specified each key is processed as a separate staticKey entry - (.attestors[*].entries.keys) within the set of attestors and the count is applied across the keys. - type: string - rekor: - description: |- - Rekor provides configuration for the Rekor transparency log service. If an empty object - is provided the public instance of Rekor (https://rekor.sigstore.dev) is used. - properties: - ignoreTlog: - description: IgnoreTlog skips transparency - log verification. - type: boolean - pubkey: - description: |- - RekorPubKey is an optional PEM-encoded public key to use for a custom Rekor. - If set, this will be used to validate transparency log signatures from a custom Rekor. - type: string - url: - description: URL is the address of - the transparency log. Defaults to - the public Rekor log instance https://rekor.sigstore.dev. - type: string - required: - - url - type: object - secret: - description: Reference to a Secret resource - that contains a public key - properties: - name: - description: Name of the secret. The - provided secret must contain a key - named cosign.pub. - type: string - namespace: - description: Namespace name where - the Secret exists. - type: string - required: - - name - - namespace - type: object - signatureAlgorithm: - default: sha256 - description: Specify signature algorithm - for public keys. Supported values are - sha224, sha256, sha384 and sha512. - type: string - type: object - repository: - description: |- - Repository is an optional alternate OCI repository to use for signatures and attestations that match this rule. - If specified Repository will override other OCI image repository locations for this Attestor. - type: string - type: object - type: array - type: object - type: array - image: - description: Deprecated. Use ImageReferences instead. - type: string - imageReferences: - description: |- - ImageReferences is a list of matching image reference patterns. At least one pattern in the - list must match the image for the rule to apply. Each image reference consists of a registry - address (defaults to docker.io), repository, image, and tag (defaults to latest). - Wildcards ('*' and '?') are allowed. See: https://kubernetes.io/docs/concepts/containers/images. - items: - type: string - type: array - imageRegistryCredentials: - description: ImageRegistryCredentials provides credentials - that will be used for authentication with registry. - properties: - allowInsecureRegistry: - description: AllowInsecureRegistry allows insecure - access to a registry. - type: boolean - providers: - description: |- - Providers specifies a list of OCI Registry names, whose authentication providers are provided. - It can be of one of these values: default,google,azure,amazon,github. - items: - description: ImageRegistryCredentialsProvidersType - provides the list of credential providers required. - enum: - - default - - amazon - - azure - - google - - github - type: string - type: array - secrets: - description: |- - Secrets specifies a list of secrets that are provided for credentials. - Secrets must live in the Kyverno namespace. - items: - type: string - type: array - type: object - issuer: - description: Deprecated. Use KeylessAttestor instead. - type: string - key: - description: Deprecated. Use StaticKeyAttestor instead. 
- type: string - mutateDigest: - default: true - description: |- - MutateDigest enables replacement of image tags with digests. - Defaults to true. - type: boolean - repository: - description: |- - Repository is an optional alternate OCI repository to use for image signatures and attestations that match this rule. - If specified Repository will override the default OCI image repository configured for the installation. - The repository can also be overridden per Attestor or Attestation. - type: string - required: - default: true - description: Required validates that images are verified, - i.e. have passed a signature or attestation - check. - type: boolean - roots: - description: Deprecated. Use KeylessAttestor instead. - type: string - subject: - description: Deprecated. Use KeylessAttestor instead. - type: string - type: - description: |- - Type specifies the method of signature validation. The allowed options - are Cosign and Notary. By default Cosign is used if a type is not specified. - enum: - - Cosign - - Notary - type: string - useCache: - default: true - description: UseCache enables caching of image verify - responses for this rule. - type: boolean - verifyDigest: - default: true - description: VerifyDigest validates that images have a - digest. - type: boolean - type: object - type: array - required: - - name - type: object - type: array - schemaValidation: - description: |- - SchemaValidation skips validation checks for policies as well as patched resources. - Optional. The default value is set to "true", it must be set to "false" to disable the validation checks. - type: boolean - useServerSideApply: - description: |- - UseServerSideApply controls whether to use server-side apply for generate rules. - If set to "true", create & update for generate rules will use apply instead of create/update. - Defaults to "false" if not specified. - type: boolean - validationFailureAction: - default: Audit - description: |- - ValidationFailureAction defines if a validation policy rule violation should block - the admission review request (enforce), or allow (audit) the admission review request - and report an error in a policy report. Optional. - Allowed values are audit or enforce. The default value is "Audit". - enum: - - audit - - enforce - - Audit - - Enforce - type: string - validationFailureActionOverrides: - description: |- - ValidationFailureActionOverrides is a Cluster Policy attribute that specifies ValidationFailureAction - namespace-wise. It overrides ValidationFailureAction for the specified namespaces. - items: - properties: - action: - description: ValidationFailureAction defines the policy validation - failure action - enum: - - audit - - enforce - - Audit - - Enforce - type: string - namespaceSelector: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist.
- type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - namespaces: - items: - type: string - type: array - type: object - type: array - webhookTimeoutSeconds: - description: |- - WebhookTimeoutSeconds specifies the maximum time in seconds allowed to apply this policy. - After the configured time expires, the admission request may fail, or may simply ignore the policy results, - based on the failure policy. The default timeout is 10s, the value must be between 1 and 30 seconds. - format: int32 - type: integer - required: - - controlPlaneSelector - type: object - x-kubernetes-validations: - - message: policyName is immutable - rule: has(self.policyName) == has(oldSelf.policyName) - status: - description: SharedUpboundPolicyStatus defines the observed state of the - projected policies. - properties: - failed: - description: list of provisioning failures. - items: - description: SharedUpboundPolicyProvisioningFailure defines policy - provisioning failure. - properties: - conditions: - description: List of conditions. - items: - description: Condition contains details for one aspect of - the current state of this API Resource. - properties: - lastTransitionTime: - description: |- - lastTransitionTime is the last time the condition transitioned from one status to another. - This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. - format: date-time - type: string - message: - description: |- - message is a human readable message indicating details about the transition. - This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: |- - observedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: |- - reason contains a programmatic identifier indicating the reason for the condition's last transition. - Producers of specific condition types may define expected values and meanings for this field, - and whether the values are considered a guaranteed API. - The value should be a CamelCase string. - This field may not be empty. - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, - Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase.
- maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - controlPlane: - description: ControlPlane name where the failure occurred. - type: string - required: - - controlPlane - type: object - type: array - x-kubernetes-list-map-keys: - - controlPlane - x-kubernetes-list-type: map - observedGeneration: - description: observed resource generation. - format: int64 - type: integer - provisioned: - description: List of successfully provisioned targets. - items: - description: SharedUpboundPolicyProvisioningSuccess defines policy - provisioning success. - properties: - controlPlane: - description: ControlPlane name where the policy got - successfully projected. - type: string - required: - - controlPlane - type: object - type: array - x-kubernetes-list-map-keys: - - controlPlane - x-kubernetes-list-type: map - type: object - type: object - served: true - storage: true - subresources: - status: {} diff --git a/static/crds/space/v1.12/references.upbound.io_referencedobjects.yaml b/static/crds/space/v1.12/references.upbound.io_referencedobjects.yaml deleted file mode 100644 index 13c713152..000000000 --- a/static/crds/space/v1.12/references.upbound.io_referencedobjects.yaml +++ /dev/null @@ -1,303 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.3 - name: referencedobjects.references.upbound.io -spec: - group: references.upbound.io - names: - categories: - - managed - - kubernetes - kind: ReferencedObject - listKind: ReferencedObjectList - plural: referencedobjects - singular: referencedobject - scope: Cluster - versions: - - additionalPrinterColumns: - - jsonPath: .spec.forProvider.manifest.kind - name: KIND - type: string - - jsonPath: .spec.composite.kind - name: KIND - priority: 1 - type: string - - jsonPath: .spec.composite.apiVersion - name: APIVERSION - priority: 2 - type: string - - jsonPath: .spec.composite.name - name: COMPOSITE - priority: 1 - type: string - - jsonPath: .spec.composite.JSONPath - name: JSONPATH - priority: 1 - type: string - - jsonPath: .status.atProvider.manifest.kind - name: REFERENCEKIND - priority: 1 - type: string - - jsonPath: .status.atProvider.manifest.apiVersion - name: REFERENCEAPIVERSION - priority: 2 - type: string - - jsonPath: .status.atProvider.manifest.metadata.name - name: REFERENCEDNAME - priority: 1 - type: string - - jsonPath: .status.atProvider.manifest.metadata.namespace - name: REFERENCEDNAMESPACE - priority: 1 - type: string - - jsonPath: .status.conditions[?(@.type=='Synced')].status - name: SYNCED - type: string - - jsonPath: .status.conditions[?(@.type=='Ready')].status - name: READY - type: string - - jsonPath: .metadata.creationTimestamp - name: AGE - type: date - name: v1alpha1 - schema: - openAPIV3Schema: - description: |- - A ReferencedObject represents a Kubernetes object that is referenced by a - claim. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values.
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: An ObjectSpec defines the desired state of an Object. - properties: - composite: - description: |- - Composite is the composite object that holds the reference. - The composite must be bound to a claim, which can be local or remote. - properties: - apiVersion: - description: APIVersion of the referencing composite resource. - type: string - jsonPath: - description: |- - JSONPath is where the reference is stored in the composite. - The reference is resolved in the context of the claim. - The JSONPath is expected to end with an object field called `*Ref`, or - an array field called `*Refs`. In the former case, the object field must - have the following schema. In the latter case, each item in the array must: - - apiVersion: string - kind: string - name: string - namespace: string # optional, defaults to a local reference. - UID: string # optional - grants: []string # optional, defaults to ["Observe"], but can hold any of - ["Observe", "Create", "Update", "Delete"]. - claims: []string # optional, additional verbs the user must be authorized to. - - Grants are a closed set, corresponding to the management policy of - Crossplane. Grants are authorized through RBAC and Upbound ReBAC for the - referenced object when creating or updating a reference. - - Claims are optional verbs the user creating or referencing an object - must be authorized to. These can include custom verbs. - - With grants the user specifies the permissions that the claim gives to the - composite: "Observe" allows the composite to read the object, "Create" to - create it, "Update" to update it, and "Delete" to delete it. - - Claims tell the composition author about additional permissions the user - might have. - - OpenAPI v3 validation and defaulting can be used to restrict and/or - auto-populate the fields. - - A JSONPath must have a counterpart schema in the CompositeResourceDefinition's - references.upbound.io/schema annotation. - minLength: 1 - type: string - kind: - description: Kind of the referencing composite resource. - minLength: 1 - type: string - name: - description: Name of the referencing composite resource. - minLength: 1 - type: string - required: - - apiVersion - - jsonPath - - kind - - name - type: object - deletionPolicy: - default: Delete - description: |- - DeletionPolicy specifies what will happen to the underlying external - when this managed resource is deleted - either "Delete" or "Orphan" the - external resource. - This field is planned to be deprecated in favor of the ManagementPolicies - field in a future release. Currently, both could be set independently and - non-default values would be honored if the feature flag is enabled. - See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 - enum: - - Orphan - - Delete - type: string - forProvider: - description: ForProvider is the object's desired state.
- properties: - manifest: - description: Raw JSON representation of the Kubernetes object - to be created. - type: object - x-kubernetes-embedded-resource: true - x-kubernetes-preserve-unknown-fields: true - required: - - manifest - type: object - managementPolicies: - default: - - '*' - description: |- - THIS IS A BETA FIELD. It is on by default but can be opted out - through a Crossplane feature flag. - ManagementPolicies specify the array of actions Crossplane is allowed to - take on the managed and external resources. - This field is planned to replace the DeletionPolicy field in a future - release. Currently, both could be set independently and non-default - values would be honored if the feature flag is enabled. If both are - custom, the DeletionPolicy field will be ignored. - See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223 - and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md - items: - description: |- - A ManagementAction represents an action that the Crossplane controllers - can take on an external resource. - enum: - - Observe - - Create - - Update - - Delete - - LateInitialize - - '*' - type: string - type: array - ownerPolicy: - description: |- - OwnerPolicy defines how created objects should be owned. - 'OnCreate' requires management policy 'Create' or '*'. - enum: - - OnCreate - type: string - readiness: - default: {} - description: Readiness defines how the object's readiness condition - should be computed. - properties: - policy: - default: ObjectExists - description: Policy defines how the Object's readiness condition - should be computed. - enum: - - WhenSynced - - ObjectExists - - ObjectReady - - ObjectConditionsAllTrue - type: string - type: object - required: - - composite - type: object - status: - description: An ObjectStatus represents the observed state of an Object. - properties: - atProvider: - description: AtProvider is the object's observed state. - properties: - manifest: - description: Raw JSON representation of the remote object. - type: object - x-kubernetes-embedded-resource: true - x-kubernetes-preserve-unknown-fields: true - type: object - conditions: - description: Conditions of the resource. - items: - description: A Condition that may apply to a resource. - properties: - lastTransitionTime: - description: |- - LastTransitionTime is the last time this condition transitioned from one - status to another. - format: date-time - type: string - message: - description: |- - A Message containing details about this condition's last transition from - one status to another, if any. - type: string - observedGeneration: - description: |- - ObservedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - type: integer - reason: - description: A Reason for this condition's last transition from - one status to another. - type: string - status: - description: Status of this condition; is it currently True, - False, or Unknown? - type: string - type: - description: |- - Type of this condition. At most one of each condition type may apply to - a resource at any point in time.
- type: string - required: - - lastTransitionTime - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - observedGeneration: - description: |- - ObservedGeneration is the latest metadata.generation - which resulted in either a ready state, or stalled due to error - it can not recover from without human intervention. - format: int64 - type: integer - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/static/crds/space/v1.12/scheduling.upbound.io_environments.yaml b/static/crds/space/v1.12/scheduling.upbound.io_environments.yaml deleted file mode 100644 index 6e43da173..000000000 --- a/static/crds/space/v1.12/scheduling.upbound.io_environments.yaml +++ /dev/null @@ -1,244 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.3 - name: environments.scheduling.upbound.io -spec: - group: scheduling.upbound.io - names: - categories: - - scheduling - - envs - kind: Environment - listKind: EnvironmentList - plural: environments - shortNames: - - env - singular: environment - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .metadata.creationTimestamp - name: AGE - type: date - name: v1alpha1 - schema: - openAPIV3Schema: - description: Environment specifies where remote claims are scheduled to. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: EnvironmentSpec defines the desired state of Environment. - properties: - dimensions: - additionalProperties: - type: string - description: |- - Dimensions are the label keys and values to select control planes - implementing the desired API resource group. These dimensions apply to - all resource groups uniformly. Per resource group dimensions can - be specified in the resource group configuration. - type: object - resourceGroups: - description: |- - ResourceGroups define label keys and values and potentially other - properties to select a control plane along dimensions for individual - API resource groups. - - A resource group not specified in this list will not be scheduled. - items: - description: ResourceGroup defines the desired scheduling of a resource - group. - properties: - dimensions: - additionalProperties: - type: string - description: |- - Dimensions are the label keys and values to select control planes - implementing the desired API resource group. These are in addition to - the dimensions specified at the .spec level and override them. - type: object - name: - description: Name is the name of the resource group. 
- type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - schedule: - description: |- - Schedule defines the scheduling result of the resource group. It is - set by the scheduler and cannot be mutated by the user. But the user - can unset it to allow the scheduler to reschedule the resource group. - - If unset, the resource group will be rescheduled by first setting the - proposed schedule in the status and then setting this field here. - - If no scheduling is possible, the Ready condition will be set to False - with the NoControlPlaneAvailable reason. - - A schedule will never change unless the user unsets it. - items: - description: |- - ResourceSchedule indicates where instances of a resource group are scheduled to, - i.e. by which control plane and in which group and space claims will be - implemented. - properties: - controlPlane: - description: |- - ControlPlane is the name of the control plane in which the resource group is scheduled. - This is empty if the resource group cannot be scheduled. - type: string - group: - description: |- - Group is the name of the group in which the resource group is scheduled. - This is empty if the resource group cannot be scheduled. - type: string - name: - description: Name is the name of the resource group. - minLength: 1 - type: string - space: - description: |- - Space is the name of the space in which the resource group is scheduled. - This is empty if the resource group cannot be scheduled. - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map - type: object - status: - description: EnvironmentStatus defines the observed state of the Environment. - properties: - conditions: - description: Conditions of the resource. - items: - description: A Condition that may apply to a resource. - properties: - lastTransitionTime: - description: |- - LastTransitionTime is the last time this condition transitioned from one - status to another. - format: date-time - type: string - message: - description: |- - A Message containing details about this condition's last transition from - one status to another, if any. - type: string - observedGeneration: - description: |- - ObservedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - type: integer - reason: - description: A Reason for this condition's last transition from - one status to another. - type: string - status: - description: Status of this condition; is it currently True, - False, or Unknown? - type: string - type: - description: |- - Type of this condition. At most one of each condition type may apply to - a resource at any point in time. - type: string - required: - - lastTransitionTime - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - observedGeneration: - description: |- - ObservedGeneration is the latest metadata.generation - which resulted in either a ready state, or stalled due to error - it can not recover from without human intervention. 
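The Environment spec above pairs environment-wide dimensions with per-resource-group overrides, while the scheduler fills in spec.schedule and the proposed schedule in status. A minimal sketch, with hypothetical labels and group names:

```yaml
apiVersion: scheduling.upbound.io/v1alpha1
kind: Environment
metadata:
  name: prod-env
  namespace: default
spec:
  dimensions:                      # label keys/values used to select control planes
    region: us-east-1              # hypothetical dimension
  resourceGroups:
  - name: databases.example.org    # hypothetical API resource group
    dimensions:
      tier: gold                   # adds to and overrides the spec-level dimensions
  # spec.schedule is set by the scheduler; unset it to trigger rescheduling
```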
- format: int64 - type: integer - resourceGroups: - description: |- - ProposedSchedule indicates where instances of a resource group according - to the current dimensions are proposed to be scheduled to. - items: - description: ResourceStatus represents the status of one individual - resource group. - properties: - message: - description: Message is a human-readable message for the scheduling - decision. - type: string - name: - description: Name is the name of the resource group. - minLength: 1 - type: string - proposed: - description: |- - Proposed is the proposed schedule for the resource group. This either - matches the schedule in the spec or is a new proposal by the scheduler. - - In the later case, the user can accept the proposal by removing the - existing schedule in the spec for the given resource group. - properties: - controlPlane: - description: |- - ControlPlane is the name of the control plane in which the resource group is scheduled. - This is empty if the resource group cannot be scheduled. - type: string - group: - description: |- - Group is the name of the group in which the resource group is scheduled. - This is empty if the resource group cannot be scheduled. - type: string - space: - description: |- - Space is the name of the space in which the resource group is scheduled. - This is empty if the resource group cannot be scheduled. - type: string - type: object - reason: - description: Reason is a machine-readable reason for the scheduling - decision. - type: string - required: - - name - type: object - type: array - type: object - type: object - served: true - storage: true - subresources: - status: {} diff --git a/static/crds/space/v1.12/spaces.upbound.io_backups.yaml b/static/crds/space/v1.12/spaces.upbound.io_backups.yaml deleted file mode 100644 index 949ca81b8..000000000 --- a/static/crds/space/v1.12/spaces.upbound.io_backups.yaml +++ /dev/null @@ -1,382 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.3 - name: backups.spaces.upbound.io -spec: - group: spaces.upbound.io - names: - categories: - - spaces - kind: Backup - listKind: BackupList - plural: backups - singular: backup - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .status.phase - name: Phase - type: string - - jsonPath: .status.retries - name: Retries - type: integer - - jsonPath: .spec.ttl - name: TTL - type: string - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - name: v1alpha1 - schema: - openAPIV3Schema: - description: Backup represents a single backup of a ControlPlane. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: BackupSpec defines a backup over a set of ControlPlanes. 
- properties: - configRef: - description: |- - ConfigRef is a reference to the backup configuration. - ApiGroup is optional and defaults to "spaces.upbound.io". - Kind is required, and the only supported value is "SharedBackupConfig" at - the moment. - Name is required. - properties: - apiGroup: - description: APIGroup is the group for the resource being referenced. - type: string - kind: - description: Kind is the type of resource being referenced. - minLength: 1 - type: string - name: - description: Name is the name of resource being referenced. - minLength: 1 - type: string - type: object - x-kubernetes-map-type: atomic - x-kubernetes-validations: - - message: backup config ref must be a reference to a SharedBackupConfig - rule: (!has(self.apiGroup) || self.apiGroup == 'spaces.upbound.io') - && self.kind == 'SharedBackupConfig' - - message: backup config ref must have a name - rule: size(self.name) > 0 - controlPlane: - description: |- - ControlPlane is the name of the ControlPlane to backup. - Requires "backup" permission on the referenced ControlPlane. - minLength: 1 - type: string - deletionPolicy: - default: Orphan - description: DeletionPolicy is the policy for the backup. - enum: - - Orphan - - Delete - type: string - excludedResources: - description: |- - ExcludedResources is a slice of resource names that are not - included in the backup. Used to filter the included extra resources. - items: - type: string - type: array - x-kubernetes-list-type: set - ttl: - description: |- - TTL is the time to live for the backup. After this time, the backup - will be eligible for garbage collection. If not set, the backup will - not be garbage collected. - type: string - required: - - configRef - - controlPlane - type: object - x-kubernetes-validations: - - message: backup target controlplane can not be changed after creation - rule: self.controlPlane == oldSelf.controlPlane - - message: backup excluded resources can not be changed after creation - rule: (!has(self.excludedResources) && !has(oldSelf.excludedResources)) - || self.excludedResources == oldSelf.excludedResources - - message: backup config ref can not be changed after creation - rule: self.configRef == oldSelf.configRef - status: - description: BackupStatus represents the observed state of a Backup. - properties: - conditions: - description: Conditions of the resource. - items: - description: A Condition that may apply to a resource. - properties: - lastTransitionTime: - description: |- - LastTransitionTime is the last time this condition transitioned from one - status to another. - format: date-time - type: string - message: - description: |- - A Message containing details about this condition's last transition from - one status to another, if any. - type: string - observedGeneration: - description: |- - ObservedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - type: integer - reason: - description: A Reason for this condition's last transition from - one status to another. - type: string - status: - description: Status of this condition; is it currently True, - False, or Unknown? - type: string - type: - description: |- - Type of this condition. At most one of each condition type may apply to - a resource at any point in time. 
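A minimal Backup conforming to the v1alpha1 spec above might look like the following sketch (the v1beta1 schema later in this file is identical); the config and control plane names are hypothetical:

```yaml
apiVersion: spaces.upbound.io/v1alpha1
kind: Backup
metadata:
  name: ctp-dev-manual-backup
  namespace: default
spec:
  configRef:
    kind: SharedBackupConfig       # apiGroup defaults to spaces.upbound.io
    name: default-backup-config    # hypothetical SharedBackupConfig
  controlPlane: ctp-dev            # hypothetical; immutable after creation
  deletionPolicy: Orphan           # keep the stored backup when this object is deleted
  ttl: 720h                        # duration format assumed; collectable after 30 days
```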
- type: string - required: - - lastTransitionTime - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - observedGeneration: - description: |- - ObservedGeneration is the latest metadata.generation - which resulted in either a ready state, or stalled due to error - it can not recover from without human intervention. - format: int64 - type: integer - phase: - default: Pending - description: Phase is the current phase of the backup. - enum: - - Pending - - InProgress - - Failed - - Completed - - Deleted - type: string - retries: - description: Retries is the number of times the backup has been retried. - format: int32 - type: integer - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} - - additionalPrinterColumns: - - jsonPath: .status.phase - name: Phase - type: string - - jsonPath: .status.retries - name: Retries - type: integer - - jsonPath: .spec.ttl - name: TTL - type: string - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - name: v1beta1 - schema: - openAPIV3Schema: - description: Backup represents a single backup of a ControlPlane. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: BackupSpec defines a backup over a set of ControlPlanes. - properties: - configRef: - description: |- - ConfigRef is a reference to the backup configuration. - ApiGroup is optional and defaults to "spaces.upbound.io". - Kind is required, and the only supported value is "SharedBackupConfig" at - the moment. - Name is required. - properties: - apiGroup: - description: APIGroup is the group for the resource being referenced. - type: string - kind: - description: Kind is the type of resource being referenced. - minLength: 1 - type: string - name: - description: Name is the name of resource being referenced. - minLength: 1 - type: string - type: object - x-kubernetes-map-type: atomic - x-kubernetes-validations: - - message: backup config ref must be a reference to a SharedBackupConfig - rule: (!has(self.apiGroup) || self.apiGroup == 'spaces.upbound.io') - && self.kind == 'SharedBackupConfig' - - message: backup config ref must have a name - rule: size(self.name) > 0 - controlPlane: - description: |- - ControlPlane is the name of the ControlPlane to backup. - Requires "backup" permission on the referenced ControlPlane. - minLength: 1 - type: string - deletionPolicy: - default: Orphan - description: DeletionPolicy is the policy for the backup. - enum: - - Orphan - - Delete - type: string - excludedResources: - description: |- - ExcludedResources is a slice of resource names that are not - included in the backup. Used to filter the included extra resources. 
- items: - type: string - type: array - x-kubernetes-list-type: set - ttl: - description: |- - TTL is the time to live for the backup. After this time, the backup - will be eligible for garbage collection. If not set, the backup will - not be garbage collected. - type: string - required: - - configRef - - controlPlane - type: object - x-kubernetes-validations: - - message: backup target controlplane can not be changed after creation - rule: self.controlPlane == oldSelf.controlPlane - - message: backup excluded resources can not be changed after creation - rule: (!has(self.excludedResources) && !has(oldSelf.excludedResources)) - || self.excludedResources == oldSelf.excludedResources - - message: backup config ref can not be changed after creation - rule: self.configRef == oldSelf.configRef - status: - description: BackupStatus represents the observed state of a Backup. - properties: - conditions: - description: Conditions of the resource. - items: - description: A Condition that may apply to a resource. - properties: - lastTransitionTime: - description: |- - LastTransitionTime is the last time this condition transitioned from one - status to another. - format: date-time - type: string - message: - description: |- - A Message containing details about this condition's last transition from - one status to another, if any. - type: string - observedGeneration: - description: |- - ObservedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - type: integer - reason: - description: A Reason for this condition's last transition from - one status to another. - type: string - status: - description: Status of this condition; is it currently True, - False, or Unknown? - type: string - type: - description: |- - Type of this condition. At most one of each condition type may apply to - a resource at any point in time. - type: string - required: - - lastTransitionTime - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - observedGeneration: - description: |- - ObservedGeneration is the latest metadata.generation - which resulted in either a ready state, or stalled due to error - it can not recover from without human intervention. - format: int64 - type: integer - phase: - default: Pending - description: Phase is the current phase of the backup. - enum: - - Pending - - InProgress - - Failed - - Completed - - Deleted - type: string - retries: - description: Retries is the number of times the backup has been retried. 
- format: int32 - type: integer - type: object - required: - - spec - type: object - served: true - storage: false - subresources: - status: {} diff --git a/static/crds/space/v1.12/spaces.upbound.io_backupschedules.yaml b/static/crds/space/v1.12/spaces.upbound.io_backupschedules.yaml deleted file mode 100644 index 68db3d5e8..000000000 --- a/static/crds/space/v1.12/spaces.upbound.io_backupschedules.yaml +++ /dev/null @@ -1,408 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.3 - name: backupschedules.spaces.upbound.io -spec: - group: spaces.upbound.io - names: - categories: - - spaces - kind: BackupSchedule - listKind: BackupScheduleList - plural: backupschedules - singular: backupschedule - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .spec.schedule - name: Schedule - type: string - - jsonPath: .status.lastBackup - name: LastBackup - type: date - - jsonPath: .spec.ttl - name: TTL - type: string - - jsonPath: .spec.controlPlane - name: ControlPlane - type: string - - jsonPath: .spec.suspend - name: Suspended - type: boolean - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - name: v1alpha1 - schema: - openAPIV3Schema: - description: BackupSchedule represents a single ControlPlane schedule for - Backups. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: BackupScheduleSpec defines a backup schedule over a set of - ControlPlanes. - properties: - configRef: - description: |- - ConfigRef is a reference to the backup configuration. - ApiGroup is optional and defaults to "spaces.upbound.io". - Kind is required, and the only supported value is "SharedBackupConfig" at - the moment. - Name is required. - properties: - apiGroup: - description: APIGroup is the group for the resource being referenced. - type: string - kind: - description: Kind is the type of resource being referenced. - minLength: 1 - type: string - name: - description: Name is the name of resource being referenced. - minLength: 1 - type: string - type: object - x-kubernetes-map-type: atomic - x-kubernetes-validations: - - message: backup config ref must be a reference to a SharedBackupConfig - rule: (!has(self.apiGroup) || self.apiGroup == 'spaces.upbound.io') - && self.kind == 'SharedBackupConfig' - - message: backup config ref must have a name - rule: size(self.name) > 0 - controlPlane: - description: |- - ControlPlane is the name of the ControlPlane to which the schedule - applies. - Requires "get" permission on the referenced ControlPlane. - type: string - x-kubernetes-validations: - - message: target can not be changed after creation - rule: self == oldSelf - deletionPolicy: - default: Orphan - description: DeletionPolicy is the policy for the backup. 
- enum: - - Orphan - - Delete - type: string - excludedResources: - description: |- - ExcludedResources is a slice of resource names that are not - included in the backup. Used to filter the included extra resources. - items: - type: string - type: array - x-kubernetes-list-type: set - schedule: - description: Schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. - minLength: 1 - type: string - suspend: - description: |- - Suspend specifies whether the schedule is suspended. If true, no - Backups will be created, but running backups will be allowed to - complete. - type: boolean - ttl: - description: |- - TTL is the time to live for the backup. After this time, the backup - will be eligible for garbage collection. If not set, the backup will - not be garbage collected. - type: string - useOwnerReferencesInBackup: - description: |- - UseOwnerReferencesBackup specifies whether an ownership chain should be - established between this resource and the Backup it creates. - If set to true, the Backup will be garbage collected when this resource - is deleted. - type: boolean - required: - - configRef - - controlPlane - - schedule - type: object - status: - description: BackupScheduleStatus represents the observed state of a BackupSchedule. - properties: - conditions: - description: Conditions of the resource. - items: - description: A Condition that may apply to a resource. - properties: - lastTransitionTime: - description: |- - LastTransitionTime is the last time this condition transitioned from one - status to another. - format: date-time - type: string - message: - description: |- - A Message containing details about this condition's last transition from - one status to another, if any. - type: string - observedGeneration: - description: |- - ObservedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - type: integer - reason: - description: A Reason for this condition's last transition from - one status to another. - type: string - status: - description: Status of this condition; is it currently True, - False, or Unknown? - type: string - type: - description: |- - Type of this condition. At most one of each condition type may apply to - a resource at any point in time. - type: string - required: - - lastTransitionTime - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - lastBackup: - description: |- - LastBackup is the last time a Backup was run for this - Schedule schedule - format: date-time - type: string - observedGeneration: - description: |- - ObservedGeneration is the latest metadata.generation - which resulted in either a ready state, or stalled due to error - it can not recover from without human intervention. 
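Putting the schedule fields above together, a minimal BackupSchedule sketch with hypothetical names looks like:

```yaml
apiVersion: spaces.upbound.io/v1alpha1
kind: BackupSchedule
metadata:
  name: ctp-dev-nightly
  namespace: default
spec:
  configRef:
    kind: SharedBackupConfig
    name: default-backup-config    # hypothetical SharedBackupConfig
  controlPlane: ctp-dev            # hypothetical; immutable after creation
  schedule: "0 2 * * *"            # standard cron syntax: nightly at 02:00
  ttl: 168h                        # each created Backup is collectable after 7 days
  useOwnerReferencesInBackup: true # created Backups are garbage collected with this schedule
```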
- format: int64 - type: integer - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} - - additionalPrinterColumns: - - jsonPath: .spec.schedule - name: Schedule - type: string - - jsonPath: .status.lastBackup - name: LastBackup - type: date - - jsonPath: .spec.ttl - name: TTL - type: string - - jsonPath: .spec.controlPlane - name: ControlPlane - type: string - - jsonPath: .spec.suspend - name: Suspended - type: boolean - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - name: v1beta1 - schema: - openAPIV3Schema: - description: BackupSchedule represents a single ControlPlane schedule for - Backups. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: BackupScheduleSpec defines a backup schedule over a set of - ControlPlanes. - properties: - configRef: - description: |- - ConfigRef is a reference to the backup configuration. - ApiGroup is optional and defaults to "spaces.upbound.io". - Kind is required, and the only supported value is "SharedBackupConfig" at - the moment. - Name is required. - properties: - apiGroup: - description: APIGroup is the group for the resource being referenced. - type: string - kind: - description: Kind is the type of resource being referenced. - minLength: 1 - type: string - name: - description: Name is the name of resource being referenced. - minLength: 1 - type: string - type: object - x-kubernetes-map-type: atomic - x-kubernetes-validations: - - message: backup config ref must be a reference to a SharedBackupConfig - rule: (!has(self.apiGroup) || self.apiGroup == 'spaces.upbound.io') - && self.kind == 'SharedBackupConfig' - - message: backup config ref must have a name - rule: size(self.name) > 0 - controlPlane: - description: |- - ControlPlane is the name of the ControlPlane to which the schedule - applies. - Requires "get" permission on the referenced ControlPlane. - type: string - x-kubernetes-validations: - - message: target can not be changed after creation - rule: self == oldSelf - deletionPolicy: - default: Orphan - description: DeletionPolicy is the policy for the backup. - enum: - - Orphan - - Delete - type: string - excludedResources: - description: |- - ExcludedResources is a slice of resource names that are not - included in the backup. Used to filter the included extra resources. - items: - type: string - type: array - x-kubernetes-list-type: set - schedule: - description: Schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. - minLength: 1 - type: string - suspend: - description: |- - Suspend specifies whether the schedule is suspended. If true, no - Backups will be created, but running backups will be allowed to - complete. - type: boolean - ttl: - description: |- - TTL is the time to live for the backup. After this time, the backup - will be eligible for garbage collection. 
If not set, the backup will - not be garbage collected. - type: string - useOwnerReferencesInBackup: - description: |- - UseOwnerReferencesBackup specifies whether an ownership chain should be - established between this resource and the Backup it creates. - If set to true, the Backup will be garbage collected when this resource - is deleted. - type: boolean - required: - - configRef - - controlPlane - - schedule - type: object - status: - description: BackupScheduleStatus represents the observed state of a BackupSchedule. - properties: - conditions: - description: Conditions of the resource. - items: - description: A Condition that may apply to a resource. - properties: - lastTransitionTime: - description: |- - LastTransitionTime is the last time this condition transitioned from one - status to another. - format: date-time - type: string - message: - description: |- - A Message containing details about this condition's last transition from - one status to another, if any. - type: string - observedGeneration: - description: |- - ObservedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - type: integer - reason: - description: A Reason for this condition's last transition from - one status to another. - type: string - status: - description: Status of this condition; is it currently True, - False, or Unknown? - type: string - type: - description: |- - Type of this condition. At most one of each condition type may apply to - a resource at any point in time. - type: string - required: - - lastTransitionTime - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - lastBackup: - description: |- - LastBackup is the last time a Backup was run for this - Schedule schedule - format: date-time - type: string - observedGeneration: - description: |- - ObservedGeneration is the latest metadata.generation - which resulted in either a ready state, or stalled due to error - it can not recover from without human intervention. 
- format: int64 - type: integer - type: object - required: - - spec - type: object - served: true - storage: false - subresources: - status: {} diff --git a/static/crds/space/v1.12/spaces.upbound.io_controlplanes.yaml b/static/crds/space/v1.12/spaces.upbound.io_controlplanes.yaml deleted file mode 100644 index 96eec2a60..000000000 --- a/static/crds/space/v1.12/spaces.upbound.io_controlplanes.yaml +++ /dev/null @@ -1,312 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.3 - name: controlplanes.spaces.upbound.io -spec: - group: spaces.upbound.io - names: - categories: - - spaces - kind: ControlPlane - listKind: ControlPlaneList - plural: controlplanes - shortNames: - - ctp - - ctps - singular: controlplane - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .spec.crossplane.version - name: Crossplane - type: string - - jsonPath: .status.conditions[?(@.type=='Ready')].status - name: Ready - type: string - - jsonPath: .status.message - name: Message - type: string - - jsonPath: .spec.class - name: Class - priority: 1 - type: string - - jsonPath: .status.size.resourceUsage.cpu - name: CPU Usage - priority: 1 - type: string - - jsonPath: .status.size.resourceUsage.memory - name: Memory Usage - priority: 1 - type: string - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - name: v1beta1 - schema: - openAPIV3Schema: - description: ControlPlane defines a managed Crossplane instance. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: A ControlPlaneSpec represents the desired state of the ControlPlane. - properties: - class: - default: default - description: |- - [[GATE:EnableControlPlaneClasses]] - Class specifies the class of the control plane. This affects the - control plane sizing, including component replicas and resource - requirements. There are multiple predefined classes, with "default" - being the standard Spaces control plane without any additional class - configuration. Check the Upbound Cloud documentation for a list of all - available classes. Defaults to "default". - type: string - x-kubernetes-validations: - - message: class is immutable - rule: self == oldSelf - crossplane: - description: Crossplane defines the configuration for Crossplane. - properties: - autoUpgrade: - default: - channel: Stable - description: AutoUpgrades defines the auto upgrade configuration - for Crossplane. - properties: - channel: - default: Stable - description: |- - Channel defines the upgrade channels for Crossplane. We support the following channels where 'Stable' is the - default: - - None: disables auto-upgrades and keeps the control plane at its current version of Crossplane. 
- - Patch: automatically upgrades the control plane to the latest supported patch version when it - becomes available while keeping the minor version the same. - - Stable: automatically upgrades the control plane to the latest supported patch release on minor - version N-1, where N is the latest supported minor version. - - Rapid: automatically upgrades the cluster to the latest supported patch release on the latest - supported minor version. - enum: - - None - - Patch - - Stable - - Rapid - type: string - type: object - state: - default: Running - description: |- - State defines the state for crossplane and provider workloads. We support - the following states where 'Running' is the default: - - Running: Starts/Scales up all crossplane and provider workloads in the ControlPlane - - Paused: Pauses/Scales down all crossplane and provider workloads in the ControlPlane - enum: - - Running - - Paused - type: string - version: - description: Version is the version of Universal Crossplane to - install. - type: string - x-kubernetes-validations: - - message: The version must not start with a leading 'v' - rule: (self.matches('^[^v].*')) - type: object - restore: - description: |- - [[GATE:EnableSharedBackup]] THIS IS AN ALPHA FIELD. Do not use it in production. - Restore specifies details about the control planes restore configuration. - properties: - finishedAt: - description: |- - FinishedAt is the time at which the control plane was restored, it's not - meant to be set by the user, but rather by the system when the control - plane is restored. - format: date-time - type: string - source: - description: |- - Source of the Backup or BackupSchedule to restore from. - Require "restore" permission on the referenced Backup or BackupSchedule. - ApiGroup is optional and defaults to "spaces.upbound.io". - Kind is required, and the only supported kinds are Backup and - BackupSchedule at the moment. - Name is required. - properties: - apiGroup: - description: APIGroup is the group for the resource being - referenced. - type: string - kind: - description: Kind is the type of resource being referenced. - minLength: 1 - type: string - name: - description: Name is the name of resource being referenced. - minLength: 1 - type: string - type: object - x-kubernetes-map-type: atomic - x-kubernetes-validations: - - message: source must be a reference to a Backup or BackupSchedule - (v1alpha1) - rule: (!has(self.apiGroup) || self.apiGroup == 'spaces.upbound.io') - && (self.kind == 'Backup' || self.kind == 'BackupSchedule') - - message: source is immutable - rule: oldSelf == self - required: - - source - type: object - x-kubernetes-validations: - - message: finishedAt is immutable once set - rule: '!has(oldSelf.finishedAt) || oldSelf.finishedAt == self.finishedAt' - writeConnectionSecretToRef: - description: |- - WriteConnectionSecretToReference specifies the namespace and name of a - Secret to which any connection details for this managed resource should - be written. Connection details frequently include the endpoint, username, - and password required to connect to the managed resource. - This field is planned to be replaced in a future release in favor of - PublishConnectionDetailsTo. Currently, both could be set independently - and connection details would be published to both without affecting - each other. - - If omitted, it is defaulted to the namespace of the ControlPlane. - Deprecated: Use Hub or Upbound identities instead. - properties: - name: - description: Name of the secret. 
- type: string - namespace: - description: |- - Namespace of the secret. If omitted, it is equal to - the namespace of the resource containing this reference as a field. - type: string - required: - - name - type: object - type: object - x-kubernetes-validations: - - message: '[[GATE:EnableSharedBackup]] restore source can not be unset' - rule: '!has(oldSelf.restore) || has(self.restore)' - - message: '[[GATE:EnableSharedBackup]] restore source can not be set - after creation' - rule: has(oldSelf.restore) || !has(self.restore) - - message: '"version" cannot be empty when upgrade channel is "None"' - rule: '!has(self.crossplane.autoUpgrade) || self.crossplane.autoUpgrade.channel - != "None" || self.crossplane.version != ""' - status: - description: A ControlPlaneStatus represents the observed state of a ControlPlane. - properties: - conditions: - description: Conditions of the resource. - items: - description: A Condition that may apply to a resource. - properties: - lastTransitionTime: - description: |- - LastTransitionTime is the last time this condition transitioned from one - status to another. - format: date-time - type: string - message: - description: |- - A Message containing details about this condition's last transition from - one status to another, if any. - type: string - observedGeneration: - description: |- - ObservedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - type: integer - reason: - description: A Reason for this condition's last transition from - one status to another. - type: string - status: - description: Status of this condition; is it currently True, - False, or Unknown? - type: string - type: - description: |- - Type of this condition. At most one of each condition type may apply to - a resource at any point in time. - type: string - required: - - lastTransitionTime - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - controlPlaneID: - type: string - firstAvailableAt: - description: FirstAvailableAt is the time at which the control plane - was available for the first time. - format: date-time - type: string - message: - description: |- - Message is a human-readable message indicating details about why the - ControlPlane is in this condition. - type: string - observedGeneration: - description: |- - ObservedGeneration is the latest metadata.generation - which resulted in either a ready state, or stalled due to error - it can not recover from without human intervention. - format: int64 - type: integer - size: - description: |- - [[GATE:EnableControlPlaneClasses]] - Size holds the status information about the control plane size, - including resource usage. - properties: - resourceUsage: - description: ResourceUsage represents the resource limits and - consumption of the control plane. - properties: - cpu: - description: CPU represents the CPU resource usage. - type: string - memory: - description: Memory represents the memory resource usage. 
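A minimal ControlPlane conforming to the schema above, with hypothetical names (note the CEL rules: the name is capped at 63 characters, and a version is only required when the upgrade channel is "None"):

```yaml
apiVersion: spaces.upbound.io/v1beta1
kind: ControlPlane
metadata:
  name: ctp-dev                    # must not exceed 63 characters
  namespace: default
spec:
  crossplane:
    autoUpgrade:
      channel: Rapid               # latest supported patch on the latest minor
    state: Running
    # version may be omitted unless channel is "None"; it must not start with "v"
```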
- type: string - type: object - type: object - type: object - required: - - spec - type: object - x-kubernetes-validations: - - message: control plane name cannot exceed 63 characters - rule: self.metadata.name.size() <= 63 - served: true - storage: true - subresources: - status: {} diff --git a/static/crds/space/v1.12/spaces.upbound.io_incontrolplaneoverrides.yaml b/static/crds/space/v1.12/spaces.upbound.io_incontrolplaneoverrides.yaml deleted file mode 100644 index 295fc05d0..000000000 --- a/static/crds/space/v1.12/spaces.upbound.io_incontrolplaneoverrides.yaml +++ /dev/null @@ -1,256 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.3 - name: incontrolplaneoverrides.spaces.upbound.io -spec: - group: spaces.upbound.io - names: - categories: - - spaces - kind: InControlPlaneOverride - listKind: InControlPlaneOverrideList - plural: incontrolplaneoverrides - singular: incontrolplaneoverride - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .status.conditions[?(@.type=='Synced')].status - name: SYNCED - type: string - - jsonPath: .status.conditions[?(@.type=='Ready')].status - name: READY - type: string - name: v1alpha1 - schema: - openAPIV3Schema: - description: |- - InControlPlaneOverride represents resource configuration overrides in - a ControlPlane. The specified override can be applied on single objects - as well as claim/XR object hierarchies. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: |- - InControlPlaneOverrideSpec defines a configuration override - on a target object hierarchy in a target ControlPlane with the - given name. - properties: - controlPlaneName: - description: |- - ControlPlaneName is the name of the target ControlPlane where - the resource configuration overrides will be applied. - minLength: 1 - type: string - x-kubernetes-validations: - - message: controlPlaneName is immutable - rule: self == oldSelf - deletionPolicy: - default: RollBack - description: |- - DeletionPolicy specifies whether when the InControlPlaneOverride object - is deleted, the configuration override should be kept (Keep) or - rolled back (RollBack). - enum: - - RollBack - - Keep - type: string - override: - description: |- - Override denotes the configuration override to be applied on the target - object hierarchy. The fully specified intent is obtained by serializing - the Override. - properties: - metadata: - description: Metadata specifies the patch metadata. - properties: - annotations: - additionalProperties: - type: string - description: |- - Annotations represents the Kube object annotations. 
- Only the following annotations are allowed to be patched: - - crossplane.io/paused - - spaces.upbound.io/force-reconcile-at - type: object - x-kubernetes-validations: - - message: Only the crossplane.io/paused and spaces.upbound.io/force-reconcile-at - annotations are allowed - rule: self.all(k, k == 'crossplane.io/paused' || k == 'spaces.upbound.io/force-reconcile-at') - type: object - type: object - propagationPolicy: - default: None - description: |- - PropagationPolicy specifies whether the configuration override will be - applied only to the object referenced in TargetRef (None), after an - ascending or descending hierarchy traversal will be done starting with - the target object. - enum: - - None - - Ascending - - Descending - type: string - targetRef: - description: |- - TargetRef is the object reference to a Kubernetes API object where the - configuration override will start. The controller will traverse the - target object's hierarchy depending on the PropagationPolicy. If - PropagationPolicy is None, then only the target object will be updated. - properties: - apiVersion: - description: APIVersion of the referenced object. - minLength: 1 - type: string - kind: - description: Kind of the referenced object. - minLength: 1 - type: string - name: - description: Name of the referenced object. - minLength: 1 - type: string - namespace: - description: Namespace of the referenced object. - type: string - required: - - apiVersion - - kind - - name - type: object - required: - - controlPlaneName - - override - - targetRef - type: object - status: - description: |- - InControlPlaneOverrideStatus defines the status of an InControlPlaneOverride - object. - properties: - conditions: - description: Conditions of the resource. - items: - description: A Condition that may apply to a resource. - properties: - lastTransitionTime: - description: |- - LastTransitionTime is the last time this condition transitioned from one - status to another. - format: date-time - type: string - message: - description: |- - A Message containing details about this condition's last transition from - one status to another, if any. - type: string - observedGeneration: - description: |- - ObservedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - type: integer - reason: - description: A Reason for this condition's last transition from - one status to another. - type: string - status: - description: Status of this condition; is it currently True, - False, or Unknown? - type: string - type: - description: |- - Type of this condition. At most one of each condition type may apply to - a resource at any point in time. - type: string - required: - - lastTransitionTime - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - objectRefs: - items: - description: |- - PatchedObjectStatus represents the state of an applied patch to an object - in the target hierarchy. - properties: - apiVersion: - description: APIVersion of the referenced object. - minLength: 1 - type: string - kind: - description: Kind of the referenced object. - minLength: 1 - type: string - message: - description: Message holds an optional detail message detailing - the observed state. 
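As a sketch of the override flow described above, the following hypothetical InControlPlaneOverride pauses a claim and its descendants; the claim API and all names are placeholders:

```yaml
apiVersion: spaces.upbound.io/v1alpha1
kind: InControlPlaneOverride
metadata:
  name: pause-claim-tree
  namespace: default
spec:
  controlPlaneName: ctp-dev            # hypothetical; immutable after creation
  deletionPolicy: RollBack             # undo the override when this object is deleted
  propagationPolicy: Descending        # apply to the target and everything below it
  targetRef:
    apiVersion: example.org/v1alpha1   # hypothetical claim API
    kind: DatabaseClaim
    name: my-db
    namespace: team-a
  override:
    metadata:
      annotations:
        crossplane.io/paused: "true"   # one of the two annotations the schema allows
```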
- type: string - name: - description: Name of the referenced object. - minLength: 1 - type: string - namespace: - description: Namespace of the referenced object. - type: string - reason: - description: Reason is the reason for the target objects override - Status. - type: string - status: - description: Status of the configuration override. - enum: - - Success - - Skipped - - Error - type: string - uid: - description: Metadata UID of the patch target object. - type: string - required: - - apiVersion - - kind - - name - - reason - - status - type: object - type: array - observedGeneration: - description: |- - ObservedGeneration is the latest metadata.generation - which resulted in either a ready state, or stalled due to error - it can not recover from without human intervention. - format: int64 - type: integer - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/static/crds/space/v1.12/spaces.upbound.io_sharedbackupconfigs.yaml b/static/crds/space/v1.12/spaces.upbound.io_sharedbackupconfigs.yaml deleted file mode 100644 index e0c258f9d..000000000 --- a/static/crds/space/v1.12/spaces.upbound.io_sharedbackupconfigs.yaml +++ /dev/null @@ -1,268 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.3 - name: sharedbackupconfigs.spaces.upbound.io -spec: - group: spaces.upbound.io - names: - categories: - - spaces - kind: SharedBackupConfig - listKind: SharedBackupConfigList - plural: sharedbackupconfigs - singular: sharedbackupconfig - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .spec.objectStorage.provider - name: Provider - type: string - - jsonPath: .spec.objectStorage.bucket - name: Bucket - type: string - - jsonPath: .spec.objectStorage.credentials.source - name: Auth - type: string - - jsonPath: .spec.objectStorage.credentials.secretRef.name - name: Secret - type: string - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - name: v1alpha1 - schema: - openAPIV3Schema: - description: SharedBackupConfig defines the configuration to backup and restore - ControlPlanes. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: |- - A SharedBackupConfigSpec represents the configuration to backup or restore - ControlPlanes. - properties: - objectStorage: - description: ObjectStorage specifies the object storage configuration - for the given provider. - properties: - bucket: - description: Bucket is the name of the bucket to store backups - in. - minLength: 1 - type: string - config: - description: |- - Config is a free-form map of configuration options for the object storage provider. 
- See https://github.com/thanos-io/objstore?tab=readme-ov-file for more - information on the formats for each supported cloud provider. Bucket and - Provider will override the required values in the config. - type: object - x-kubernetes-preserve-unknown-fields: true - credentials: - description: Credentials specifies the credentials to access the - object storage. - properties: - secretRef: - description: |- - A SecretRef is a reference to a secret key that contains the credentials - that must be used to connect to the provider. - properties: - key: - default: credentials - description: The key to select. - type: string - name: - description: Name of the secret. - type: string - required: - - key - - name - type: object - source: - description: |- - Source of the credentials. - Source "Secret" requires "get" permissions on the referenced Secret. - enum: - - Secret - - InjectedIdentity - type: string - required: - - source - type: object - prefix: - description: |- - Prefix is the prefix to use for all backups using this - SharedBackupConfig, e.g. "prod/cluster1", resulting in backups for - controlplane "ctp1" in namespace "ns1" being stored in - "prod/cluster1/ns1/ctp1". - type: string - provider: - description: Provider is the name of the object storage provider. - enum: - - AWS - - Azure - - GCP - type: string - required: - - bucket - - credentials - - provider - type: object - x-kubernetes-validations: - - message: credentials.secretRef.name must be set when source is Secret - rule: self.credentials.source != 'Secret' || (has(self.credentials.secretRef) - && has(self.credentials.secretRef.name)) - required: - - objectStorage - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} - - additionalPrinterColumns: - - jsonPath: .spec.objectStorage.provider - name: Provider - type: string - - jsonPath: .spec.objectStorage.bucket - name: Bucket - type: string - - jsonPath: .spec.objectStorage.credentials.source - name: Auth - type: string - - jsonPath: .spec.objectStorage.credentials.secretRef.name - name: Secret - type: string - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - name: v1beta1 - schema: - openAPIV3Schema: - description: SharedBackupConfig defines the configuration to backup and restore - ControlPlanes. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: |- - A SharedBackupConfigSpec represents the configuration to backup or restore - ControlPlanes. - properties: - objectStorage: - description: ObjectStorage specifies the object storage configuration - for the given provider. - properties: - bucket: - description: Bucket is the name of the bucket to store backups - in. - minLength: 1 - type: string - config: - description: |- - Config is a free-form map of configuration options for the object storage provider. 
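A minimal SharedBackupConfig conforming to the v1alpha1 schema above (the v1beta1 schema that follows is identical), with hypothetical bucket and secret names:

```yaml
apiVersion: spaces.upbound.io/v1alpha1
kind: SharedBackupConfig
metadata:
  name: default-backup-config
  namespace: default
spec:
  objectStorage:
    provider: AWS
    bucket: my-spaces-backups          # hypothetical bucket
    prefix: prod/cluster1              # backups land under prod/cluster1/<ns>/<ctp>
    credentials:
      source: Secret                   # requires "get" on the referenced Secret
      secretRef:
        name: aws-backup-creds         # hypothetical; required when source is Secret
        key: credentials
```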
- See https://github.com/thanos-io/objstore?tab=readme-ov-file for more - information on the formats for each supported cloud provider. Bucket and - Provider will override the required values in the config. - type: object - x-kubernetes-preserve-unknown-fields: true - credentials: - description: Credentials specifies the credentials to access the - object storage. - properties: - secretRef: - description: |- - A SecretRef is a reference to a secret key that contains the credentials - that must be used to connect to the provider. - properties: - key: - default: credentials - description: The key to select. - type: string - name: - description: Name of the secret. - type: string - required: - - key - - name - type: object - source: - description: |- - Source of the credentials. - Source "Secret" requires "get" permissions on the referenced Secret. - enum: - - Secret - - InjectedIdentity - type: string - required: - - source - type: object - prefix: - description: |- - Prefix is the prefix to use for all backups using this - SharedBackupConfig, e.g. "prod/cluster1", resulting in backups for - controlplane "ctp1" in namespace "ns1" being stored in - "prod/cluster1/ns1/ctp1". - type: string - provider: - description: Provider is the name of the object storage provider. - enum: - - AWS - - Azure - - GCP - type: string - required: - - bucket - - credentials - - provider - type: object - x-kubernetes-validations: - - message: credentials.secretRef.name must be set when source is Secret - rule: self.credentials.source != 'Secret' || (has(self.credentials.secretRef) - && has(self.credentials.secretRef.name)) - required: - - objectStorage - type: object - required: - - spec - type: object - served: true - storage: false - subresources: - status: {} diff --git a/static/crds/space/v1.12/spaces.upbound.io_sharedbackups.yaml b/static/crds/space/v1.12/spaces.upbound.io_sharedbackups.yaml deleted file mode 100644 index 8fe41fc8f..000000000 --- a/static/crds/space/v1.12/spaces.upbound.io_sharedbackups.yaml +++ /dev/null @@ -1,564 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.3 - name: sharedbackups.spaces.upbound.io -spec: - group: spaces.upbound.io - names: - categories: - - spaces - kind: SharedBackup - listKind: SharedBackupList - plural: sharedbackups - singular: sharedbackup - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .metadata.annotations.sharedbackup\.internal\.spaces\.upbound\.io/completed - name: Completed - type: string - - jsonPath: .metadata.annotations.sharedbackup\.internal\.spaces\.upbound\.io/failed - name: Failed - type: string - - jsonPath: .metadata.annotations.sharedbackup\.internal\.spaces\.upbound\.io/provisioned - name: Provisioned - type: string - - jsonPath: .status.phase - name: Phase - type: string - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - name: v1alpha1 - schema: - openAPIV3Schema: - description: SharedBackup defines a backup over a set of ControlPlanes. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. 
- Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: SharedBackupSpec defines a backup over a set of ControlPlanes. - properties: - configRef: - description: |- - ConfigRef is a reference to the backup configuration. - ApiGroup is optional and defaults to "spaces.upbound.io". - Kind is required, and the only supported value is "SharedBackupConfig" at - the moment. - Name is required. - properties: - apiGroup: - description: APIGroup is the group for the resource being referenced. - type: string - kind: - description: Kind is the type of resource being referenced. - minLength: 1 - type: string - name: - description: Name is the name of resource being referenced. - minLength: 1 - type: string - type: object - x-kubernetes-map-type: atomic - x-kubernetes-validations: - - message: backup config ref must be a reference to a SharedBackupConfig - rule: (!has(self.apiGroup) || self.apiGroup == 'spaces.upbound.io') - && self.kind == 'SharedBackupConfig' - - message: backup config ref must have a name - rule: size(self.name) > 0 - controlPlaneSelector: - description: |- - ControlPlaneSelector defines the selector for ControlPlanes to backup. - Requires "backup" permission on all ControlPlanes in the same namespace. - properties: - labelSelectors: - description: |- - A resource is matched if any of the label selector matches. - In case when the list is empty, resource is matched too. - items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - names: - description: |- - A resource is selected if its metadata.name matches any of the provided names. - In case when the list is empty, resource is matched too. 
- items: - type: string - type: array - x-kubernetes-list-type: set - type: object - x-kubernetes-validations: - - message: either names or a labelSelector must be specified - rule: (has(self.labelSelectors) || has(self.names)) && (size(self.labelSelectors) - > 0 || size(self.names) > 0) - deletionPolicy: - default: Orphan - description: DeletionPolicy is the policy for the backup. - enum: - - Orphan - - Delete - type: string - excludedResources: - description: |- - ExcludedResources is a slice of resource names that are not - included in the backup. Used to filter the included extra resources. - items: - type: string - type: array - x-kubernetes-list-type: set - ttl: - description: |- - TTL is the time to live for the backup. After this time, the backup - will be eligible for garbage collection. If not set, the backup will - not be garbage collected. - type: string - useOwnerReferencesInBackup: - description: |- - UseOwnerReferencesBackup specifies whether an ownership chain should be - established between this resource and the Backup it creates. - If set to true, the Backup will be garbage collected when this resource - is deleted. - type: boolean - required: - - configRef - - controlPlaneSelector - type: object - x-kubernetes-validations: - - message: shared backup ControlPlane selectors can not be changed after - creation - rule: self.controlPlaneSelector == oldSelf.controlPlaneSelector - - message: shared backup excluded resources can not be changed after creation - rule: (!has(self.excludedResources) && !has(oldSelf.excludedResources)) - || self.excludedResources == oldSelf.excludedResources - status: - description: SharedBackupStatus represents the observed state of a SharedBackup. - properties: - completed: - description: Completed is the list of ControlPlanes for which the - backup completed successfully. - items: - type: string - type: array - x-kubernetes-list-type: set - conditions: - description: Conditions of the resource. - items: - description: A Condition that may apply to a resource. - properties: - lastTransitionTime: - description: |- - LastTransitionTime is the last time this condition transitioned from one - status to another. - format: date-time - type: string - message: - description: |- - A Message containing details about this condition's last transition from - one status to another, if any. - type: string - observedGeneration: - description: |- - ObservedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - type: integer - reason: - description: A Reason for this condition's last transition from - one status to another. - type: string - status: - description: Status of this condition; is it currently True, - False, or Unknown? - type: string - type: - description: |- - Type of this condition. At most one of each condition type may apply to - a resource at any point in time. - type: string - required: - - lastTransitionTime - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - failed: - description: Failed is the list of ControlPlanes for which the backup - failed. 
- items: - type: string - type: array - x-kubernetes-list-type: set - observedGeneration: - description: |- - ObservedGeneration is the latest metadata.generation - which resulted in either a ready state, or stalled due to error - it can not recover from without human intervention. - format: int64 - type: integer - phase: - default: Pending - description: Phase represents the current phase of the SharedBackup. - enum: - - Pending - - InProgress - - Failed - - Completed - type: string - selectedControlPlanes: - description: SelectedControlPlanes represents the names of the selected - ControlPlanes. - items: - type: string - type: array - x-kubernetes-list-type: set - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} - - additionalPrinterColumns: - - jsonPath: .metadata.annotations.sharedbackup\.internal\.spaces\.upbound\.io/completed - name: Completed - type: string - - jsonPath: .metadata.annotations.sharedbackup\.internal\.spaces\.upbound\.io/failed - name: Failed - type: string - - jsonPath: .metadata.annotations.sharedbackup\.internal\.spaces\.upbound\.io/provisioned - name: Provisioned - type: string - - jsonPath: .status.phase - name: Phase - type: string - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - name: v1beta1 - schema: - openAPIV3Schema: - description: SharedBackup defines a backup over a set of ControlPlanes. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: SharedBackupSpec defines a backup over a set of ControlPlanes. - properties: - configRef: - description: |- - ConfigRef is a reference to the backup configuration. - ApiGroup is optional and defaults to "spaces.upbound.io". - Kind is required, and the only supported value is "SharedBackupConfig" at - the moment. - Name is required. - properties: - apiGroup: - description: APIGroup is the group for the resource being referenced. - type: string - kind: - description: Kind is the type of resource being referenced. - minLength: 1 - type: string - name: - description: Name is the name of resource being referenced. - minLength: 1 - type: string - type: object - x-kubernetes-map-type: atomic - x-kubernetes-validations: - - message: backup config ref must be a reference to a SharedBackupConfig - rule: (!has(self.apiGroup) || self.apiGroup == 'spaces.upbound.io') - && self.kind == 'SharedBackupConfig' - - message: backup config ref must have a name - rule: size(self.name) > 0 - controlPlaneSelector: - description: |- - ControlPlaneSelector defines the selector for ControlPlanes to backup. - Requires "backup" permission on all ControlPlanes in the same namespace. - properties: - labelSelectors: - description: |- - A resource is matched if any of the label selector matches. - In case when the list is empty, resource is matched too. 
- items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - names: - description: |- - A resource is selected if its metadata.name matches any of the provided names. - In case when the list is empty, resource is matched too. - items: - type: string - type: array - x-kubernetes-list-type: set - type: object - x-kubernetes-validations: - - message: either names or a labelSelector must be specified - rule: (has(self.labelSelectors) || has(self.names)) && (size(self.labelSelectors) - > 0 || size(self.names) > 0) - deletionPolicy: - default: Orphan - description: DeletionPolicy is the policy for the backup. - enum: - - Orphan - - Delete - type: string - excludedResources: - description: |- - ExcludedResources is a slice of resource names that are not - included in the backup. Used to filter the included extra resources. - items: - type: string - type: array - x-kubernetes-list-type: set - ttl: - description: |- - TTL is the time to live for the backup. After this time, the backup - will be eligible for garbage collection. If not set, the backup will - not be garbage collected. - type: string - useOwnerReferencesInBackup: - description: |- - UseOwnerReferencesBackup specifies whether an ownership chain should be - established between this resource and the Backup it creates. - If set to true, the Backup will be garbage collected when this resource - is deleted. - type: boolean - required: - - configRef - - controlPlaneSelector - type: object - x-kubernetes-validations: - - message: shared backup ControlPlane selectors can not be changed after - creation - rule: self.controlPlaneSelector == oldSelf.controlPlaneSelector - - message: shared backup excluded resources can not be changed after creation - rule: (!has(self.excludedResources) && !has(oldSelf.excludedResources)) - || self.excludedResources == oldSelf.excludedResources - status: - description: SharedBackupStatus represents the observed state of a SharedBackup. 
- properties: - completed: - description: Completed is the list of ControlPlanes for which the - backup completed successfully. - items: - type: string - type: array - x-kubernetes-list-type: set - conditions: - description: Conditions of the resource. - items: - description: A Condition that may apply to a resource. - properties: - lastTransitionTime: - description: |- - LastTransitionTime is the last time this condition transitioned from one - status to another. - format: date-time - type: string - message: - description: |- - A Message containing details about this condition's last transition from - one status to another, if any. - type: string - observedGeneration: - description: |- - ObservedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - type: integer - reason: - description: A Reason for this condition's last transition from - one status to another. - type: string - status: - description: Status of this condition; is it currently True, - False, or Unknown? - type: string - type: - description: |- - Type of this condition. At most one of each condition type may apply to - a resource at any point in time. - type: string - required: - - lastTransitionTime - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - failed: - description: Failed is the list of ControlPlanes for which the backup - failed. - items: - type: string - type: array - x-kubernetes-list-type: set - observedGeneration: - description: |- - ObservedGeneration is the latest metadata.generation - which resulted in either a ready state, or stalled due to error - it can not recover from without human intervention. - format: int64 - type: integer - phase: - default: Pending - description: Phase represents the current phase of the SharedBackup. - enum: - - Pending - - InProgress - - Failed - - Completed - type: string - selectedControlPlanes: - description: SelectedControlPlanes represents the names of the selected - ControlPlanes. 
- items: - type: string - type: array - x-kubernetes-list-type: set - type: object - required: - - spec - type: object - served: true - storage: false - subresources: - status: {} diff --git a/static/crds/space/v1.12/spaces.upbound.io_sharedbackupschedules.yaml b/static/crds/space/v1.12/spaces.upbound.io_sharedbackupschedules.yaml deleted file mode 100644 index 541be0aee..000000000 --- a/static/crds/space/v1.12/spaces.upbound.io_sharedbackupschedules.yaml +++ /dev/null @@ -1,528 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.3 - name: sharedbackupschedules.spaces.upbound.io -spec: - group: spaces.upbound.io - names: - categories: - - spaces - kind: SharedBackupSchedule - listKind: SharedBackupScheduleList - plural: sharedbackupschedules - singular: sharedbackupschedule - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .spec.schedule - name: Schedule - type: string - - jsonPath: .spec.suspend - name: Suspended - type: boolean - - jsonPath: .metadata.annotations.sharedbackupschedule\.internal\.spaces\.upbound\.io/provisioned-total - name: Provisioned - type: string - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - name: v1alpha1 - schema: - openAPIV3Schema: - description: |- - SharedBackupSchedule defines a schedule for SharedBackup on a set of - ControlPlanes. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: SharedBackupScheduleSpec defines the desired state of a SharedBackupSchedule. - properties: - configRef: - description: |- - ConfigRef is a reference to the backup configuration. - ApiGroup is optional and defaults to "spaces.upbound.io". - Kind is required, and the only supported value is "SharedBackupConfig" at - the moment. - Name is required. - properties: - apiGroup: - description: APIGroup is the group for the resource being referenced. - type: string - kind: - description: Kind is the type of resource being referenced. - minLength: 1 - type: string - name: - description: Name is the name of resource being referenced. - minLength: 1 - type: string - type: object - x-kubernetes-map-type: atomic - x-kubernetes-validations: - - message: backup config ref must be a reference to a SharedBackupConfig - rule: (!has(self.apiGroup) || self.apiGroup == 'spaces.upbound.io') - && self.kind == 'SharedBackupConfig' - - message: backup config ref must have a name - rule: size(self.name) > 0 - controlPlaneSelector: - description: |- - ControlPlaneSelector defines the selector for ControlPlanes to backup. - Requires "backup" permission on all ControlPlanes in the same namespace. - properties: - labelSelectors: - description: |- - A resource is matched if any of the label selector matches. - In case when the list is empty, resource is matched too. 
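Tying the two preceding schemas together: an illustrative SharedBackup (hypothetical names again) that references the SharedBackupConfig sketched earlier and selects control planes by label. Note the CEL rules above: the control plane selector and excluded resources cannot be changed after creation.

```yaml
apiVersion: spaces.upbound.io/v1alpha1
kind: SharedBackup
metadata:
  name: prod-adhoc-backup
  namespace: default
spec:
  configRef:
    # apiGroup defaults to "spaces.upbound.io"; kind must be SharedBackupConfig.
    kind: SharedBackupConfig
    name: default-backup-config
  controlPlaneSelector:
    # Immutable after creation; either names or labelSelectors must be non-empty.
    labelSelectors:
      - matchLabels:
          tier: prod            # hypothetical label
  deletionPolicy: Orphan        # enum: Orphan (default), Delete
  ttl: 720h                     # eligible for garbage collection after 30 days
```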
- items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - names: - description: |- - A resource is selected if its metadata.name matches any of the provided names. - In case when the list is empty, resource is matched too. - items: - type: string - type: array - x-kubernetes-list-type: set - type: object - x-kubernetes-validations: - - message: either names or a labelSelector must be specified - rule: (has(self.labelSelectors) || has(self.names)) && (size(self.labelSelectors) - > 0 || size(self.names) > 0) - deletionPolicy: - default: Orphan - description: DeletionPolicy is the policy for the backup. - enum: - - Orphan - - Delete - type: string - excludedResources: - description: |- - ExcludedResources is a slice of resource names that are not - included in the backup. Used to filter the included extra resources. - items: - type: string - type: array - x-kubernetes-list-type: set - schedule: - description: Schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. - minLength: 1 - type: string - suspend: - description: |- - Suspend specifies whether the schedule is suspended. If true, no - Backups will be created, but running backups will be allowed to - complete. - type: boolean - ttl: - description: |- - TTL is the time to live for the backup. After this time, the backup - will be eligible for garbage collection. If not set, the backup will - not be garbage collected. - type: string - useOwnerReferencesInBackup: - description: |- - UseOwnerReferencesBackup specifies whether an ownership chain should be - established between this resource and the Backup it creates. - If set to true, the Backup will be garbage collected when this resource - is deleted. - type: boolean - required: - - configRef - - controlPlaneSelector - - schedule - type: object - status: - description: SharedBackupScheduleStatus represents the observed state - of a SharedBackupSchedule. - properties: - conditions: - description: Conditions of the resource. 
- items: - description: A Condition that may apply to a resource. - properties: - lastTransitionTime: - description: |- - LastTransitionTime is the last time this condition transitioned from one - status to another. - format: date-time - type: string - message: - description: |- - A Message containing details about this condition's last transition from - one status to another, if any. - type: string - observedGeneration: - description: |- - ObservedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - type: integer - reason: - description: A Reason for this condition's last transition from - one status to another. - type: string - status: - description: Status of this condition; is it currently True, - False, or Unknown? - type: string - type: - description: |- - Type of this condition. At most one of each condition type may apply to - a resource at any point in time. - type: string - required: - - lastTransitionTime - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - observedGeneration: - description: |- - ObservedGeneration is the latest metadata.generation - which resulted in either a ready state, or stalled due to error - it can not recover from without human intervention. - format: int64 - type: integer - selectedControlPlanes: - description: |- - SelectedControlPlanes is the list of ControlPlanes that are selected - for backup. - items: - type: string - type: array - x-kubernetes-list-type: set - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} - - additionalPrinterColumns: - - jsonPath: .spec.schedule - name: Schedule - type: string - - jsonPath: .spec.suspend - name: Suspended - type: boolean - - jsonPath: .metadata.annotations.sharedbackupschedule\.internal\.spaces\.upbound\.io/provisioned-total - name: Provisioned - type: string - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - name: v1beta1 - schema: - openAPIV3Schema: - description: |- - SharedBackupSchedule defines a schedule for SharedBackup on a set of - ControlPlanes. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: SharedBackupScheduleSpec defines the desired state of a SharedBackupSchedule. - properties: - configRef: - description: |- - ConfigRef is a reference to the backup configuration. - ApiGroup is optional and defaults to "spaces.upbound.io". - Kind is required, and the only supported value is "SharedBackupConfig" at - the moment. - Name is required. 
- properties: - apiGroup: - description: APIGroup is the group for the resource being referenced. - type: string - kind: - description: Kind is the type of resource being referenced. - minLength: 1 - type: string - name: - description: Name is the name of resource being referenced. - minLength: 1 - type: string - type: object - x-kubernetes-map-type: atomic - x-kubernetes-validations: - - message: backup config ref must be a reference to a SharedBackupConfig - rule: (!has(self.apiGroup) || self.apiGroup == 'spaces.upbound.io') - && self.kind == 'SharedBackupConfig' - - message: backup config ref must have a name - rule: size(self.name) > 0 - controlPlaneSelector: - description: |- - ControlPlaneSelector defines the selector for ControlPlanes to backup. - Requires "backup" permission on all ControlPlanes in the same namespace. - properties: - labelSelectors: - description: |- - A resource is matched if any of the label selector matches. - In case when the list is empty, resource is matched too. - items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - names: - description: |- - A resource is selected if its metadata.name matches any of the provided names. - In case when the list is empty, resource is matched too. - items: - type: string - type: array - x-kubernetes-list-type: set - type: object - x-kubernetes-validations: - - message: either names or a labelSelector must be specified - rule: (has(self.labelSelectors) || has(self.names)) && (size(self.labelSelectors) - > 0 || size(self.names) > 0) - deletionPolicy: - default: Orphan - description: DeletionPolicy is the policy for the backup. - enum: - - Orphan - - Delete - type: string - excludedResources: - description: |- - ExcludedResources is a slice of resource names that are not - included in the backup. Used to filter the included extra resources. 
- items: - type: string - type: array - x-kubernetes-list-type: set - schedule: - description: Schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. - minLength: 1 - type: string - suspend: - description: |- - Suspend specifies whether the schedule is suspended. If true, no - Backups will be created, but running backups will be allowed to - complete. - type: boolean - ttl: - description: |- - TTL is the time to live for the backup. After this time, the backup - will be eligible for garbage collection. If not set, the backup will - not be garbage collected. - type: string - useOwnerReferencesInBackup: - description: |- - UseOwnerReferencesBackup specifies whether an ownership chain should be - established between this resource and the Backup it creates. - If set to true, the Backup will be garbage collected when this resource - is deleted. - type: boolean - required: - - configRef - - controlPlaneSelector - - schedule - type: object - status: - description: SharedBackupScheduleStatus represents the observed state - of a SharedBackupSchedule. - properties: - conditions: - description: Conditions of the resource. - items: - description: A Condition that may apply to a resource. - properties: - lastTransitionTime: - description: |- - LastTransitionTime is the last time this condition transitioned from one - status to another. - format: date-time - type: string - message: - description: |- - A Message containing details about this condition's last transition from - one status to another, if any. - type: string - observedGeneration: - description: |- - ObservedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - type: integer - reason: - description: A Reason for this condition's last transition from - one status to another. - type: string - status: - description: Status of this condition; is it currently True, - False, or Unknown? - type: string - type: - description: |- - Type of this condition. At most one of each condition type may apply to - a resource at any point in time. - type: string - required: - - lastTransitionTime - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - observedGeneration: - description: |- - ObservedGeneration is the latest metadata.generation - which resulted in either a ready state, or stalled due to error - it can not recover from without human intervention. - format: int64 - type: integer - selectedControlPlanes: - description: |- - SelectedControlPlanes is the list of ControlPlanes that are selected - for backup. 
- items: - type: string - type: array - x-kubernetes-list-type: set - type: object - required: - - spec - type: object - served: true - storage: false - subresources: - status: {} diff --git a/static/crds/space/v1.12/spaces.upbound.io_sharedexternalsecrets.yaml b/static/crds/space/v1.12/spaces.upbound.io_sharedexternalsecrets.yaml deleted file mode 100644 index 00c2dd3ab..000000000 --- a/static/crds/space/v1.12/spaces.upbound.io_sharedexternalsecrets.yaml +++ /dev/null @@ -1,745 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.3 - name: sharedexternalsecrets.spaces.upbound.io -spec: - group: spaces.upbound.io - names: - categories: - - externalsecrets - kind: SharedExternalSecret - listKind: SharedExternalSecretList - plural: sharedexternalsecrets - shortNames: - - ses - singular: sharedexternalsecret - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .metadata.annotations.sharedexternalsecrets\.internal\.spaces\.upbound\.io/provisioned-total - name: Provisioned - type: string - - jsonPath: .metadata.creationTimestamp - name: AGE - type: date - name: v1alpha1 - schema: - openAPIV3Schema: - description: |- - SharedExternalSecret specifies a shared ExternalSecret projected into the specified - ControlPlanes of the same namespace as ClusterExternalSecret and with that - propagated into the specified namespaces. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: SharedExternalSecretSpec defines the desired state of SharedExternalSecret. - properties: - controlPlaneSelector: - description: |- - The secret is projected only to control planes - matching the provided selector. Either names or a labelSelector must be specified. - properties: - labelSelectors: - description: |- - A resource is matched if any of the label selector matches. - In case when the list is empty, resource is matched too. - items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. 
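A SharedBackupSchedule, whose schema appears above, is the recurring counterpart of the SharedBackup shown earlier; structurally it only adds `schedule` (cron format) and `suspend`. A sketch with hypothetical names:

```yaml
apiVersion: spaces.upbound.io/v1alpha1
kind: SharedBackupSchedule
metadata:
  name: nightly
  namespace: default
spec:
  schedule: "0 2 * * *"         # cron format: every day at 02:00
  suspend: false                # true pauses new backups; running ones finish
  configRef:
    kind: SharedBackupConfig
    name: default-backup-config
  controlPlaneSelector:
    names:                      # selecting by explicit names instead of labels
      - ctp1
      - ctp2
  ttl: 168h                     # keep each scheduled backup for 7 days
```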
If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - names: - description: |- - A resource is selected if its metadata.name matches any of the provided names. - In case when the list is empty, resource is matched too. - items: - type: string - type: array - x-kubernetes-list-type: set - type: object - x-kubernetes-validations: - - message: either names or a labelSelector must be specified - rule: (has(self.labelSelectors) || has(self.names)) && (size(self.labelSelectors) - > 0 || size(self.names) > 0) - externalSecretMetadata: - description: The metadata of the secret store to be created. - properties: - annotations: - additionalProperties: - type: string - description: Annotations that are set on projected resource. - type: object - labels: - additionalProperties: - type: string - description: Labels that are set on projected resource. - type: object - type: object - externalSecretName: - description: |- - ExternalSecretName is the name to use when creating external secret within a control plane. - optional, if not set, SharedExternalSecret name will be used. - When set, it is immutable. - maxLength: 253 - minLength: 1 - type: string - x-kubernetes-validations: - - message: externalSecretName is immutable - rule: self == oldSelf - externalSecretSpec: - description: The spec for the ExternalSecrets to be created. - properties: - data: - description: Data defines the connection between the Kubernetes - Secret keys and the Provider data - items: - description: ExternalSecretData defines the connection between - the Kubernetes Secret key (spec.data.) and the Provider - data. - properties: - remoteRef: - description: |- - RemoteRef points to the remote secret and defines - which secret (version/property/..) to fetch. - properties: - conversionStrategy: - default: Default - description: Used to define a conversion Strategy - enum: - - Default - - Unicode - type: string - decodingStrategy: - default: None - description: Used to define a decoding Strategy - enum: - - Auto - - Base64 - - Base64URL - - None - type: string - key: - description: Key is the key used in the Provider, mandatory - type: string - metadataPolicy: - default: None - description: Policy for fetching tags/labels from provider - secrets, possible options are Fetch, None. Defaults - to None - enum: - - None - - Fetch - type: string - property: - description: Used to select a specific property of the - Provider value (if a map), if supported - type: string - version: - description: Used to select a specific version of the - Provider value, if supported - type: string - required: - - key - type: object - secretKey: - description: |- - SecretKey defines the key in which the controller stores - the value. 
This is the key in the Kind=Secret - type: string - sourceRef: - description: |- - SourceRef allows you to override the source - from which the value will pulled from. - maxProperties: 1 - properties: - generatorRef: - description: |- - GeneratorRef points to a generator custom resource. - - Deprecated: The generatorRef is not implemented in .data[]. - this will be removed with v1. - properties: - apiVersion: - default: generators.external-secrets.io/v1alpha1 - description: Specify the apiVersion of the generator - resource - type: string - kind: - description: Specify the Kind of the resource, e.g. - Password, ACRAccessToken etc. - type: string - name: - description: Specify the name of the generator resource - type: string - required: - - kind - - name - type: object - storeRef: - description: SecretStoreRef defines which SecretStore - to fetch the ExternalSecret data. - properties: - kind: - description: |- - Kind of the SecretStore resource (SecretStore or ClusterSecretStore) - Defaults to `SecretStore` - type: string - name: - description: Name of the SecretStore resource - type: string - required: - - name - type: object - type: object - required: - - remoteRef - - secretKey - type: object - type: array - dataFrom: - description: |- - DataFrom is used to fetch all properties from a specific Provider data - If multiple entries are specified, the Secret keys are merged in the specified order - items: - properties: - extract: - description: |- - Used to extract multiple key/value pairs from one secret - Note: Extract does not support sourceRef.Generator or sourceRef.GeneratorRef. - properties: - conversionStrategy: - default: Default - description: Used to define a conversion Strategy - enum: - - Default - - Unicode - type: string - decodingStrategy: - default: None - description: Used to define a decoding Strategy - enum: - - Auto - - Base64 - - Base64URL - - None - type: string - key: - description: Key is the key used in the Provider, mandatory - type: string - metadataPolicy: - default: None - description: Policy for fetching tags/labels from provider - secrets, possible options are Fetch, None. Defaults - to None - enum: - - None - - Fetch - type: string - property: - description: Used to select a specific property of the - Provider value (if a map), if supported - type: string - version: - description: Used to select a specific version of the - Provider value, if supported - type: string - required: - - key - type: object - find: - description: |- - Used to find secrets based on tags or regular expressions - Note: Find does not support sourceRef.Generator or sourceRef.GeneratorRef. - properties: - conversionStrategy: - default: Default - description: Used to define a conversion Strategy - enum: - - Default - - Unicode - type: string - decodingStrategy: - default: None - description: Used to define a decoding Strategy - enum: - - Auto - - Base64 - - Base64URL - - None - type: string - name: - description: Finds secrets based on the name. - properties: - regexp: - description: Finds secrets base - type: string - type: object - path: - description: A root path to start the find operations. - type: string - tags: - additionalProperties: - type: string - description: Find secrets based on tags. - type: object - type: object - rewrite: - description: |- - Used to rewrite secret Keys after getting them from the secret Provider - Multiple Rewrite operations can be provided. 
They are applied in a layered order (first to last) - items: - properties: - regexp: - description: |- - Used to rewrite with regular expressions. - The resulting key will be the output of a regexp.ReplaceAll operation. - properties: - source: - description: Used to define the regular expression - of a re.Compiler. - type: string - target: - description: Used to define the target pattern - of a ReplaceAll operation. - type: string - required: - - source - - target - type: object - transform: - description: |- - Used to apply string transformation on the secrets. - The resulting key will be the output of the template applied by the operation. - properties: - template: - description: |- - Used to define the template to apply on the secret name. - `.value ` will specify the secret name in the template. - type: string - required: - - template - type: object - type: object - type: array - sourceRef: - description: |- - SourceRef points to a store or generator - which contains secret values ready to use. - Use this in combination with Extract or Find pull values out of - a specific SecretStore. - When sourceRef points to a generator Extract or Find is not supported. - The generator returns a static map of values - maxProperties: 1 - properties: - generatorRef: - description: GeneratorRef points to a generator custom - resource. - properties: - apiVersion: - default: generators.external-secrets.io/v1alpha1 - description: Specify the apiVersion of the generator - resource - type: string - kind: - description: Specify the Kind of the resource, e.g. - Password, ACRAccessToken etc. - type: string - name: - description: Specify the name of the generator resource - type: string - required: - - kind - - name - type: object - storeRef: - description: SecretStoreRef defines which SecretStore - to fetch the ExternalSecret data. - properties: - kind: - description: |- - Kind of the SecretStore resource (SecretStore or ClusterSecretStore) - Defaults to `SecretStore` - type: string - name: - description: Name of the SecretStore resource - type: string - required: - - name - type: object - type: object - type: object - type: array - refreshInterval: - default: 1h - description: |- - RefreshInterval is the amount of time before the values are read again from the SecretStore provider - Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h" - May be set to zero to fetch and create it once. Defaults to 1h. - type: string - secretStoreRef: - description: SecretStoreRef defines which SecretStore to fetch - the ExternalSecret data. - properties: - kind: - description: |- - Kind of the SecretStore resource (SecretStore or ClusterSecretStore) - Defaults to `SecretStore` - type: string - name: - description: Name of the SecretStore resource - type: string - required: - - name - type: object - target: - default: - creationPolicy: Owner - deletionPolicy: Retain - description: |- - ExternalSecretTarget defines the Kubernetes Secret to be created - There can be only one target per ExternalSecret. 
- properties: - creationPolicy: - default: Owner - description: |- - CreationPolicy defines rules on how to create the resulting Secret - Defaults to 'Owner' - enum: - - Owner - - Orphan - - Merge - - None - type: string - deletionPolicy: - default: Retain - description: |- - DeletionPolicy defines rules on how to delete the resulting Secret - Defaults to 'Retain' - enum: - - Delete - - Merge - - Retain - type: string - immutable: - description: Immutable defines if the final secret will be - immutable - type: boolean - name: - description: |- - Name defines the name of the Secret resource to be managed - This field is immutable - Defaults to the .metadata.name of the ExternalSecret resource - type: string - template: - description: Template defines a blueprint for the created - Secret resource. - properties: - data: - additionalProperties: - type: string - type: object - engineVersion: - default: v2 - description: |- - EngineVersion specifies the template engine version - that should be used to compile/execute the - template specified in .data and .templateFrom[]. - enum: - - v1 - - v2 - type: string - mergePolicy: - default: Replace - enum: - - Replace - - Merge - type: string - metadata: - description: ExternalSecretTemplateMetadata defines metadata - fields for the Secret blueprint. - properties: - annotations: - additionalProperties: - type: string - type: object - labels: - additionalProperties: - type: string - type: object - type: object - templateFrom: - items: - properties: - configMap: - properties: - items: - items: - properties: - key: - type: string - templateAs: - default: Values - enum: - - Values - - KeysAndValues - type: string - required: - - key - type: object - type: array - name: - type: string - required: - - items - - name - type: object - literal: - type: string - secret: - properties: - items: - items: - properties: - key: - type: string - templateAs: - default: Values - enum: - - Values - - KeysAndValues - type: string - required: - - key - type: object - type: array - name: - type: string - required: - - items - - name - type: object - target: - default: Data - enum: - - Data - - Annotations - - Labels - type: string - type: object - type: array - type: - type: string - type: object - type: object - type: object - namespaceSelector: - description: |- - The projected secret can be consumed - only within namespaces matching the provided selector. - Either names or a labelSelector must be specified. - properties: - labelSelectors: - description: |- - A resource is matched if any of the label selector matches. - In case when the list is empty, resource is matched too. - items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. 
If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - names: - description: |- - A resource is selected if its metadata.name matches any of the provided names. - In case when the list is empty, resource is matched too. - items: - type: string - type: array - x-kubernetes-list-type: set - type: object - x-kubernetes-validations: - - message: either names or a labelSelector must be specified - rule: (has(self.labelSelectors) || has(self.names)) && (size(self.labelSelectors) - > 0 || size(self.names) > 0) - refreshTime: - description: Used to configure secret refresh interval in seconds. - type: string - required: - - controlPlaneSelector - - externalSecretSpec - - namespaceSelector - type: object - x-kubernetes-validations: - - message: externalSecretName is immutable - rule: has(self.externalSecretName) == has(oldSelf.externalSecretName) - status: - description: SharedExternalSecretStatus defines the observed state of - the ExternalSecret. - properties: - failed: - description: list of provisioning failures. - items: - description: |- - SharedExternalSecretProvisioningFailure describes a external secret provisioning - failure in a specific control plane. - properties: - conditions: - description: List of conditions. - items: - properties: - message: - type: string - status: - type: string - type: - type: string - required: - - status - - type - type: object - type: array - controlPlane: - description: ControlPlane name where the failure occurred. - type: string - required: - - controlPlane - type: object - type: array - x-kubernetes-list-map-keys: - - controlPlane - x-kubernetes-list-type: map - observedGeneration: - description: observed resource generation. - format: int64 - type: integer - provisioned: - description: List of successfully provisioned targets. - items: - description: SharedExternalSecretProvisioningSuccess defines external - secret provisioning success. - properties: - controlPlane: - description: ControlPlane name where the external secret got - successfully projected. 
- type: string - required: - - controlPlane - type: object - type: array - x-kubernetes-list-map-keys: - - controlPlane - x-kubernetes-list-type: map - type: object - type: object - served: true - storage: true - subresources: - status: {} diff --git a/static/crds/space/v1.12/spaces.upbound.io_sharedsecretstores.yaml b/static/crds/space/v1.12/spaces.upbound.io_sharedsecretstores.yaml deleted file mode 100644 index 499a2208f..000000000 --- a/static/crds/space/v1.12/spaces.upbound.io_sharedsecretstores.yaml +++ /dev/null @@ -1,2702 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.3 - name: sharedsecretstores.spaces.upbound.io -spec: - group: spaces.upbound.io - names: - categories: - - externalsecrets - kind: SharedSecretStore - listKind: SharedSecretStoreList - plural: sharedsecretstores - shortNames: - - sss - singular: sharedsecretstore - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .metadata.annotations.sharedsecretstores\.internal\.spaces\.upbound\.io/provisioned-total - name: Provisioned - type: string - - jsonPath: .metadata.creationTimestamp - name: AGE - type: date - name: v1alpha1 - schema: - openAPIV3Schema: - description: |- - SharedSecretStore represents a shared SecretStore projected as ClusterSecretStore - into matching ControlPlanes in the same namespace. Once projected into a ControlPlane, - it can be referenced from ExternalSecret instances, as part of `storeRef` fields. - The secret store configuration including referenced credential are not leaked into the - ControlPlanes and in that sense can be called secure as they are invisible to the - ControlPlane workloads. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: SharedSecretStoreSpec defines the desired state of SecretStore. - properties: - controlPlaneSelector: - description: |- - The store is projected only to control planes - matching the provided selector. Either names or a labelSelector must be specified. - properties: - labelSelectors: - description: |- - A resource is matched if any of the label selector matches. - In case when the list is empty, resource is matched too. - items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. 
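To make the SharedExternalSecret schema above concrete: the sketch below (all names hypothetical) projects an ExternalSecret into matching control planes that pulls a single property from a secret store. The `secretStoreRef` kind is `ClusterSecretStore` because, per the description above, a SharedSecretStore is projected into control planes as a ClusterSecretStore.

```yaml
apiVersion: spaces.upbound.io/v1alpha1
kind: SharedExternalSecret
metadata:
  name: db-credentials
  namespace: default
spec:
  controlPlaneSelector:
    labelSelectors:
      - matchLabels:
          tier: prod
  namespaceSelector:            # namespaces inside each control plane
    names:
      - crossplane-system
  externalSecretSpec:
    refreshInterval: 1h         # schema default
    secretStoreRef:
      kind: ClusterSecretStore
      name: shared-store        # hypothetical; matches the sketch further below
    target:
      name: db-credentials      # name of the Kubernetes Secret to create
    data:
      - secretKey: password     # key in the resulting Secret
        remoteRef:
          key: prod/db          # key used in the provider (mandatory)
          property: password    # select one property of the provider value
```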
- type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - names: - description: |- - A resource is selected if its metadata.name matches any of the provided names. - In case when the list is empty, resource is matched too. - items: - type: string - type: array - x-kubernetes-list-type: set - type: object - x-kubernetes-validations: - - message: either names or a labelSelector must be specified - rule: (has(self.labelSelectors) || has(self.names)) && (size(self.labelSelectors) - > 0 || size(self.names) > 0) - namespaceSelector: - description: |- - The projected secret store can be consumed - only within namespaces matching the provided selector. - Either names or a labelSelector must be specified. - properties: - labelSelectors: - description: |- - A resource is matched if any of the label selector matches. - In case when the list is empty, resource is matched too. - items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. 
- type: object - type: object - x-kubernetes-map-type: atomic - type: array - names: - description: |- - A resource is selected if its metadata.name matches any of the provided names. - In case when the list is empty, resource is matched too. - items: - type: string - type: array - x-kubernetes-list-type: set - type: object - x-kubernetes-validations: - - message: either names or a labelSelector must be specified - rule: (has(self.labelSelectors) || has(self.names)) && (size(self.labelSelectors) - > 0 || size(self.names) > 0) - provider: - description: Used to configure the provider. Only one provider may - be set. - maxProperties: 1 - minProperties: 1 - properties: - akeyless: - description: Akeyless configures this store to sync secrets using - Akeyless Vault provider - properties: - akeylessGWApiURL: - description: Akeyless GW API Url from which the secrets to - be fetched from. - type: string - authSecretRef: - description: Auth configures how the operator authenticates - with Akeyless. - properties: - kubernetesAuth: - description: |- - Kubernetes authenticates with Akeyless by passing the ServiceAccount - token stored in the named Secret resource. - properties: - accessID: - description: the Akeyless Kubernetes auth-method access-id - type: string - k8sConfName: - description: Kubernetes-auth configuration name in - Akeyless-Gateway - type: string - secretRef: - description: |- - Optional secret field containing a Kubernetes ServiceAccount JWT used - for authenticating with Akeyless. If a name is specified without a key, - `token` is the default. If one is not specified, the one bound to - the controller will be used. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - serviceAccountRef: - description: |- - Optional service account field containing the name of a kubernetes ServiceAccount. - If the service account is specified, the service account secret token JWT will be used - for authenticating with Akeyless. If the service account selector is not supplied, - the secretRef will be used instead. - properties: - audiences: - description: |- - Audience specifies the `aud` claim for the service account token - If the service account uses a well-known annotation for e.g. IRSA or GCP Workload Identity - then this audiences will be appended to the list - items: - type: string - type: array - name: - description: The name of the ServiceAccount resource - being referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - required: - - name - type: object - required: - - accessID - - k8sConfName - type: object - secretRef: - description: |- - Reference to a Secret that contains the details - to authenticate with Akeyless. - properties: - accessID: - description: The SecretAccessID is used for authentication - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. 
Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - accessType: - description: |- - A reference to a specific 'key' within a Secret resource, - In some instances, `key` is a required field. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - accessTypeParam: - description: |- - A reference to a specific 'key' within a Secret resource, - In some instances, `key` is a required field. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - type: object - type: object - caBundle: - description: |- - PEM/base64 encoded CA bundle used to validate Akeyless Gateway certificate. Only used - if the AkeylessGWApiURL URL is using HTTPS protocol. If not set the system root certificates - are used to validate the TLS connection. - format: byte - type: string - caProvider: - description: The provider for the CA bundle to use to validate - Akeyless Gateway certificate. - properties: - key: - description: The key where the CA certificate can be found - in the Secret or ConfigMap. - type: string - name: - description: The name of the object located at the provider - type. - type: string - namespace: - description: |- - The namespace the Provider type is in. - Can only be defined when used in a ClusterSecretStore. - type: string - type: - description: The type of provider to use such as "Secret", - or "ConfigMap". - enum: - - Secret - - ConfigMap - type: string - required: - - name - - type - type: object - required: - - akeylessGWApiURL - - authSecretRef - type: object - alibaba: - description: Alibaba configures this store to sync secrets using - Alibaba Cloud provider - properties: - auth: - description: AlibabaAuth contains a secretRef for credentials. - properties: - rrsa: - description: Authenticate against Alibaba using RRSA. - properties: - oidcProviderArn: - type: string - oidcTokenFilePath: - type: string - roleArn: - type: string - sessionName: - type: string - required: - - oidcProviderArn - - oidcTokenFilePath - - roleArn - - sessionName - type: object - secretRef: - description: AlibabaAuthSecretRef holds secret references - for Alibaba credentials. - properties: - accessKeyIDSecretRef: - description: The AccessKeyID is used for authentication - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. 
Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - accessKeySecretSecretRef: - description: The AccessKeySecret is used for authentication - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - required: - - accessKeyIDSecretRef - - accessKeySecretSecretRef - type: object - type: object - regionID: - description: Alibaba Region to be used for the provider - type: string - required: - - auth - - regionID - type: object - aws: - description: AWS configures this store to sync secrets using AWS - Secret Manager provider - properties: - additionalRoles: - description: AdditionalRoles is a chained list of Role ARNs - which the provider will sequentially assume before assuming - the Role - items: - type: string - type: array - auth: - description: |- - Auth defines the information necessary to authenticate against AWS - if not set aws sdk will infer credentials from your environment - see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials - properties: - jwt: - description: Authenticate against AWS using service account - tokens. - properties: - serviceAccountRef: - description: A reference to a ServiceAccount resource. - properties: - audiences: - description: |- - Audience specifies the `aud` claim for the service account token - If the service account uses a well-known annotation for e.g. IRSA or GCP Workload Identity - then this audiences will be appended to the list - items: - type: string - type: array - name: - description: The name of the ServiceAccount resource - being referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - required: - - name - type: object - type: object - secretRef: - description: |- - AWSAuthSecretRef holds secret references for AWS credentials - both AccessKeyID and SecretAccessKey must be defined in order to properly authenticate. - properties: - accessKeyIDSecretRef: - description: The AccessKeyID is used for authentication - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. 
- type: string - type: object - secretAccessKeySecretRef: - description: The SecretAccessKey is used for authentication - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - sessionTokenSecretRef: - description: |- - The SessionToken used for authentication - This must be defined if AccessKeyID and SecretAccessKey are temporary credentials - see: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - type: object - type: object - externalID: - description: AWS External ID set on assumed IAM roles - type: string - region: - description: AWS Region to be used for the provider - type: string - role: - description: Role is a Role ARN which the provider will assume - type: string - secretsManager: - description: SecretsManager defines how the provider behaves - when interacting with AWS SecretsManager - properties: - forceDeleteWithoutRecovery: - description: |- - Specifies whether to delete the secret without any recovery window. You - can't use both this parameter and RecoveryWindowInDays in the same call. - If you don't use either, then by default Secrets Manager uses a 30 day - recovery window. - see: https://docs.aws.amazon.com/secretsmanager/latest/apireference/API_DeleteSecret.html#SecretsManager-DeleteSecret-request-ForceDeleteWithoutRecovery - type: boolean - recoveryWindowInDays: - description: |- - The number of days from 7 to 30 that Secrets Manager waits before - permanently deleting the secret. You can't use both this parameter and - ForceDeleteWithoutRecovery in the same call. If you don't use either, - then by default Secrets Manager uses a 30 day recovery window. - see: https://docs.aws.amazon.com/secretsmanager/latest/apireference/API_DeleteSecret.html#SecretsManager-DeleteSecret-request-RecoveryWindowInDays - format: int64 - type: integer - type: object - service: - description: Service defines which service should be used - to fetch the secrets - enum: - - SecretsManager - - ParameterStore - type: string - sessionTags: - description: AWS STS assume role session tags - items: - properties: - key: - type: string - value: - type: string - required: - - key - - value - type: object - type: array - transitiveTagKeys: - description: AWS STS assume role transitive session tags. 
- Required when multiple rules are used with the provider - items: - type: string - type: array - required: - - region - - service - type: object - azurekv: - description: AzureKV configures this store to sync secrets using - Azure Key Vault provider - properties: - authSecretRef: - description: Auth configures how the operator authenticates - with Azure. Required for ServicePrincipal auth type. - properties: - clientId: - description: The Azure clientId of the service principle - used for authentication. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - clientSecret: - description: The Azure ClientSecret of the service principle - used for authentication. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - type: object - authType: - default: ServicePrincipal - description: |- - Auth type defines how to authenticate to the keyvault service. - Valid values are: - - "ServicePrincipal" (default): Using a service principal (tenantId, clientId, clientSecret) - - "ManagedIdentity": Using Managed Identity assigned to the pod (see aad-pod-identity) - enum: - - ServicePrincipal - - ManagedIdentity - - WorkloadIdentity - type: string - environmentType: - default: PublicCloud - description: |- - EnvironmentType specifies the Azure cloud environment endpoints to use for - connecting and authenticating with Azure. By default it points to the public cloud AAD endpoint. - The following endpoints are available, also see here: https://github.com/Azure/go-autorest/blob/main/autorest/azure/environments.go#L152 - PublicCloud, USGovernmentCloud, ChinaCloud, GermanCloud - enum: - - PublicCloud - - USGovernmentCloud - - ChinaCloud - - GermanCloud - type: string - identityId: - description: If multiple Managed Identity is assigned to the - pod, you can select the one to be used - type: string - serviceAccountRef: - description: |- - ServiceAccountRef specified the service account - that should be used when authenticating with WorkloadIdentity. - properties: - audiences: - description: |- - Audience specifies the `aud` claim for the service account token - If the service account uses a well-known annotation for e.g. IRSA or GCP Workload Identity - then this audiences will be appended to the list - items: - type: string - type: array - name: - description: The name of the ServiceAccount resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. 
- type: string - required: - - name - type: object - tenantId: - description: TenantID configures the Azure Tenant to send - requests to. Required for ServicePrincipal auth type. - type: string - vaultUrl: - description: Vault Url from which the secrets to be fetched - from. - type: string - required: - - vaultUrl - type: object - conjur: - description: Conjur configures this store to sync secrets using - conjur provider - properties: - auth: - properties: - apikey: - properties: - account: - type: string - apiKeyRef: - description: |- - A reference to a specific 'key' within a Secret resource, - In some instances, `key` is a required field. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - userRef: - description: |- - A reference to a specific 'key' within a Secret resource, - In some instances, `key` is a required field. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - required: - - account - - apiKeyRef - - userRef - type: object - jwt: - properties: - account: - type: string - secretRef: - description: |- - Optional SecretRef that refers to a key in a Secret resource containing JWT token to - authenticate with Conjur using the JWT authentication method. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - serviceAccountRef: - description: |- - Optional ServiceAccountRef specifies the Kubernetes service account for which to request - a token for with the `TokenRequest` API. - properties: - audiences: - description: |- - Audience specifies the `aud` claim for the service account token - If the service account uses a well-known annotation for e.g. IRSA or GCP Workload Identity - then this audiences will be appended to the list - items: - type: string - type: array - name: - description: The name of the ServiceAccount resource - being referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. 
- type: string - required: - - name - type: object - serviceID: - description: The conjur authn jwt webservice id - type: string - required: - - account - - serviceID - type: object - type: object - caBundle: - type: string - caProvider: - description: |- - Used to provide custom certificate authority (CA) certificates - for a secret store. The CAProvider points to a Secret or ConfigMap resource - that contains a PEM-encoded certificate. - properties: - key: - description: The key where the CA certificate can be found - in the Secret or ConfigMap. - type: string - name: - description: The name of the object located at the provider - type. - type: string - namespace: - description: |- - The namespace the Provider type is in. - Can only be defined when used in a ClusterSecretStore. - type: string - type: - description: The type of provider to use such as "Secret", - or "ConfigMap". - enum: - - Secret - - ConfigMap - type: string - required: - - name - - type - type: object - url: - type: string - required: - - auth - - url - type: object - delinea: - description: |- - Delinea DevOps Secrets Vault - https://docs.delinea.com/online-help/products/devops-secrets-vault/current - properties: - clientId: - description: ClientID is the non-secret part of the credential. - properties: - secretRef: - description: SecretRef references a key in a secret that - will be used as value. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - value: - description: Value can be specified directly to set a - value without using a secret. - type: string - type: object - clientSecret: - description: ClientSecret is the secret part of the credential. - properties: - secretRef: - description: SecretRef references a key in a secret that - will be used as value. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - value: - description: Value can be specified directly to set a - value without using a secret. - type: string - type: object - tenant: - description: Tenant is the chosen hostname / site name. - type: string - tld: - description: |- - TLD is based on the server location that was chosen during provisioning. - If unset, defaults to "com". - type: string - urlTemplate: - description: |- - URLTemplate - If unset, defaults to "https://%s.secretsvaultcloud.%s/v1/%s%s". 
- type: string - required: - - clientId - - clientSecret - - tenant - type: object - doppler: - description: Doppler configures this store to sync secrets using - the Doppler provider - properties: - auth: - description: Auth configures how the Operator authenticates - with the Doppler API - properties: - secretRef: - properties: - dopplerToken: - description: |- - The DopplerToken is used for authentication. - See https://docs.doppler.com/reference/api#authentication for auth token types. - The Key attribute defaults to dopplerToken if not specified. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - required: - - dopplerToken - type: object - required: - - secretRef - type: object - config: - description: Doppler config (required if not using a Service - Token) - type: string - format: - description: Format enables the downloading of secrets as - a file (string) - enum: - - json - - dotnet-json - - env - - yaml - - docker - type: string - nameTransformer: - description: Environment variable compatible name transforms - that change secret names to a different format - enum: - - upper-camel - - camel - - lower-snake - - tf-var - - dotnet-env - - lower-kebab - type: string - project: - description: Doppler project (required if not using a Service - Token) - type: string - required: - - auth - type: object - fake: - description: Fake configures a store with static key/value pairs - properties: - data: - items: - properties: - key: - type: string - value: - type: string - valueMap: - additionalProperties: - type: string - description: 'Deprecated: ValueMap is deprecated and - is intended to be removed in the future, use the `value` - field instead.' - type: object - version: - type: string - required: - - key - type: object - type: array - required: - - data - type: object - gcpsm: - description: GCPSM configures this store to sync secrets using - Google Cloud Platform Secret Manager provider - properties: - auth: - description: Auth defines the information necessary to authenticate - against GCP - properties: - secretRef: - properties: - secretAccessKeySecretRef: - description: The SecretAccessKey is used for authentication - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - type: object - workloadIdentity: - properties: - clusterLocation: - type: string - clusterName: - type: string - clusterProjectID: - type: string - serviceAccountRef: - description: A reference to a ServiceAccount resource. - properties: - audiences: - description: |- - Audience specifies the `aud` claim for the service account token - If the service account uses a well-known annotation for e.g. 
IRSA or GCP Workload Identity - then this audiences will be appended to the list - items: - type: string - type: array - name: - description: The name of the ServiceAccount resource - being referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - required: - - name - type: object - required: - - clusterLocation - - clusterName - - serviceAccountRef - type: object - type: object - projectID: - description: ProjectID project where secret is located - type: string - type: object - gitlab: - description: GitLab configures this store to sync secrets using - GitLab Variables provider - properties: - auth: - description: Auth configures how secret-manager authenticates - with a GitLab instance. - properties: - SecretRef: - properties: - accessToken: - description: AccessToken is used for authentication. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - type: object - required: - - SecretRef - type: object - environment: - description: Environment environment_scope of gitlab CI/CD - variables (Please see https://docs.gitlab.com/ee/ci/environments/#create-a-static-environment - on how to create environments) - type: string - groupIDs: - description: GroupIDs specify, which gitlab groups to pull - secrets from. Group secrets are read from left to right - followed by the project variables. - items: - type: string - type: array - inheritFromGroups: - description: InheritFromGroups specifies whether parent groups - should be discovered and checked for secrets. - type: boolean - projectID: - description: ProjectID specifies a project where secrets are - located. - type: string - url: - description: URL configures the GitLab instance URL. Defaults - to https://gitlab.com/. - type: string - required: - - auth - type: object - ibm: - description: IBM configures this store to sync secrets using IBM - Cloud provider - properties: - auth: - description: Auth configures how secret-manager authenticates - with the IBM secrets manager. - maxProperties: 1 - minProperties: 1 - properties: - containerAuth: - description: IBM Container-based auth with IAM Trusted - Profile. - properties: - iamEndpoint: - type: string - profile: - description: the IBM Trusted Profile - type: string - tokenLocation: - description: Location the token is mounted on the - pod - type: string - required: - - profile - type: object - secretRef: - properties: - secretApiKeySecretRef: - description: The SecretAccessKey is used for authentication - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. 
- type: string - type: object - type: object - type: object - serviceUrl: - description: ServiceURL is the Endpoint URL that is specific - to the Secrets Manager service instance - type: string - required: - - auth - type: object - keepersecurity: - description: KeeperSecurity configures this store to sync secrets - using the KeeperSecurity provider - properties: - authRef: - description: |- - A reference to a specific 'key' within a Secret resource, - In some instances, `key` is a required field. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being referred - to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - folderID: - type: string - required: - - authRef - - folderID - type: object - kubernetes: - description: Kubernetes configures this store to sync secrets - using a Kubernetes cluster provider - properties: - auth: - description: Auth configures how secret-manager authenticates - with a Kubernetes instance. - maxProperties: 1 - minProperties: 1 - properties: - cert: - description: has both clientCert and clientKey as secretKeySelector - properties: - clientCert: - description: |- - A reference to a specific 'key' within a Secret resource, - In some instances, `key` is a required field. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - clientKey: - description: |- - A reference to a specific 'key' within a Secret resource, - In some instances, `key` is a required field. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - type: object - serviceAccount: - description: points to a service account that should be - used for authentication - properties: - audiences: - description: |- - Audience specifies the `aud` claim for the service account token - If the service account uses a well-known annotation for e.g. IRSA or GCP Workload Identity - then this audiences will be appended to the list - items: - type: string - type: array - name: - description: The name of the ServiceAccount resource - being referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. 
- type: string - required: - - name - type: object - token: - description: use static token to authenticate with - properties: - bearerToken: - description: |- - A reference to a specific 'key' within a Secret resource, - In some instances, `key` is a required field. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - type: object - type: object - remoteNamespace: - default: default - description: Remote namespace to fetch the secrets from - type: string - server: - description: configures the Kubernetes server Address. - properties: - caBundle: - description: CABundle is a base64-encoded CA certificate - format: byte - type: string - caProvider: - description: 'see: https://external-secrets.io/v0.4.1/spec/#external-secrets.io/v1alpha1.CAProvider' - properties: - key: - description: The key where the CA certificate can - be found in the Secret or ConfigMap. - type: string - name: - description: The name of the object located at the - provider type. - type: string - namespace: - description: |- - The namespace the Provider type is in. - Can only be defined when used in a ClusterSecretStore. - type: string - type: - description: The type of provider to use such as "Secret", - or "ConfigMap". - enum: - - Secret - - ConfigMap - type: string - required: - - name - - type - type: object - url: - default: kubernetes.default - description: configures the Kubernetes server Address. - type: string - type: object - required: - - auth - type: object - onepassword: - description: OnePassword configures this store to sync secrets - using the 1Password Cloud provider - properties: - auth: - description: Auth defines the information necessary to authenticate - against OnePassword Connect Server - properties: - secretRef: - description: OnePasswordAuthSecretRef holds secret references - for 1Password credentials. - properties: - connectTokenSecretRef: - description: The ConnectToken is used for authentication - to a 1Password Connect Server. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - required: - - connectTokenSecretRef - type: object - required: - - secretRef - type: object - connectHost: - description: ConnectHost defines the OnePassword Connect Server - to connect to - type: string - vaults: - additionalProperties: - type: integer - description: Vaults defines which OnePassword vaults to search - in which order - type: object - required: - - auth - - connectHost - - vaults - type: object - oracle: - description: Oracle configures this store to sync secrets using - Oracle Vault provider - properties: - auth: - description: |- - Auth configures how secret-manager authenticates with the Oracle Vault. 
- If empty, use the instance principal, otherwise the user credentials specified in Auth. - properties: - secretRef: - description: SecretRef to pass through sensitive information. - properties: - fingerprint: - description: Fingerprint is the fingerprint of the - API private key. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - privatekey: - description: PrivateKey is the user's API Signing - Key in PEM format, used for authentication. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - required: - - fingerprint - - privatekey - type: object - tenancy: - description: Tenancy is the tenancy OCID where user is - located. - type: string - user: - description: User is an access OCID specific to the account. - type: string - required: - - secretRef - - tenancy - - user - type: object - compartment: - description: |- - Compartment is the vault compartment OCID. - Required for PushSecret - type: string - encryptionKey: - description: |- - EncryptionKey is the OCID of the encryption key within the vault. - Required for PushSecret - type: string - principalType: - description: |- - The type of principal to use for authentication. If left blank, the Auth struct will - determine the principal type. This optional field must be specified if using - workload identity. - enum: - - "" - - UserPrincipal - - InstancePrincipal - - Workload - type: string - region: - description: Region is the region where vault is located. - type: string - serviceAccountRef: - description: |- - ServiceAccountRef specified the service account - that should be used when authenticating with WorkloadIdentity. - properties: - audiences: - description: |- - Audience specifies the `aud` claim for the service account token - If the service account uses a well-known annotation for e.g. IRSA or GCP Workload Identity - then this audiences will be appended to the list - items: - type: string - type: array - name: - description: The name of the ServiceAccount resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - required: - - name - type: object - vault: - description: Vault is the vault's OCID of the specific vault - where secret is located. - type: string - required: - - region - - vault - type: object - scaleway: - description: Scaleway - properties: - accessKey: - description: AccessKey is the non-secret part of the api key. - properties: - secretRef: - description: SecretRef references a key in a secret that - will be used as value. 
- properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - value: - description: Value can be specified directly to set a - value without using a secret. - type: string - type: object - apiUrl: - description: APIURL is the url of the api to use. Defaults - to https://api.scaleway.com - type: string - projectId: - description: 'ProjectID is the id of your project, which you - can find in the console: https://console.scaleway.com/project/settings' - type: string - region: - description: 'Region where your secrets are located: https://developers.scaleway.com/en/quickstart/#region-and-zone' - type: string - secretKey: - description: SecretKey is the non-secret part of the api key. - properties: - secretRef: - description: SecretRef references a key in a secret that - will be used as value. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - value: - description: Value can be specified directly to set a - value without using a secret. - type: string - type: object - required: - - accessKey - - projectId - - region - - secretKey - type: object - senhasegura: - description: Senhasegura configures this store to sync secrets - using senhasegura provider - properties: - auth: - description: Auth defines parameters to authenticate in senhasegura - properties: - clientId: - type: string - clientSecretSecretRef: - description: |- - A reference to a specific 'key' within a Secret resource, - In some instances, `key` is a required field. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - required: - - clientId - - clientSecretSecretRef - type: object - ignoreSslCertificate: - default: false - description: IgnoreSslCertificate defines if SSL certificate - must be ignored - type: boolean - module: - description: Module defines which senhasegura module should - be used to get secrets - type: string - url: - description: URL of senhasegura - type: string - required: - - auth - - module - - url - type: object - upboundspaces: - description: UpboundProvider configures a store to sync secrets - with Upbound Spaces. 
- properties: - storeRef: - description: StoreRef holds ref to Upbound Spaces secret store - properties: - name: - description: Name of the secret store on Upbound Spaces - type: string - required: - - name - type: object - required: - - storeRef - type: object - vault: - description: Vault configures this store to sync secrets using - Hashi provider - properties: - auth: - description: Auth configures how secret-manager authenticates - with the Vault server. - properties: - appRole: - description: |- - AppRole authenticates with Vault using the App Role auth mechanism, - with the role and secret stored in a Kubernetes Secret resource. - properties: - path: - default: approle - description: |- - Path where the App Role authentication backend is mounted - in Vault, e.g: "approle" - type: string - roleId: - description: |- - RoleID configured in the App Role authentication backend when setting - up the authentication backend in Vault. - type: string - roleRef: - description: |- - Reference to a key in a Secret that contains the App Role ID used - to authenticate with Vault. - The `key` field must be specified and denotes which entry within the Secret - resource is used as the app role id. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - secretRef: - description: |- - Reference to a key in a Secret that contains the App Role secret used - to authenticate with Vault. - The `key` field must be specified and denotes which entry within the Secret - resource is used as the app role secret. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - required: - - path - - secretRef - type: object - cert: - description: |- - Cert authenticates with TLS Certificates by passing client certificate, private key and ca certificate - Cert authentication method - properties: - clientCert: - description: |- - ClientCert is a certificate to authenticate using the Cert Vault - authentication method - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. 
- type: string - type: object - secretRef: - description: |- - SecretRef to a key in a Secret resource containing client private key to - authenticate with Vault using the Cert authentication method - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - type: object - iam: - description: |- - Iam authenticates with vault by passing a special AWS request signed with AWS IAM credentials - AWS IAM authentication method - properties: - externalID: - description: AWS External ID set on assumed IAM roles - type: string - jwt: - description: Specify a service account with IRSA enabled - properties: - serviceAccountRef: - description: A reference to a ServiceAccount resource. - properties: - audiences: - description: |- - Audience specifies the `aud` claim for the service account token - If the service account uses a well-known annotation for e.g. IRSA or GCP Workload Identity - then this audiences will be appended to the list - items: - type: string - type: array - name: - description: The name of the ServiceAccount - resource being referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - required: - - name - type: object - type: object - path: - description: 'Path where the AWS auth method is enabled - in Vault, e.g: "aws"' - type: string - region: - description: AWS region - type: string - role: - description: This is the AWS role to be assumed before - talking to vault - type: string - secretRef: - description: Specify credentials in a Secret object - properties: - accessKeyIDSecretRef: - description: The AccessKeyID is used for authentication - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource - being referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - secretAccessKeySecretRef: - description: The SecretAccessKey is used for authentication - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource - being referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. 
- type: string - type: object - sessionTokenSecretRef: - description: |- - The SessionToken used for authentication - This must be defined if AccessKeyID and SecretAccessKey are temporary credentials - see: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource - being referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - type: object - vaultAwsIamServerID: - description: 'X-Vault-AWS-IAM-Server-ID is an additional - header used by Vault IAM auth method to mitigate - against different types of replay attacks. More - details here: https://developer.hashicorp.com/vault/docs/auth/aws' - type: string - vaultRole: - description: Vault Role. In vault, a role describes - an identity with a set of permissions, groups, or - policies you want to attach a user of the secrets - engine - type: string - required: - - vaultRole - type: object - jwt: - description: |- - Jwt authenticates with Vault by passing role and JWT token using the - JWT/OIDC authentication method - properties: - kubernetesServiceAccountToken: - description: |- - Optional ServiceAccountToken specifies the Kubernetes service account for which to request - a token for with the `TokenRequest` API. - properties: - audiences: - description: |- - Optional audiences field that will be used to request a temporary Kubernetes service - account token for the service account referenced by `serviceAccountRef`. - Defaults to a single audience `vault` it not specified. - Deprecated: use serviceAccountRef.Audiences instead - items: - type: string - type: array - expirationSeconds: - description: |- - Optional expiration time in seconds that will be used to request a temporary - Kubernetes service account token for the service account referenced by - `serviceAccountRef`. - Deprecated: this will be removed in the future. - Defaults to 10 minutes. - format: int64 - type: integer - serviceAccountRef: - description: Service account field containing - the name of a kubernetes ServiceAccount. - properties: - audiences: - description: |- - Audience specifies the `aud` claim for the service account token - If the service account uses a well-known annotation for e.g. IRSA or GCP Workload Identity - then this audiences will be appended to the list - items: - type: string - type: array - name: - description: The name of the ServiceAccount - resource being referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. 
- type: string - required: - - name - type: object - required: - - serviceAccountRef - type: object - path: - default: jwt - description: |- - Path where the JWT authentication backend is mounted - in Vault, e.g: "jwt" - type: string - role: - description: |- - Role is a JWT role to authenticate using the JWT/OIDC Vault - authentication method - type: string - secretRef: - description: |- - Optional SecretRef that refers to a key in a Secret resource containing JWT token to - authenticate with Vault using the JWT/OIDC authentication method. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - required: - - path - type: object - kubernetes: - description: |- - Kubernetes authenticates with Vault by passing the ServiceAccount - token stored in the named Secret resource to the Vault server. - properties: - mountPath: - default: kubernetes - description: |- - Path where the Kubernetes authentication backend is mounted in Vault, e.g: - "kubernetes" - type: string - role: - description: |- - A required field containing the Vault Role to assume. A Role binds a - Kubernetes ServiceAccount with a set of Vault policies. - type: string - secretRef: - description: |- - Optional secret field containing a Kubernetes ServiceAccount JWT used - for authenticating with Vault. If a name is specified without a key, - `token` is the default. If one is not specified, the one bound to - the controller will be used. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - serviceAccountRef: - description: |- - Optional service account field containing the name of a kubernetes ServiceAccount. - If the service account is specified, the service account secret token JWT will be used - for authenticating with Vault. If the service account selector is not supplied, - the secretRef will be used instead. - properties: - audiences: - description: |- - Audience specifies the `aud` claim for the service account token - If the service account uses a well-known annotation for e.g. IRSA or GCP Workload Identity - then this audiences will be appended to the list - items: - type: string - type: array - name: - description: The name of the ServiceAccount resource - being referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. 
- type: string - required: - - name - type: object - required: - - mountPath - - role - type: object - ldap: - description: |- - Ldap authenticates with Vault by passing username/password pair using - the LDAP authentication method - properties: - path: - default: ldap - description: |- - Path where the LDAP authentication backend is mounted - in Vault, e.g: "ldap" - type: string - secretRef: - description: |- - SecretRef to a key in a Secret resource containing password for the LDAP - user used to authenticate with Vault using the LDAP authentication - method - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - username: - description: |- - Username is a LDAP user name used to authenticate using the LDAP Vault - authentication method - type: string - required: - - path - - username - type: object - tokenSecretRef: - description: TokenSecretRef authenticates with Vault by - presenting a token. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - userPass: - description: UserPass authenticates with Vault by passing - username/password pair - properties: - path: - default: user - description: |- - Path where the UserPassword authentication backend is mounted - in Vault, e.g: "user" - type: string - secretRef: - description: |- - SecretRef to a key in a Secret resource containing password for the - user used to authenticate with Vault using the UserPass authentication - method - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - username: - description: |- - Username is a user name used to authenticate using the UserPass Vault - authentication method - type: string - required: - - path - - username - type: object - type: object - caBundle: - description: |- - PEM encoded CA bundle used to validate Vault server certificate. Only used - if the Server URL is using HTTPS protocol. This parameter is ignored for - plain HTTP protocol connection. If not set the system root certificates - are used to validate the TLS connection. - format: byte - type: string - caProvider: - description: The provider for the CA bundle to use to validate - Vault server certificate. 
- properties: - key: - description: The key where the CA certificate can be found - in the Secret or ConfigMap. - type: string - name: - description: The name of the object located at the provider - type. - type: string - namespace: - description: |- - The namespace the Provider type is in. - Can only be defined when used in a ClusterSecretStore. - type: string - type: - description: The type of provider to use such as "Secret", - or "ConfigMap". - enum: - - Secret - - ConfigMap - type: string - required: - - name - - type - type: object - forwardInconsistent: - description: |- - ForwardInconsistent tells Vault to forward read-after-write requests to the Vault - leader instead of simply retrying within a loop. This can increase performance if - the option is enabled serverside. - https://www.vaultproject.io/docs/configuration/replication#allow_forwarding_via_header - type: boolean - namespace: - description: |- - Name of the vault namespace. Namespaces is a set of features within Vault Enterprise that allows - Vault environments to support Secure Multi-tenancy. e.g: "ns1". - More about namespaces can be found here https://www.vaultproject.io/docs/enterprise/namespaces - type: string - path: - description: |- - Path is the mount path of the Vault KV backend endpoint, e.g: - "secret". The v2 KV secret engine version specific "/data" path suffix - for fetching secrets from Vault is optional and will be appended - if not present in specified path. - type: string - readYourWrites: - description: |- - ReadYourWrites ensures isolated read-after-write semantics by - providing discovered cluster replication states in each request. - More information about eventual consistency in Vault can be found here - https://www.vaultproject.io/docs/enterprise/consistency - type: boolean - server: - description: 'Server is the connection address for the Vault - server, e.g: "https://vault.example.com:8200".' - type: string - version: - default: v2 - description: |- - Version is the Vault KV secret engine version. This can be either "v1" or - "v2". Version defaults to "v2". - enum: - - v1 - - v2 - type: string - required: - - auth - - server - type: object - webhook: - description: Webhook configures this store to sync secrets using - a generic templated webhook - properties: - body: - description: Body - type: string - caBundle: - description: |- - PEM encoded CA bundle used to validate webhook server certificate. Only used - if the Server URL is using HTTPS protocol. This parameter is ignored for - plain HTTP protocol connection. If not set the system root certificates - are used to validate the TLS connection. - format: byte - type: string - caProvider: - description: The provider for the CA bundle to use to validate - webhook server certificate. - properties: - key: - description: The key the value inside of the provider - type to use, only used with "Secret" type - type: string - name: - description: The name of the object located at the provider - type. - type: string - namespace: - description: The namespace the Provider type is in. - type: string - type: - description: The type of provider to use such as "Secret", - or "ConfigMap". 
- enum: - - Secret - - ConfigMap - type: string - required: - - name - - type - type: object - headers: - additionalProperties: - type: string - description: Headers - type: object - method: - description: Webhook Method - type: string - result: - description: Result formatting - properties: - jsonPath: - description: Json path of return value - type: string - type: object - secrets: - description: |- - Secrets to fill in templates - These secrets will be passed to the templating function as key value pairs under the given name - items: - properties: - name: - description: Name of this secret in templates - type: string - secretRef: - description: Secret ref to fill in credentials - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - required: - - name - - secretRef - type: object - type: array - timeout: - description: Timeout - type: string - url: - description: Webhook url to call - type: string - required: - - result - - url - type: object - yandexcertificatemanager: - description: YandexCertificateManager configures this store to - sync secrets using Yandex Certificate Manager provider - properties: - apiEndpoint: - description: Yandex.Cloud API endpoint (e.g. 'api.cloud.yandex.net:443') - type: string - auth: - description: Auth defines the information necessary to authenticate - against Yandex Certificate Manager - properties: - authorizedKeySecretRef: - description: The authorized key used for authentication - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - type: object - caProvider: - description: The provider for the CA bundle to use to validate - Yandex.Cloud server certificate. - properties: - certSecretRef: - description: |- - A reference to a specific 'key' within a Secret resource, - In some instances, `key` is a required field. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - type: object - required: - - auth - type: object - yandexlockbox: - description: YandexLockbox configures this store to sync secrets - using Yandex Lockbox provider - properties: - apiEndpoint: - description: Yandex.Cloud API endpoint (e.g. 
'api.cloud.yandex.net:443') - type: string - auth: - description: Auth defines the information necessary to authenticate - against Yandex Lockbox - properties: - authorizedKeySecretRef: - description: The authorized key used for authentication - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - type: object - caProvider: - description: The provider for the CA bundle to use to validate - Yandex.Cloud server certificate. - properties: - certSecretRef: - description: |- - A reference to a specific 'key' within a Secret resource, - In some instances, `key` is a required field. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - type: object - required: - - auth - type: object - type: object - refreshInterval: - description: Used to configure store refresh interval in seconds. - type: integer - retrySettings: - description: Used to configure http retries if failed. - properties: - maxRetries: - format: int32 - type: integer - retryInterval: - type: string - type: object - secretStoreMetadata: - description: The metadata of the secret store to be created. - properties: - annotations: - additionalProperties: - type: string - description: Annotations that are set on projected resource. - type: object - labels: - additionalProperties: - type: string - description: Labels that are set on projected resource. - type: object - type: object - secretStoreName: - description: |- - SecretStoreName is the name to use when creating secret stores within a control plane. - optional, if not set, SharedSecretStore name will be used. - When set, it is immutable. - maxLength: 253 - minLength: 1 - type: string - x-kubernetes-validations: - - message: value is immutable - rule: self == oldSelf - required: - - controlPlaneSelector - - namespaceSelector - - provider - type: object - x-kubernetes-validations: - - message: secretStoreName is immutable - rule: has(self.secretStoreName) == has(oldSelf.secretStoreName) - status: - description: SharedSecretStoreStatus defines the observed state of the - SecretStore. - properties: - conditions: - description: Conditions of the resource. - items: - description: A Condition that may apply to a resource. - properties: - lastTransitionTime: - description: |- - LastTransitionTime is the last time this condition transitioned from one - status to another. - format: date-time - type: string - message: - description: |- - A Message containing details about this condition's last transition from - one status to another, if any. - type: string - observedGeneration: - description: |- - ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
- For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - type: integer - reason: - description: A Reason for this condition's last transition from - one status to another. - type: string - status: - description: Status of this condition; is it currently True, - False, or Unknown? - type: string - type: - description: |- - Type of this condition. At most one of each condition type may apply to - a resource at any point in time. - type: string - required: - - lastTransitionTime - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - failed: - description: List of provisioning failures. - items: - description: SecretStoreProvisioningFailure defines secret store - provisioning failure. - properties: - conditions: - description: List of occurred conditions. - items: - properties: - lastTransitionTime: - format: date-time - type: string - message: - type: string - reason: - type: string - status: - type: string - type: - type: string - required: - - status - - type - type: object - type: array - controlPlane: - description: ControlPlane name where the failure occurred. - type: string - required: - - controlPlane - type: object - type: array - x-kubernetes-list-map-keys: - - controlPlane - x-kubernetes-list-type: map - observedGeneration: - description: |- - ObservedGeneration is the latest metadata.generation - which resulted in either a ready state, or stalled due to error - it can not recover from without human intervention. - format: int64 - type: integer - provisioned: - description: List of successfully provisioned targets. - items: - description: SecretStoreProvisioningSuccess defines secret store - provision success. - properties: - controlPlane: - description: ControlPlane name where the secret store got projected - type: string - required: - - controlPlane - type: object - type: array - x-kubernetes-list-map-keys: - - controlPlane - x-kubernetes-list-type: map - type: object - type: object - served: true - storage: true - subresources: - status: {} diff --git a/static/crds/space/v1.12/spaces.upbound.io_simulations.yaml b/static/crds/space/v1.12/spaces.upbound.io_simulations.yaml deleted file mode 100644 index 856d1a82b..000000000 --- a/static/crds/space/v1.12/spaces.upbound.io_simulations.yaml +++ /dev/null @@ -1,243 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.3 - name: simulations.spaces.upbound.io -spec: - group: spaces.upbound.io - names: - categories: - - spaces - kind: Simulation - listKind: SimulationList - plural: simulations - singular: simulation - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .spec.controlPlaneName - name: SOURCE - type: string - - jsonPath: .status.simulatedControlPlaneName - name: SIMULATED - type: string - - jsonPath: .status.conditions[?(@.type=='AcceptingChanges')].status - name: ACCEPTING-CHANGES - type: string - - jsonPath: .status.conditions[?(@.type=='AcceptingChanges')].reason - name: STATE - type: string - - jsonPath: .metadata.creationTimestamp - name: AGE - type: date - name: v1alpha1 - schema: - openAPIV3Schema: - description: |- - A Simulation creates a simulation of a source ControlPlane. You can apply a - change set to the simulated control plane. 
When the Simulation is complete it - will detect the changes and report the difference compared to the source - control plane. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: SimulationSpec specifies how to run the simulation. - properties: - completionCriteria: - description: |- - CompletionCriteria specify how Spaces should determine when the - simulation is complete. If any of the criteria are met, Spaces will set - the Simulation's desired state to complete. Omit the criteria if you want - to manually mark the Simulation complete. - items: - description: A CompletionCriterion specifies when a simulation is - complete. - properties: - duration: - description: Duration after which the simulation is complete. - type: string - type: - description: Type of criterion. - enum: - - Duration - type: string - required: - - duration - - type - type: object - type: array - controlPlaneName: - description: |- - ControlPlaneName is the name of the ControlPlane to simulate a change to. - This control plane is known as the Simulation's 'source' control plane. - minLength: 1 - type: string - x-kubernetes-validations: - - message: The source controlplane can't be changed - rule: self == oldSelf - desiredState: - default: AcceptingChanges - description: DesiredState of the simulation. - enum: - - AcceptingChanges - - Complete - - Terminated - type: string - x-kubernetes-validations: - - message: A complete Simulation can only be terminated - rule: oldSelf != 'Complete' || self == 'Complete' || self == 'Terminated' - - message: A Simulation can't be un-terminated - rule: oldSelf != 'Terminated' || self == oldSelf - required: - - controlPlaneName - - desiredState - type: object - status: - description: SimulationStatus represents the observed state of a Simulation. - properties: - changes: - description: |- - Changes detected by the simulation. Only changes that happen while the - simulation is in the AcceptingChanges state are included. - items: - description: |- - A SimulationChange represents an object that changed while the simulation was - in the AcceptingChanges state. - properties: - change: - description: Change type. - enum: - - Unknown - - Create - - Update - - Delete - type: string - objectRef: - description: ObjectReference to the changed object. - properties: - apiVersion: - description: APIVersion of the changed resource. - type: string - kind: - description: Kind of the changed resource. - type: string - name: - description: Name of the changed resource. - type: string - namespace: - description: Namespace of the changed resource. - type: string - required: - - apiVersion - - kind - - name - type: object - required: - - change - - objectRef - type: object - type: array - conditions: - description: Conditions of the resource. - items: - description: A Condition that may apply to a resource. 
- properties: - lastTransitionTime: - description: |- - LastTransitionTime is the last time this condition transitioned from one - status to another. - format: date-time - type: string - message: - description: |- - A Message containing details about this condition's last transition from - one status to another, if any. - type: string - observedGeneration: - description: |- - ObservedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - type: integer - reason: - description: A Reason for this condition's last transition from - one status to another. - type: string - status: - description: Status of this condition; is it currently True, - False, or Unknown? - type: string - type: - description: |- - Type of this condition. At most one of each condition type may apply to - a resource at any point in time. - type: string - required: - - lastTransitionTime - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - controlPlaneData: - description: |- - ControlPlaneData exported from the source control plane and imported to - the simulated control plane. - properties: - exportTimestamp: - description: |- - ExportTimestamp is the time at which the source control plane's resources - were exported. Resources are exported to temporary storage before they're - imported to the simulated control plane. - format: date-time - type: string - importTimestamp: - description: |- - ImportTimestamp is the time at which the source control plane's resources - were imported to the simulated control plane. - format: date-time - type: string - type: object - observedGeneration: - description: |- - ObservedGeneration is the latest metadata.generation - which resulted in either a ready state, or stalled due to error - it can not recover from without human intervention. - format: int64 - type: integer - simulatedControlPlaneName: - description: |- - SimulatedControlPlaneName is the name of the control plane used to run - the simulation.
- minLength: 1 - type: string - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/static/crds/space/v1.9/admin.spaces.upbound.io_spacebackupconfigs.yaml b/static/crds/space/v1.9/admin.spaces.upbound.io_spacebackupconfigs.yaml deleted file mode 100644 index 9feedd34f..000000000 --- a/static/crds/space/v1.9/admin.spaces.upbound.io_spacebackupconfigs.yaml +++ /dev/null @@ -1,177 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.3 - name: spacebackupconfigs.admin.spaces.upbound.io -spec: - group: admin.spaces.upbound.io - names: - categories: - - spaces - kind: SpaceBackupConfig - listKind: SpaceBackupConfigList - plural: spacebackupconfigs - singular: spacebackupconfig - scope: Cluster - versions: - - additionalPrinterColumns: - - jsonPath: .spec.objectStorage.provider - name: Provider - type: string - - jsonPath: .spec.objectStorage.bucket - name: Bucket - type: string - - jsonPath: .spec.objectStorage.credentials.source - name: Auth - type: string - - jsonPath: .metadata.annotations.spacebackupconfig\.admin\.internal\.spaces\.upbound\.io/secret - name: Secret - type: string - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - name: v1alpha1 - schema: - openAPIV3Schema: - description: SpaceBackupConfig defines the configuration to backup a Space. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: |- - A SpaceBackupConfigSpec represents the configuration to backup or restore - a Space. - properties: - objectStorage: - description: ObjectStorage specifies the object storage configuration - for the given provider. - properties: - bucket: - description: Bucket is the name of the bucket to store backups - in. - minLength: 1 - type: string - config: - description: |- - Config is a free-form map of configuration options for the object storage provider. - See https://github.com/thanos-io/objstore?tab=readme-ov-file for more - information on the formats for each supported cloud provider. Bucket and - Provider will override the required values in the config. - type: object - x-kubernetes-preserve-unknown-fields: true - credentials: - description: Credentials specifies the credentials to access the - object storage. - properties: - env: - description: |- - Env is a reference to an environment variable that contains credentials - that must be used to connect to the provider. - properties: - name: - description: Name is the name of an environment variable. - type: string - required: - - name - type: object - fs: - description: |- - Fs is a reference to a filesystem location that contains credentials that - must be used to connect to the provider. - properties: - path: - description: Path is a filesystem path. 
- type: string - required: - - path - type: object - secretRef: - description: |- - A SecretRef is a reference to a secret key that contains the credentials - that must be used to connect to the provider. - properties: - key: - default: credentials - description: The key to select. - type: string - name: - description: Name of the secret. - type: string - namespace: - description: Namespace of the secret. - type: string - required: - - key - - name - - namespace - type: object - source: - allOf: - - enum: - - Secret - - InjectedIdentity - - enum: - - Secret - - InjectedIdentity - description: |- - Source of the credentials. - Source "Secret" requires "get" permissions on the referenced Secret. - type: string - required: - - source - type: object - x-kubernetes-validations: - - message: secretRef.name and namespace must be set when source - is Secret - rule: self.source != 'Secret' || (has(self.secretRef) && has(self.secretRef.name) - && has(self.secretRef.__namespace__)) - prefix: - description: |- - Prefix is the prefix to use for all backups using this - SharedBackupConfig, e.g. "prod/cluster1", resulting in backups for - controlplane "ctp1" in namespace "ns1" being stored in - "prod/cluster1/ns1/ctp1". - type: string - provider: - description: Provider is the name of the object storage provider. - enum: - - AWS - - Azure - - GCP - type: string - required: - - bucket - - credentials - - provider - type: object - x-kubernetes-validations: - - message: credentials.secretRef.name must be set when source is Secret - rule: self.credentials.source != 'Secret' || (has(self.credentials.secretRef) - && has(self.credentials.secretRef.name)) - required: - - objectStorage - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/static/crds/space/v1.9/admin.spaces.upbound.io_spacebackups.yaml b/static/crds/space/v1.9/admin.spaces.upbound.io_spacebackups.yaml deleted file mode 100644 index 2e0824038..000000000 --- a/static/crds/space/v1.9/admin.spaces.upbound.io_spacebackups.yaml +++ /dev/null @@ -1,787 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.3 - name: spacebackups.admin.spaces.upbound.io -spec: - group: admin.spaces.upbound.io - names: - categories: - - spaces - kind: SpaceBackup - listKind: SpaceBackupList - plural: spacebackups - singular: spacebackup - scope: Cluster - versions: - - additionalPrinterColumns: - - jsonPath: .status.phase - name: Phase - type: string - - jsonPath: .status.retries - name: Retries - type: integer - - jsonPath: .spec.ttl - name: TTL - type: string - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - name: v1alpha1 - schema: - openAPIV3Schema: - description: SpaceBackup represents a backup of a Space. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. 
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: SpaceBackupSpec defines a backup over a set of matched resources - properties: - configRef: - description: |- - ConfigRef is a reference to the space backup configuration. - ApiGroup is optional and defaults to "admin.spaces.upbound.io". - Kind is required, and the only supported value is "SpaceBackupConfig" at - the moment. - Name is required. - properties: - apiGroup: - description: APIGroup is the group for the resource being referenced. - type: string - kind: - description: Kind is the type of resource being referenced. - minLength: 1 - type: string - name: - description: Name is the name of resource being referenced. - minLength: 1 - type: string - type: object - x-kubernetes-map-type: atomic - x-kubernetes-validations: - - message: backup config ref must be a reference to a SpaceBackupConfig - rule: (!has(self.apiGroup) || self.apiGroup == 'admin.spaces.upbound.io') - && self.kind == 'SpaceBackupConfig' - - message: backup config ref must have a name - rule: size(self.name) > 0 - controlPlaneBackups: - description: ControlPlaneBackups is the definition of the control - plane backups. - properties: - excludedResources: - description: |- - ExcludedResources is a slice of resource names that are not - included in the backup. Used to filter the included extra resources. - items: - type: string - type: array - x-kubernetes-list-type: set - type: object - deletionPolicy: - default: Orphan - description: DeletionPolicy is the policy for the backup. - enum: - - Orphan - - Delete - type: string - exclude: - description: |- - Exclude is the selector for resources that should be excluded from the backup. - If both Match and Exclude are specified, the Exclude selector will be applied - after the Match selector. - By default, only SpaceBackups are excluded. - properties: - controlPlanes: - description: |- - ControlPlanes specifies the control planes selected. - A control plane is matched if any of the control plane selectors matches, if not specified - any control plane in the selected groups is matched. - properties: - labelSelectors: - description: |- - A resource is matched if any of the label selector matches. - In case when the list is empty, resource is matched too. - items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch.
- items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - names: - description: |- - A resource is selected if its metadata.name matches any of the provided names. - In case when the list is empty, resource is matched too. - items: - type: string - type: array - x-kubernetes-list-type: set - type: object - extras: - description: Extras specifies the extra resources selected. - items: - description: GenericSpaceBackupResourceSelector represents a - generic resource selector. - properties: - apiGroup: - description: APIGroup is the group of the resource. - type: string - kind: - description: Kind is the kind of the resource. - type: string - labelSelectors: - description: |- - A resource is matched if any of the label selector matches. - In case when the list is empty, resource is matched too. - items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - names: - description: |- - A resource is selected if its metadata.name matches any of the provided names. - In case when the list is empty, resource is matched too. - items: - type: string - type: array - x-kubernetes-list-type: set - required: - - apiGroup - - kind - type: object - type: array - groups: - description: |- - Groups specifies the groups selected. - A group is matched if any of the group selectors matches, if not specified - any group is matched. 
Group selector is ANDed with all other selectors, so no resource in - a group not matching the group selector will be included in the backup. - properties: - labelSelectors: - description: |- - A resource is matched if any of the label selector matches. - In case when the list is empty, resource is matched too. - items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - names: - description: |- - A resource is selected if its metadata.name matches any of the provided names. - In case when the list is empty, resource is matched too. - items: - type: string - type: array - x-kubernetes-list-type: set - type: object - secrets: - description: Secrets specifies the secrets selected. - properties: - labelSelectors: - description: |- - A resource is matched if any of the label selector matches. - In case when the list is empty, resource is matched too. - items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch.
- items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - names: - description: |- - A resource is selected if its metadata.name matches any of the provided names. - In case when the list is empty, resource is matched too. - items: - type: string - type: array - x-kubernetes-list-type: set - type: object - type: object - match: - description: |- - Match is the selector for resources that should be included in the backup. - By default, we'll back up all Groups and for each Group: - - All ControlPlanes. - - All Secrets. - - All other Space API resources, e.g. SharedBackupConfigs, SharedUpboundPolicies, Backups, etc... - properties: - controlPlanes: - description: |- - ControlPlanes specifies the control planes selected. - A control plane is matched if any of the control plane selectors matches, if not specified - any control plane in the selected groups is matched. - properties: - labelSelectors: - description: |- - A resource is matched if any of the label selector matches. - In case when the list is empty, resource is matched too. - items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - names: - description: |- - A resource is selected if its metadata.name matches any of the provided names. - In case when the list is empty, resource is matched too. - items: - type: string - type: array - x-kubernetes-list-type: set - type: object - extras: - description: Extras specifies the extra resources selected. 
- items: - description: GenericSpaceBackupResourceSelector represents a - generic resource selector. - properties: - apiGroup: - description: APIGroup is the group of the resource. - type: string - kind: - description: Kind is the kind of the resource. - type: string - labelSelectors: - description: |- - A resource is matched if any of the label selector matches. - In case when the list is empty, resource is matched too. - items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - names: - description: |- - A resource is selected if its metadata.name matches any of the provided names. - In case when the list is empty, resource is matched too. - items: - type: string - type: array - x-kubernetes-list-type: set - required: - - apiGroup - - kind - type: object - type: array - groups: - description: |- - Groups specifies the groups selected. - A group is matched if any of the group selectors matches, if not specified - any group is matched. Group selector is ANDed with all other selectors, so no resource in - a group not matching the group selector will be included in the backup. - properties: - labelSelectors: - description: |- - A resource is matched if any of the label selector matches. - In case when the list is empty, resource is matched too. - items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. 
- type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - names: - description: |- - A resource is selected if its metadata.name matches any of the provided names. - In case when the list is empty, resource is matched too. - items: - type: string - type: array - x-kubernetes-list-type: set - type: object - secrets: - description: Secrets specifies the secrets selected. - properties: - labelSelectors: - description: |- - A resource is matched if any of the label selector matches. - In case when the list is empty, resource is matched too. - items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - names: - description: |- - A resource is selected if its metadata.name matches any of the provided names. - In case when the list is empty, resource is matched too. - items: - type: string - type: array - x-kubernetes-list-type: set - type: object - type: object - ttl: - description: |- - TTL is the time to live for the backup.
After this time, the backup - will be eligible for garbage collection. If not set, the backup will - not be garbage collected. - type: string - required: - - configRef - type: object - x-kubernetes-validations: - - message: spec.configRef can't be changed or set after creation - rule: '!has(self.configRef) && !has(oldSelf.configRef) || (self.configRef - == oldSelf.configRef) ' - - message: spec.match can't be changed or set after creation - rule: '!has(self.match) && !has(oldSelf.match) || (self.match == oldSelf.match) ' - - message: spec.exclude can't be changed or set after creation - rule: '!has(self.exclude) && !has(oldSelf.exclude) || (self.exclude - == oldSelf.exclude) ' - - message: spec.controlPlaneBackups can't be changed or set after creation - rule: '!has(self.controlPlaneBackups) && !has(oldSelf.controlPlaneBackups) - || (self.controlPlaneBackups == oldSelf.controlPlaneBackups) ' - status: - description: SpaceBackupStatus represents the observed state of a SpaceBackup. - properties: - conditions: - description: Conditions of the resource. - items: - description: A Condition that may apply to a resource. - properties: - lastTransitionTime: - description: |- - LastTransitionTime is the last time this condition transitioned from one - status to another. - format: date-time - type: string - message: - description: |- - A Message containing details about this condition's last transition from - one status to another, if any. - type: string - observedGeneration: - description: |- - ObservedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - type: integer - reason: - description: A Reason for this condition's last transition from - one status to another. - type: string - status: - description: Status of this condition; is it currently True, - False, or Unknown? - type: string - type: - description: |- - Type of this condition. At most one of each condition type may apply to - a resource at any point in time. - type: string - required: - - lastTransitionTime - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - observedGeneration: - description: |- - ObservedGeneration is the latest metadata.generation - which resulted in either a ready state, or stalled due to error - it can not recover from without human intervention. - format: int64 - type: integer - phase: - default: Pending - description: Phase is the current phase of the backup. - enum: - - Pending - - InProgress - - Failed - - Completed - - Deleted - type: string - retries: - description: Retries is the number of times the backup has been retried. 
- format: int32 - type: integer - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/static/crds/space/v1.9/admin.spaces.upbound.io_spacebackupschedules.yaml b/static/crds/space/v1.9/admin.spaces.upbound.io_spacebackupschedules.yaml deleted file mode 100644 index bf66b7856..000000000 --- a/static/crds/space/v1.9/admin.spaces.upbound.io_spacebackupschedules.yaml +++ /dev/null @@ -1,789 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.3 - name: spacebackupschedules.admin.spaces.upbound.io -spec: - group: admin.spaces.upbound.io - names: - categories: - - spaces - kind: SpaceBackupSchedule - listKind: SpaceBackupScheduleList - plural: spacebackupschedules - singular: spacebackupschedule - scope: Cluster - versions: - - additionalPrinterColumns: - - jsonPath: .spec.schedule - name: Schedule - type: string - - jsonPath: .status.lastBackup - name: LastBackup - type: date - - jsonPath: .spec.ttl - name: TTL - type: string - - jsonPath: .spec.suspend - name: Suspended - type: boolean - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - name: v1alpha1 - schema: - openAPIV3Schema: - description: SpaceBackupSchedule represents a schedule to backup a Space. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: SpaceBackupScheduleSpec defines a space backup schedule. - properties: - configRef: - description: |- - ConfigRef is a reference to the space backup configuration. - ApiGroup is optional and defaults to "admin.spaces.upbound.io". - Kind is required, and the only supported value is "SpaceBackupConfig" at - the moment. - Name is required. - properties: - apiGroup: - description: APIGroup is the group for the resource being referenced. - type: string - kind: - description: Kind is the type of resource being referenced. - minLength: 1 - type: string - name: - description: Name is the name of resource being referenced. - minLength: 1 - type: string - type: object - x-kubernetes-map-type: atomic - x-kubernetes-validations: - - message: backup config ref must be a reference to a SpaceBackupConfig - rule: (!has(self.apiGroup) || self.apiGroup == 'admin.spaces.upbound.io') - && self.kind == 'SpaceBackupConfig' - - message: backup config ref must have a name - rule: size(self.name) > 0 - controlPlaneBackups: - description: ControlPlaneBackups is the definition of the control - plane backups. - properties: - excludedResources: - description: |- - ExcludedResources is a slice of resource names that are not - included in the backup. Used to filter the included extra resources.
- items: - type: string - type: array - x-kubernetes-list-type: set - type: object - deletionPolicy: - default: Orphan - description: DeletionPolicy is the policy for the backup. - enum: - - Orphan - - Delete - type: string - exclude: - description: |- - Exclude is the selector for resources that should be excluded from the backup. - If both Match and Exclude are specified, the Exclude selector will be applied - after the Match selector. - By default, only SpaceBackups are excluded. - properties: - controlPlanes: - description: |- - ControlPlanes specifies the control planes selected. - A control plane is matched if any of the control plane selectors matches, if not specified - any control plane in the selected groups is matched. - properties: - labelSelectors: - description: |- - A resource is matched if any of the label selector matches. - In case when the list is empty, resource is matched too. - items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - names: - description: |- - A resource is selected if its metadata.name matches any of the provided names. - In case when the list is empty, resource is matched too. - items: - type: string - type: array - x-kubernetes-list-type: set - type: object - extras: - description: Extras specifies the extra resources selected. - items: - description: GenericSpaceBackupResourceSelector represents a - generic resource selector. - properties: - apiGroup: - description: APIGroup is the group of the resource. - type: string - kind: - description: Kind is the kind of the resource. - type: string - labelSelectors: - description: |- - A resource is matched if any of the label selector matches. - In case when the list is empty, resource is matched too. - items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. 
- properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - names: - description: |- - A resource is selected if its metadata.name matches any of the provided names. - In case when the list is empty, resource is matched too. - items: - type: string - type: array - x-kubernetes-list-type: set - required: - - apiGroup - - kind - type: object - type: array - groups: - description: |- - Groups specifies the groups selected. - A group is matched if any of the group selectors matches, if not specified - any group is matched. Group selector is ANDed with all other selectors, so no resource in - a group not matching the group selector will be included in the backup. - properties: - labelSelectors: - description: |- - A resource is matched if any of the label selector matches. - In case when the list is empty, resource is matched too. - items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - names: - description: |- - A resource is selected if its metadata.name matches any of the provided names. - In case when the list is empty, resource is matched too. - items: - type: string - type: array - x-kubernetes-list-type: set - type: object - secrets: - description: Spaces specifies the spaces selected. - properties: - labelSelectors: - description: |- - A resource is matched if any of the label selector matches. - In case when the list is empty, resource is matched too. - items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - names: - description: |- - A resource is selected if its metadata.name matches any of the provided names. - In case when the list is empty, resource is matched too. - items: - type: string - type: array - x-kubernetes-list-type: set - type: object - type: object - match: - description: |- - Match is the selector for resources that should be included in the backup. - By default, we'll back up all Groups and for each Group: - - All ControlPlanes. - - All Secrets. - - All other Space API resources, e.g. SharedBackupConfigs, SharedUpboundPolicies, Backups, etc... - properties: - controlPlanes: - description: |- - ControlPlanes specifies the control planes selected. - A control plane is matched if any of the control plane selectors matches, if not specified - any control plane in the selected groups is matched. - properties: - labelSelectors: - description: |- - A resource is matched if any of the label selector matches. - In case when the list is empty, resource is matched too. - items: - description: |- - A label selector is a label query over a set of resources. 
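As a sketch of the match/exclude semantics described above (Exclude is applied after Match, and by default only SpaceBackups are excluded), a spec fragment might combine group names with control-plane label selectors. All names and labels here are hypothetical:

  match:
    groups:
      names:
        - team-a                    # hypothetical group
    controlPlanes:
      labelSelectors:
        - matchLabels:
            environment: production
  exclude:
    secrets:
      names:
        - scratch-secret            # hypothetical secret to leave out

Because the group selector is ANDed with the other selectors, a control plane outside team-a is never included even if its labels match.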
The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - names: - description: |- - A resource is selected if its metadata.name matches any of the provided names. - In case when the list is empty, resource is matched too. - items: - type: string - type: array - x-kubernetes-list-type: set - type: object - extras: - description: Extras specifies the extra resources selected. - items: - description: GenericSpaceBackupResourceSelector represents a - generic resource selector. - properties: - apiGroup: - description: APIGroup is the group of the resource. - type: string - kind: - description: Kind is the kind of the resource. - type: string - labelSelectors: - description: |- - A resource is matched if any of the label selector matches. - In case when the list is empty, resource is matched too. - items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. 
- items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - names: - description: |- - A resource is selected if its metadata.name matches any of the provided names. - In case when the list is empty, resource is matched too. - items: - type: string - type: array - x-kubernetes-list-type: set - required: - - apiGroup - - kind - type: object - type: array - groups: - description: |- - Groups specifies the groups selected. - A group is matched if any of the group selectors matches, if not specified - any group is matched. Group selector is ANDed with all other selectors, so no resource in - a group not matching the group selector will be included in the backup. - properties: - labelSelectors: - description: |- - A resource is matched if any of the label selector matches. - In case when the list is empty, resource is matched too. - items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - names: - description: |- - A resource is selected if its metadata.name matches any of the provided names. - In case when the list is empty, resource is matched too. - items: - type: string - type: array - x-kubernetes-list-type: set - type: object - secrets: - description: Spaces specifies the spaces selected. - properties: - labelSelectors: - description: |- - A resource is matched if any of the label selector matches. - In case when the list is empty, resource is matched too. 
- items: - description: |- A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - names: - description: |- - A resource is selected if its metadata.name matches any of the provided names. - In case when the list is empty, resource is matched too. - items: - type: string - type: array - x-kubernetes-list-type: set - type: object - type: object - schedule: - description: Schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. - minLength: 1 - type: string - suspend: - description: |- - Suspend specifies whether the schedule is suspended. If true, no - SpaceBackups will be created, but running backups will be allowed to - complete. - type: boolean - ttl: - description: |- - TTL is the time to live for the backup. After this time, the backup - will be eligible for garbage collection. If not set, the backup will - not be garbage collected. - type: string - useOwnerReferencesInBackup: - description: |- - UseOwnerReferencesInBackup specifies whether an ownership chain should be - established between this resource and the Backup it creates. - If set to true, the Backup will be garbage collected when this resource - is deleted. - type: boolean - required: - - configRef - - schedule - type: object - status: - description: SpaceBackupScheduleStatus represents the observed state of - a SpaceBackupSchedule. - properties: - conditions: - description: Conditions of the resource. - items: - description: A Condition that may apply to a resource. - properties: - lastTransitionTime: - description: |- - LastTransitionTime is the last time this condition transitioned from one - status to another. - format: date-time - type: string - message: - description: |- - A Message containing details about this condition's last transition from - one status to another, if any. - type: string - observedGeneration: - description: |- - ObservedGeneration represents the .metadata.generation that the condition was set based upon.
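Pulling the schedule fields above together, a hedged SpaceBackupSchedule example (group/version taken from this CRD; all names are placeholders):

  apiVersion: admin.spaces.upbound.io/v1alpha1
  kind: SpaceBackupSchedule
  metadata:
    name: nightly                       # hypothetical name
  spec:
    schedule: "0 2 * * *"               # Cron format: every day at 02:00
    ttl: 168h                           # backups garbage-collected after a week
    suspend: false                      # set true to pause new SpaceBackups
    useOwnerReferencesInBackup: true    # backups are GC'd along with this schedule
    configRef:
      kind: SpaceBackupConfig
      name: default                     # assumes a SpaceBackupConfig named "default" exists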
- For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - type: integer - reason: - description: A Reason for this condition's last transition from - one status to another. - type: string - status: - description: Status of this condition; is it currently True, - False, or Unknown? - type: string - type: - description: |- - Type of this condition. At most one of each condition type may apply to - a resource at any point in time. - type: string - required: - - lastTransitionTime - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - lastBackup: - description: |- - LastBackup is the last time a Backup was run for this - schedule - format: date-time - type: string - observedGeneration: - description: |- - ObservedGeneration is the latest metadata.generation - which resulted in either a ready state, or stalled due to error - it can not recover from without human intervention. - format: int64 - type: integer - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/static/crds/space/v1.9/authorization.spaces.upbound.io_objectrolebindings.yaml b/static/crds/space/v1.9/authorization.spaces.upbound.io_objectrolebindings.yaml deleted file mode 100644 index dcca4418f..000000000 --- a/static/crds/space/v1.9/authorization.spaces.upbound.io_objectrolebindings.yaml +++ /dev/null @@ -1,153 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.3 - name: objectrolebindings.authorization.spaces.upbound.io -spec: - group: authorization.spaces.upbound.io - names: - categories: - - iam - kind: ObjectRoleBinding - listKind: ObjectRoleBindingList - plural: objectrolebindings - singular: objectrolebinding - scope: Namespaced - versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - description: |- - An ObjectRoleBinding binds a namespaced API object to a set of subjects, at varying access levels. - For now, there can be at most one ObjectRoleBinding pointing to each API object. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: ObjectRoleBindingSpec is ObjectRoleBinding's spec. - properties: - object: - description: |- - Object references the object to which the listed subjects should have access at varying levels. - The object value is immutable after creation. - properties: - apiGroup: - description: |- - APIGroup defines the apiGroup of the object being pointed to. - With some minor differences, this is essentially matched as a DNS subdomain, like how Kubernetes validates it.
- The Kubernetes legacy core group is denoted as "core". - maxLength: 64 - pattern: ^[a-z][a-z0-9-]{0,61}[a-z0-9](\.[a-z][a-z0-9-]{0,61}[a-z0-9])*$ - type: string - x-kubernetes-validations: - - message: apiGroup is immutable - rule: self == oldSelf - - message: apiGroup must be 'core' for now. This will change in - the future. - rule: self == 'core' - name: - description: |- - Name points to the .metadata.name of the object targeted. - Kubernetes validates this as a DNS 1123 subdomain. - maxLength: 253 - pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ - type: string - x-kubernetes-validations: - - message: name is immutable - rule: self == oldSelf - resource: - description: |- - Resource defines the resource type (often kind in plural, e.g. - controlplanes) being pointed to. - With some minor differences, this is essentially matched as a DNS label, like how Kubernetes validates it. - maxLength: 63 - pattern: ^[a-z][a-z0-9-]{1,61}[a-z0-9]$ - type: string - x-kubernetes-validations: - - message: resource is immutable - rule: self == oldSelf - - message: resource must be 'namespaces' for now. This will change - in the future. - rule: self == 'namespaces' - required: - - apiGroup - - name - - resource - type: object - subjects: - description: Subjects should be a map type with both kind+name as - a key - items: - description: |- - SubjectBinding contains a reference to the object or user identities a role - binding applies to. - properties: - kind: - description: |- - Kind of subject being referenced. Values defined by this API group are - for now only "UpboundTeam". - enum: - - UpboundTeam - type: string - x-kubernetes-validations: - - message: kind must be 'UpboundTeam' for now. This will change - in the future. - rule: self == 'UpboundTeam' - name: - description: |- - Name (identifier) of the subject (of the specified kind) being referenced. - The identifier must be 2-100 chars, [a-zA-Z0-9-], no repeating dashes, can't start/end with a dash. - Notably, a UUID fits that format. - maxLength: 100 - pattern: ^([a-zA-Z0-9]+-?)+[a-zA-Z0-9]$ - type: string - role: - description: |- - Role this subject has on the associated Object. - The list of valid roles is defined for each target API resource separately. - For namespaces, valid values are "viewer", "editor", and "admin". - The format of this is essentially a RFC 1035 label with underscores instead of dashes, minimum three characters long. - maxLength: 63 - pattern: ^[a-z][a-z0-9_]{1,62}[a-z0-9]$ - type: string - required: - - kind - - name - - role - type: object - type: array - x-kubernetes-list-map-keys: - - kind - - name - x-kubernetes-list-type: map - required: - - object - - subjects - type: object - status: - description: ObjectRoleBindingStatus is RoleBindings' status. - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/static/crds/space/v1.9/embed.go b/static/crds/space/v1.9/embed.go deleted file mode 100644 index d6ea58626..000000000 --- a/static/crds/space/v1.9/embed.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2024 Upbound Inc. 
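The ObjectRoleBinding schema above pins apiGroup to 'core' and resource to 'namespaces' for now, with viewer, editor, and admin as the valid namespace roles. A sketch, with hypothetical names throughout:

  apiVersion: authorization.spaces.upbound.io/v1alpha1
  kind: ObjectRoleBinding
  metadata:
    name: team-access            # hypothetical
    namespace: team-a            # hypothetical group namespace
  spec:
    object:
      apiGroup: core             # only 'core' is accepted for now
      resource: namespaces       # only 'namespaces' is accepted for now
      name: team-a
    subjects:
      - kind: UpboundTeam        # the only supported subject kind
        name: platform-team      # hypothetical team identifier (a UUID also fits the pattern)
        role: admin              # one of viewer, editor, admin for namespaces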
-// All rights reserved - -package crds - -import ( - "embed" -) - -//go:embed *.yaml -var Manifests embed.FS diff --git a/static/crds/space/v1.9/observability.spaces.upbound.io_sharedtelemetryconfigs.yaml b/static/crds/space/v1.9/observability.spaces.upbound.io_sharedtelemetryconfigs.yaml deleted file mode 100644 index cdbd5f84a..000000000 --- a/static/crds/space/v1.9/observability.spaces.upbound.io_sharedtelemetryconfigs.yaml +++ /dev/null @@ -1,329 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.3 - name: sharedtelemetryconfigs.observability.spaces.upbound.io -spec: - group: observability.spaces.upbound.io - names: - categories: - - observability - kind: SharedTelemetryConfig - listKind: SharedTelemetryConfigList - plural: sharedtelemetryconfigs - shortNames: - - stc - singular: sharedtelemetryconfig - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .metadata.annotations.sharedtelemetryconfig\.internal\.spaces\.upbound\.io/selected - name: Selected - type: string - - jsonPath: .metadata.annotations.sharedtelemetryconfig\.internal\.spaces\.upbound\.io/failed - name: Failed - type: string - - jsonPath: .metadata.annotations.sharedtelemetryconfig\.internal\.spaces\.upbound\.io/provisioned - name: Provisioned - type: string - - jsonPath: .status.conditions[?(@.type=='Validated')].status - name: Validated - type: string - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - name: v1alpha1 - schema: - openAPIV3Schema: - description: SharedTelemetryConfig defines a telemetry configuration over - a set of ControlPlanes. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: SharedTelemetryConfigSpec defines a telemetry configuration - over a set of ControlPlanes. - properties: - controlPlaneSelector: - description: |- - ControlPlaneSelector defines the selector for ControlPlanes on which to - configure telemetry. - properties: - labelSelectors: - description: |- - A resource is matched if any of the label selector matches. - In case when the list is empty, resource is matched too. - items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. 
- type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - names: - description: |- - A resource is selected if its metadata.name matches any of the provided names. - In case when the list is empty, resource is matched too. - items: - type: string - type: array - x-kubernetes-list-type: set - type: object - x-kubernetes-validations: - - message: either names or a labelSelector must be specified - rule: (has(self.labelSelectors) || has(self.names)) && (size(self.labelSelectors) - > 0 || size(self.names) > 0) - exportPipeline: - description: |- - ExportPipeline defines the telemetry exporter pipeline to configure on - the selected ControlPlanes. - properties: - logs: - description: |- - Logs defines the logs exporter pipeline to configure on the - selected ControlPlanes. The value has to be present in the - spec.exporters field. - items: - type: string - maxItems: 10 - type: array - metrics: - description: |- - Metrics defines the metrics exporter pipeline to configure on the - selected ControlPlanes. The value has to be present in the - spec.exporters field. - items: - type: string - maxItems: 10 - type: array - traces: - description: |- - Traces defines the traces exporter pipeline to configure on the - selected ControlPlanes. The value has to be present in the - spec.exporters field. - items: - type: string - maxItems: 10 - type: array - type: object - exporters: - description: |- - Exporters defines the exporters to configure on the selected ControlPlanes. - Untyped as we use the underlying OpenTelemetryOperator to configure the - OpenTelemetry collector's exporters. Use the OpenTelemetry Collector - documentation to configure the exporters. - Currently only supported exporters are push based exporters. - type: object - x-kubernetes-preserve-unknown-fields: true - required: - - controlPlaneSelector - - exportPipeline - - exporters - type: object - status: - description: SharedTelemetryConfigStatus represents the observed state - of a SharedTelemetryConfig. - properties: - conditions: - description: Conditions of the resource. - items: - description: A Condition that may apply to a resource. - properties: - lastTransitionTime: - description: |- - LastTransitionTime is the last time this condition transitioned from one - status to another. - format: date-time - type: string - message: - description: |- - A Message containing details about this condition's last transition from - one status to another, if any. 
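Given the SharedTelemetryConfig schema above, where spec.exporters is passed through untyped to the OpenTelemetry collector and each pipeline entry must name a key under spec.exporters, a sketch might look like this. The otlphttp exporter and its endpoint are assumptions about the collector configuration, not part of the CRD:

  apiVersion: observability.spaces.upbound.io/v1alpha1
  kind: SharedTelemetryConfig
  metadata:
    name: export-metrics           # hypothetical
    namespace: team-a              # hypothetical group namespace
  spec:
    controlPlaneSelector:
      labelSelectors:
        - matchLabels:
            telemetry: enabled     # hypothetical label
    exporters:
      otlphttp:                    # assumed OpenTelemetry Collector exporter config
        endpoint: https://collector.example.com:4318
    exportPipeline:
      metrics:
        - otlphttp                 # must reference a key under spec.exporters
      logs:
        - otlphttp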
- type: string - observedGeneration: - description: |- - ObservedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - type: integer - reason: - description: A Reason for this condition's last transition from - one status to another. - type: string - status: - description: Status of this condition; is it currently True, - False, or Unknown? - type: string - type: - description: |- - Type of this condition. At most one of each condition type may apply to - a resource at any point in time. - type: string - required: - - lastTransitionTime - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - failed: - description: list of provisioning failures. - items: - description: SharedTelemetryConfigProvisioningFailure defines configuration - provisioning failure. - properties: - conditions: - description: Conditions of the resource. - items: - description: A Condition that may apply to a resource. - properties: - lastTransitionTime: - description: |- - LastTransitionTime is the last time this condition transitioned from one - status to another. - format: date-time - type: string - message: - description: |- - A Message containing details about this condition's last transition from - one status to another, if any. - type: string - observedGeneration: - description: |- - ObservedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - type: integer - reason: - description: A Reason for this condition's last transition - from one status to another. - type: string - status: - description: Status of this condition; is it currently - True, False, or Unknown? - type: string - type: - description: |- - Type of this condition. At most one of each condition type may apply to - a resource at any point in time. - type: string - required: - - lastTransitionTime - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - controlPlane: - description: ControlPlane name where the failure occurred. - type: string - observedGeneration: - description: |- - ObservedGeneration is the latest metadata.generation - which resulted in either a ready state, or stalled due to error - it can not recover from without human intervention. - format: int64 - type: integer - required: - - controlPlane - type: object - type: array - x-kubernetes-list-map-keys: - - controlPlane - x-kubernetes-list-type: map - observedGeneration: - description: |- - ObservedGeneration is the latest metadata.generation - which resulted in either a ready state, or stalled due to error - it can not recover from without human intervention. - format: int64 - type: integer - provisioned: - description: List of successfully provisioned targets. - items: - type: string - type: array - x-kubernetes-list-type: set - selectedControlPlanes: - description: SelectedControlPlanes represents the names of the selected - ControlPlanes. 
- items: - type: string - type: array - x-kubernetes-list-type: set - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/static/crds/space/v1.9/policy.spaces.upbound.io_sharedupboundpolicies.yaml b/static/crds/space/v1.9/policy.spaces.upbound.io_sharedupboundpolicies.yaml deleted file mode 100644 index 30f732e75..000000000 --- a/static/crds/space/v1.9/policy.spaces.upbound.io_sharedupboundpolicies.yaml +++ /dev/null @@ -1,4303 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.3 - name: sharedupboundpolicies.policy.spaces.upbound.io -spec: - group: policy.spaces.upbound.io - names: - categories: - - policies - kind: SharedUpboundPolicy - listKind: SharedUpboundPolicyList - plural: sharedupboundpolicies - shortNames: - - sup - singular: sharedupboundpolicy - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .metadata.annotations.sharedupboundpolicies\.internal\.spaces\.upbound\.io/provisioned-total - name: Provisioned - type: string - - jsonPath: .metadata.creationTimestamp - name: AGE - type: date - name: v1alpha1 - schema: - openAPIV3Schema: - description: |- - SharedUpboundPolicy specifies a shared Kyverno policy projected into the specified - ControlPlanes of the same namespace as SharedUpboundPolicy. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: SharedUpboundPolicySpec defines the desired state of SharedUpboundPolicy. - properties: - admission: - default: true - description: |- - Admission controls if rules are applied during admission. - Optional. Default value is "true". - type: boolean - applyRules: - description: |- - ApplyRules controls how rules in a policy are applied. Rule are processed in - the order of declaration. When set to `One` processing stops after a rule has - been applied i.e. the rule matches and results in a pass, fail, or error. When - set to `All` all rules in the policy are processed. The default is `All`. - enum: - - All - - One - type: string - background: - default: true - description: |- - Background controls if rules are applied to existing resources during a background scan. - Optional. Default value is "true". The value must be set to "false" if the policy rule - uses variables that are only available in the admission review request (e.g. user name). - type: boolean - controlPlaneSelector: - description: |- - The policy is projected only to control planes - matching the provided selector. Either names or a labelSelector must be specified. - properties: - labelSelectors: - description: |- - A resource is matched if any of the label selector matches. - In case when the list is empty, resource is matched too. 
- items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - names: - description: |- - A resource is selected if its metadata.name matches any of the provided names. - In case when the list is empty, resource is matched too. - items: - type: string - type: array - x-kubernetes-list-type: set - type: object - x-kubernetes-validations: - - message: either names or a labelSelector must be specified - rule: (has(self.labelSelectors) || has(self.names)) && (size(self.labelSelectors) - > 0 || size(self.names) > 0) - failurePolicy: - description: |- - FailurePolicy defines how unexpected policy errors and webhook response timeout errors are handled. - Rules within the same policy share the same failure behavior. - This field should not be accessed directly, instead `GetFailurePolicy()` should be used. - Allowed values are Ignore or Fail. Defaults to Fail. - enum: - - Ignore - - Fail - type: string - generateExisting: - description: |- - GenerateExisting controls whether to trigger generate rule in existing resources - If is set to "true" generate rule will be triggered and applied to existing matched resources. - Defaults to "false" if not specified. - type: boolean - generateExistingOnPolicyUpdate: - description: Deprecated, use generateExisting instead - type: boolean - mutateExistingOnPolicyUpdate: - description: |- - MutateExistingOnPolicyUpdate controls if a mutateExisting policy is applied on policy events. - Default value is "false". - type: boolean - policyMetadata: - description: The metadata of the policy to be created. - properties: - annotations: - additionalProperties: - type: string - description: Annotations that are set on projected resource. - type: object - labels: - additionalProperties: - type: string - description: Labels that are set on projected resource. - type: object - type: object - policyName: - description: |- - PolicyName is the name to use when creating policy within a control plane. 
- optional, if not set, SharedUpboundPolicy name will be used. - When set, it is immutable. - maxLength: 253 - minLength: 1 - type: string - x-kubernetes-validations: - - message: policyName is immutable - rule: self == oldSelf - rules: - description: |- - Rules is a list of Rule instances. A Policy contains multiple rules and - each rule can validate, mutate, or generate resources. - items: - description: |- - Rule defines a validation, mutation, or generation control for matching resources. - Each rules contains a match declaration to select resources, and an optional exclude - declaration to specify which resources to exclude. - properties: - celPreconditions: - description: |- - CELPreconditions are used to determine if a policy rule should be applied by evaluating a - set of CEL conditions. It can only be used with the validate.cel subrule - items: - description: MatchCondition represents a condition which must - by fulfilled for a request to be sent to a webhook. - properties: - expression: - description: |- - Expression represents the expression which will be evaluated by CEL. Must evaluate to bool. - CEL expressions have access to the contents of the AdmissionRequest and Authorizer, organized into CEL variables: - - 'object' - The object from the incoming request. The value is null for DELETE requests. - 'oldObject' - The existing object. The value is null for CREATE requests. - 'request' - Attributes of the admission request(/pkg/apis/admission/types.go#AdmissionRequest). - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. - See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the - request resource. - Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ - - Required. - type: string - name: - description: |- - Name is an identifier for this match condition, used for strategic merging of MatchConditions, - as well as providing an identifier for logging purposes. A good name should be descriptive of - the associated expression. - Name must be a qualified name consisting of alphanumeric characters, '-', '_' or '.', and - must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or - '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an - optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName') - - Required. - type: string - required: - - expression - - name - type: object - type: array - context: - description: Context defines variables and data sources that - can be used during rule execution. - items: - description: |- - ContextEntry adds variables and data sources to a rule Context. Either a - ConfigMap reference or a APILookup must be provided. - properties: - apiCall: - description: |- - APICall is an HTTP request to the Kubernetes API server, or other JSON web service. - The data returned is stored in the context with the name for the context entry. - properties: - data: - description: Data specifies the POST data sent to - the server. 
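To ground the SharedUpboundPolicy spec fields described above, here is a sketch. The validate stanza is assumed to follow Kyverno's rule schema, since that part of the schema is not shown here; the selector, labels, and names are hypothetical:

  apiVersion: policy.spaces.upbound.io/v1alpha1
  kind: SharedUpboundPolicy
  metadata:
    name: require-owner-label      # hypothetical
    namespace: team-a              # hypothetical group namespace
  spec:
    policyName: require-owner      # immutable once set
    controlPlaneSelector:
      names:
        - ctp-dev                  # hypothetical control plane
    rules:
      - name: check-owner
        match:
          any:
            - resources:
                kinds:
                  - ConfigMap
        validate:                  # assumed Kyverno validate rule shape
          message: "An owner label is required."
          pattern:
            metadata:
              labels:
                owner: "?*"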
- items: - description: RequestData contains the HTTP POST - data - properties: - key: - description: Key is a unique identifier for - the data value - type: string - value: - description: Value is the data value - x-kubernetes-preserve-unknown-fields: true - required: - - key - - value - type: object - type: array - jmesPath: - description: |- - JMESPath is an optional JSON Match Expression that can be used to - transform the JSON response returned from the server. For example - a JMESPath of "items | length(@)" applied to the API server response - for the URLPath "/apis/apps/v1/deployments" will return the total count - of deployments across all namespaces. - type: string - method: - default: GET - description: Method is the HTTP request type (GET - or POST). - enum: - - GET - - POST - type: string - service: - description: Service is an API call to a JSON web - service - properties: - caBundle: - description: |- - CABundle is a PEM encoded CA bundle which will be used to validate - the server certificate. - type: string - url: - description: |- - URL is the JSON web service URL. A typical form is - `https://{service}.{namespace}:{port}/{path}`. - type: string - required: - - url - type: object - urlPath: - description: |- - URLPath is the URL path to be used in the HTTP GET or POST request to the - Kubernetes API server (e.g. "/api/v1/namespaces" or "/apis/apps/v1/deployments"). - The format required is the same format used by the `kubectl get --raw` command. - See https://kyverno.io/docs/writing-policies/external-data-sources/#variables-from-kubernetes-api-server-calls - for details. - type: string - type: object - configMap: - description: ConfigMap is the ConfigMap reference. - properties: - name: - description: Name is the ConfigMap name. - type: string - namespace: - description: Namespace is the ConfigMap namespace. - type: string - required: - - name - type: object - imageRegistry: - description: |- - ImageRegistry defines requests to an OCI/Docker V2 registry to fetch image - details. - properties: - imageRegistryCredentials: - description: ImageRegistryCredentials provides credentials - that will be used for authentication with registry - properties: - allowInsecureRegistry: - description: AllowInsecureRegistry allows insecure - access to a registry. - type: boolean - providers: - description: |- - Providers specifies a list of OCI Registry names, whose authentication providers are provided. - It can be of one of these values: default,google,azure,amazon,github. - items: - description: ImageRegistryCredentialsProvidersType - provides the list of credential providers - required. - enum: - - default - - amazon - - azure - - google - - github - type: string - type: array - secrets: - description: |- - Secrets specifies a list of secrets that are provided for credentials. - Secrets must live in the Kyverno namespace. - items: - type: string - type: array - type: object - jmesPath: - description: |- - JMESPath is an optional JSON Match Expression that can be used to - transform the ImageData struct returned as a result of processing - the image reference. - type: string - reference: - description: |- - Reference is image reference to a container image in the registry. - Example: ghcr.io/kyverno/kyverno:latest - type: string - required: - - reference - type: object - name: - description: Name is the variable name. - type: string - variable: - description: Variable defines an arbitrary JMESPath context - variable that can be defined inline. 
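The apiCall context entry above can be illustrated with the schema's own JMESPath example; the variable name is hypothetical:

  context:
    - name: deploymentCount          # hypothetical variable name
      apiCall:
        urlPath: /apis/apps/v1/deployments
        method: GET
        jmesPath: items | length(@)  # total deployments across all namespaces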
[Hunk continues: the vendored Kyverno `ClusterPolicy`/`Policy` CRD schema. This span removes the tail of the rule `context` entry's inline `variable` (`default`, `jmesPath`, `value`); the `exclude` block, which defines when a rule is not applied through `all` (ANDed) and `any` (ORed) `ResourceFilter` lists plus a deprecated direct `ResourceDescription` — each filter matching on `kinds`, `name`/`names` (`name` is deprecated in favor of `names`), `namespaces`, `annotations`, `namespaceSelector`, `selector`, `operations` (`CREATE`, `CONNECT`, `UPDATE`, `DELETE`), `clusterRoles`, `roles`, and `subjects`; and the `generate` block, which creates new resources from a `clone`/`cloneList` source or declared `data` (`apiVersion`, `kind`, `name`, `namespace`, `synchronize`, `uid`).]
[Hunk continues: `imageExtractors`, which map resource kinds to image fields for `verifyImages` rules (`path` is required; optional `jmesPath`, `key`, `name`, `value`); and the `match` block, which mirrors the `exclude` filter structure above and requires at least one kind.]
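The `match`/`exclude` filter structure is easier to read in an instance than in the schema. A minimal sketch, with illustrative names and a simple validation pattern standing in for a real rule body:

```yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: require-team-label  # illustrative name
spec:
  validationFailureAction: Audit
  rules:
    - name: check-team-label
      match:
        any:  # ORed filters; `all` would AND them instead
          - resources:
              kinds:
                - Deployment
              operations:
                - CREATE
                - UPDATE
      exclude:
        any:
          - resources:
              namespaces:
                - kube-system
      validate:
        message: "Deployments must carry a team label."
        pattern:
          metadata:
            labels:
              team: "?*"  # wildcard: at least one character
```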
[Hunk continues: the `mutate` block — `foreach` iterators that loop over a JMESPath `list` in `Ascending` or `Descending` `order`, each with per-entry `context` sources (`apiCall` with `data`/`jmesPath`/`method`/`service`/`urlPath`, `configMap`, `imageRegistry` with registry credentials, inline `variable`), optional nested `foreach`, per-element `patchStrategicMerge`/`patchesJson6902`, and `preconditions`; top-level `patchStrategicMerge` and `patchesJson6902`; and `targets` for mutating existing resources (`apiVersion`, `kind`, `name`, `namespace`, `context`, `preconditions`, `uid`).]
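As a sketch of the `mutate` fields above: a `foreach` entry iterating a JMESPath `list` with a per-element strategic merge patch, gated by a precondition. The `(name)` anchor matches the current element; all names are illustrative:

```yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: disallow-privilege-escalation  # illustrative name
spec:
  rules:
    - name: patch-containers
      match:
        any:
          - resources:
              kinds:
                - Pod
      preconditions:
        all:
          - key: "{{ request.operation }}"
            operator: Equals
            value: CREATE
      mutate:
        foreach:
          - list: "request.object.spec.containers"
            patchStrategicMerge:
              spec:
                containers:
                  - (name): "{{ element.name }}"  # anchor on the current element
                    securityContext:
                      allowPrivilegeEscalation: false
```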
[Hunk continues: the rule `name` (unique within the policy, 63 characters max); rule-level `preconditions` (nested `any`/`all` condition blocks); `skipBackgroundRequests` (defaults to `true`; set `false` to apply generate and mutate-existing rules to background-controller requests); and the `validate` block — `anyPattern`, plus `cel` with `auditAnnotations` (`key`, `valueExpression`), `expressions` (`expression`, `message`, `messageExpression`, `reason`), `paramKind` (`apiVersion`, `kind`), and `paramRef` (`name`, `namespace`, …).]
Allows limiting - the search for params to a specific namespace. Applies to both `name` and - `selector` fields. - - A per-namespace parameter may be used by specifying a namespace-scoped - `paramKind` in the policy and leaving this field empty. - - - If `paramKind` is cluster-scoped, this field MUST be unset. Setting this - field results in a configuration error. - - - If `paramKind` is namespace-scoped, the namespace of the object being - evaluated for admission will be used when this field is left unset. Take - care that if this is left empty the binding must not match any cluster-scoped - resources, which will result in an error. - type: string - parameterNotFoundAction: - description: |- - `parameterNotFoundAction` controls the behavior of the binding when the resource - exists, and name or selector is valid, but there are no parameters - matched by the binding. If the value is set to `Allow`, then no - matched parameters will be treated as successful validation by the binding. - If set to `Deny`, then no matched parameters will be subject to the - `failurePolicy` of the policy. - - Allowed values are `Allow` or `Deny` - Default to `Deny` - type: string - selector: - description: |- - selector can be used to match multiple param objects based on their labels. - Supply selector: {} to match all resources of the ParamKind. - - If multiple params are found, they are all evaluated with the policy expressions - and the results are ANDed together. - - One of `name` or `selector` must be set, but `name` and `selector` are - mutually exclusive properties. If one is set, the other must be unset. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are - ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that - the selector applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: object - x-kubernetes-map-type: atomic - variables: - description: |- - Variables contain definitions of variables that can be used in composition of other expressions. - Each variable is defined as a named CEL expression. - The variables defined here will be available under `variables` in other expressions of the policy. - items: - description: Variable is the definition of a variable - that is used for composition. 
- properties: - expression: - description: |- - Expression is the expression that will be evaluated as the value of the variable. - The CEL expression has access to the same identifiers as the CEL expressions in Validation. - type: string - name: - description: |- - Name is the name of the variable. The name must be a valid CEL identifier and unique among all variables. - The variable can be accessed in other expressions through `variables` - For example, if name is "foo", the variable will be available as `variables.foo` - type: string - required: - - expression - - name - type: object - type: array - type: object - deny: - description: Deny defines conditions used to pass or fail - a validation rule. - properties: - conditions: - description: |- - Multiple conditions can be declared under an `any` or `all` statement. A direct list - of conditions (without `any` or `all` statements) is also supported for backwards compatibility - but will be deprecated in the next major release. - See: https://kyverno.io/docs/writing-policies/validate/#deny-rules - x-kubernetes-preserve-unknown-fields: true - type: object - foreach: - description: ForEach applies validate rules to a list of - sub-elements by creating a context for each entry in the - list and looping over it to apply the specified logic. - items: - description: ForEachValidation applies validate rules - to a list of sub-elements by creating a context for - each entry in the list and looping over it to apply - the specified logic. - properties: - anyPattern: - description: |- - AnyPattern specifies list of validation patterns. At least one of the patterns - must be satisfied for the validation rule to succeed. - x-kubernetes-preserve-unknown-fields: true - context: - description: Context defines variables and data sources - that can be used during rule execution. - items: - description: |- - ContextEntry adds variables and data sources to a rule Context. Either a - ConfigMap reference or a APILookup must be provided. - properties: - apiCall: - description: |- - APICall is an HTTP request to the Kubernetes API server, or other JSON web service. - The data returned is stored in the context with the name for the context entry. - properties: - data: - description: Data specifies the POST data - sent to the server. - items: - description: RequestData contains the - HTTP POST data - properties: - key: - description: Key is a unique identifier - for the data value - type: string - value: - description: Value is the data value - x-kubernetes-preserve-unknown-fields: true - required: - - key - - value - type: object - type: array - jmesPath: - description: |- - JMESPath is an optional JSON Match Expression that can be used to - transform the JSON response returned from the server. For example - a JMESPath of "items | length(@)" applied to the API server response - for the URLPath "/apis/apps/v1/deployments" will return the total count - of deployments across all namespaces. - type: string - method: - default: GET - description: Method is the HTTP request - type (GET or POST). - enum: - - GET - - POST - type: string - service: - description: Service is an API call to a - JSON web service - properties: - caBundle: - description: |- - CABundle is a PEM encoded CA bundle which will be used to validate - the server certificate. - type: string - url: - description: |- - URL is the JSON web service URL. A typical form is - `https://{service}.{namespace}:{port}/{path}`. 
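# NOTE (illustrative sketch, not part of the generated CRD): the CEL
# validation block described above (expressions, message, reason, variables)
# might look like this inside a hypothetical rule. The variable name and the
# replica limit are assumptions; the field names follow this schema.
#
#   validate:
#     cel:
#       variables:
#       - name: replicas                           # accessible as variables.replicas
#         expression: "object.spec.replicas"
#       expressions:
#       - expression: "variables.replicas <= 10"
#         message: "replica count must not exceed 10"
#         reason: Invalid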
- type: string - required: - - url - type: object - urlPath: - description: |- - URLPath is the URL path to be used in the HTTP GET or POST request to the - Kubernetes API server (e.g. "/api/v1/namespaces" or "/apis/apps/v1/deployments"). - The format required is the same format used by the `kubectl get --raw` command. - See https://kyverno.io/docs/writing-policies/external-data-sources/#variables-from-kubernetes-api-server-calls - for details. - type: string - type: object - configMap: - description: ConfigMap is the ConfigMap reference. - properties: - name: - description: Name is the ConfigMap name. - type: string - namespace: - description: Namespace is the ConfigMap - namespace. - type: string - required: - - name - type: object - imageRegistry: - description: |- - ImageRegistry defines requests to an OCI/Docker V2 registry to fetch image - details. - properties: - imageRegistryCredentials: - description: ImageRegistryCredentials provides - credentials that will be used for authentication - with registry - properties: - allowInsecureRegistry: - description: AllowInsecureRegistry allows - insecure access to a registry. - type: boolean - providers: - description: |- - Providers specifies a list of OCI Registry names, whose authentication providers are provided. - It can be of one of these values: default,google,azure,amazon,github. - items: - description: ImageRegistryCredentialsProvidersType - provides the list of credential - providers required. - enum: - - default - - amazon - - azure - - google - - github - type: string - type: array - secrets: - description: |- - Secrets specifies a list of secrets that are provided for credentials. - Secrets must live in the Kyverno namespace. - items: - type: string - type: array - type: object - jmesPath: - description: |- - JMESPath is an optional JSON Match Expression that can be used to - transform the ImageData struct returned as a result of processing - the image reference. - type: string - reference: - description: |- - Reference is image reference to a container image in the registry. - Example: ghcr.io/kyverno/kyverno:latest - type: string - required: - - reference - type: object - name: - description: Name is the variable name. - type: string - variable: - description: Variable defines an arbitrary JMESPath - context variable that can be defined inline. - properties: - default: - description: |- - Default is an optional arbitrary JSON object that the variable may take if the JMESPath - expression evaluates to nil - x-kubernetes-preserve-unknown-fields: true - jmesPath: - description: |- - JMESPath is an optional JMESPath Expression that can be used to - transform the variable. - type: string - value: - description: Value is any arbitrary JSON - object representable in YAML or JSON form. - x-kubernetes-preserve-unknown-fields: true - type: object - type: object - type: array - deny: - description: Deny defines conditions used to pass - or fail a validation rule. - properties: - conditions: - description: |- - Multiple conditions can be declared under an `any` or `all` statement. A direct list - of conditions (without `any` or `all` statements) is also supported for backwards compatibility - but will be deprecated in the next major release. - See: https://kyverno.io/docs/writing-policies/validate/#deny-rules - x-kubernetes-preserve-unknown-fields: true - type: object - elementScope: - description: |- - ElementScope specifies whether to use the current list element as the scope for validation. Defaults to "true" if not specified. 
- When set to "false", "request.object" is used as the validation scope within the foreach - block to allow referencing other elements in the subtree. - type: boolean - foreach: - description: Foreach declares a nested foreach iterator - x-kubernetes-preserve-unknown-fields: true - list: - description: |- - List specifies a JMESPath expression that results in one or more elements - to which the validation logic is applied. - type: string - pattern: - description: Pattern specifies an overlay-style pattern - used to check resources. - x-kubernetes-preserve-unknown-fields: true - preconditions: - description: |- - AnyAllConditions are used to determine if a policy rule should be applied by evaluating a - set of conditions. The declaration can contain nested `any` or `all` statements. - See: https://kyverno.io/docs/writing-policies/preconditions/ - properties: - all: - description: |- - AllConditions enable variable-based conditional rule execution. This is useful for - finer control of when an rule is applied. A condition can reference object data - using JMESPath notation. - Here, all of the conditions need to pass - items: - description: Condition defines variable-based - conditional criteria for rule execution. - properties: - key: - description: Key is the context entry (using - JMESPath) for conditional rule evaluation. - x-kubernetes-preserve-unknown-fields: true - message: - description: Message is an optional display - message - type: string - operator: - description: |- - Operator is the conditional operation to perform. Valid operators are: - Equals, NotEquals, In, AnyIn, AllIn, NotIn, AnyNotIn, AllNotIn, GreaterThanOrEquals, - GreaterThan, LessThanOrEquals, LessThan, DurationGreaterThanOrEquals, DurationGreaterThan, - DurationLessThanOrEquals, DurationLessThan - enum: - - Equals - - NotEquals - - In - - AnyIn - - AllIn - - NotIn - - AnyNotIn - - AllNotIn - - GreaterThanOrEquals - - GreaterThan - - LessThanOrEquals - - LessThan - - DurationGreaterThanOrEquals - - DurationGreaterThan - - DurationLessThanOrEquals - - DurationLessThan - type: string - value: - description: |- - Value is the conditional value, or set of values. The values can be fixed set - or can be variables declared using JMESPath. - x-kubernetes-preserve-unknown-fields: true - type: object - type: array - any: - description: |- - AnyConditions enable variable-based conditional rule execution. This is useful for - finer control of when an rule is applied. A condition can reference object data - using JMESPath notation. - Here, at least one of the conditions need to pass - items: - description: Condition defines variable-based - conditional criteria for rule execution. - properties: - key: - description: Key is the context entry (using - JMESPath) for conditional rule evaluation. - x-kubernetes-preserve-unknown-fields: true - message: - description: Message is an optional display - message - type: string - operator: - description: |- - Operator is the conditional operation to perform. 
Valid operators are: - Equals, NotEquals, In, AnyIn, AllIn, NotIn, AnyNotIn, AllNotIn, GreaterThanOrEquals, - GreaterThan, LessThanOrEquals, LessThan, DurationGreaterThanOrEquals, DurationGreaterThan, - DurationLessThanOrEquals, DurationLessThan - enum: - - Equals - - NotEquals - - In - - AnyIn - - AllIn - - NotIn - - AnyNotIn - - AllNotIn - - GreaterThanOrEquals - - GreaterThan - - LessThanOrEquals - - LessThan - - DurationGreaterThanOrEquals - - DurationGreaterThan - - DurationLessThanOrEquals - - DurationLessThan - type: string - value: - description: |- - Value is the conditional value, or set of values. The values can be fixed set - or can be variables declared using JMESPath. - x-kubernetes-preserve-unknown-fields: true - type: object - type: array - type: object - x-kubernetes-preserve-unknown-fields: true - type: object - type: array - manifests: - description: Manifest specifies conditions for manifest - verification - properties: - annotationDomain: - description: AnnotationDomain is custom domain of annotation - for message and signature. Default is "cosign.sigstore.dev". - type: string - attestors: - description: Attestors specified the required attestors - (i.e. authorities) - items: - properties: - count: - description: |- - Count specifies the required number of entries that must match. If the count is null, all entries must match - (a logical AND). If the count is 1, at least one entry must match (a logical OR). If the count contains a - value N, then N must be less than or equal to the size of entries, and at least N entries must match. - minimum: 1 - type: integer - entries: - description: |- - Entries contains the available attestors. An attestor can be a static key, - attributes for keyless verification, or a nested attestor declaration. - items: - properties: - annotations: - additionalProperties: - type: string - description: |- - Annotations are used for image verification. - Every specified key-value pair must exist and match in the verified payload. - The payload may contain other key-value pairs. - type: object - attestor: - description: Attestor is a nested set of - Attestor used to specify a more complex - set of match authorities. - x-kubernetes-preserve-unknown-fields: true - certificates: - description: Certificates specifies one - or more certificates. - properties: - cert: - description: Cert is an optional PEM-encoded - public certificate. - type: string - certChain: - description: CertChain is an optional - PEM encoded set of certificates used - to verify. - type: string - ctlog: - description: |- - CTLog (certificate timestamp log) provides a configuration for validation of Signed Certificate - Timestamps (SCTs). If the value is unset, the default behavior by Cosign is used. - properties: - ignoreSCT: - description: |- - IgnoreSCT defines whether to use the Signed Certificate Timestamp (SCT) log to check for a certificate - timestamp. Default is false. Set to true if this was opted out during signing. - type: boolean - pubkey: - description: PubKey, if set, is - used to validate SCTs against - a custom source. - type: string - type: object - rekor: - description: |- - Rekor provides configuration for the Rekor transparency log service. If an empty object - is provided the public instance of Rekor (https://rekor.sigstore.dev) is used. - properties: - ignoreTlog: - description: IgnoreTlog skips transparency - log verification. - type: boolean - pubkey: - description: |- - RekorPubKey is an optional PEM-encoded public key to use for a custom Rekor. 
- If set, this will be used to validate transparency log signatures from a custom Rekor. - type: string - url: - description: URL is the address - of the transparency log. Defaults - to the public Rekor log instance - https://rekor.sigstore.dev. - type: string - required: - - url - type: object - type: object - keyless: - description: |- - Keyless is a set of attribute used to verify a Sigstore keyless attestor. - See https://github.com/sigstore/cosign/blob/main/KEYLESS.md. - properties: - additionalExtensions: - additionalProperties: - type: string - description: AdditionalExtensions are - certificate-extensions used for keyless - signing. - type: object - ctlog: - description: |- - CTLog (certificate timestamp log) provides a configuration for validation of Signed Certificate - Timestamps (SCTs). If the value is unset, the default behavior by Cosign is used. - properties: - ignoreSCT: - description: |- - IgnoreSCT defines whether to use the Signed Certificate Timestamp (SCT) log to check for a certificate - timestamp. Default is false. Set to true if this was opted out during signing. - type: boolean - pubkey: - description: PubKey, if set, is - used to validate SCTs against - a custom source. - type: string - type: object - issuer: - description: Issuer is the certificate - issuer used for keyless signing. - type: string - rekor: - description: |- - Rekor provides configuration for the Rekor transparency log service. If an empty object - is provided the public instance of Rekor (https://rekor.sigstore.dev) is used. - properties: - ignoreTlog: - description: IgnoreTlog skips transparency - log verification. - type: boolean - pubkey: - description: |- - RekorPubKey is an optional PEM-encoded public key to use for a custom Rekor. - If set, this will be used to validate transparency log signatures from a custom Rekor. - type: string - url: - description: URL is the address - of the transparency log. Defaults - to the public Rekor log instance - https://rekor.sigstore.dev. - type: string - required: - - url - type: object - roots: - description: |- - Roots is an optional set of PEM encoded trusted root certificates. - If not provided, the system roots are used. - type: string - subject: - description: Subject is the verified - identity used for keyless signing, - for example the email address. - type: string - type: object - keys: - description: Keys specifies one or more - public keys. - properties: - ctlog: - description: |- - CTLog (certificate timestamp log) provides a configuration for validation of Signed Certificate - Timestamps (SCTs). If the value is unset, the default behavior by Cosign is used. - properties: - ignoreSCT: - description: |- - IgnoreSCT defines whether to use the Signed Certificate Timestamp (SCT) log to check for a certificate - timestamp. Default is false. Set to true if this was opted out during signing. - type: boolean - pubkey: - description: PubKey, if set, is - used to validate SCTs against - a custom source. - type: string - type: object - kms: - description: |- - KMS provides the URI to the public key stored in a Key Management System. See: - https://github.com/sigstore/cosign/blob/main/KMS.md - type: string - publicKeys: - description: |- - Keys is a set of X.509 public keys used to verify image signatures. 
The keys can be directly - specified or can be a variable reference to a key specified in a ConfigMap (see - https://kyverno.io/docs/writing-policies/variables/), or reference a standard Kubernetes Secret - elsewhere in the cluster by specifying it in the format "k8s:///". - The named Secret must specify a key `cosign.pub` containing the public key used for - verification, (see https://github.com/sigstore/cosign/blob/main/KMS.md#kubernetes-secret). - When multiple keys are specified each key is processed as a separate staticKey entry - (.attestors[*].entries.keys) within the set of attestors and the count is applied across the keys. - type: string - rekor: - description: |- - Rekor provides configuration for the Rekor transparency log service. If an empty object - is provided the public instance of Rekor (https://rekor.sigstore.dev) is used. - properties: - ignoreTlog: - description: IgnoreTlog skips transparency - log verification. - type: boolean - pubkey: - description: |- - RekorPubKey is an optional PEM-encoded public key to use for a custom Rekor. - If set, this will be used to validate transparency log signatures from a custom Rekor. - type: string - url: - description: URL is the address - of the transparency log. Defaults - to the public Rekor log instance - https://rekor.sigstore.dev. - type: string - required: - - url - type: object - secret: - description: Reference to a Secret resource - that contains a public key - properties: - name: - description: Name of the secret. - The provided secret must contain - a key named cosign.pub. - type: string - namespace: - description: Namespace name where - the Secret exists. - type: string - required: - - name - - namespace - type: object - signatureAlgorithm: - default: sha256 - description: Specify signature algorithm - for public keys. Supported values - are sha224, sha256, sha384 and sha512. - type: string - type: object - repository: - description: |- - Repository is an optional alternate OCI repository to use for signatures and attestations that match this rule. - If specified Repository will override other OCI image repository locations for this Attestor. - type: string - type: object - type: array - type: object - type: array - dryRun: - description: DryRun configuration - properties: - enable: - type: boolean - namespace: - type: string - type: object - ignoreFields: - description: Fields which will be ignored while comparing - manifests. - items: - properties: - fields: - items: - type: string - type: array - objects: - items: - properties: - group: - type: string - kind: - type: string - name: - type: string - namespace: - type: string - version: - type: string - type: object - type: array - type: object - type: array - repository: - description: |- - Repository is an optional alternate OCI repository to use for resource bundle reference. - The repository can be overridden per Attestor or Attestation. - type: string - type: object - message: - description: Message specifies a custom message to be displayed - on failure. - type: string - pattern: - description: Pattern specifies an overlay-style pattern - used to check resources. - x-kubernetes-preserve-unknown-fields: true - podSecurity: - description: |- - PodSecurity applies exemptions for Kubernetes Pod Security admission - by specifying exclusions for Pod Security Standards controls. - properties: - exclude: - description: Exclude specifies the Pod Security Standard - controls to be excluded. 
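# NOTE (illustrative sketch, not part of the generated CRD): the manifest
# verification block described above, assuming a hypothetical Secret that
# holds a cosign public key. Per the schema, the referenced Secret must
# contain a key named "cosign.pub".
#
#   validate:
#     manifests:
#       attestors:
#       - count: 1                         # at least one entry must match
#         entries:
#         - keys:
#             secret:
#               name: cosign-public-key    # hypothetical Secret name
#               namespace: kyverno
#             signatureAlgorithm: sha256
#       dryRun:
#         enable: true
#         namespace: kyverno               # namespace used for the dry-run check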
- items: - description: PodSecurityStandard specifies the Pod - Security Standard controls to be excluded. - properties: - controlName: - description: |- - ControlName specifies the name of the Pod Security Standard control. - See: https://kubernetes.io/docs/concepts/security/pod-security-standards/ - enum: - - HostProcess - - Host Namespaces - - Privileged Containers - - Capabilities - - HostPath Volumes - - Host Ports - - AppArmor - - SELinux - - /proc Mount Type - - Seccomp - - Sysctls - - Volume Types - - Privilege Escalation - - Running as Non-root - - Running as Non-root user - type: string - images: - description: |- - Images selects matching containers and applies the container level PSS. - Each image is the image name consisting of the registry address, repository, image, and tag. - Empty list matches no containers, PSS checks are applied at the pod level only. - Wildcards ('*' and '?') are allowed. See: https://kubernetes.io/docs/concepts/containers/images. - items: - type: string - type: array - required: - - controlName - type: object - type: array - level: - description: |- - Level defines the Pod Security Standard level to be applied to workloads. - Allowed values are privileged, baseline, and restricted. - enum: - - privileged - - baseline - - restricted - type: string - version: - description: |- - Version defines the Pod Security Standard versions that Kubernetes supports. - Allowed values are v1.19, v1.20, v1.21, v1.22, v1.23, v1.24, v1.25, v1.26, latest. Defaults to latest. - enum: - - v1.19 - - v1.20 - - v1.21 - - v1.22 - - v1.23 - - v1.24 - - v1.25 - - v1.26 - - latest - type: string - type: object - type: object - verifyImages: - description: VerifyImages is used to verify image signatures - and mutate them to add a digest - items: - description: |- - ImageVerification validates that images that match the specified pattern - are signed with the supplied public key. Once the image is verified it is - mutated to include the SHA digest retrieved during the registration. - properties: - additionalExtensions: - additionalProperties: - type: string - description: Deprecated. - type: object - annotations: - additionalProperties: - type: string - description: Deprecated. Use annotations per Attestor - instead. - type: object - attestations: - description: |- - Attestations are optional checks for signed in-toto Statements used to verify the image. - See https://github.com/in-toto/attestation. Kyverno fetches signed attestations from the - OCI registry and decodes them into a list of Statement declarations. - items: - description: |- - Attestation are checks for signed in-toto Statements that are used to verify the image. - See https://github.com/in-toto/attestation. Kyverno fetches signed attestations from the - OCI registry and decodes them into a list of Statements. - properties: - attestors: - description: Attestors specify the required attestors - (i.e. authorities). - items: - properties: - count: - description: |- - Count specifies the required number of entries that must match. If the count is null, all entries must match - (a logical AND). If the count is 1, at least one entry must match (a logical OR). If the count contains a - value N, then N must be less than or equal to the size of entries, and at least N entries must match. - minimum: 1 - type: integer - entries: - description: |- - Entries contains the available attestors. An attestor can be a static key, - attributes for keyless verification, or a nested attestor declaration. 
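# NOTE (illustrative sketch, not part of the generated CRD): the podSecurity
# exemption block described above. The image pattern is an assumption; the
# level, version, and controlName values come from the enums in this schema.
#
#   validate:
#     podSecurity:
#       level: restricted
#       version: latest
#       exclude:
#       - controlName: Capabilities
#         images:
#         - "registry.example.com/trusted/*"   # hypothetical image pattern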
- items: - properties: - annotations: - additionalProperties: - type: string - description: |- - Annotations are used for image verification. - Every specified key-value pair must exist and match in the verified payload. - The payload may contain other key-value pairs. - type: object - attestor: - description: Attestor is a nested set - of Attestor used to specify a more - complex set of match authorities. - x-kubernetes-preserve-unknown-fields: true - certificates: - description: Certificates specifies - one or more certificates. - properties: - cert: - description: Cert is an optional - PEM-encoded public certificate. - type: string - certChain: - description: CertChain is an optional - PEM encoded set of certificates - used to verify. - type: string - ctlog: - description: |- - CTLog (certificate timestamp log) provides a configuration for validation of Signed Certificate - Timestamps (SCTs). If the value is unset, the default behavior by Cosign is used. - properties: - ignoreSCT: - description: |- - IgnoreSCT defines whether to use the Signed Certificate Timestamp (SCT) log to check for a certificate - timestamp. Default is false. Set to true if this was opted out during signing. - type: boolean - pubkey: - description: PubKey, if set, - is used to validate SCTs against - a custom source. - type: string - type: object - rekor: - description: |- - Rekor provides configuration for the Rekor transparency log service. If an empty object - is provided the public instance of Rekor (https://rekor.sigstore.dev) is used. - properties: - ignoreTlog: - description: IgnoreTlog skips - transparency log verification. - type: boolean - pubkey: - description: |- - RekorPubKey is an optional PEM-encoded public key to use for a custom Rekor. - If set, this will be used to validate transparency log signatures from a custom Rekor. - type: string - url: - description: URL is the address - of the transparency log. Defaults - to the public Rekor log instance - https://rekor.sigstore.dev. - type: string - required: - - url - type: object - type: object - keyless: - description: |- - Keyless is a set of attribute used to verify a Sigstore keyless attestor. - See https://github.com/sigstore/cosign/blob/main/KEYLESS.md. - properties: - additionalExtensions: - additionalProperties: - type: string - description: AdditionalExtensions - are certificate-extensions used - for keyless signing. - type: object - ctlog: - description: |- - CTLog (certificate timestamp log) provides a configuration for validation of Signed Certificate - Timestamps (SCTs). If the value is unset, the default behavior by Cosign is used. - properties: - ignoreSCT: - description: |- - IgnoreSCT defines whether to use the Signed Certificate Timestamp (SCT) log to check for a certificate - timestamp. Default is false. Set to true if this was opted out during signing. - type: boolean - pubkey: - description: PubKey, if set, - is used to validate SCTs against - a custom source. - type: string - type: object - issuer: - description: Issuer is the certificate - issuer used for keyless signing. - type: string - rekor: - description: |- - Rekor provides configuration for the Rekor transparency log service. If an empty object - is provided the public instance of Rekor (https://rekor.sigstore.dev) is used. - properties: - ignoreTlog: - description: IgnoreTlog skips - transparency log verification. - type: boolean - pubkey: - description: |- - RekorPubKey is an optional PEM-encoded public key to use for a custom Rekor. 
- If set, this will be used to validate transparency log signatures from a custom Rekor. - type: string - url: - description: URL is the address - of the transparency log. Defaults - to the public Rekor log instance - https://rekor.sigstore.dev. - type: string - required: - - url - type: object - roots: - description: |- - Roots is an optional set of PEM encoded trusted root certificates. - If not provided, the system roots are used. - type: string - subject: - description: Subject is the verified - identity used for keyless signing, - for example the email address. - type: string - type: object - keys: - description: Keys specifies one or more - public keys. - properties: - ctlog: - description: |- - CTLog (certificate timestamp log) provides a configuration for validation of Signed Certificate - Timestamps (SCTs). If the value is unset, the default behavior by Cosign is used. - properties: - ignoreSCT: - description: |- - IgnoreSCT defines whether to use the Signed Certificate Timestamp (SCT) log to check for a certificate - timestamp. Default is false. Set to true if this was opted out during signing. - type: boolean - pubkey: - description: PubKey, if set, - is used to validate SCTs against - a custom source. - type: string - type: object - kms: - description: |- - KMS provides the URI to the public key stored in a Key Management System. See: - https://github.com/sigstore/cosign/blob/main/KMS.md - type: string - publicKeys: - description: |- - Keys is a set of X.509 public keys used to verify image signatures. The keys can be directly - specified or can be a variable reference to a key specified in a ConfigMap (see - https://kyverno.io/docs/writing-policies/variables/), or reference a standard Kubernetes Secret - elsewhere in the cluster by specifying it in the format "k8s:///". - The named Secret must specify a key `cosign.pub` containing the public key used for - verification, (see https://github.com/sigstore/cosign/blob/main/KMS.md#kubernetes-secret). - When multiple keys are specified each key is processed as a separate staticKey entry - (.attestors[*].entries.keys) within the set of attestors and the count is applied across the keys. - type: string - rekor: - description: |- - Rekor provides configuration for the Rekor transparency log service. If an empty object - is provided the public instance of Rekor (https://rekor.sigstore.dev) is used. - properties: - ignoreTlog: - description: IgnoreTlog skips - transparency log verification. - type: boolean - pubkey: - description: |- - RekorPubKey is an optional PEM-encoded public key to use for a custom Rekor. - If set, this will be used to validate transparency log signatures from a custom Rekor. - type: string - url: - description: URL is the address - of the transparency log. Defaults - to the public Rekor log instance - https://rekor.sigstore.dev. - type: string - required: - - url - type: object - secret: - description: Reference to a Secret - resource that contains a public - key - properties: - name: - description: Name of the secret. - The provided secret must contain - a key named cosign.pub. - type: string - namespace: - description: Namespace name - where the Secret exists. - type: string - required: - - name - - namespace - type: object - signatureAlgorithm: - default: sha256 - description: Specify signature algorithm - for public keys. Supported values - are sha224, sha256, sha384 and - sha512. 
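# NOTE (illustrative sketch, not part of the generated CRD): a keyless
# attestor entry for an attestation check, as described above. The issuer and
# subject are assumptions for a hypothetical CI identity; see the Sigstore
# keyless documentation referenced in the schema.
#
#       attestors:
#       - count: 1
#         entries:
#         - keyless:
#             issuer: https://token.actions.githubusercontent.com   # assumed OIDC issuer
#             subject: https://github.com/example/app/*             # hypothetical identity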
- type: string - type: object - repository: - description: |- - Repository is an optional alternate OCI repository to use for signatures and attestations that match this rule. - If specified Repository will override other OCI image repository locations for this Attestor. - type: string - type: object - type: array - type: object - type: array - conditions: - description: |- - Conditions are used to verify attributes within a Predicate. If no Conditions are specified - the attestation check is satisfied as long as there are predicates that match the predicate type. - items: - description: |- - AnyAllConditions consists of conditions wrapped denoting a logical criterion to be fulfilled. - AnyConditions get fulfilled when at least one of its sub-conditions passes. - AllConditions get fulfilled only when all of its sub-conditions pass. - properties: - all: - description: |- - AllConditions enable variable-based conditional rule execution. This is useful for - finer control of when a rule is applied. A condition can reference object data - using JMESPath notation. - Here, all of the conditions need to pass - items: - description: Condition defines variable-based - conditional criteria for rule execution. - properties: - key: - description: Key is the context entry - (using JMESPath) for conditional rule - evaluation. - x-kubernetes-preserve-unknown-fields: true - message: - description: Message is an optional - display message - type: string - operator: - description: |- - Operator is the conditional operation to perform. Valid operators are: - Equals, NotEquals, In, AnyIn, AllIn, NotIn, AnyNotIn, AllNotIn, GreaterThanOrEquals, - GreaterThan, LessThanOrEquals, LessThan, DurationGreaterThanOrEquals, DurationGreaterThan, - DurationLessThanOrEquals, DurationLessThan - enum: - - Equals - - NotEquals - - In - - AnyIn - - AllIn - - NotIn - - AnyNotIn - - AllNotIn - - GreaterThanOrEquals - - GreaterThan - - LessThanOrEquals - - LessThan - - DurationGreaterThanOrEquals - - DurationGreaterThan - - DurationLessThanOrEquals - - DurationLessThan - type: string - value: - description: |- - Value is the conditional value, or set of values. The values can be a fixed set - or can be variables declared using JMESPath. - x-kubernetes-preserve-unknown-fields: true - type: object - type: array - any: - description: |- - AnyConditions enable variable-based conditional rule execution. This is useful for - finer control of when a rule is applied. A condition can reference object data - using JMESPath notation. - Here, at least one of the conditions needs to pass - items: - description: Condition defines variable-based - conditional criteria for rule execution. - properties: - key: - description: Key is the context entry - (using JMESPath) for conditional rule - evaluation. - x-kubernetes-preserve-unknown-fields: true - message: - description: Message is an optional - display message - type: string - operator: - description: |- - Operator is the conditional operation to perform.
Valid operators are: - Equals, NotEquals, In, AnyIn, AllIn, NotIn, AnyNotIn, AllNotIn, GreaterThanOrEquals, - GreaterThan, LessThanOrEquals, LessThan, DurationGreaterThanOrEquals, DurationGreaterThan, - DurationLessThanOrEquals, DurationLessThan - enum: - - Equals - - NotEquals - - In - - AnyIn - - AllIn - - NotIn - - AnyNotIn - - AllNotIn - - GreaterThanOrEquals - - GreaterThan - - LessThanOrEquals - - LessThan - - DurationGreaterThanOrEquals - - DurationGreaterThan - - DurationLessThanOrEquals - - DurationLessThan - type: string - value: - description: |- - Value is the conditional value, or set of values. The values can be fixed set - or can be variables declared using JMESPath. - x-kubernetes-preserve-unknown-fields: true - type: object - type: array - type: object - type: array - predicateType: - description: Deprecated in favour of 'Type', to - be removed soon - type: string - type: - description: Type defines the type of attestation - contained within the Statement. - type: string - type: object - type: array - attestors: - description: Attestors specified the required attestors - (i.e. authorities) - items: - properties: - count: - description: |- - Count specifies the required number of entries that must match. If the count is null, all entries must match - (a logical AND). If the count is 1, at least one entry must match (a logical OR). If the count contains a - value N, then N must be less than or equal to the size of entries, and at least N entries must match. - minimum: 1 - type: integer - entries: - description: |- - Entries contains the available attestors. An attestor can be a static key, - attributes for keyless verification, or a nested attestor declaration. - items: - properties: - annotations: - additionalProperties: - type: string - description: |- - Annotations are used for image verification. - Every specified key-value pair must exist and match in the verified payload. - The payload may contain other key-value pairs. - type: object - attestor: - description: Attestor is a nested set of Attestor - used to specify a more complex set of match - authorities. - x-kubernetes-preserve-unknown-fields: true - certificates: - description: Certificates specifies one or - more certificates. - properties: - cert: - description: Cert is an optional PEM-encoded - public certificate. - type: string - certChain: - description: CertChain is an optional - PEM encoded set of certificates used - to verify. - type: string - ctlog: - description: |- - CTLog (certificate timestamp log) provides a configuration for validation of Signed Certificate - Timestamps (SCTs). If the value is unset, the default behavior by Cosign is used. - properties: - ignoreSCT: - description: |- - IgnoreSCT defines whether to use the Signed Certificate Timestamp (SCT) log to check for a certificate - timestamp. Default is false. Set to true if this was opted out during signing. - type: boolean - pubkey: - description: PubKey, if set, is used - to validate SCTs against a custom - source. - type: string - type: object - rekor: - description: |- - Rekor provides configuration for the Rekor transparency log service. If an empty object - is provided the public instance of Rekor (https://rekor.sigstore.dev) is used. - properties: - ignoreTlog: - description: IgnoreTlog skips transparency - log verification. - type: boolean - pubkey: - description: |- - RekorPubKey is an optional PEM-encoded public key to use for a custom Rekor. - If set, this will be used to validate transparency log signatures from a custom Rekor. 
- type: string - url: - description: URL is the address of - the transparency log. Defaults to - the public Rekor log instance https://rekor.sigstore.dev. - type: string - required: - - url - type: object - type: object - keyless: - description: |- - Keyless is a set of attribute used to verify a Sigstore keyless attestor. - See https://github.com/sigstore/cosign/blob/main/KEYLESS.md. - properties: - additionalExtensions: - additionalProperties: - type: string - description: AdditionalExtensions are - certificate-extensions used for keyless - signing. - type: object - ctlog: - description: |- - CTLog (certificate timestamp log) provides a configuration for validation of Signed Certificate - Timestamps (SCTs). If the value is unset, the default behavior by Cosign is used. - properties: - ignoreSCT: - description: |- - IgnoreSCT defines whether to use the Signed Certificate Timestamp (SCT) log to check for a certificate - timestamp. Default is false. Set to true if this was opted out during signing. - type: boolean - pubkey: - description: PubKey, if set, is used - to validate SCTs against a custom - source. - type: string - type: object - issuer: - description: Issuer is the certificate - issuer used for keyless signing. - type: string - rekor: - description: |- - Rekor provides configuration for the Rekor transparency log service. If an empty object - is provided the public instance of Rekor (https://rekor.sigstore.dev) is used. - properties: - ignoreTlog: - description: IgnoreTlog skips transparency - log verification. - type: boolean - pubkey: - description: |- - RekorPubKey is an optional PEM-encoded public key to use for a custom Rekor. - If set, this will be used to validate transparency log signatures from a custom Rekor. - type: string - url: - description: URL is the address of - the transparency log. Defaults to - the public Rekor log instance https://rekor.sigstore.dev. - type: string - required: - - url - type: object - roots: - description: |- - Roots is an optional set of PEM encoded trusted root certificates. - If not provided, the system roots are used. - type: string - subject: - description: Subject is the verified identity - used for keyless signing, for example - the email address. - type: string - type: object - keys: - description: Keys specifies one or more public - keys. - properties: - ctlog: - description: |- - CTLog (certificate timestamp log) provides a configuration for validation of Signed Certificate - Timestamps (SCTs). If the value is unset, the default behavior by Cosign is used. - properties: - ignoreSCT: - description: |- - IgnoreSCT defines whether to use the Signed Certificate Timestamp (SCT) log to check for a certificate - timestamp. Default is false. Set to true if this was opted out during signing. - type: boolean - pubkey: - description: PubKey, if set, is used - to validate SCTs against a custom - source. - type: string - type: object - kms: - description: |- - KMS provides the URI to the public key stored in a Key Management System. See: - https://github.com/sigstore/cosign/blob/main/KMS.md - type: string - publicKeys: - description: |- - Keys is a set of X.509 public keys used to verify image signatures. The keys can be directly - specified or can be a variable reference to a key specified in a ConfigMap (see - https://kyverno.io/docs/writing-policies/variables/), or reference a standard Kubernetes Secret - elsewhere in the cluster by specifying it in the format "k8s:///". 
- The named Secret must specify a key `cosign.pub` containing the public key used for - verification, (see https://github.com/sigstore/cosign/blob/main/KMS.md#kubernetes-secret). - When multiple keys are specified each key is processed as a separate staticKey entry - (.attestors[*].entries.keys) within the set of attestors and the count is applied across the keys. - type: string - rekor: - description: |- - Rekor provides configuration for the Rekor transparency log service. If an empty object - is provided the public instance of Rekor (https://rekor.sigstore.dev) is used. - properties: - ignoreTlog: - description: IgnoreTlog skips transparency - log verification. - type: boolean - pubkey: - description: |- - RekorPubKey is an optional PEM-encoded public key to use for a custom Rekor. - If set, this will be used to validate transparency log signatures from a custom Rekor. - type: string - url: - description: URL is the address of - the transparency log. Defaults to - the public Rekor log instance https://rekor.sigstore.dev. - type: string - required: - - url - type: object - secret: - description: Reference to a Secret resource - that contains a public key - properties: - name: - description: Name of the secret. The - provided secret must contain a key - named cosign.pub. - type: string - namespace: - description: Namespace name where - the Secret exists. - type: string - required: - - name - - namespace - type: object - signatureAlgorithm: - default: sha256 - description: Specify signature algorithm - for public keys. Supported values are - sha224, sha256, sha384 and sha512. - type: string - type: object - repository: - description: |- - Repository is an optional alternate OCI repository to use for signatures and attestations that match this rule. - If specified Repository will override other OCI image repository locations for this Attestor. - type: string - type: object - type: array - type: object - type: array - image: - description: Deprecated. Use ImageReferences instead. - type: string - imageReferences: - description: |- - ImageReferences is a list of matching image reference patterns. At least one pattern in the - list must match the image for the rule to apply. Each image reference consists of a registry - address (defaults to docker.io), repository, image, and tag (defaults to latest). - Wildcards ('*' and '?') are allowed. See: https://kubernetes.io/docs/concepts/containers/images. - items: - type: string - type: array - imageRegistryCredentials: - description: ImageRegistryCredentials provides credentials - that will be used for authentication with registry. - properties: - allowInsecureRegistry: - description: AllowInsecureRegistry allows insecure - access to a registry. - type: boolean - providers: - description: |- - Providers specifies a list of OCI Registry names, whose authentication providers are provided. - It can be of one of these values: default,google,azure,amazon,github. - items: - description: ImageRegistryCredentialsProvidersType - provides the list of credential providers required. - enum: - - default - - amazon - - azure - - google - - github - type: string - type: array - secrets: - description: |- - Secrets specifies a list of secrets that are provided for credentials. - Secrets must live in the Kyverno namespace. - items: - type: string - type: array - type: object - issuer: - description: Deprecated. Use KeylessAttestor instead. - type: string - key: - description: Deprecated. Use StaticKeyAttestor instead. 
- type: string - mutateDigest: - default: true - description: |- - MutateDigest enables replacement of image tags with digests. - Defaults to true. - type: boolean - repository: - description: |- - Repository is an optional alternate OCI repository to use for image signatures and attestations that match this rule. - If specified Repository will override the default OCI image repository configured for the installation. - The repository can also be overridden per Attestor or Attestation. - type: string - required: - default: true - description: Required validates that images are verified, - i.e. have passed a signature or attestation - check. - type: boolean - roots: - description: Deprecated. Use KeylessAttestor instead. - type: string - subject: - description: Deprecated. Use KeylessAttestor instead. - type: string - type: - description: |- - Type specifies the method of signature validation. The allowed options - are Cosign and Notary. By default Cosign is used if a type is not specified. - enum: - - Cosign - - Notary - type: string - useCache: - default: true - description: UseCache enables caching of image verify - responses for this rule. - type: boolean - verifyDigest: - default: true - description: VerifyDigest validates that images have a - digest. - type: boolean - type: object - type: array - required: - - name - type: object - type: array - schemaValidation: - description: |- - SchemaValidation skips validation checks for policies as well as patched resources. - Optional. The default value is set to "true"; it must be set to "false" to disable the validation checks. - type: boolean - useServerSideApply: - description: |- - UseServerSideApply controls whether to use server-side apply for generate rules. - If it is set to "true", create & update for generate rules will use apply instead of create/update. - Defaults to "false" if not specified. - type: boolean - validationFailureAction: - default: Audit - description: |- - ValidationFailureAction defines if a validation policy rule violation should block - the admission review request (enforce), or allow (audit) the admission review request - and report an error in a policy report. Optional. - Allowed values are audit or enforce. The default value is "Audit". - enum: - - audit - - enforce - - Audit - - Enforce - type: string - validationFailureActionOverrides: - description: |- - ValidationFailureActionOverrides is a Cluster Policy attribute that specifies ValidationFailureAction - namespace-wise. It overrides ValidationFailureAction for the specified namespaces. - items: - properties: - action: - description: ValidationFailureAction defines the policy validation - failure action - enum: - - audit - - enforce - - Audit - - Enforce - type: string - namespaceSelector: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist.
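# NOTE (illustrative sketch, not part of the generated CRD): a verifyImages
# rule pulling together the fields described above. The image pattern and the
# pull secret name are assumptions; the flag values shown match the schema
# defaults, and attestors are omitted for brevity.
#
#   verifyImages:
#   - imageReferences:
#     - "ghcr.io/example/*"              # hypothetical registry pattern
#     imageRegistryCredentials:
#       providers:
#       - github
#       secrets:
#       - regcred                        # hypothetical; must live in the Kyverno namespace
#     type: Cosign
#     mutateDigest: true                 # replace tags with digests (default)
#     required: true
#     verifyDigest: true
#     useCache: true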
- type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - namespaces: - items: - type: string - type: array - type: object - type: array - webhookTimeoutSeconds: - description: |- - WebhookTimeoutSeconds specifies the maximum time in seconds allowed to apply this policy. - After the configured time expires, the admission request may fail, or may simply ignore the policy results, - based on the failure policy. The default timeout is 10s, the value must be between 1 and 30 seconds. - format: int32 - type: integer - required: - - controlPlaneSelector - type: object - x-kubernetes-validations: - - message: policyName is immutable - rule: has(self.policyName) == has(oldSelf.policyName) - status: - description: SharedUpboundPolicyStatus defines the observed state of the - projected policies. - properties: - failed: - description: list of provisioning failures. - items: - description: SharedUpboundPolicyProvisioningFailure defines policy - provisioning failure. - properties: - conditions: - description: List of conditions. - items: - description: Condition contains details for one aspect of - the current state of this API Resource. - properties: - lastTransitionTime: - description: |- - lastTransitionTime is the last time the condition transitioned from one status to another. - This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. - format: date-time - type: string - message: - description: |- - message is a human readable message indicating details about the transition. - This may be an empty string. - maxLength: 32768 - type: string - observedGeneration: - description: |- - observedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - minimum: 0 - type: integer - reason: - description: |- - reason contains a programmatic identifier indicating the reason for the condition's last transition. - Producers of specific condition types may define expected values and meanings for this field, - and whether the values are considered a guaranteed API. - The value should be a CamelCase string. - This field may not be empty. - maxLength: 1024 - minLength: 1 - pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ - type: string - status: - description: status of the condition, one of True, False, - Unknown. - enum: - - "True" - - "False" - - Unknown - type: string - type: - description: type of condition in CamelCase or in foo.example.com/CamelCase.
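For orientation, the validation fields above compose as follows: a policy can audit by default while enforcing only where an override matches. A minimal fragment, with the namespace name and label purely illustrative:

validationFailureAction: Audit
validationFailureActionOverrides:
  # Enforce (block admission) in explicitly named namespaces.
  - action: Enforce
    namespaces:
      - prod-apps
  # Or select the namespaces to enforce in by label.
  - action: Enforce
    namespaceSelector:
      matchLabels:
        environment: production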
- maxLength: 316 - pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ - type: string - required: - - lastTransitionTime - - message - - reason - - status - - type - type: object - type: array - controlPlane: - description: ControlPlane name where the failure occurred. - type: string - required: - - controlPlane - type: object - type: array - x-kubernetes-list-map-keys: - - controlPlane - x-kubernetes-list-type: map - observedGeneration: - description: observed resource generation. - format: int64 - type: integer - provisioned: - description: List of successfully provisioned targets. - items: - description: SharedUpboundPolicyProvisioningSuccess defines policy - provisioning success. - properties: - controlPlane: - description: ControlPlane name where the external secret got - successfully projected. - type: string - required: - - controlPlane - type: object - type: array - x-kubernetes-list-map-keys: - - controlPlane - x-kubernetes-list-type: map - type: object - type: object - served: true - storage: true - subresources: - status: {} diff --git a/static/crds/space/v1.9/spaces.upbound.io_backups.yaml b/static/crds/space/v1.9/spaces.upbound.io_backups.yaml deleted file mode 100644 index 9f338e5c9..000000000 --- a/static/crds/space/v1.9/spaces.upbound.io_backups.yaml +++ /dev/null @@ -1,200 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.3 - name: backups.spaces.upbound.io -spec: - group: spaces.upbound.io - names: - categories: - - spaces - kind: Backup - listKind: BackupList - plural: backups - singular: backup - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .status.phase - name: Phase - type: string - - jsonPath: .status.retries - name: Retries - type: integer - - jsonPath: .spec.ttl - name: TTL - type: string - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - name: v1alpha1 - schema: - openAPIV3Schema: - description: Backup represents a single backup of a ControlPlane. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: BackupSpec defines a backup over a set of ControlPlanes. - properties: - configRef: - description: |- - ConfigRef is a reference to the backup configuration. - ApiGroup is optional and defaults to "spaces.upbound.io". - Kind is required, and the only supported value is "SharedBackupConfig" at - the moment. - Name is required. - properties: - apiGroup: - description: APIGroup is the group for the resource being referenced. - type: string - kind: - description: Kind is the type of resource being referenced. - minLength: 1 - type: string - name: - description: Name is the name of resource being referenced. 
- minLength: 1 - type: string - type: object - x-kubernetes-map-type: atomic - x-kubernetes-validations: - - message: backup config ref must be a reference to a SharedBackupConfig - rule: (!has(self.apiGroup) || self.apiGroup == 'spaces.upbound.io') - && self.kind == 'SharedBackupConfig' - - message: backup config ref must have a name - rule: size(self.name) > 0 - controlPlane: - description: |- - ControlPlane is the name of the ControlPlane to backup. - Requires "backup" permission on the referenced ControlPlane. - minLength: 1 - type: string - deletionPolicy: - default: Orphan - description: DeletionPolicy is the policy for the backup. - enum: - - Orphan - - Delete - type: string - excludedResources: - description: |- - ExcludedResources is a slice of resource names that are not - included in the backup. Used to filter the included extra resources. - items: - type: string - type: array - x-kubernetes-list-type: set - ttl: - description: |- - TTL is the time to live for the backup. After this time, the backup - will be eligible for garbage collection. If not set, the backup will - not be garbage collected. - type: string - required: - - configRef - - controlPlane - type: object - x-kubernetes-validations: - - message: backup target controlplane can not be changed after creation - rule: self.controlPlane == oldSelf.controlPlane - - message: backup excluded resources can not be changed after creation - rule: (!has(self.excludedResources) && !has(oldSelf.excludedResources)) - || self.excludedResources == oldSelf.excludedResources - - message: backup config ref can not be changed after creation - rule: self.configRef == oldSelf.configRef - status: - description: BackupStatus represents the observed state of a Backup. - properties: - conditions: - description: Conditions of the resource. - items: - description: A Condition that may apply to a resource. - properties: - lastTransitionTime: - description: |- - LastTransitionTime is the last time this condition transitioned from one - status to another. - format: date-time - type: string - message: - description: |- - A Message containing details about this condition's last transition from - one status to another, if any. - type: string - observedGeneration: - description: |- - ObservedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - type: integer - reason: - description: A Reason for this condition's last transition from - one status to another. - type: string - status: - description: Status of this condition; is it currently True, - False, or Unknown? - type: string - type: - description: |- - Type of this condition. At most one of each condition type may apply to - a resource at any point in time. - type: string - required: - - lastTransitionTime - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - observedGeneration: - description: |- - ObservedGeneration is the latest metadata.generation - which resulted in either a ready state, or stalled due to error - it can not recover from without human intervention. - format: int64 - type: integer - phase: - default: Pending - description: Phase is the current phase of the backup. 
- enum: - - Pending - - InProgress - - Failed - - Completed - - Deleted - type: string - retries: - description: Retries is the number of times the backup has been retried. - format: int32 - type: integer - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/static/crds/space/v1.9/spaces.upbound.io_backupschedules.yaml b/static/crds/space/v1.9/spaces.upbound.io_backupschedules.yaml deleted file mode 100644 index e3dd879ee..000000000 --- a/static/crds/space/v1.9/spaces.upbound.io_backupschedules.yaml +++ /dev/null @@ -1,213 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.3 - name: backupschedules.spaces.upbound.io -spec: - group: spaces.upbound.io - names: - categories: - - spaces - kind: BackupSchedule - listKind: BackupScheduleList - plural: backupschedules - singular: backupschedule - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .spec.schedule - name: Schedule - type: string - - jsonPath: .status.lastBackup - name: LastBackup - type: date - - jsonPath: .spec.ttl - name: TTL - type: string - - jsonPath: .spec.controlPlane - name: ControlPlane - type: string - - jsonPath: .spec.suspend - name: Suspended - type: boolean - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - name: v1alpha1 - schema: - openAPIV3Schema: - description: BackupSchedule represents a single ControlPlane schedule for - Backups. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: BackupScheduleSpec defines a backup schedule over a set of - ControlPlanes. - properties: - configRef: - description: |- - ConfigRef is a reference to the backup configuration. - ApiGroup is optional and defaults to "spaces.upbound.io". - Kind is required, and the only supported value is "SharedBackupConfig" at - the moment. - Name is required. - properties: - apiGroup: - description: APIGroup is the group for the resource being referenced. - type: string - kind: - description: Kind is the type of resource being referenced. - minLength: 1 - type: string - name: - description: Name is the name of resource being referenced. - minLength: 1 - type: string - type: object - x-kubernetes-map-type: atomic - x-kubernetes-validations: - - message: backup config ref must be a reference to a SharedBackupConfig - rule: (!has(self.apiGroup) || self.apiGroup == 'spaces.upbound.io') - && self.kind == 'SharedBackupConfig' - - message: backup config ref must have a name - rule: size(self.name) > 0 - controlPlane: - description: |- - ControlPlane is the name of the ControlPlane to which the schedule - applies. - Requires "get" permission on the referenced ControlPlane. 
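Read together, the Backup schema deleted above admits manifests like the following minimal sketch (the control plane name, config name, and TTL value are illustrative):

apiVersion: spaces.upbound.io/v1alpha1
kind: Backup
metadata:
  name: ctp1-manual-backup
  namespace: default
spec:
  controlPlane: ctp1          # requires "backup" permission on this ControlPlane
  configRef:                  # apiGroup defaults to spaces.upbound.io
    kind: SharedBackupConfig
    name: default
  ttl: 720h                   # eligible for garbage collection after 30 days
  deletionPolicy: Delete      # default is Orphan

Per the CEL validations above, controlPlane, configRef, and excludedResources are all immutable after creation.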
- type: string - x-kubernetes-validations: - - message: target can not be changed after creation - rule: self == oldSelf - deletionPolicy: - default: Orphan - description: DeletionPolicy is the policy for the backup. - enum: - - Orphan - - Delete - type: string - excludedResources: - description: |- - ExcludedResources is a slice of resource names that are not - included in the backup. Used to filter the included extra resources. - items: - type: string - type: array - x-kubernetes-list-type: set - schedule: - description: Schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. - minLength: 1 - type: string - suspend: - description: |- - Suspend specifies whether the schedule is suspended. If true, no - Backups will be created, but running backups will be allowed to - complete. - type: boolean - ttl: - description: |- - TTL is the time to live for the backup. After this time, the backup - will be eligible for garbage collection. If not set, the backup will - not be garbage collected. - type: string - useOwnerReferencesInBackup: - description: |- - UseOwnerReferencesBackup specifies whether an ownership chain should be - established between this resource and the Backup it creates. - If set to true, the Backup will be garbage collected when this resource - is deleted. - type: boolean - required: - - configRef - - controlPlane - - schedule - type: object - status: - description: BackupScheduleStatus represents the observed state of a BackupSchedule. - properties: - conditions: - description: Conditions of the resource. - items: - description: A Condition that may apply to a resource. - properties: - lastTransitionTime: - description: |- - LastTransitionTime is the last time this condition transitioned from one - status to another. - format: date-time - type: string - message: - description: |- - A Message containing details about this condition's last transition from - one status to another, if any. - type: string - observedGeneration: - description: |- - ObservedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - type: integer - reason: - description: A Reason for this condition's last transition from - one status to another. - type: string - status: - description: Status of this condition; is it currently True, - False, or Unknown? - type: string - type: - description: |- - Type of this condition. At most one of each condition type may apply to - a resource at any point in time. - type: string - required: - - lastTransitionTime - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - lastBackup: - description: |- - LastBackup is the last time a Backup was run for this - BackupSchedule. - format: date-time - type: string - observedGeneration: - description: |- - ObservedGeneration is the latest metadata.generation - which resulted in either a ready state, or stalled due to error - it can not recover from without human intervention.
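The BackupSchedule spec above is the scheduled counterpart to Backup; a minimal sketch (names, cron expression, and TTL are illustrative):

apiVersion: spaces.upbound.io/v1alpha1
kind: BackupSchedule
metadata:
  name: ctp1-nightly
  namespace: default
spec:
  controlPlane: ctp1               # immutable; requires "get" permission
  schedule: "0 2 * * *"            # Cron format: every day at 02:00
  configRef:
    kind: SharedBackupConfig
    name: default
  ttl: 168h                        # each Backup is garbage collected after a week
  useOwnerReferencesInBackup: true # created Backups are deleted with this schedule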
- format: int64 - type: integer - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/static/crds/space/v1.9/spaces.upbound.io_controlplanes.yaml b/static/crds/space/v1.9/spaces.upbound.io_controlplanes.yaml deleted file mode 100644 index 10f3b5524..000000000 --- a/static/crds/space/v1.9/spaces.upbound.io_controlplanes.yaml +++ /dev/null @@ -1,276 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.3 - name: controlplanes.spaces.upbound.io -spec: - group: spaces.upbound.io - names: - categories: - - spaces - kind: ControlPlane - listKind: ControlPlaneList - plural: controlplanes - shortNames: - - ctp - - ctps - singular: controlplane - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .spec.crossplane.version - name: Crossplane - type: string - - jsonPath: .status.conditions[?(@.type=='Ready')].status - name: Ready - type: string - - jsonPath: .status.message - name: Message - type: string - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - name: v1beta1 - schema: - openAPIV3Schema: - description: ControlPlane defines a managed Crossplane instance. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: A ControlPlaneSpec represents the desired state of the ControlPlane. - properties: - class: - default: default - description: |- - [[GATE:EnableControlPlaneClasses]] - Class specifies the class of the control plane. This affects the - control plane sizing, including component replicas and resource - requirements. There are multiple predefined classes, with "default" - being the standard Spaces control plane without any additional class - configuration. Check the Upbound Cloud documentation for a list of all - available classes. Defaults to "default". - type: string - x-kubernetes-validations: - - message: class is immutable - rule: self == oldSelf - crossplane: - description: Crossplane defines the configuration for Crossplane. - properties: - autoUpgrade: - default: - channel: Stable - description: AutoUpgrades defines the auto upgrade configuration - for Crossplane. - properties: - channel: - default: Stable - description: |- - Channel defines the upgrade channels for Crossplane. We support the following channels where 'Stable' is the - default: - - None: disables auto-upgrades and keeps the control plane at its current version of Crossplane. - - Patch: automatically upgrades the control plane to the latest supported patch version when it - becomes available while keeping the minor version the same. - - Stable: automatically upgrades the control plane to the latest supported patch release on minor - version N-1, where N is the latest supported minor version. 
- - Rapid: automatically upgrades the control plane to the latest supported patch release on the latest - supported minor version. - enum: - - None - - Patch - - Stable - - Rapid - type: string - type: object - state: - default: Running - description: |- - State defines the state for crossplane and provider workloads. We support - the following states where 'Running' is the default: - - Running: Starts/Scales up all crossplane and provider workloads in the ControlPlane - - Paused: Pauses/Scales down all crossplane and provider workloads in the ControlPlane - enum: - - Running - - Paused - type: string - version: - description: Version is the version of Universal Crossplane to - install. - type: string - type: object - restore: - description: |- - [[GATE:EnableSharedBackup]] THIS IS AN ALPHA FIELD. Do not use it in production. - Restore specifies details about the control plane's restore configuration. - properties: - finishedAt: - description: |- - FinishedAt is the time at which the control plane was restored, it's not - meant to be set by the user, but rather by the system when the control - plane is restored. - format: date-time - type: string - source: - description: |- - Source of the Backup or BackupSchedule to restore from. - Requires "restore" permission on the referenced Backup or BackupSchedule. - ApiGroup is optional and defaults to "spaces.upbound.io". - Kind is required, and the only supported kinds are Backup and - BackupSchedule at the moment. - Name is required. - properties: - apiGroup: - description: APIGroup is the group for the resource being - referenced. - type: string - kind: - description: Kind is the type of resource being referenced. - minLength: 1 - type: string - name: - description: Name is the name of resource being referenced. - minLength: 1 - type: string - type: object - x-kubernetes-map-type: atomic - x-kubernetes-validations: - - message: source must be a reference to a Backup or BackupSchedule - (v1alpha1) - rule: (!has(self.apiGroup) || self.apiGroup == 'spaces.upbound.io') - && (self.kind == 'Backup' || self.kind == 'BackupSchedule') - - message: source is immutable - rule: oldSelf == self - required: - - source - type: object - x-kubernetes-validations: - - message: finishedAt is immutable once set - rule: '!has(oldSelf.finishedAt) || oldSelf.finishedAt == self.finishedAt' - writeConnectionSecretToRef: - description: |- - WriteConnectionSecretToReference specifies the namespace and name of a - Secret to which any connection details for this managed resource should - be written. Connection details frequently include the endpoint, username, - and password required to connect to the managed resource. - This field is planned to be replaced in a future release in favor of - PublishConnectionDetailsTo. Currently, both could be set independently - and connection details would be published to both without affecting - each other. - - If omitted, it is defaulted to the namespace of the ControlPlane. - Deprecated: Use Hub or Upbound identities instead. - properties: - name: - description: Name of the secret. - type: string - namespace: - description: |- - Namespace of the secret. If omitted, it is equal to - the namespace of the resource containing this reference as a field.
- type: string - required: - - name - type: object - type: object - x-kubernetes-validations: - - message: '[[GATE:EnableSharedBackup]] restore source can not be unset' - rule: '!has(oldSelf.restore) || has(self.restore)' - - message: '[[GATE:EnableSharedBackup]] restore source can not be set - after creation' - rule: has(oldSelf.restore) || !has(self.restore) - - message: '"version" cannot be empty when upgrade channel is "None"' - rule: '!has(self.crossplane.autoUpgrade) || self.crossplane.autoUpgrade.channel - != "None" || self.crossplane.version != ""' - status: - description: A ControlPlaneStatus represents the observed state of a ControlPlane. - properties: - conditions: - description: Conditions of the resource. - items: - description: A Condition that may apply to a resource. - properties: - lastTransitionTime: - description: |- - LastTransitionTime is the last time this condition transitioned from one - status to another. - format: date-time - type: string - message: - description: |- - A Message containing details about this condition's last transition from - one status to another, if any. - type: string - observedGeneration: - description: |- - ObservedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - type: integer - reason: - description: A Reason for this condition's last transition from - one status to another. - type: string - status: - description: Status of this condition; is it currently True, - False, or Unknown? - type: string - type: - description: |- - Type of this condition. At most one of each condition type may apply to - a resource at any point in time. - type: string - required: - - lastTransitionTime - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - controlPlaneID: - type: string - firstAvailableAt: - description: FirstAvailableAt is the time at which the control plane - was available for the first time. - format: date-time - type: string - message: - description: |- - Message is a human-readable message indicating details about why the - ControlPlane is in this condition. - type: string - observedGeneration: - description: |- - ObservedGeneration is the latest metadata.generation - which resulted in either a ready state, or stalled due to error - it can not recover from without human intervention. 
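For the ControlPlane schema above, a minimal manifest might look like this sketch (the name is illustrative):

apiVersion: spaces.upbound.io/v1beta1
kind: ControlPlane
metadata:
  name: ctp1
  namespace: default
spec:
  crossplane:
    autoUpgrade:
      channel: Stable   # None, Patch, Stable, or Rapid
    state: Running      # Paused scales down crossplane and provider workloads
    # version may be omitted unless channel is "None", which requires a pinned version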
- format: int64 - type: integer - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/static/crds/space/v1.9/spaces.upbound.io_incontrolplaneoverrides.yaml b/static/crds/space/v1.9/spaces.upbound.io_incontrolplaneoverrides.yaml deleted file mode 100644 index 295fc05d0..000000000 --- a/static/crds/space/v1.9/spaces.upbound.io_incontrolplaneoverrides.yaml +++ /dev/null @@ -1,256 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.3 - name: incontrolplaneoverrides.spaces.upbound.io -spec: - group: spaces.upbound.io - names: - categories: - - spaces - kind: InControlPlaneOverride - listKind: InControlPlaneOverrideList - plural: incontrolplaneoverrides - singular: incontrolplaneoverride - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .status.conditions[?(@.type=='Synced')].status - name: SYNCED - type: string - - jsonPath: .status.conditions[?(@.type=='Ready')].status - name: READY - type: string - name: v1alpha1 - schema: - openAPIV3Schema: - description: |- - InControlPlaneOverride represents resource configuration overrides in - a ControlPlane. The specified override can be applied on single objects - as well as claim/XR object hierarchies. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: |- - InControlPlaneOverrideSpec defines a configuration override - on a target object hierarchy in a target ControlPlane with the - given name. - properties: - controlPlaneName: - description: |- - ControlPlaneName is the name of the target ControlPlane where - the resource configuration overrides will be applied. - minLength: 1 - type: string - x-kubernetes-validations: - - message: controlPlaneName is immutable - rule: self == oldSelf - deletionPolicy: - default: RollBack - description: |- - DeletionPolicy specifies whether when the InControlPlaneOverride object - is deleted, the configuration override should be kept (Keep) or - rolled back (RollBack). - enum: - - RollBack - - Keep - type: string - override: - description: |- - Override denotes the configuration override to be applied on the target - object hierarchy. The fully specified intent is obtained by serializing - the Override. - properties: - metadata: - description: Metadata specifies the patch metadata. - properties: - annotations: - additionalProperties: - type: string - description: |- - Annotations represents the Kube object annotations. 
- Only the following annotations are allowed to be patched: - - crossplane.io/paused - - spaces.upbound.io/force-reconcile-at - type: object - x-kubernetes-validations: - - message: Only the crossplane.io/paused and spaces.upbound.io/force-reconcile-at - annotations are allowed - rule: self.all(k, k == 'crossplane.io/paused' || k == 'spaces.upbound.io/force-reconcile-at') - type: object - type: object - propagationPolicy: - default: None - description: |- - PropagationPolicy specifies whether the configuration override will be - applied only to the object referenced in TargetRef (None), after an - ascending or descending hierarchy traversal will be done starting with - the target object. - enum: - - None - - Ascending - - Descending - type: string - targetRef: - description: |- - TargetRef is the object reference to a Kubernetes API object where the - configuration override will start. The controller will traverse the - target object's hierarchy depending on the PropagationPolicy. If - PropagationPolicy is None, then only the target object will be updated. - properties: - apiVersion: - description: APIVersion of the referenced object. - minLength: 1 - type: string - kind: - description: Kind of the referenced object. - minLength: 1 - type: string - name: - description: Name of the referenced object. - minLength: 1 - type: string - namespace: - description: Namespace of the referenced object. - type: string - required: - - apiVersion - - kind - - name - type: object - required: - - controlPlaneName - - override - - targetRef - type: object - status: - description: |- - InControlPlaneOverrideStatus defines the status of an InControlPlaneOverride - object. - properties: - conditions: - description: Conditions of the resource. - items: - description: A Condition that may apply to a resource. - properties: - lastTransitionTime: - description: |- - LastTransitionTime is the last time this condition transitioned from one - status to another. - format: date-time - type: string - message: - description: |- - A Message containing details about this condition's last transition from - one status to another, if any. - type: string - observedGeneration: - description: |- - ObservedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - type: integer - reason: - description: A Reason for this condition's last transition from - one status to another. - type: string - status: - description: Status of this condition; is it currently True, - False, or Unknown? - type: string - type: - description: |- - Type of this condition. At most one of each condition type may apply to - a resource at any point in time. - type: string - required: - - lastTransitionTime - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - objectRefs: - items: - description: |- - PatchedObjectStatus represents the state of an applied patch to an object - in the target hierarchy. - properties: - apiVersion: - description: APIVersion of the referenced object. - minLength: 1 - type: string - kind: - description: Kind of the referenced object. - minLength: 1 - type: string - message: - description: Message holds an optional detail message detailing - the observed state. 
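Putting the InControlPlaneOverride fields above together, the sketch below pauses a claim and everything beneath it; the example.org/Database target is invented purely for illustration:

apiVersion: spaces.upbound.io/v1alpha1
kind: InControlPlaneOverride
metadata:
  name: pause-my-db
  namespace: default
spec:
  controlPlaneName: ctp1           # immutable
  targetRef:                       # hypothetical claim; any object reference works
    apiVersion: example.org/v1alpha1
    kind: Database
    name: my-db
    namespace: default
  propagationPolicy: Descending    # also patch the objects below the target
  deletionPolicy: RollBack         # deleting this object rolls the override back
  override:
    metadata:
      annotations:
        # only crossplane.io/paused and spaces.upbound.io/force-reconcile-at are allowed
        crossplane.io/paused: "true"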
- type: string - name: - description: Name of the referenced object. - minLength: 1 - type: string - namespace: - description: Namespace of the referenced object. - type: string - reason: - description: Reason is the reason for the target objects override - Status. - type: string - status: - description: Status of the configuration override. - enum: - - Success - - Skipped - - Error - type: string - uid: - description: Metadata UID of the patch target object. - type: string - required: - - apiVersion - - kind - - name - - reason - - status - type: object - type: array - observedGeneration: - description: |- - ObservedGeneration is the latest metadata.generation - which resulted in either a ready state, or stalled due to error - it can not recover from without human intervention. - format: int64 - type: integer - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/static/crds/space/v1.9/spaces.upbound.io_sharedbackupconfigs.yaml b/static/crds/space/v1.9/spaces.upbound.io_sharedbackupconfigs.yaml deleted file mode 100644 index d716be334..000000000 --- a/static/crds/space/v1.9/spaces.upbound.io_sharedbackupconfigs.yaml +++ /dev/null @@ -1,143 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.3 - name: sharedbackupconfigs.spaces.upbound.io -spec: - group: spaces.upbound.io - names: - categories: - - spaces - kind: SharedBackupConfig - listKind: SharedBackupConfigList - plural: sharedbackupconfigs - singular: sharedbackupconfig - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .spec.objectStorage.provider - name: Provider - type: string - - jsonPath: .spec.objectStorage.bucket - name: Bucket - type: string - - jsonPath: .spec.objectStorage.credentials.source - name: Auth - type: string - - jsonPath: .spec.objectStorage.credentials.secretRef.name - name: Secret - type: string - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - name: v1alpha1 - schema: - openAPIV3Schema: - description: SharedBackupConfig defines the configuration to backup and restore - ControlPlanes. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: |- - A SharedBackupConfigSpec represents the configuration to backup or restore - ControlPlanes. - properties: - objectStorage: - description: ObjectStorage specifies the object storage configuration - for the given provider. - properties: - bucket: - description: Bucket is the name of the bucket to store backups - in. - minLength: 1 - type: string - config: - description: |- - Config is a free-form map of configuration options for the object storage provider. 
- See https://github.com/thanos-io/objstore?tab=readme-ov-file for more - information on the formats for each supported cloud provider. Bucket and - Provider will override the required values in the config. - type: object - x-kubernetes-preserve-unknown-fields: true - credentials: - description: Credentials specifies the credentials to access the - object storage. - properties: - secretRef: - description: |- - A SecretRef is a reference to a secret key that contains the credentials - that must be used to connect to the provider. - properties: - key: - default: credentials - description: The key to select. - type: string - name: - description: Name of the secret. - type: string - required: - - key - - name - type: object - source: - description: |- - Source of the credentials. - Source "Secret" requires "get" permissions on the referenced Secret. - enum: - - Secret - - InjectedIdentity - type: string - required: - - source - type: object - prefix: - description: |- - Prefix is the prefix to use for all backups using this - SharedBackupConfig, e.g. "prod/cluster1", resulting in backups for - controlplane "ctp1" in namespace "ns1" being stored in - "prod/cluster1/ns1/ctp1". - type: string - provider: - description: Provider is the name of the object storage provider. - enum: - - AWS - - Azure - - GCP - type: string - required: - - bucket - - credentials - - provider - type: object - x-kubernetes-validations: - - message: credentials.secretRef.name must be set when source is Secret - rule: self.credentials.source != 'Secret' || (has(self.credentials.secretRef) - && has(self.credentials.secretRef.name)) - required: - - objectStorage - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/static/crds/space/v1.9/spaces.upbound.io_sharedbackups.yaml b/static/crds/space/v1.9/spaces.upbound.io_sharedbackups.yaml deleted file mode 100644 index ffa7b41c5..000000000 --- a/static/crds/space/v1.9/spaces.upbound.io_sharedbackups.yaml +++ /dev/null @@ -1,291 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.3 - name: sharedbackups.spaces.upbound.io -spec: - group: spaces.upbound.io - names: - categories: - - spaces - kind: SharedBackup - listKind: SharedBackupList - plural: sharedbackups - singular: sharedbackup - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .metadata.annotations.sharedbackup\.internal\.spaces\.upbound\.io/completed - name: Completed - type: string - - jsonPath: .metadata.annotations.sharedbackup\.internal\.spaces\.upbound\.io/failed - name: Failed - type: string - - jsonPath: .metadata.annotations.sharedbackup\.internal\.spaces\.upbound\.io/provisioned - name: Provisioned - type: string - - jsonPath: .status.phase - name: Phase - type: string - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - name: v1alpha1 - schema: - openAPIV3Schema: - description: SharedBackup defines a backup over a set of ControlPlanes. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. 
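A SharedBackupConfig matching the schema above might look like the sketch below; the bucket, prefix, secret name, and the region key inside the free-form config block are all illustrative:

apiVersion: spaces.upbound.io/v1alpha1
kind: SharedBackupConfig
metadata:
  name: default
  namespace: default
spec:
  objectStorage:
    provider: AWS             # AWS, Azure, or GCP
    bucket: spaces-backups
    prefix: prod/cluster1     # backups stored under prod/cluster1/<namespace>/<controlplane>
    credentials:
      source: Secret          # or InjectedIdentity
      secretRef:
        name: aws-creds
        key: credentials      # the default key
    config:                   # free-form options in thanos-io/objstore format
      region: us-east-1       # illustrative; see the objstore docs for valid keys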
- Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: SharedBackupSpec defines a backup over a set of ControlPlanes. - properties: - configRef: - description: |- - ConfigRef is a reference to the backup configuration. - ApiGroup is optional and defaults to "spaces.upbound.io". - Kind is required, and the only supported value is "SharedBackupConfig" at - the moment. - Name is required. - properties: - apiGroup: - description: APIGroup is the group for the resource being referenced. - type: string - kind: - description: Kind is the type of resource being referenced. - minLength: 1 - type: string - name: - description: Name is the name of resource being referenced. - minLength: 1 - type: string - type: object - x-kubernetes-map-type: atomic - x-kubernetes-validations: - - message: backup config ref must be a reference to a SharedBackupConfig - rule: (!has(self.apiGroup) || self.apiGroup == 'spaces.upbound.io') - && self.kind == 'SharedBackupConfig' - - message: backup config ref must have a name - rule: size(self.name) > 0 - controlPlaneSelector: - description: |- - ControlPlaneSelector defines the selector for ControlPlanes to backup. - Requires "backup" permission on all ControlPlanes in the same namespace. - properties: - labelSelectors: - description: |- - A resource is matched if any of the label selector matches. - In case when the list is empty, resource is matched too. - items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - names: - description: |- - A resource is selected if its metadata.name matches any of the provided names. - In case when the list is empty, resource is matched too. 
- items: - type: string - type: array - x-kubernetes-list-type: set - type: object - x-kubernetes-validations: - - message: either names or a labelSelector must be specified - rule: (has(self.labelSelectors) || has(self.names)) && (size(self.labelSelectors) - > 0 || size(self.names) > 0) - deletionPolicy: - default: Orphan - description: DeletionPolicy is the policy for the backup. - enum: - - Orphan - - Delete - type: string - excludedResources: - description: |- - ExcludedResources is a slice of resource names that are not - included in the backup. Used to filter the included extra resources. - items: - type: string - type: array - x-kubernetes-list-type: set - ttl: - description: |- - TTL is the time to live for the backup. After this time, the backup - will be eligible for garbage collection. If not set, the backup will - not be garbage collected. - type: string - useOwnerReferencesInBackup: - description: |- - UseOwnerReferencesBackup specifies whether an ownership chain should be - established between this resource and the Backup it creates. - If set to true, the Backup will be garbage collected when this resource - is deleted. - type: boolean - required: - - configRef - - controlPlaneSelector - type: object - x-kubernetes-validations: - - message: shared backup ControlPlane selectors can not be changed after - creation - rule: self.controlPlaneSelector == oldSelf.controlPlaneSelector - - message: shared backup excluded resources can not be changed after creation - rule: (!has(self.excludedResources) && !has(oldSelf.excludedResources)) - || self.excludedResources == oldSelf.excludedResources - status: - description: SharedBackupStatus represents the observed state of a SharedBackup. - properties: - completed: - description: Completed is the list of ControlPlanes for which the - backup completed successfully. - items: - type: string - type: array - x-kubernetes-list-type: set - conditions: - description: Conditions of the resource. - items: - description: A Condition that may apply to a resource. - properties: - lastTransitionTime: - description: |- - LastTransitionTime is the last time this condition transitioned from one - status to another. - format: date-time - type: string - message: - description: |- - A Message containing details about this condition's last transition from - one status to another, if any. - type: string - observedGeneration: - description: |- - ObservedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - type: integer - reason: - description: A Reason for this condition's last transition from - one status to another. - type: string - status: - description: Status of this condition; is it currently True, - False, or Unknown? - type: string - type: - description: |- - Type of this condition. At most one of each condition type may apply to - a resource at any point in time. - type: string - required: - - lastTransitionTime - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - failed: - description: Failed is the list of ControlPlanes for which the backup - failed. 
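A SharedBackup fans out over every matching ControlPlane in its namespace; a minimal sketch using a label selector (the label key and value are illustrative):

apiVersion: spaces.upbound.io/v1alpha1
kind: SharedBackup
metadata:
  name: prod-ctps
  namespace: default
spec:
  configRef:
    kind: SharedBackupConfig
    name: default
  controlPlaneSelector:       # names and/or labelSelectors; at least one must be set
    labelSelectors:
      - matchLabels:
          environment: prod
  ttl: 720h
  deletionPolicy: Orphan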
- items: - type: string - type: array - x-kubernetes-list-type: set - observedGeneration: - description: |- - ObservedGeneration is the latest metadata.generation - which resulted in either a ready state, or stalled due to error - it can not recover from without human intervention. - format: int64 - type: integer - phase: - default: Pending - description: Phase represents the current phase of the SharedBackup. - enum: - - Pending - - InProgress - - Failed - - Completed - type: string - selectedControlPlanes: - description: SelectedControlPlanes represents the names of the selected - ControlPlanes. - items: - type: string - type: array - x-kubernetes-list-type: set - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/static/crds/space/v1.9/spaces.upbound.io_sharedbackupschedules.yaml b/static/crds/space/v1.9/spaces.upbound.io_sharedbackupschedules.yaml deleted file mode 100644 index 1c173c0a8..000000000 --- a/static/crds/space/v1.9/spaces.upbound.io_sharedbackupschedules.yaml +++ /dev/null @@ -1,273 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.3 - name: sharedbackupschedules.spaces.upbound.io -spec: - group: spaces.upbound.io - names: - categories: - - spaces - kind: SharedBackupSchedule - listKind: SharedBackupScheduleList - plural: sharedbackupschedules - singular: sharedbackupschedule - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .spec.schedule - name: Schedule - type: string - - jsonPath: .spec.suspend - name: Suspended - type: boolean - - jsonPath: .metadata.annotations.sharedbackupschedule\.internal\.spaces\.upbound\.io/provisioned-total - name: Provisioned - type: string - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - name: v1alpha1 - schema: - openAPIV3Schema: - description: |- - SharedBackupSchedule defines a schedule for SharedBackup on a set of - ControlPlanes. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: SharedBackupScheduleSpec defines the desired state of a SharedBackupSchedule. - properties: - configRef: - description: |- - ConfigRef is a reference to the backup configuration. - ApiGroup is optional and defaults to "spaces.upbound.io". - Kind is required, and the only supported value is "SharedBackupConfig" at - the moment. - Name is required. - properties: - apiGroup: - description: APIGroup is the group for the resource being referenced. - type: string - kind: - description: Kind is the type of resource being referenced. - minLength: 1 - type: string - name: - description: Name is the name of resource being referenced. 
- minLength: 1 - type: string - type: object - x-kubernetes-map-type: atomic - x-kubernetes-validations: - - message: backup config ref must be a reference to a SharedBackupConfig - rule: (!has(self.apiGroup) || self.apiGroup == 'spaces.upbound.io') - && self.kind == 'SharedBackupConfig' - - message: backup config ref must have a name - rule: size(self.name) > 0 - controlPlaneSelector: - description: |- - ControlPlaneSelector defines the selector for ControlPlanes to backup. - Requires "backup" permission on all ControlPlanes in the same namespace. - properties: - labelSelectors: - description: |- - A resource is matched if any of the label selector matches. - In case when the list is empty, resource is matched too. - items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - names: - description: |- - A resource is selected if its metadata.name matches any of the provided names. - In case when the list is empty, resource is matched too. - items: - type: string - type: array - x-kubernetes-list-type: set - type: object - x-kubernetes-validations: - - message: either names or a labelSelector must be specified - rule: (has(self.labelSelectors) || has(self.names)) && (size(self.labelSelectors) - > 0 || size(self.names) > 0) - deletionPolicy: - default: Orphan - description: DeletionPolicy is the policy for the backup. - enum: - - Orphan - - Delete - type: string - excludedResources: - description: |- - ExcludedResources is a slice of resource names that are not - included in the backup. Used to filter the included extra resources. - items: - type: string - type: array - x-kubernetes-list-type: set - schedule: - description: Schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. - minLength: 1 - type: string - suspend: - description: |- - Suspend specifies whether the schedule is suspended. If true, no - Backups will be created, but running backups will be allowed to - complete. 
- type: boolean - ttl: - description: |- - TTL is the time to live for the backup. After this time, the backup - will be eligible for garbage collection. If not set, the backup will - not be garbage collected. - type: string - useOwnerReferencesInBackup: - description: |- - UseOwnerReferencesBackup specifies whether an ownership chain should be - established between this resource and the Backup it creates. - If set to true, the Backup will be garbage collected when this resource - is deleted. - type: boolean - required: - - configRef - - controlPlaneSelector - - schedule - type: object - status: - description: SharedBackupScheduleStatus represents the observed state - of a SharedBackupSchedule. - properties: - conditions: - description: Conditions of the resource. - items: - description: A Condition that may apply to a resource. - properties: - lastTransitionTime: - description: |- - LastTransitionTime is the last time this condition transitioned from one - status to another. - format: date-time - type: string - message: - description: |- - A Message containing details about this condition's last transition from - one status to another, if any. - type: string - observedGeneration: - description: |- - ObservedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - type: integer - reason: - description: A Reason for this condition's last transition from - one status to another. - type: string - status: - description: Status of this condition; is it currently True, - False, or Unknown? - type: string - type: - description: |- - Type of this condition. At most one of each condition type may apply to - a resource at any point in time. - type: string - required: - - lastTransitionTime - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - observedGeneration: - description: |- - ObservedGeneration is the latest metadata.generation - which resulted in either a ready state, or stalled due to error - it can not recover from without human intervention. - format: int64 - type: integer - selectedControlPlanes: - description: |- - SelectedControlPlanes is the list of ControlPlanes that are selected - for backup. 
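The SharedBackupSchedule above combines the shared control plane selector with a cron schedule; a minimal sketch (names and schedule illustrative):

apiVersion: spaces.upbound.io/v1alpha1
kind: SharedBackupSchedule
metadata:
  name: prod-nightly
  namespace: default
spec:
  schedule: "0 3 * * *"
  configRef:
    kind: SharedBackupConfig
    name: default
  controlPlaneSelector:
    names:
      - ctp1
      - ctp2
  suspend: false              # set true to pause creation of new backups
  ttl: 336h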
- items: - type: string - type: array - x-kubernetes-list-type: set - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/static/crds/space/v1.9/spaces.upbound.io_sharedexternalsecrets.yaml b/static/crds/space/v1.9/spaces.upbound.io_sharedexternalsecrets.yaml deleted file mode 100644 index 00c2dd3ab..000000000 --- a/static/crds/space/v1.9/spaces.upbound.io_sharedexternalsecrets.yaml +++ /dev/null @@ -1,745 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.3 - name: sharedexternalsecrets.spaces.upbound.io -spec: - group: spaces.upbound.io - names: - categories: - - externalsecrets - kind: SharedExternalSecret - listKind: SharedExternalSecretList - plural: sharedexternalsecrets - shortNames: - - ses - singular: sharedexternalsecret - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .metadata.annotations.sharedexternalsecrets\.internal\.spaces\.upbound\.io/provisioned-total - name: Provisioned - type: string - - jsonPath: .metadata.creationTimestamp - name: AGE - type: date - name: v1alpha1 - schema: - openAPIV3Schema: - description: |- - SharedExternalSecret specifies a shared ExternalSecret projected into the specified - ControlPlanes of the same namespace as ClusterExternalSecret and with that - propagated into the specified namespaces. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: SharedExternalSecretSpec defines the desired state of SharedExternalSecret. - properties: - controlPlaneSelector: - description: |- - The secret is projected only to control planes - matching the provided selector. Either names or a labelSelector must be specified. - properties: - labelSelectors: - description: |- - A resource is matched if any of the label selector matches. - In case when the list is empty, resource is matched too. - items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. 
If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - names: - description: |- - A resource is selected if its metadata.name matches any of the provided names. - In case when the list is empty, resource is matched too. - items: - type: string - type: array - x-kubernetes-list-type: set - type: object - x-kubernetes-validations: - - message: either names or a labelSelector must be specified - rule: (has(self.labelSelectors) || has(self.names)) && (size(self.labelSelectors) - > 0 || size(self.names) > 0) - externalSecretMetadata: - description: The metadata of the secret store to be created. - properties: - annotations: - additionalProperties: - type: string - description: Annotations that are set on projected resource. - type: object - labels: - additionalProperties: - type: string - description: Labels that are set on projected resource. - type: object - type: object - externalSecretName: - description: |- - ExternalSecretName is the name to use when creating external secret within a control plane. - optional, if not set, SharedExternalSecret name will be used. - When set, it is immutable. - maxLength: 253 - minLength: 1 - type: string - x-kubernetes-validations: - - message: externalSecretName is immutable - rule: self == oldSelf - externalSecretSpec: - description: The spec for the ExternalSecrets to be created. - properties: - data: - description: Data defines the connection between the Kubernetes - Secret keys and the Provider data - items: - description: ExternalSecretData defines the connection between - the Kubernetes Secret key (spec.data.) and the Provider - data. - properties: - remoteRef: - description: |- - RemoteRef points to the remote secret and defines - which secret (version/property/..) to fetch. - properties: - conversionStrategy: - default: Default - description: Used to define a conversion Strategy - enum: - - Default - - Unicode - type: string - decodingStrategy: - default: None - description: Used to define a decoding Strategy - enum: - - Auto - - Base64 - - Base64URL - - None - type: string - key: - description: Key is the key used in the Provider, mandatory - type: string - metadataPolicy: - default: None - description: Policy for fetching tags/labels from provider - secrets, possible options are Fetch, None. Defaults - to None - enum: - - None - - Fetch - type: string - property: - description: Used to select a specific property of the - Provider value (if a map), if supported - type: string - version: - description: Used to select a specific version of the - Provider value, if supported - type: string - required: - - key - type: object - secretKey: - description: |- - SecretKey defines the key in which the controller stores - the value. 
This is the key in the Kind=Secret - type: string - sourceRef: - description: |- - SourceRef allows you to override the source - from which the value will pulled from. - maxProperties: 1 - properties: - generatorRef: - description: |- - GeneratorRef points to a generator custom resource. - - Deprecated: The generatorRef is not implemented in .data[]. - this will be removed with v1. - properties: - apiVersion: - default: generators.external-secrets.io/v1alpha1 - description: Specify the apiVersion of the generator - resource - type: string - kind: - description: Specify the Kind of the resource, e.g. - Password, ACRAccessToken etc. - type: string - name: - description: Specify the name of the generator resource - type: string - required: - - kind - - name - type: object - storeRef: - description: SecretStoreRef defines which SecretStore - to fetch the ExternalSecret data. - properties: - kind: - description: |- - Kind of the SecretStore resource (SecretStore or ClusterSecretStore) - Defaults to `SecretStore` - type: string - name: - description: Name of the SecretStore resource - type: string - required: - - name - type: object - type: object - required: - - remoteRef - - secretKey - type: object - type: array - dataFrom: - description: |- - DataFrom is used to fetch all properties from a specific Provider data - If multiple entries are specified, the Secret keys are merged in the specified order - items: - properties: - extract: - description: |- - Used to extract multiple key/value pairs from one secret - Note: Extract does not support sourceRef.Generator or sourceRef.GeneratorRef. - properties: - conversionStrategy: - default: Default - description: Used to define a conversion Strategy - enum: - - Default - - Unicode - type: string - decodingStrategy: - default: None - description: Used to define a decoding Strategy - enum: - - Auto - - Base64 - - Base64URL - - None - type: string - key: - description: Key is the key used in the Provider, mandatory - type: string - metadataPolicy: - default: None - description: Policy for fetching tags/labels from provider - secrets, possible options are Fetch, None. Defaults - to None - enum: - - None - - Fetch - type: string - property: - description: Used to select a specific property of the - Provider value (if a map), if supported - type: string - version: - description: Used to select a specific version of the - Provider value, if supported - type: string - required: - - key - type: object - find: - description: |- - Used to find secrets based on tags or regular expressions - Note: Find does not support sourceRef.Generator or sourceRef.GeneratorRef. - properties: - conversionStrategy: - default: Default - description: Used to define a conversion Strategy - enum: - - Default - - Unicode - type: string - decodingStrategy: - default: None - description: Used to define a decoding Strategy - enum: - - Auto - - Base64 - - Base64URL - - None - type: string - name: - description: Finds secrets based on the name. - properties: - regexp: - description: Finds secrets base - type: string - type: object - path: - description: A root path to start the find operations. - type: string - tags: - additionalProperties: - type: string - description: Find secrets based on tags. - type: object - type: object - rewrite: - description: |- - Used to rewrite secret Keys after getting them from the secret Provider - Multiple Rewrite operations can be provided. 
They are applied in a layered order (first to last) - items: - properties: - regexp: - description: |- - Used to rewrite with regular expressions. - The resulting key will be the output of a regexp.ReplaceAll operation. - properties: - source: - description: Used to define the regular expression - of a re.Compiler. - type: string - target: - description: Used to define the target pattern - of a ReplaceAll operation. - type: string - required: - - source - - target - type: object - transform: - description: |- - Used to apply string transformation on the secrets. - The resulting key will be the output of the template applied by the operation. - properties: - template: - description: |- - Used to define the template to apply on the secret name. - `.value ` will specify the secret name in the template. - type: string - required: - - template - type: object - type: object - type: array - sourceRef: - description: |- - SourceRef points to a store or generator - which contains secret values ready to use. - Use this in combination with Extract or Find pull values out of - a specific SecretStore. - When sourceRef points to a generator Extract or Find is not supported. - The generator returns a static map of values - maxProperties: 1 - properties: - generatorRef: - description: GeneratorRef points to a generator custom - resource. - properties: - apiVersion: - default: generators.external-secrets.io/v1alpha1 - description: Specify the apiVersion of the generator - resource - type: string - kind: - description: Specify the Kind of the resource, e.g. - Password, ACRAccessToken etc. - type: string - name: - description: Specify the name of the generator resource - type: string - required: - - kind - - name - type: object - storeRef: - description: SecretStoreRef defines which SecretStore - to fetch the ExternalSecret data. - properties: - kind: - description: |- - Kind of the SecretStore resource (SecretStore or ClusterSecretStore) - Defaults to `SecretStore` - type: string - name: - description: Name of the SecretStore resource - type: string - required: - - name - type: object - type: object - type: object - type: array - refreshInterval: - default: 1h - description: |- - RefreshInterval is the amount of time before the values are read again from the SecretStore provider - Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h" - May be set to zero to fetch and create it once. Defaults to 1h. - type: string - secretStoreRef: - description: SecretStoreRef defines which SecretStore to fetch - the ExternalSecret data. - properties: - kind: - description: |- - Kind of the SecretStore resource (SecretStore or ClusterSecretStore) - Defaults to `SecretStore` - type: string - name: - description: Name of the SecretStore resource - type: string - required: - - name - type: object - target: - default: - creationPolicy: Owner - deletionPolicy: Retain - description: |- - ExternalSecretTarget defines the Kubernetes Secret to be created - There can be only one target per ExternalSecret. 
- properties: - creationPolicy: - default: Owner - description: |- - CreationPolicy defines rules on how to create the resulting Secret - Defaults to 'Owner' - enum: - - Owner - - Orphan - - Merge - - None - type: string - deletionPolicy: - default: Retain - description: |- - DeletionPolicy defines rules on how to delete the resulting Secret - Defaults to 'Retain' - enum: - - Delete - - Merge - - Retain - type: string - immutable: - description: Immutable defines if the final secret will be - immutable - type: boolean - name: - description: |- - Name defines the name of the Secret resource to be managed - This field is immutable - Defaults to the .metadata.name of the ExternalSecret resource - type: string - template: - description: Template defines a blueprint for the created - Secret resource. - properties: - data: - additionalProperties: - type: string - type: object - engineVersion: - default: v2 - description: |- - EngineVersion specifies the template engine version - that should be used to compile/execute the - template specified in .data and .templateFrom[]. - enum: - - v1 - - v2 - type: string - mergePolicy: - default: Replace - enum: - - Replace - - Merge - type: string - metadata: - description: ExternalSecretTemplateMetadata defines metadata - fields for the Secret blueprint. - properties: - annotations: - additionalProperties: - type: string - type: object - labels: - additionalProperties: - type: string - type: object - type: object - templateFrom: - items: - properties: - configMap: - properties: - items: - items: - properties: - key: - type: string - templateAs: - default: Values - enum: - - Values - - KeysAndValues - type: string - required: - - key - type: object - type: array - name: - type: string - required: - - items - - name - type: object - literal: - type: string - secret: - properties: - items: - items: - properties: - key: - type: string - templateAs: - default: Values - enum: - - Values - - KeysAndValues - type: string - required: - - key - type: object - type: array - name: - type: string - required: - - items - - name - type: object - target: - default: Data - enum: - - Data - - Annotations - - Labels - type: string - type: object - type: array - type: - type: string - type: object - type: object - type: object - namespaceSelector: - description: |- - The projected secret can be consumed - only within namespaces matching the provided selector. - Either names or a labelSelector must be specified. - properties: - labelSelectors: - description: |- - A resource is matched if any of the label selector matches. - In case when the list is empty, resource is matched too. - items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. 
If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - names: - description: |- - A resource is selected if its metadata.name matches any of the provided names. - In case when the list is empty, resource is matched too. - items: - type: string - type: array - x-kubernetes-list-type: set - type: object - x-kubernetes-validations: - - message: either names or a labelSelector must be specified - rule: (has(self.labelSelectors) || has(self.names)) && (size(self.labelSelectors) - > 0 || size(self.names) > 0) - refreshTime: - description: Used to configure secret refresh interval in seconds. - type: string - required: - - controlPlaneSelector - - externalSecretSpec - - namespaceSelector - type: object - x-kubernetes-validations: - - message: externalSecretName is immutable - rule: has(self.externalSecretName) == has(oldSelf.externalSecretName) - status: - description: SharedExternalSecretStatus defines the observed state of - the ExternalSecret. - properties: - failed: - description: list of provisioning failures. - items: - description: |- - SharedExternalSecretProvisioningFailure describes a external secret provisioning - failure in a specific control plane. - properties: - conditions: - description: List of conditions. - items: - properties: - message: - type: string - status: - type: string - type: - type: string - required: - - status - - type - type: object - type: array - controlPlane: - description: ControlPlane name where the failure occurred. - type: string - required: - - controlPlane - type: object - type: array - x-kubernetes-list-map-keys: - - controlPlane - x-kubernetes-list-type: map - observedGeneration: - description: observed resource generation. - format: int64 - type: integer - provisioned: - description: List of successfully provisioned targets. - items: - description: SharedExternalSecretProvisioningSuccess defines external - secret provisioning success. - properties: - controlPlane: - description: ControlPlane name where the external secret got - successfully projected. 
- type: string - required: - - controlPlane - type: object - type: array - x-kubernetes-list-map-keys: - - controlPlane - x-kubernetes-list-type: map - type: object - type: object - served: true - storage: true - subresources: - status: {} diff --git a/static/crds/space/v1.9/spaces.upbound.io_sharedsecretstores.yaml b/static/crds/space/v1.9/spaces.upbound.io_sharedsecretstores.yaml deleted file mode 100644 index 499a2208f..000000000 --- a/static/crds/space/v1.9/spaces.upbound.io_sharedsecretstores.yaml +++ /dev/null @@ -1,2702 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.3 - name: sharedsecretstores.spaces.upbound.io -spec: - group: spaces.upbound.io - names: - categories: - - externalsecrets - kind: SharedSecretStore - listKind: SharedSecretStoreList - plural: sharedsecretstores - shortNames: - - sss - singular: sharedsecretstore - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .metadata.annotations.sharedsecretstores\.internal\.spaces\.upbound\.io/provisioned-total - name: Provisioned - type: string - - jsonPath: .metadata.creationTimestamp - name: AGE - type: date - name: v1alpha1 - schema: - openAPIV3Schema: - description: |- - SharedSecretStore represents a shared SecretStore projected as ClusterSecretStore - into matching ControlPlanes in the same namespace. Once projected into a ControlPlane, - it can be referenced from ExternalSecret instances, as part of `storeRef` fields. - The secret store configuration including referenced credential are not leaked into the - ControlPlanes and in that sense can be called secure as they are invisible to the - ControlPlane workloads. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: SharedSecretStoreSpec defines the desired state of SecretStore. - properties: - controlPlaneSelector: - description: |- - The store is projected only to control planes - matching the provided selector. Either names or a labelSelector must be specified. - properties: - labelSelectors: - description: |- - A resource is matched if any of the label selector matches. - In case when the list is empty, resource is matched too. - items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. 
- type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - x-kubernetes-map-type: atomic - type: array - names: - description: |- - A resource is selected if its metadata.name matches any of the provided names. - In case when the list is empty, resource is matched too. - items: - type: string - type: array - x-kubernetes-list-type: set - type: object - x-kubernetes-validations: - - message: either names or a labelSelector must be specified - rule: (has(self.labelSelectors) || has(self.names)) && (size(self.labelSelectors) - > 0 || size(self.names) > 0) - namespaceSelector: - description: |- - The projected secret store can be consumed - only within namespaces matching the provided selector. - Either names or a labelSelector must be specified. - properties: - labelSelectors: - description: |- - A resource is matched if any of the label selector matches. - In case when the list is empty, resource is matched too. - items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. - properties: - matchExpressions: - description: matchExpressions is a list of label selector - requirements. The requirements are ANDed. - items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. - properties: - key: - description: key is the label key that the selector - applies to. - type: string - operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. - type: string - values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. - items: - type: string - type: array - x-kubernetes-list-type: atomic - required: - - key - - operator - type: object - type: array - x-kubernetes-list-type: atomic - matchLabels: - additionalProperties: - type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. 
- type: object - type: object - x-kubernetes-map-type: atomic - type: array - names: - description: |- - A resource is selected if its metadata.name matches any of the provided names. - In case when the list is empty, resource is matched too. - items: - type: string - type: array - x-kubernetes-list-type: set - type: object - x-kubernetes-validations: - - message: either names or a labelSelector must be specified - rule: (has(self.labelSelectors) || has(self.names)) && (size(self.labelSelectors) - > 0 || size(self.names) > 0) - provider: - description: Used to configure the provider. Only one provider may - be set. - maxProperties: 1 - minProperties: 1 - properties: - akeyless: - description: Akeyless configures this store to sync secrets using - Akeyless Vault provider - properties: - akeylessGWApiURL: - description: Akeyless GW API Url from which the secrets to - be fetched from. - type: string - authSecretRef: - description: Auth configures how the operator authenticates - with Akeyless. - properties: - kubernetesAuth: - description: |- - Kubernetes authenticates with Akeyless by passing the ServiceAccount - token stored in the named Secret resource. - properties: - accessID: - description: the Akeyless Kubernetes auth-method access-id - type: string - k8sConfName: - description: Kubernetes-auth configuration name in - Akeyless-Gateway - type: string - secretRef: - description: |- - Optional secret field containing a Kubernetes ServiceAccount JWT used - for authenticating with Akeyless. If a name is specified without a key, - `token` is the default. If one is not specified, the one bound to - the controller will be used. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - serviceAccountRef: - description: |- - Optional service account field containing the name of a kubernetes ServiceAccount. - If the service account is specified, the service account secret token JWT will be used - for authenticating with Akeyless. If the service account selector is not supplied, - the secretRef will be used instead. - properties: - audiences: - description: |- - Audience specifies the `aud` claim for the service account token - If the service account uses a well-known annotation for e.g. IRSA or GCP Workload Identity - then this audiences will be appended to the list - items: - type: string - type: array - name: - description: The name of the ServiceAccount resource - being referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - required: - - name - type: object - required: - - accessID - - k8sConfName - type: object - secretRef: - description: |- - Reference to a Secret that contains the details - to authenticate with Akeyless. - properties: - accessID: - description: The SecretAccessID is used for authentication - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. 
Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - accessType: - description: |- - A reference to a specific 'key' within a Secret resource, - In some instances, `key` is a required field. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - accessTypeParam: - description: |- - A reference to a specific 'key' within a Secret resource, - In some instances, `key` is a required field. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - type: object - type: object - caBundle: - description: |- - PEM/base64 encoded CA bundle used to validate Akeyless Gateway certificate. Only used - if the AkeylessGWApiURL URL is using HTTPS protocol. If not set the system root certificates - are used to validate the TLS connection. - format: byte - type: string - caProvider: - description: The provider for the CA bundle to use to validate - Akeyless Gateway certificate. - properties: - key: - description: The key where the CA certificate can be found - in the Secret or ConfigMap. - type: string - name: - description: The name of the object located at the provider - type. - type: string - namespace: - description: |- - The namespace the Provider type is in. - Can only be defined when used in a ClusterSecretStore. - type: string - type: - description: The type of provider to use such as "Secret", - or "ConfigMap". - enum: - - Secret - - ConfigMap - type: string - required: - - name - - type - type: object - required: - - akeylessGWApiURL - - authSecretRef - type: object - alibaba: - description: Alibaba configures this store to sync secrets using - Alibaba Cloud provider - properties: - auth: - description: AlibabaAuth contains a secretRef for credentials. - properties: - rrsa: - description: Authenticate against Alibaba using RRSA. - properties: - oidcProviderArn: - type: string - oidcTokenFilePath: - type: string - roleArn: - type: string - sessionName: - type: string - required: - - oidcProviderArn - - oidcTokenFilePath - - roleArn - - sessionName - type: object - secretRef: - description: AlibabaAuthSecretRef holds secret references - for Alibaba credentials. - properties: - accessKeyIDSecretRef: - description: The AccessKeyID is used for authentication - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. 
Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - accessKeySecretSecretRef: - description: The AccessKeySecret is used for authentication - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - required: - - accessKeyIDSecretRef - - accessKeySecretSecretRef - type: object - type: object - regionID: - description: Alibaba Region to be used for the provider - type: string - required: - - auth - - regionID - type: object - aws: - description: AWS configures this store to sync secrets using AWS - Secret Manager provider - properties: - additionalRoles: - description: AdditionalRoles is a chained list of Role ARNs - which the provider will sequentially assume before assuming - the Role - items: - type: string - type: array - auth: - description: |- - Auth defines the information necessary to authenticate against AWS - if not set aws sdk will infer credentials from your environment - see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials - properties: - jwt: - description: Authenticate against AWS using service account - tokens. - properties: - serviceAccountRef: - description: A reference to a ServiceAccount resource. - properties: - audiences: - description: |- - Audience specifies the `aud` claim for the service account token - If the service account uses a well-known annotation for e.g. IRSA or GCP Workload Identity - then this audiences will be appended to the list - items: - type: string - type: array - name: - description: The name of the ServiceAccount resource - being referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - required: - - name - type: object - type: object - secretRef: - description: |- - AWSAuthSecretRef holds secret references for AWS credentials - both AccessKeyID and SecretAccessKey must be defined in order to properly authenticate. - properties: - accessKeyIDSecretRef: - description: The AccessKeyID is used for authentication - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. 
- type: string - type: object - secretAccessKeySecretRef: - description: The SecretAccessKey is used for authentication - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - sessionTokenSecretRef: - description: |- - The SessionToken used for authentication - This must be defined if AccessKeyID and SecretAccessKey are temporary credentials - see: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - type: object - type: object - externalID: - description: AWS External ID set on assumed IAM roles - type: string - region: - description: AWS Region to be used for the provider - type: string - role: - description: Role is a Role ARN which the provider will assume - type: string - secretsManager: - description: SecretsManager defines how the provider behaves - when interacting with AWS SecretsManager - properties: - forceDeleteWithoutRecovery: - description: |- - Specifies whether to delete the secret without any recovery window. You - can't use both this parameter and RecoveryWindowInDays in the same call. - If you don't use either, then by default Secrets Manager uses a 30 day - recovery window. - see: https://docs.aws.amazon.com/secretsmanager/latest/apireference/API_DeleteSecret.html#SecretsManager-DeleteSecret-request-ForceDeleteWithoutRecovery - type: boolean - recoveryWindowInDays: - description: |- - The number of days from 7 to 30 that Secrets Manager waits before - permanently deleting the secret. You can't use both this parameter and - ForceDeleteWithoutRecovery in the same call. If you don't use either, - then by default Secrets Manager uses a 30 day recovery window. - see: https://docs.aws.amazon.com/secretsmanager/latest/apireference/API_DeleteSecret.html#SecretsManager-DeleteSecret-request-RecoveryWindowInDays - format: int64 - type: integer - type: object - service: - description: Service defines which service should be used - to fetch the secrets - enum: - - SecretsManager - - ParameterStore - type: string - sessionTags: - description: AWS STS assume role session tags - items: - properties: - key: - type: string - value: - type: string - required: - - key - - value - type: object - type: array - transitiveTagKeys: - description: AWS STS assume role transitive session tags. 
- Required when multiple rules are used with the provider - items: - type: string - type: array - required: - - region - - service - type: object - azurekv: - description: AzureKV configures this store to sync secrets using - Azure Key Vault provider - properties: - authSecretRef: - description: Auth configures how the operator authenticates - with Azure. Required for ServicePrincipal auth type. - properties: - clientId: - description: The Azure clientId of the service principle - used for authentication. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - clientSecret: - description: The Azure ClientSecret of the service principle - used for authentication. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - type: object - authType: - default: ServicePrincipal - description: |- - Auth type defines how to authenticate to the keyvault service. - Valid values are: - - "ServicePrincipal" (default): Using a service principal (tenantId, clientId, clientSecret) - - "ManagedIdentity": Using Managed Identity assigned to the pod (see aad-pod-identity) - enum: - - ServicePrincipal - - ManagedIdentity - - WorkloadIdentity - type: string - environmentType: - default: PublicCloud - description: |- - EnvironmentType specifies the Azure cloud environment endpoints to use for - connecting and authenticating with Azure. By default it points to the public cloud AAD endpoint. - The following endpoints are available, also see here: https://github.com/Azure/go-autorest/blob/main/autorest/azure/environments.go#L152 - PublicCloud, USGovernmentCloud, ChinaCloud, GermanCloud - enum: - - PublicCloud - - USGovernmentCloud - - ChinaCloud - - GermanCloud - type: string - identityId: - description: If multiple Managed Identity is assigned to the - pod, you can select the one to be used - type: string - serviceAccountRef: - description: |- - ServiceAccountRef specified the service account - that should be used when authenticating with WorkloadIdentity. - properties: - audiences: - description: |- - Audience specifies the `aud` claim for the service account token - If the service account uses a well-known annotation for e.g. IRSA or GCP Workload Identity - then this audiences will be appended to the list - items: - type: string - type: array - name: - description: The name of the ServiceAccount resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. 
- type: string - required: - - name - type: object - tenantId: - description: TenantID configures the Azure Tenant to send - requests to. Required for ServicePrincipal auth type. - type: string - vaultUrl: - description: Vault Url from which the secrets to be fetched - from. - type: string - required: - - vaultUrl - type: object - conjur: - description: Conjur configures this store to sync secrets using - conjur provider - properties: - auth: - properties: - apikey: - properties: - account: - type: string - apiKeyRef: - description: |- - A reference to a specific 'key' within a Secret resource, - In some instances, `key` is a required field. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - userRef: - description: |- - A reference to a specific 'key' within a Secret resource, - In some instances, `key` is a required field. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - required: - - account - - apiKeyRef - - userRef - type: object - jwt: - properties: - account: - type: string - secretRef: - description: |- - Optional SecretRef that refers to a key in a Secret resource containing JWT token to - authenticate with Conjur using the JWT authentication method. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - serviceAccountRef: - description: |- - Optional ServiceAccountRef specifies the Kubernetes service account for which to request - a token for with the `TokenRequest` API. - properties: - audiences: - description: |- - Audience specifies the `aud` claim for the service account token - If the service account uses a well-known annotation for e.g. IRSA or GCP Workload Identity - then this audiences will be appended to the list - items: - type: string - type: array - name: - description: The name of the ServiceAccount resource - being referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. 
- type: string - required: - - name - type: object - serviceID: - description: The conjur authn jwt webservice id - type: string - required: - - account - - serviceID - type: object - type: object - caBundle: - type: string - caProvider: - description: |- - Used to provide custom certificate authority (CA) certificates - for a secret store. The CAProvider points to a Secret or ConfigMap resource - that contains a PEM-encoded certificate. - properties: - key: - description: The key where the CA certificate can be found - in the Secret or ConfigMap. - type: string - name: - description: The name of the object located at the provider - type. - type: string - namespace: - description: |- - The namespace the Provider type is in. - Can only be defined when used in a ClusterSecretStore. - type: string - type: - description: The type of provider to use such as "Secret", - or "ConfigMap". - enum: - - Secret - - ConfigMap - type: string - required: - - name - - type - type: object - url: - type: string - required: - - auth - - url - type: object - delinea: - description: |- - Delinea DevOps Secrets Vault - https://docs.delinea.com/online-help/products/devops-secrets-vault/current - properties: - clientId: - description: ClientID is the non-secret part of the credential. - properties: - secretRef: - description: SecretRef references a key in a secret that - will be used as value. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - value: - description: Value can be specified directly to set a - value without using a secret. - type: string - type: object - clientSecret: - description: ClientSecret is the secret part of the credential. - properties: - secretRef: - description: SecretRef references a key in a secret that - will be used as value. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - value: - description: Value can be specified directly to set a - value without using a secret. - type: string - type: object - tenant: - description: Tenant is the chosen hostname / site name. - type: string - tld: - description: |- - TLD is based on the server location that was chosen during provisioning. - If unset, defaults to "com". - type: string - urlTemplate: - description: |- - URLTemplate - If unset, defaults to "https://%s.secretsvaultcloud.%s/v1/%s%s". 
- type: string - required: - - clientId - - clientSecret - - tenant - type: object - doppler: - description: Doppler configures this store to sync secrets using - the Doppler provider - properties: - auth: - description: Auth configures how the Operator authenticates - with the Doppler API - properties: - secretRef: - properties: - dopplerToken: - description: |- - The DopplerToken is used for authentication. - See https://docs.doppler.com/reference/api#authentication for auth token types. - The Key attribute defaults to dopplerToken if not specified. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - required: - - dopplerToken - type: object - required: - - secretRef - type: object - config: - description: Doppler config (required if not using a Service - Token) - type: string - format: - description: Format enables the downloading of secrets as - a file (string) - enum: - - json - - dotnet-json - - env - - yaml - - docker - type: string - nameTransformer: - description: Environment variable compatible name transforms - that change secret names to a different format - enum: - - upper-camel - - camel - - lower-snake - - tf-var - - dotnet-env - - lower-kebab - type: string - project: - description: Doppler project (required if not using a Service - Token) - type: string - required: - - auth - type: object - fake: - description: Fake configures a store with static key/value pairs - properties: - data: - items: - properties: - key: - type: string - value: - type: string - valueMap: - additionalProperties: - type: string - description: 'Deprecated: ValueMap is deprecated and - is intended to be removed in the future, use the `value` - field instead.' - type: object - version: - type: string - required: - - key - type: object - type: array - required: - - data - type: object - gcpsm: - description: GCPSM configures this store to sync secrets using - Google Cloud Platform Secret Manager provider - properties: - auth: - description: Auth defines the information necessary to authenticate - against GCP - properties: - secretRef: - properties: - secretAccessKeySecretRef: - description: The SecretAccessKey is used for authentication - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - type: object - workloadIdentity: - properties: - clusterLocation: - type: string - clusterName: - type: string - clusterProjectID: - type: string - serviceAccountRef: - description: A reference to a ServiceAccount resource. - properties: - audiences: - description: |- - Audience specifies the `aud` claim for the service account token - If the service account uses a well-known annotation for e.g. 
IRSA or GCP Workload Identity - then this audiences will be appended to the list - items: - type: string - type: array - name: - description: The name of the ServiceAccount resource - being referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - required: - - name - type: object - required: - - clusterLocation - - clusterName - - serviceAccountRef - type: object - type: object - projectID: - description: ProjectID project where secret is located - type: string - type: object - gitlab: - description: GitLab configures this store to sync secrets using - GitLab Variables provider - properties: - auth: - description: Auth configures how secret-manager authenticates - with a GitLab instance. - properties: - SecretRef: - properties: - accessToken: - description: AccessToken is used for authentication. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - type: object - required: - - SecretRef - type: object - environment: - description: Environment environment_scope of gitlab CI/CD - variables (Please see https://docs.gitlab.com/ee/ci/environments/#create-a-static-environment - on how to create environments) - type: string - groupIDs: - description: GroupIDs specify, which gitlab groups to pull - secrets from. Group secrets are read from left to right - followed by the project variables. - items: - type: string - type: array - inheritFromGroups: - description: InheritFromGroups specifies whether parent groups - should be discovered and checked for secrets. - type: boolean - projectID: - description: ProjectID specifies a project where secrets are - located. - type: string - url: - description: URL configures the GitLab instance URL. Defaults - to https://gitlab.com/. - type: string - required: - - auth - type: object - ibm: - description: IBM configures this store to sync secrets using IBM - Cloud provider - properties: - auth: - description: Auth configures how secret-manager authenticates - with the IBM secrets manager. - maxProperties: 1 - minProperties: 1 - properties: - containerAuth: - description: IBM Container-based auth with IAM Trusted - Profile. - properties: - iamEndpoint: - type: string - profile: - description: the IBM Trusted Profile - type: string - tokenLocation: - description: Location the token is mounted on the - pod - type: string - required: - - profile - type: object - secretRef: - properties: - secretApiKeySecretRef: - description: The SecretAccessKey is used for authentication - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. 
- type: string - type: object - type: object - type: object - serviceUrl: - description: ServiceURL is the Endpoint URL that is specific - to the Secrets Manager service instance - type: string - required: - - auth - type: object - keepersecurity: - description: KeeperSecurity configures this store to sync secrets - using the KeeperSecurity provider - properties: - authRef: - description: |- - A reference to a specific 'key' within a Secret resource, - In some instances, `key` is a required field. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being referred - to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - folderID: - type: string - required: - - authRef - - folderID - type: object - kubernetes: - description: Kubernetes configures this store to sync secrets - using a Kubernetes cluster provider - properties: - auth: - description: Auth configures how secret-manager authenticates - with a Kubernetes instance. - maxProperties: 1 - minProperties: 1 - properties: - cert: - description: has both clientCert and clientKey as secretKeySelector - properties: - clientCert: - description: |- - A reference to a specific 'key' within a Secret resource, - In some instances, `key` is a required field. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - clientKey: - description: |- - A reference to a specific 'key' within a Secret resource, - In some instances, `key` is a required field. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - type: object - serviceAccount: - description: points to a service account that should be - used for authentication - properties: - audiences: - description: |- - Audience specifies the `aud` claim for the service account token - If the service account uses a well-known annotation for e.g. IRSA or GCP Workload Identity - then this audiences will be appended to the list - items: - type: string - type: array - name: - description: The name of the ServiceAccount resource - being referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. 
- type: string - required: - - name - type: object - token: - description: use static token to authenticate with - properties: - bearerToken: - description: |- - A reference to a specific 'key' within a Secret resource, - In some instances, `key` is a required field. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - type: object - type: object - remoteNamespace: - default: default - description: Remote namespace to fetch the secrets from - type: string - server: - description: configures the Kubernetes server Address. - properties: - caBundle: - description: CABundle is a base64-encoded CA certificate - format: byte - type: string - caProvider: - description: 'see: https://external-secrets.io/v0.4.1/spec/#external-secrets.io/v1alpha1.CAProvider' - properties: - key: - description: The key where the CA certificate can - be found in the Secret or ConfigMap. - type: string - name: - description: The name of the object located at the - provider type. - type: string - namespace: - description: |- - The namespace the Provider type is in. - Can only be defined when used in a ClusterSecretStore. - type: string - type: - description: The type of provider to use such as "Secret", - or "ConfigMap". - enum: - - Secret - - ConfigMap - type: string - required: - - name - - type - type: object - url: - default: kubernetes.default - description: configures the Kubernetes server Address. - type: string - type: object - required: - - auth - type: object - onepassword: - description: OnePassword configures this store to sync secrets - using the 1Password Cloud provider - properties: - auth: - description: Auth defines the information necessary to authenticate - against OnePassword Connect Server - properties: - secretRef: - description: OnePasswordAuthSecretRef holds secret references - for 1Password credentials. - properties: - connectTokenSecretRef: - description: The ConnectToken is used for authentication - to a 1Password Connect Server. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - required: - - connectTokenSecretRef - type: object - required: - - secretRef - type: object - connectHost: - description: ConnectHost defines the OnePassword Connect Server - to connect to - type: string - vaults: - additionalProperties: - type: integer - description: Vaults defines which OnePassword vaults to search - in which order - type: object - required: - - auth - - connectHost - - vaults - type: object - oracle: - description: Oracle configures this store to sync secrets using - Oracle Vault provider - properties: - auth: - description: |- - Auth configures how secret-manager authenticates with the Oracle Vault. 
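The Kubernetes provider above pulls Secrets from another cluster's API server. A sketch of the fragment, with a placeholder endpoint and hypothetical Secret names:

```yaml
provider:
  kubernetes:
    remoteNamespace: team-a         # namespace to read Secrets from (defaults to "default")
    server:
      url: https://35.0.0.1         # remote API server address (placeholder)
      caProvider:
        type: ConfigMap             # "Secret" or "ConfigMap"
        name: kube-root-ca.crt
        key: ca.crt
    auth:                           # exactly one of cert / serviceAccount / token
      token:
        bearerToken:
          name: remote-cluster-token  # hypothetical Secret with a static token
          key: token
```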
- If empty, use the instance principal, otherwise the user credentials specified in Auth. - properties: - secretRef: - description: SecretRef to pass through sensitive information. - properties: - fingerprint: - description: Fingerprint is the fingerprint of the - API private key. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - privatekey: - description: PrivateKey is the user's API Signing - Key in PEM format, used for authentication. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - required: - - fingerprint - - privatekey - type: object - tenancy: - description: Tenancy is the tenancy OCID where user is - located. - type: string - user: - description: User is an access OCID specific to the account. - type: string - required: - - secretRef - - tenancy - - user - type: object - compartment: - description: |- - Compartment is the vault compartment OCID. - Required for PushSecret - type: string - encryptionKey: - description: |- - EncryptionKey is the OCID of the encryption key within the vault. - Required for PushSecret - type: string - principalType: - description: |- - The type of principal to use for authentication. If left blank, the Auth struct will - determine the principal type. This optional field must be specified if using - workload identity. - enum: - - "" - - UserPrincipal - - InstancePrincipal - - Workload - type: string - region: - description: Region is the region where vault is located. - type: string - serviceAccountRef: - description: |- - ServiceAccountRef specified the service account - that should be used when authenticating with WorkloadIdentity. - properties: - audiences: - description: |- - Audience specifies the `aud` claim for the service account token - If the service account uses a well-known annotation for e.g. IRSA or GCP Workload Identity - then this audiences will be appended to the list - items: - type: string - type: array - name: - description: The name of the ServiceAccount resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - required: - - name - type: object - vault: - description: Vault is the vault's OCID of the specific vault - where secret is located. - type: string - required: - - region - - vault - type: object - scaleway: - description: Scaleway - properties: - accessKey: - description: AccessKey is the non-secret part of the api key. - properties: - secretRef: - description: SecretRef references a key in a secret that - will be used as value. 
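Per the deleted Oracle schema, omitting `auth` entirely falls back to the instance principal; when user credentials are supplied, `tenancy`, `user`, and both key references are required. An assumed fragment with placeholder OCIDs:

```yaml
provider:
  oracle:
    region: us-ashburn-1              # region where the vault lives (placeholder)
    vault: ocid1.vault.oc1..example   # OCID of the vault (placeholder)
    auth:                             # omit auth entirely to use the instance principal
      tenancy: ocid1.tenancy.oc1..example
      user: ocid1.user.oc1..example
      secretRef:
        fingerprint:
          name: oci-creds             # hypothetical Secret
          key: fingerprint
        privatekey:
          name: oci-creds
          key: privatekey
```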
- properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - value: - description: Value can be specified directly to set a - value without using a secret. - type: string - type: object - apiUrl: - description: APIURL is the url of the api to use. Defaults - to https://api.scaleway.com - type: string - projectId: - description: 'ProjectID is the id of your project, which you - can find in the console: https://console.scaleway.com/project/settings' - type: string - region: - description: 'Region where your secrets are located: https://developers.scaleway.com/en/quickstart/#region-and-zone' - type: string - secretKey: - description: SecretKey is the non-secret part of the api key. - properties: - secretRef: - description: SecretRef references a key in a secret that - will be used as value. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - value: - description: Value can be specified directly to set a - value without using a secret. - type: string - type: object - required: - - accessKey - - projectId - - region - - secretKey - type: object - senhasegura: - description: Senhasegura configures this store to sync secrets - using senhasegura provider - properties: - auth: - description: Auth defines parameters to authenticate in senhasegura - properties: - clientId: - type: string - clientSecretSecretRef: - description: |- - A reference to a specific 'key' within a Secret resource, - In some instances, `key` is a required field. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - required: - - clientId - - clientSecretSecretRef - type: object - ignoreSslCertificate: - default: false - description: IgnoreSslCertificate defines if SSL certificate - must be ignored - type: boolean - module: - description: Module defines which senhasegura module should - be used to get secrets - type: string - url: - description: URL of senhasegura - type: string - required: - - auth - - module - - url - type: object - upboundspaces: - description: UpboundProvider configures a store to sync secrets - with Upbound Spaces. 
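The Scaleway block lets either half of the API key be given inline (`value`) or via a Secret (`secretRef`). A sketch with placeholder identifiers:

```yaml
provider:
  scaleway:
    apiUrl: https://api.scaleway.com  # the documented default
    region: fr-par                    # placeholder region
    projectId: 00000000-0000-0000-0000-000000000000  # placeholder project id
    accessKey:
      value: SCWXXXXXXXXXXXXXXXXX     # the access key may be set inline...
    secretKey:
      secretRef:                      # ...while the secret key comes from a Secret
        name: scaleway-creds          # hypothetical Secret
        key: secret-key
```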
- properties: - storeRef: - description: StoreRef holds ref to Upbound Spaces secret store - properties: - name: - description: Name of the secret store on Upbound Spaces - type: string - required: - - name - type: object - required: - - storeRef - type: object - vault: - description: Vault configures this store to sync secrets using - Hashi provider - properties: - auth: - description: Auth configures how secret-manager authenticates - with the Vault server. - properties: - appRole: - description: |- - AppRole authenticates with Vault using the App Role auth mechanism, - with the role and secret stored in a Kubernetes Secret resource. - properties: - path: - default: approle - description: |- - Path where the App Role authentication backend is mounted - in Vault, e.g: "approle" - type: string - roleId: - description: |- - RoleID configured in the App Role authentication backend when setting - up the authentication backend in Vault. - type: string - roleRef: - description: |- - Reference to a key in a Secret that contains the App Role ID used - to authenticate with Vault. - The `key` field must be specified and denotes which entry within the Secret - resource is used as the app role id. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - secretRef: - description: |- - Reference to a key in a Secret that contains the App Role secret used - to authenticate with Vault. - The `key` field must be specified and denotes which entry within the Secret - resource is used as the app role secret. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - required: - - path - - secretRef - type: object - cert: - description: |- - Cert authenticates with TLS Certificates by passing client certificate, private key and ca certificate - Cert authentication method - properties: - clientCert: - description: |- - ClientCert is a certificate to authenticate using the Cert Vault - authentication method - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. 
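The `upboundspaces` provider is the smallest of the set: it only points at a Spaces-managed secret store by name. An assumed fragment:

```yaml
provider:
  upboundspaces:
    storeRef:
      name: default                 # name of the Spaces-side secret store (assumed)
```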
- type: string - type: object - secretRef: - description: |- - SecretRef to a key in a Secret resource containing client private key to - authenticate with Vault using the Cert authentication method - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - type: object - iam: - description: |- - Iam authenticates with vault by passing a special AWS request signed with AWS IAM credentials - AWS IAM authentication method - properties: - externalID: - description: AWS External ID set on assumed IAM roles - type: string - jwt: - description: Specify a service account with IRSA enabled - properties: - serviceAccountRef: - description: A reference to a ServiceAccount resource. - properties: - audiences: - description: |- - Audience specifies the `aud` claim for the service account token - If the service account uses a well-known annotation for e.g. IRSA or GCP Workload Identity - then this audiences will be appended to the list - items: - type: string - type: array - name: - description: The name of the ServiceAccount - resource being referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - required: - - name - type: object - type: object - path: - description: 'Path where the AWS auth method is enabled - in Vault, e.g: "aws"' - type: string - region: - description: AWS region - type: string - role: - description: This is the AWS role to be assumed before - talking to vault - type: string - secretRef: - description: Specify credentials in a Secret object - properties: - accessKeyIDSecretRef: - description: The AccessKeyID is used for authentication - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource - being referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - secretAccessKeySecretRef: - description: The SecretAccessKey is used for authentication - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource - being referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. 
- type: string - type: object - sessionTokenSecretRef: - description: |- - The SessionToken used for authentication - This must be defined if AccessKeyID and SecretAccessKey are temporary credentials - see: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource - being referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - type: object - vaultAwsIamServerID: - description: 'X-Vault-AWS-IAM-Server-ID is an additional - header used by Vault IAM auth method to mitigate - against different types of replay attacks. More - details here: https://developer.hashicorp.com/vault/docs/auth/aws' - type: string - vaultRole: - description: Vault Role. In vault, a role describes - an identity with a set of permissions, groups, or - policies you want to attach a user of the secrets - engine - type: string - required: - - vaultRole - type: object - jwt: - description: |- - Jwt authenticates with Vault by passing role and JWT token using the - JWT/OIDC authentication method - properties: - kubernetesServiceAccountToken: - description: |- - Optional ServiceAccountToken specifies the Kubernetes service account for which to request - a token for with the `TokenRequest` API. - properties: - audiences: - description: |- - Optional audiences field that will be used to request a temporary Kubernetes service - account token for the service account referenced by `serviceAccountRef`. - Defaults to a single audience `vault` it not specified. - Deprecated: use serviceAccountRef.Audiences instead - items: - type: string - type: array - expirationSeconds: - description: |- - Optional expiration time in seconds that will be used to request a temporary - Kubernetes service account token for the service account referenced by - `serviceAccountRef`. - Deprecated: this will be removed in the future. - Defaults to 10 minutes. - format: int64 - type: integer - serviceAccountRef: - description: Service account field containing - the name of a kubernetes ServiceAccount. - properties: - audiences: - description: |- - Audience specifies the `aud` claim for the service account token - If the service account uses a well-known annotation for e.g. IRSA or GCP Workload Identity - then this audiences will be appended to the list - items: - type: string - type: array - name: - description: The name of the ServiceAccount - resource being referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. 
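Among the Vault auth methods above, AWS IAM requires only `vaultRole`; everything else is optional. A hedged fragment combining it with the provider-level `server` and `path` fields defined later in this schema:

```yaml
provider:
  vault:
    server: https://vault.example.com:8200  # placeholder Vault address
    path: secret                    # KV mount path; "/data" is appended for v2 if missing
    auth:
      iam:
        vaultRole: dev-role         # the only required field of the iam block (placeholder)
        region: us-east-1
        path: aws                   # mount path of Vault's AWS auth method
        secretRef:
          accessKeyIDSecretRef:
            name: aws-creds         # hypothetical Secret
            key: access-key-id
          secretAccessKeySecretRef:
            name: aws-creds
            key: secret-access-key
```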
- type: string - required: - - name - type: object - required: - - serviceAccountRef - type: object - path: - default: jwt - description: |- - Path where the JWT authentication backend is mounted - in Vault, e.g: "jwt" - type: string - role: - description: |- - Role is a JWT role to authenticate using the JWT/OIDC Vault - authentication method - type: string - secretRef: - description: |- - Optional SecretRef that refers to a key in a Secret resource containing JWT token to - authenticate with Vault using the JWT/OIDC authentication method. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - required: - - path - type: object - kubernetes: - description: |- - Kubernetes authenticates with Vault by passing the ServiceAccount - token stored in the named Secret resource to the Vault server. - properties: - mountPath: - default: kubernetes - description: |- - Path where the Kubernetes authentication backend is mounted in Vault, e.g: - "kubernetes" - type: string - role: - description: |- - A required field containing the Vault Role to assume. A Role binds a - Kubernetes ServiceAccount with a set of Vault policies. - type: string - secretRef: - description: |- - Optional secret field containing a Kubernetes ServiceAccount JWT used - for authenticating with Vault. If a name is specified without a key, - `token` is the default. If one is not specified, the one bound to - the controller will be used. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - serviceAccountRef: - description: |- - Optional service account field containing the name of a kubernetes ServiceAccount. - If the service account is specified, the service account secret token JWT will be used - for authenticating with Vault. If the service account selector is not supplied, - the secretRef will be used instead. - properties: - audiences: - description: |- - Audience specifies the `aud` claim for the service account token - If the service account uses a well-known annotation for e.g. IRSA or GCP Workload Identity - then this audiences will be appended to the list - items: - type: string - type: array - name: - description: The name of the ServiceAccount resource - being referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. 
- type: string - required: - - name - type: object - required: - - mountPath - - role - type: object - ldap: - description: |- - Ldap authenticates with Vault by passing username/password pair using - the LDAP authentication method - properties: - path: - default: ldap - description: |- - Path where the LDAP authentication backend is mounted - in Vault, e.g: "ldap" - type: string - secretRef: - description: |- - SecretRef to a key in a Secret resource containing password for the LDAP - user used to authenticate with Vault using the LDAP authentication - method - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - username: - description: |- - Username is a LDAP user name used to authenticate using the LDAP Vault - authentication method - type: string - required: - - path - - username - type: object - tokenSecretRef: - description: TokenSecretRef authenticates with Vault by - presenting a token. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - userPass: - description: UserPass authenticates with Vault by passing - username/password pair - properties: - path: - default: user - description: |- - Path where the UserPassword authentication backend is mounted - in Vault, e.g: "user" - type: string - secretRef: - description: |- - SecretRef to a key in a Secret resource containing password for the - user used to authenticate with Vault using the UserPass authentication - method - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - username: - description: |- - Username is a user name used to authenticate using the UserPass Vault - authentication method - type: string - required: - - path - - username - type: object - type: object - caBundle: - description: |- - PEM encoded CA bundle used to validate Vault server certificate. Only used - if the Server URL is using HTTPS protocol. This parameter is ignored for - plain HTTP protocol connection. If not set the system root certificates - are used to validate the TLS connection. - format: byte - type: string - caProvider: - description: The provider for the CA bundle to use to validate - Vault server certificate. 
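Kubernetes auth against Vault, per the block above, requires `mountPath` and `role`; when `serviceAccountRef` is set, that ServiceAccount's token JWT is used. An illustrative fragment:

```yaml
provider:
  vault:
    server: https://vault.example.com:8200  # placeholder Vault address
    path: secret
    auth:
      kubernetes:
        mountPath: kubernetes       # where the Kubernetes auth backend is mounted in Vault
        role: eso-reader            # Vault role binding a ServiceAccount to policies (placeholder)
        serviceAccountRef:
          name: vault-auth          # hypothetical ServiceAccount
```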
- properties: - key: - description: The key where the CA certificate can be found - in the Secret or ConfigMap. - type: string - name: - description: The name of the object located at the provider - type. - type: string - namespace: - description: |- - The namespace the Provider type is in. - Can only be defined when used in a ClusterSecretStore. - type: string - type: - description: The type of provider to use such as "Secret", - or "ConfigMap". - enum: - - Secret - - ConfigMap - type: string - required: - - name - - type - type: object - forwardInconsistent: - description: |- - ForwardInconsistent tells Vault to forward read-after-write requests to the Vault - leader instead of simply retrying within a loop. This can increase performance if - the option is enabled serverside. - https://www.vaultproject.io/docs/configuration/replication#allow_forwarding_via_header - type: boolean - namespace: - description: |- - Name of the vault namespace. Namespaces is a set of features within Vault Enterprise that allows - Vault environments to support Secure Multi-tenancy. e.g: "ns1". - More about namespaces can be found here https://www.vaultproject.io/docs/enterprise/namespaces - type: string - path: - description: |- - Path is the mount path of the Vault KV backend endpoint, e.g: - "secret". The v2 KV secret engine version specific "/data" path suffix - for fetching secrets from Vault is optional and will be appended - if not present in specified path. - type: string - readYourWrites: - description: |- - ReadYourWrites ensures isolated read-after-write semantics by - providing discovered cluster replication states in each request. - More information about eventual consistency in Vault can be found here - https://www.vaultproject.io/docs/enterprise/consistency - type: boolean - server: - description: 'Server is the connection address for the Vault - server, e.g: "https://vault.example.com:8200".' - type: string - version: - default: v2 - description: |- - Version is the Vault KV secret engine version. This can be either "v1" or - "v2". Version defaults to "v2". - enum: - - v1 - - v2 - type: string - required: - - auth - - server - type: object - webhook: - description: Webhook configures this store to sync secrets using - a generic templated webhook - properties: - body: - description: Body - type: string - caBundle: - description: |- - PEM encoded CA bundle used to validate webhook server certificate. Only used - if the Server URL is using HTTPS protocol. This parameter is ignored for - plain HTTP protocol connection. If not set the system root certificates - are used to validate the TLS connection. - format: byte - type: string - caProvider: - description: The provider for the CA bundle to use to validate - webhook server certificate. - properties: - key: - description: The key the value inside of the provider - type to use, only used with "Secret" type - type: string - name: - description: The name of the object located at the provider - type. - type: string - namespace: - description: The namespace the Provider type is in. - type: string - type: - description: The type of provider to use such as "Secret", - or "ConfigMap". 
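Pulling the Vault provider's top-level fields together (`server` and `auth` are the only required ones), a token-authenticated sketch might read:

```yaml
provider:
  vault:
    server: https://vault.example.com:8200  # placeholder Vault address
    path: secret                    # KV mount; "/data" is appended for v2 if missing
    version: v2                     # "v1" or "v2" (defaults to v2)
    namespace: ns1                  # Vault Enterprise namespace, if any (placeholder)
    readYourWrites: true            # isolated read-after-write semantics
    auth:
      tokenSecretRef:
        name: vault-token           # hypothetical Secret holding a Vault token
        key: token
```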
- enum: - - Secret - - ConfigMap - type: string - required: - - name - - type - type: object - headers: - additionalProperties: - type: string - description: Headers - type: object - method: - description: Webhook Method - type: string - result: - description: Result formatting - properties: - jsonPath: - description: Json path of return value - type: string - type: object - secrets: - description: |- - Secrets to fill in templates - These secrets will be passed to the templating function as key value pairs under the given name - items: - properties: - name: - description: Name of this secret in templates - type: string - secretRef: - description: Secret ref to fill in credentials - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - required: - - name - - secretRef - type: object - type: array - timeout: - description: Timeout - type: string - url: - description: Webhook url to call - type: string - required: - - result - - url - type: object - yandexcertificatemanager: - description: YandexCertificateManager configures this store to - sync secrets using Yandex Certificate Manager provider - properties: - apiEndpoint: - description: Yandex.Cloud API endpoint (e.g. 'api.cloud.yandex.net:443') - type: string - auth: - description: Auth defines the information necessary to authenticate - against Yandex Certificate Manager - properties: - authorizedKeySecretRef: - description: The authorized key used for authentication - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - type: object - caProvider: - description: The provider for the CA bundle to use to validate - Yandex.Cloud server certificate. - properties: - certSecretRef: - description: |- - A reference to a specific 'key' within a Secret resource, - In some instances, `key` is a required field. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - type: object - required: - - auth - type: object - yandexlockbox: - description: YandexLockbox configures this store to sync secrets - using Yandex Lockbox provider - properties: - apiEndpoint: - description: Yandex.Cloud API endpoint (e.g. 
'api.cloud.yandex.net:443') - type: string - auth: - description: Auth defines the information necessary to authenticate - against Yandex Lockbox - properties: - authorizedKeySecretRef: - description: The authorized key used for authentication - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - type: object - caProvider: - description: The provider for the CA bundle to use to validate - Yandex.Cloud server certificate. - properties: - certSecretRef: - description: |- - A reference to a specific 'key' within a Secret resource, - In some instances, `key` is a required field. - properties: - key: - description: |- - The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be - defaulted, in others it may be required. - type: string - name: - description: The name of the Secret resource being - referred to. - type: string - namespace: - description: |- - Namespace of the resource being referred to. Ignored if referent is not cluster-scoped. cluster-scoped defaults - to the namespace of the referent. - type: string - type: object - type: object - required: - - auth - type: object - type: object - refreshInterval: - description: Used to configure store refresh interval in seconds. - type: integer - retrySettings: - description: Used to configure http retries if failed. - properties: - maxRetries: - format: int32 - type: integer - retryInterval: - type: string - type: object - secretStoreMetadata: - description: The metadata of the secret store to be created. - properties: - annotations: - additionalProperties: - type: string - description: Annotations that are set on projected resource. - type: object - labels: - additionalProperties: - type: string - description: Labels that are set on projected resource. - type: object - type: object - secretStoreName: - description: |- - SecretStoreName is the name to use when creating secret stores within a control plane. - optional, if not set, SharedSecretStore name will be used. - When set, it is immutable. - maxLength: 253 - minLength: 1 - type: string - x-kubernetes-validations: - - message: value is immutable - rule: self == oldSelf - required: - - controlPlaneSelector - - namespaceSelector - - provider - type: object - x-kubernetes-validations: - - message: secretStoreName is immutable - rule: has(self.secretStoreName) == has(oldSelf.secretStoreName) - status: - description: SharedSecretStoreStatus defines the observed state of the - SecretStore. - properties: - conditions: - description: Conditions of the resource. - items: - description: A Condition that may apply to a resource. - properties: - lastTransitionTime: - description: |- - LastTransitionTime is the last time this condition transitioned from one - status to another. - format: date-time - type: string - message: - description: |- - A Message containing details about this condition's last transition from - one status to another, if any. - type: string - observedGeneration: - description: |- - ObservedGeneration represents the .metadata.generation that the condition was set based upon. 
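The cross-cutting `SharedSecretStore` fields deleted just above (`refreshInterval`, `retrySettings`, `secretStoreMetadata`, and the immutable `secretStoreName`) compose like this; the apiVersion and selector shapes are assumptions for illustration:

```yaml
apiVersion: spaces.upbound.io/v1alpha1  # assumed API version
kind: SharedSecretStore
metadata:
  name: platform-store              # hypothetical name
spec:
  controlPlaneSelector:             # selector shapes assumed, as in the earlier sketch
    labelSelectors:
      - matchLabels:
          env: prod
  namespaceSelector:
    names:
      - crossplane-system
  secretStoreName: vault            # immutable once set; defaults to this object's name
  refreshInterval: 300              # store refresh interval in seconds
  retrySettings:
    maxRetries: 5
    retryInterval: 10s
  secretStoreMetadata:
    labels:                         # labels projected onto the per-control-plane stores
      team: platform
  provider:
    vault:
      server: https://vault.example.com:8200
      path: secret
      auth:
        tokenSecretRef:
          name: vault-token         # hypothetical Secret
          key: token
```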
- For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - type: integer - reason: - description: A Reason for this condition's last transition from - one status to another. - type: string - status: - description: Status of this condition; is it currently True, - False, or Unknown? - type: string - type: - description: |- - Type of this condition. At most one of each condition type may apply to - a resource at any point in time. - type: string - required: - - lastTransitionTime - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - failed: - description: List of provisioning failures. - items: - description: SecretStoreProvisioningFailure defines secret store - provisioning failure. - properties: - conditions: - description: List of occurred conditions. - items: - properties: - lastTransitionTime: - format: date-time - type: string - message: - type: string - reason: - type: string - status: - type: string - type: - type: string - required: - - status - - type - type: object - type: array - controlPlane: - description: ControlPlane name where the failure occurred. - type: string - required: - - controlPlane - type: object - type: array - x-kubernetes-list-map-keys: - - controlPlane - x-kubernetes-list-type: map - observedGeneration: - description: |- - ObservedGeneration is the latest metadata.generation - which resulted in either a ready state, or stalled due to error - it can not recover from without human intervention. - format: int64 - type: integer - provisioned: - description: List of successfully provisioned targets. - items: - description: SecretStoreProvisioningSuccess defines secret store - provision success. - properties: - controlPlane: - description: ControlPlane name where the secret store got projected - type: string - required: - - controlPlane - type: object - type: array - x-kubernetes-list-map-keys: - - controlPlane - x-kubernetes-list-type: map - type: object - type: object - served: true - storage: true - subresources: - status: {} diff --git a/static/crds/space/v1.9/spaces.upbound.io_simulations.yaml b/static/crds/space/v1.9/spaces.upbound.io_simulations.yaml deleted file mode 100644 index 856d1a82b..000000000 --- a/static/crds/space/v1.9/spaces.upbound.io_simulations.yaml +++ /dev/null @@ -1,243 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.3 - name: simulations.spaces.upbound.io -spec: - group: spaces.upbound.io - names: - categories: - - spaces - kind: Simulation - listKind: SimulationList - plural: simulations - singular: simulation - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .spec.controlPlaneName - name: SOURCE - type: string - - jsonPath: .status.simulatedControlPlaneName - name: SIMULATED - type: string - - jsonPath: .status.conditions[?(@.type=='AcceptingChanges')].status - name: ACCEPTING-CHANGES - type: string - - jsonPath: .status.conditions[?(@.type=='AcceptingChanges')].reason - name: STATE - type: string - - jsonPath: .metadata.creationTimestamp - name: AGE - type: date - name: v1alpha1 - schema: - openAPIV3Schema: - description: |- - A Simulation creates a simulation of a source ControlPlane. You can apply a - change set to the simulated control plane. 
When the Simulation is complete it - will detect the changes and report the difference compared to the source - control plane. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: SimulationSpec specifies how to run the simulation. - properties: - completionCriteria: - description: |- - CompletionCriteria specify how Spaces should determine when the - simulation is complete. If any of the criteria are met, Spaces will set - the Simulation's desired state to complete. Omit the criteria if you want - to manually mark the Simulation complete. - items: - description: A CompletionCriterion specifies when a simulation is - complete. - properties: - duration: - description: Duration after which the simulation is complete. - type: string - type: - description: Type of criterion. - enum: - - Duration - type: string - required: - - duration - - type - type: object - type: array - controlPlaneName: - description: |- - ControlPlaneName is the name of the ControlPlane to simulate a change to. - This control plane is known as the Simulation's 'source' control plane. - minLength: 1 - type: string - x-kubernetes-validations: - - message: The source controlplane can't be changed - rule: self == oldSelf - desiredState: - default: AcceptingChanges - description: DesiredState of the simulation. - enum: - - AcceptingChanges - - Complete - - Terminated - type: string - x-kubernetes-validations: - - message: A complete Simulation can only be terminated - rule: oldSelf != 'Complete' || self == 'Complete' || self == 'Terminated' - - message: A Simulation can't be un-terminated - rule: oldSelf != 'Terminated' || self == oldSelf - required: - - controlPlaneName - - desiredState - type: object - status: - description: SimulationStatus represents the observed state of a Simulation. - properties: - changes: - description: |- - Changes detected by the simulation. Only changes that happen while the - simulation is in the AcceptingChanges state are included. - items: - description: |- - A SimulationChange represents an object that changed while the simulation was - in the AcceptingChanges state. - properties: - change: - description: Change type. - enum: - - Unknown - - Create - - Update - - Delete - type: string - objectRef: - description: ObjectReference to the changed object. - properties: - apiVersion: - description: APIVersion of the changed resource. - type: string - kind: - description: Kind of the changed resource. - type: string - name: - description: Name of the changed resource. - type: string - namespace: - description: Namespace of the changed resource. - type: string - required: - - apiVersion - - kind - - name - type: object - required: - - change - - objectRef - type: object - type: array - conditions: - description: Conditions of the resource. - items: - description: A Condition that may apply to a resource. 
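The deleted Simulation schema admits a compact manifest: pick a source control plane (immutable once set), start in `AcceptingChanges`, and optionally let a `Duration` criterion complete the run. A sketch with hypothetical names:

```yaml
apiVersion: spaces.upbound.io/v1alpha1  # group and version from the CRD above
kind: Simulation
metadata:
  name: promote-v2                  # hypothetical name
  namespace: default                # the CRD is namespace-scoped
spec:
  controlPlaneName: prod            # source control plane; can't be changed later
  desiredState: AcceptingChanges    # later patched to Complete, or completed by criteria
  completionCriteria:
    - type: Duration
      duration: 30m                 # auto-complete after 30 minutes
```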
- properties: - lastTransitionTime: - description: |- - LastTransitionTime is the last time this condition transitioned from one - status to another. - format: date-time - type: string - message: - description: |- - A Message containing details about this condition's last transition from - one status to another, if any. - type: string - observedGeneration: - description: |- - ObservedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. - format: int64 - type: integer - reason: - description: A Reason for this condition's last transition from - one status to another. - type: string - status: - description: Status of this condition; is it currently True, - False, or Unknown? - type: string - type: - description: |- - Type of this condition. At most one of each condition type may apply to - a resource at any point in time. - type: string - required: - - lastTransitionTime - - reason - - status - - type - type: object - type: array - x-kubernetes-list-map-keys: - - type - x-kubernetes-list-type: map - controlPlaneData: - description: |- - ControlPlaneData exported from the source control plane and imported to - the simulated control plane. - properties: - exportTimestamp: - description: |- - ExportTimestamp is the time at which the source control plane's resources - were exported. Resources are exported to temporary storage before they're - imported to the simulated control plane. - format: date-time - type: string - importTimestamp: - description: |- - ImportTiemstamp is the time at which the source control plane's resources - were imported to the simulated control plane. - format: date-time - type: string - type: object - observedGeneration: - description: |- - ObservedGeneration is the latest metadata.generation - which resulted in either a ready state, or stalled due to error - it can not recover from without human intervention. - format: int64 - type: integer - simulatedControlPlaneName: - description: |- - SimulatedControlPlaneName is the name of the control plane used to run - the simulation. 
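Once a simulation detects drift, its status carries the diff per the schema above. Illustrative values only:

```yaml
status:
  simulatedControlPlaneName: prod-simulation-x7k2q  # placeholder generated name
  changes:
    - change: Update                # Unknown | Create | Update | Delete
      objectRef:
        apiVersion: ec2.aws.upbound.io/v1beta1      # placeholder changed resource
        kind: Instance
        name: my-instance
        namespace: default
  controlPlaneData:
    exportTimestamp: "2026-01-28T18:00:00Z"   # source resources exported to temp storage
    importTimestamp: "2026-01-28T18:02:13Z"   # then imported to the simulated plane
```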
- minLength: 1 - type: string - type: object - required: - - spec - type: object - served: true - storage: true - subresources: - status: {} diff --git a/versions.json b/versions.json deleted file mode 100644 index e9cead82b..000000000 --- a/versions.json +++ /dev/null @@ -1 +0,0 @@ -["1.15","1.14", "1.13", "1.12", "1.11", "1.10", "1.9"] From 68135683b1803ce9e1470a13d84f050d57a178e3 Mon Sep 17 00:00:00 2001 From: Rae Sharp Date: Wed, 28 Jan 2026 23:21:07 -0500 Subject: [PATCH 10/11] Remove API versioning dupe warning --- spaces-docs/concepts/control-planes.md | 5 ----- spaces-docs/howtos/api-connector.md | 5 ----- spaces-docs/howtos/auto-upgrade.md | 5 ----- .../howtos/automation-and-gitops/overview.md | 5 ----- spaces-docs/howtos/backup-and-restore.md | 14 ------------- .../howtos/control-plane-topologies.md | 5 ----- spaces-docs/howtos/ctp-connector.md | 5 ----- spaces-docs/howtos/debugging-a-ctp.md | 5 ----- spaces-docs/howtos/mcp-connector-guide.md | 5 ----- spaces-docs/howtos/migrating-to-mcps.md | 5 ----- spaces-docs/howtos/observability.md | 20 ------------------- spaces-docs/howtos/query-api.md | 5 ----- spaces-docs/howtos/secrets-management.md | 5 ----- .../howtos/self-hosted/administer-features.md | 5 ----- .../howtos/self-hosted/attach-detach.md | 5 ----- spaces-docs/howtos/self-hosted/billing.md | 5 ----- .../howtos/self-hosted/configure-ha.md | 5 ----- .../howtos/self-hosted/ctp-audit-logs.md | 5 ----- .../howtos/self-hosted/declarative-ctps.md | 5 ----- .../howtos/self-hosted/oidc-configuration.md | 5 ----- .../howtos/self-hosted/proxies-config.md | 5 ----- spaces-docs/howtos/self-hosted/query-api.md | 8 -------- .../howtos/self-hosted/scaling-resources.md | 5 ----- .../howtos/self-hosted/space-observability.md | 8 -------- .../howtos/self-hosted/spaces-management.md | 5 ----- spaces-docs/howtos/self-hosted/use-argo.md | 5 ----- spaces-docs/howtos/simulations.md | 5 ----- .../version-1.13/concepts/control-planes.md | 5 ----- .../version-1.13/howtos/api-connector.md | 5 ----- .../version-1.13/howtos/auto-upgrade.md | 5 ----- .../howtos/automation-and-gitops/overview.md | 5 ----- .../version-1.13/howtos/backup-and-restore.md | 14 ------------- .../howtos/control-plane-topologies.md | 5 ----- .../version-1.13/howtos/ctp-connector.md | 5 ----- .../version-1.13/howtos/debugging-a-ctp.md | 5 ----- .../howtos/mcp-connector-guide.md | 5 ----- .../version-1.13/howtos/migrating-to-mcps.md | 5 ----- .../version-1.13/howtos/observability.md | 20 ------------------- .../version-1.13/howtos/query-api.md | 5 ----- .../version-1.13/howtos/secrets-management.md | 5 ----- .../howtos/self-hosted/administer-features.md | 5 ----- .../howtos/self-hosted/attach-detach.md | 5 ----- .../howtos/self-hosted/billing.md | 5 ----- .../howtos/self-hosted/configure-ha.md | 5 ----- .../howtos/self-hosted/ctp-audit-logs.md | 5 ----- .../howtos/self-hosted/declarative-ctps.md | 5 ----- .../version-1.13/howtos/self-hosted/dr.md | 8 -------- .../howtos/self-hosted/oidc-configuration.md | 5 ----- .../howtos/self-hosted/proxies-config.md | 5 ----- .../howtos/self-hosted/query-api.md | 8 -------- .../howtos/self-hosted/scaling-resources.md | 5 ----- .../howtos/self-hosted/space-observability.md | 8 -------- .../howtos/self-hosted/spaces-management.md | 5 ----- .../howtos/self-hosted/use-argo.md | 5 ----- .../version-1.13/howtos/simulations.md | 5 ----- .../version-1.14/concepts/control-planes.md | 5 ----- .../version-1.14/howtos/api-connector.md | 5 ----- .../version-1.14/howtos/auto-upgrade.md | 5 ----- 
.../howtos/automation-and-gitops/overview.md | 5 ----- .../version-1.14/howtos/backup-and-restore.md | 14 ------------- .../howtos/control-plane-topologies.md | 5 ----- .../version-1.14/howtos/ctp-connector.md | 5 ----- .../version-1.14/howtos/debugging-a-ctp.md | 5 ----- .../howtos/mcp-connector-guide.md | 5 ----- .../version-1.14/howtos/migrating-to-mcps.md | 5 ----- .../version-1.14/howtos/observability.md | 20 ------------------- .../version-1.14/howtos/query-api.md | 5 ----- .../version-1.14/howtos/secrets-management.md | 5 ----- .../howtos/self-hosted/administer-features.md | 5 ----- .../howtos/self-hosted/attach-detach.md | 5 ----- .../howtos/self-hosted/billing.md | 5 ----- .../howtos/self-hosted/configure-ha.md | 5 ----- .../howtos/self-hosted/ctp-audit-logs.md | 5 ----- .../howtos/self-hosted/declarative-ctps.md | 5 ----- .../version-1.14/howtos/self-hosted/dr.md | 8 -------- .../howtos/self-hosted/oidc-configuration.md | 5 ----- .../howtos/self-hosted/proxies-config.md | 5 ----- .../howtos/self-hosted/query-api.md | 8 -------- .../howtos/self-hosted/scaling-resources.md | 5 ----- .../howtos/self-hosted/space-observability.md | 8 -------- .../howtos/self-hosted/spaces-management.md | 5 ----- .../howtos/self-hosted/use-argo.md | 5 ----- .../version-1.14/howtos/simulations.md | 5 ----- .../version-1.15/concepts/control-planes.md | 5 ----- .../version-1.15/howtos/api-connector.md | 5 ----- .../version-1.15/howtos/auto-upgrade.md | 5 ----- .../howtos/automation-and-gitops/overview.md | 5 ----- .../version-1.15/howtos/backup-and-restore.md | 14 ------------- .../howtos/control-plane-topologies.md | 5 ----- .../version-1.15/howtos/ctp-connector.md | 5 ----- .../version-1.15/howtos/debugging-a-ctp.md | 5 ----- .../howtos/mcp-connector-guide.md | 5 ----- .../version-1.15/howtos/migrating-to-mcps.md | 5 ----- .../version-1.15/howtos/observability.md | 20 ------------------- .../version-1.15/howtos/query-api.md | 5 ----- .../version-1.15/howtos/secrets-management.md | 5 ----- .../howtos/self-hosted/administer-features.md | 5 ----- .../howtos/self-hosted/attach-detach.md | 5 ----- .../howtos/self-hosted/billing.md | 5 ----- .../howtos/self-hosted/configure-ha.md | 5 ----- .../howtos/self-hosted/ctp-audit-logs.md | 5 ----- .../howtos/self-hosted/declarative-ctps.md | 5 ----- .../version-1.15/howtos/self-hosted/dr.md | 8 -------- .../howtos/self-hosted/oidc-configuration.md | 5 ----- .../howtos/self-hosted/proxies-config.md | 5 ----- .../howtos/self-hosted/query-api.md | 8 -------- .../howtos/self-hosted/scaling-resources.md | 5 ----- .../howtos/self-hosted/space-observability.md | 8 -------- .../howtos/self-hosted/spaces-management.md | 5 ----- .../howtos/self-hosted/use-argo.md | 5 ----- .../version-1.15/howtos/simulations.md | 5 ----- 111 files changed, 684 deletions(-) diff --git a/spaces-docs/concepts/control-planes.md b/spaces-docs/concepts/control-planes.md index 7066343de..76c6386c8 100644 --- a/spaces-docs/concepts/control-planes.md +++ b/spaces-docs/concepts/control-planes.md @@ -13,11 +13,6 @@ Control planes in Upbound are fully isolated Crossplane control plane instances This lets users focus on building their APIs and operating their control planes, while Upbound handles the rest. Each control plane has its own dedicated API server connecting users to their control plane. -:::info API Version Information -This guide applies to all supported versions (v1.9-v1.14+). 
- -For detailed information about Control Plane API specifications and CRD fields, see the [Spaces API Reference](../../../reference/apis/spaces-api/). version compatibility details, see the . -::: ## Control plane architecture diff --git a/spaces-docs/howtos/api-connector.md b/spaces-docs/howtos/api-connector.md index a14468f52..4db30bac1 100644 --- a/spaces-docs/howtos/api-connector.md +++ b/spaces-docs/howtos/api-connector.md @@ -6,11 +6,6 @@ aliases: - /api-connector - /concepts/api-connector --- -:::info API Version Information -This guide covers API Connector, a preview feature available in all supported versions (v1.9-v1.14+). - -For related API specifications and available resources, see the [Spaces API Reference](../../../reference/apis/spaces-api/). version compatibility details, see the . -::: :::warning API Connector is currently in **Preview**. The feature is under active diff --git a/spaces-docs/howtos/auto-upgrade.md b/spaces-docs/howtos/auto-upgrade.md index 249056fb4..edc50e38d 100644 --- a/spaces-docs/howtos/auto-upgrade.md +++ b/spaces-docs/howtos/auto-upgrade.md @@ -9,11 +9,6 @@ plan: "standard" Upbound Spaces can automatically upgrade the version of Upbound Crossplane in managed control plane instances. You can edit the `spec.crossplane.autoUpgrade` field in your `ControlPlane` specification with the available release channels below. -:::info API Version Information -This guide applies to all supported versions (v1.9-v1.14+). The `spec.crossplane.autoUpgrade` field has been available since v1.9. - -For ControlPlane API specifications, see the [Spaces API Reference](../../../reference/apis/spaces-api/). version compatibility details, see the . -::: | Channel | Description | Example | |------------|-----------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------| diff --git a/spaces-docs/howtos/automation-and-gitops/overview.md b/spaces-docs/howtos/automation-and-gitops/overview.md index 57eeb15fc..7af47c032 100644 --- a/spaces-docs/howtos/automation-and-gitops/overview.md +++ b/spaces-docs/howtos/automation-and-gitops/overview.md @@ -8,11 +8,6 @@ plan: "business" Automating control plane deployments with GitOps enables declarative, version-controlled infrastructure management. This section covers integrating GitOps workflows with Upbound control planes using Argo CD and related tools. -:::info API Version Information -This guide applies to all supported versions (v1.9-v1.14+). GitOps patterns are compatible across all versions of Upbound Spaces. - -For details on deployment models and their differences, see the [Deployment Modes](../../concepts/deployment-modes.md) concept guide. version-specific features, see the . -::: ## What is GitOps? diff --git a/spaces-docs/howtos/backup-and-restore.md b/spaces-docs/howtos/backup-and-restore.md index 3b8d026cb..e434552ea 100644 --- a/spaces-docs/howtos/backup-and-restore.md +++ b/spaces-docs/howtos/backup-and-restore.md @@ -9,20 +9,6 @@ plan: "enterprise" Upbound's _Shared Backups_ is a built-in backup and restore feature. Shared Backups lets you configure automatic schedules for taking snapshots of your control planes. You can restore data from these backups by making new control planes. This guide explains how to use Shared Backups for disaster recovery or upgrade scenarios. -:::info API Version Information & Available Versions -This guide applies to **all supported versions** (v1.9-v1.15+). 
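For context on the auto-upgrade hunk above: the removed admonition sat directly before the release-channel table, which documents values for `spec.crossplane.autoUpgrade`. A minimal sketch, assuming the `spaces.upbound.io/v1beta1` ControlPlane API and a `Stable` channel from that table:

```yaml
apiVersion: spaces.upbound.io/v1beta1  # assumed ControlPlane API version
kind: ControlPlane
metadata:
  name: ctp-dev                     # hypothetical name
spec:
  crossplane:
    autoUpgrade:
      channel: Stable               # one of the release channels listed in the table
```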
- -**Select your API version**: -- [Spaces API v1.15 (Latest)](../../../next/reference/apis/spaces-api/) -- [Spaces API v1.14](../../../1.14/reference/apis/spaces-api/) -- [Spaces API v1.13](../../../1.13/reference/apis/spaces-api/) -- [Spaces API v1.12](../../../1.12/reference/apis/spaces-api/) -- [Spaces API v1.11](../../../1.11/reference/apis/spaces-api/) -- [Spaces API v1.10](../../../1.10/reference/apis/spaces-api/) -- [Spaces API v1.9](../../../1.9/reference/apis/spaces-api/) - -For version support policy, see . version compatibility details, see the . -::: ## Benefits diff --git a/spaces-docs/howtos/control-plane-topologies.md b/spaces-docs/howtos/control-plane-topologies.md index 9020e5a41..11cd5efcf 100644 --- a/spaces-docs/howtos/control-plane-topologies.md +++ b/spaces-docs/howtos/control-plane-topologies.md @@ -4,11 +4,6 @@ sidebar_position: 15 description: Configure scheduling of composites to remote control planes --- -:::info API Version Information -This guide is for the Control Plane Topology feature, which is in **private preview**. interested customers with access to this feature, it applies to v1.12+. - -For related API specifications, see the [Spaces API Reference](../../../reference/apis/spaces-api/). details on feature availability, see the . -::: :::important This feature is in private preview for select customers in Upbound Spaces. If you're interested in this deployment mode, please [contact us](https://www.upbound.io/support/contact). diff --git a/spaces-docs/howtos/ctp-connector.md b/spaces-docs/howtos/ctp-connector.md index b2cc48c49..cf4a7b235 100644 --- a/spaces-docs/howtos/ctp-connector.md +++ b/spaces-docs/howtos/ctp-connector.md @@ -7,11 +7,6 @@ plan: "standard" -:::info API Version Information -This guide applies to all supported versions (v1.9-v1.14+). Control Plane Connector functionality is consistent across versions. - -For related API specifications and resources, see the [Spaces API Reference](../../../reference/apis/spaces-api/). version compatibility details, see the . -::: Control Plane Connector connects arbitrary Kubernetes application clusters outside the Upbound Spaces to your control planes running in Upbound Spaces. diff --git a/spaces-docs/howtos/debugging-a-ctp.md b/spaces-docs/howtos/debugging-a-ctp.md index 521271e40..85a2ca688 100644 --- a/spaces-docs/howtos/debugging-a-ctp.md +++ b/spaces-docs/howtos/debugging-a-ctp.md @@ -6,11 +6,6 @@ description: A guide for how to debug resources on a control plane running in Up This guide provides troubleshooting guidance for how to identify and fix issues on a control plane. -:::info API Version Information -This guide applies to all supported versions (v1.9-v1.14+). Debugging methods are consistent across versions. - -For related control plane specifications, see the [Spaces API Reference](../../../reference/apis/spaces-api/). version-specific features, see the . -::: ## Start from Upbound Console diff --git a/spaces-docs/howtos/mcp-connector-guide.md b/spaces-docs/howtos/mcp-connector-guide.md index 8a3866d07..98b64cf15 100644 --- a/spaces-docs/howtos/mcp-connector-guide.md +++ b/spaces-docs/howtos/mcp-connector-guide.md @@ -7,11 +7,6 @@ description: A tutorial to configure a Space with Argo to declaratively create a In this tutorial, you learn how to configure a Kubernetes app cluster to communicate with a control plane in an Upbound self-hosted Space. -:::info API Version Information -This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. 
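For the Shared Backups hunk above, a schedule resource is the usual entry point for the automatic snapshots the guide describes. The following shape is an assumption based on the guide this patch edits, not something defined in the patch itself:

```yaml
apiVersion: spaces.upbound.io/v1alpha1  # assumed API version
kind: SharedBackupSchedule              # shape assumed; see the Shared Backups guide
metadata:
  name: daily
  namespace: default
spec:
  schedule: "@daily"                # cron-style schedule for control plane snapshots
  controlPlaneSelector:             # selector shape assumed, matching other Spaces APIs
    labelSelectors:
      - matchLabels:
          backup: "true"
```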
Control plane connector functionality is consistent across versions. - -For related API specifications and resources, see the [Spaces API Reference](../../../../../reference/apis/spaces-api/). version compatibility details, see the . -::: The [control plane connector][control-plane-connector] bridges your Kubernetes application clusters---running outside of Upbound--to your control planes running in Upbound. This allows you to interact with your control plane's API right from the app cluster. The claim APIs you define via `CompositeResourceDefinitions` are available alongside Kubernetes workload APIs like `Pod`. In effect, control plane connector provides the same experience as a locally installed Crossplane. diff --git a/spaces-docs/howtos/migrating-to-mcps.md b/spaces-docs/howtos/migrating-to-mcps.md index 93b9c5ac2..61c54e7b5 100644 --- a/spaces-docs/howtos/migrating-to-mcps.md +++ b/spaces-docs/howtos/migrating-to-mcps.md @@ -6,11 +6,6 @@ description: A guide to how to migrate to a control plane in Upbound The Upbound migration tool is a [CLI command][cli-command] that helps you migrate your existing Crossplane control plane to a control plane in Upbound. This tool works for migrating from self-managed Crossplane installations as well as between Upbound managed control planes (MCPs). -:::info API Version Information -This guide applies to all supported versions (v1.9-v1.14+). Migration procedures are consistent across versions, though you may want to upgrade to a newer version after migration to get the latest features. - -For version-specific features and migration considerations, see the . the full compatibility matrix, see the . -::: To migrate a control plane to Upbound, you must: diff --git a/spaces-docs/howtos/observability.md b/spaces-docs/howtos/observability.md index 8fc5c3278..0ddd1f966 100644 --- a/spaces-docs/howtos/observability.md +++ b/spaces-docs/howtos/observability.md @@ -22,26 +22,6 @@ Upbound Spaces offers two levels of observability: -:::info API Version Information & Version Selector -This guide applies to all supported versions (v1.9-v1.15+). Observability features evolved: - -- **v1.9-v1.12**: SharedTelemetryConfig available (alpha until v1.11) -- **v1.11+**: Observability promoted to stable with logs export support -- **v1.14+**: Both space-level and control-plane observability GA - -**View API Reference for Your Version**: -| Version | Status | Link | -|---------|--------|------| -| v1.15 | Latest | [Spaces API v1.15](../../../next/reference/apis/spaces-api/) | -| v1.14 | Supported | [Spaces API v1.14](../../../1.14/reference/apis/spaces-api/) | -| v1.13 | Supported | [Spaces API v1.13](../../../1.13/reference/apis/spaces-api/) | -| v1.12 | Supported | [Spaces API v1.12](../../../1.12/reference/apis/spaces-api/) | -| v1.11 | Supported | [Spaces API v1.11](../../../1.11/reference/apis/spaces-api/) | -| v1.10 | Limited | [Spaces API v1.10](../../../1.10/reference/apis/spaces-api/) | -| v1.9 | EOL | [Spaces API v1.9](../../../1.9/reference/apis/spaces-api/) | - -For version support policy and feature availability, see and . 
-:::

 :::important
 **Space-level observability** (available since v1.6.0, GA in v1.14.0):
diff --git a/spaces-docs/howtos/query-api.md b/spaces-docs/howtos/query-api.md
index 78163de2f..c9703de55 100644
--- a/spaces-docs/howtos/query-api.md
+++ b/spaces-docs/howtos/query-api.md
@@ -9,11 +9,6 @@ description: Use the `up` CLI to query objects and resources

 Upbound's Query API allows users to inspect objects and resources within their control planes. The read-only `up alpha query` and `up alpha get` CLI commands allow you to gather information on your control planes in a fast and efficient package. These commands follow the [`kubectl` conventions][kubectl-conventions] for filtering, sorting, and retrieving information from your Space.

-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+). Query API is available in Cloud Spaces since v1.6 and Self-Hosted since v1.8.
-
-For detailed deployment procedures, see [Deploy Query API infrastructure - Self-Hosted](./self-hosted/query-api.md). version compatibility details, see the .
-:::

diff --git a/spaces-docs/howtos/secrets-management.md b/spaces-docs/howtos/secrets-management.md
index 88e730ae5..b901a7dad 100644
--- a/spaces-docs/howtos/secrets-management.md
+++ b/spaces-docs/howtos/secrets-management.md
@@ -12,11 +12,6 @@ planes as secrets in an external secret store.

 This feature is a wrapper around the [External Secrets Operator (ESO)][external-secrets-operator-eso] that pulls secrets from external vaults and distributes them across your platform.

-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+). The SharedSecretStore and SharedExternalSecret CRDs have been stable since v1.9.
-
-For API specifications and configuration details, see the [Spaces API Reference](../../../reference/apis/spaces-api/). version compatibility details, see the .
-:::

 ## Benefits

diff --git a/spaces-docs/howtos/self-hosted/administer-features.md b/spaces-docs/howtos/self-hosted/administer-features.md
index ce878014e..87923b81a 100644
--- a/spaces-docs/howtos/self-hosted/administer-features.md
+++ b/spaces-docs/howtos/self-hosted/administer-features.md
@@ -4,11 +4,6 @@ sidebar_position: 12
 description: Enable and disable features in Spaces
 ---

-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Feature flags and their availability may vary by version.
-
-For detailed feature availability across versions, see the .
-:::

 This guide shows how to enable or disable features in your self-hosted Space.

diff --git a/spaces-docs/howtos/self-hosted/attach-detach.md b/spaces-docs/howtos/self-hosted/attach-detach.md
index 1465921cf..d240f44de 100644
--- a/spaces-docs/howtos/self-hosted/attach-detach.md
+++ b/spaces-docs/howtos/self-hosted/attach-detach.md
@@ -3,11 +3,6 @@ title: Connect or disconnect a Space
 sidebar_position: 12
 description: Enable and connect self-hosted Spaces to the Upbound console
 ---
-:::info API Version Information
-This guide applies to **Spaces v1.8.0 and later** (Self-Hosted only). Connection to Upbound console requires Query API and RBAC to be enabled.
-
-For version-specific features and requirements, see the . Query API setup details, see [Deploy Query API infrastructure](./query-api.md).
-:::

 :::important
 This feature is in preview. Starting in Spaces `v1.8.0` and later, you must
diff --git a/spaces-docs/howtos/self-hosted/billing.md b/spaces-docs/howtos/self-hosted/billing.md
index 145ff9f03..9b3917d15 100644
--- a/spaces-docs/howtos/self-hosted/billing.md
+++ b/spaces-docs/howtos/self-hosted/billing.md
@@ -4,11 +4,6 @@ sidebar_position: 50
 description: A guide for how billing works in an Upbound Space
 ---

-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Billing models may evolve between versions; see Capacity Licensing for alternative models.
-
-For version-specific features and capacity-based licensing details, see the . reference specifications, see [Capacity Licensing](/spaces/howtos/self-hosted/capacity-licensing).
-:::

 Spaces are a self-hosting feature of Upbound's [flagship product][flagship-product] for platform teams to deploy control planes in their self-managed environments. You can install Spaces into any Kubernetes cluster in your own cloud account, on-premises data center, or on the edge. The pricing usage-based and requires an Upbound account and subscription. The billing unit is a `Loop`.
diff --git a/spaces-docs/howtos/self-hosted/configure-ha.md b/spaces-docs/howtos/self-hosted/configure-ha.md
index ddf36c55e..e3f9f182f 100644
--- a/spaces-docs/howtos/self-hosted/configure-ha.md
+++ b/spaces-docs/howtos/self-hosted/configure-ha.md
@@ -12,11 +12,6 @@ production operation at scale.

 Use this guide when you're ready to deploy production scaling, high availability, and monitoring in your Space.

-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments.
-
-For API specifications on ControlPlane resources and configurations, see the [Spaces API Reference](../../../../../reference/apis/spaces-api/). version compatibility details, see the .
-:::

 ## Prerequisites

diff --git a/spaces-docs/howtos/self-hosted/ctp-audit-logs.md b/spaces-docs/howtos/self-hosted/ctp-audit-logs.md
index 52f52c776..e387b2873 100644
--- a/spaces-docs/howtos/self-hosted/ctp-audit-logs.md
+++ b/spaces-docs/howtos/self-hosted/ctp-audit-logs.md
@@ -11,11 +11,6 @@ updates, and deletions of Crossplane resources.

 Control plane audit logs use observability features to collect audit logs with `SharedTelemetryConfig` and send logs to an OpenTelemetry (`OTEL`) collector.

-:::info API Version Information
-This guide applies to **Spaces v1.14.0 and later** (Self-Hosted only). Audit logging is not available in earlier versions.
-
-For API specifications on observability resources, see the [Spaces API Reference - v1.14](../../../../../1.14/reference/apis/spaces-api/). details on observability evolution across versions, see the .
-:::

 ## Prerequisites

diff --git a/spaces-docs/howtos/self-hosted/declarative-ctps.md b/spaces-docs/howtos/self-hosted/declarative-ctps.md
index 2c3e5331b..12447b6fb 100644
--- a/spaces-docs/howtos/self-hosted/declarative-ctps.md
+++ b/spaces-docs/howtos/self-hosted/declarative-ctps.md
@@ -7,11 +7,6 @@ description: A tutorial to configure a Space with Argo to declaratively create a

 In this tutorial, you learn how to configure [Argo CD][argo-cd] to communicate with a self-hosted Space. This flow allows you to declaratively create and manage control planes from Git. Argo CD is a continuous delivery tool for Kubernetes that you can use to drive GitOps flows for your control plane infrastructure.
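As a preview of where the tutorial lands, an Argo CD `Application` that syncs `ControlPlane` manifests out of Git could look roughly like this; the repository URL, path, and destination server are placeholders rather than values from this guide:

```yaml
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: control-planes                 # hypothetical Application name
  namespace: argocd
spec:
  project: default
  source:
    repoURL: https://github.com/example-org/control-planes.git  # placeholder repo
    targetRevision: main
    path: manifests                    # directory of ControlPlane YAML in Git
  destination:
    server: https://my-space.example.com  # placeholder: the self-hosted Space API endpoint
    namespace: default                    # the control plane group
  syncPolicy:
    automated:
      prune: true                         # remove control planes deleted from Git
```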
-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments.
-
-For API specifications on ControlPlane resources and their declarative creation, see the [Spaces API Reference](../../../../../reference/apis/spaces-api/). version compatibility details, see the .
-:::

 ## Prerequisites

diff --git a/spaces-docs/howtos/self-hosted/oidc-configuration.md b/spaces-docs/howtos/self-hosted/oidc-configuration.md
index cbef4dc42..33f775422 100644
--- a/spaces-docs/howtos/self-hosted/oidc-configuration.md
+++ b/spaces-docs/howtos/self-hosted/oidc-configuration.md
@@ -15,11 +15,6 @@ This guide walks you through how to create and apply an authentication
 configuration to validate Upbound with an external identity provider. Each
 section focuses on a specific part of the configuration file.

-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments.
-
-For details on authentication and access control across versions, see the . related platform authentication features, see the [Platform manual](../../../../platform/).
-:::

 ## Creating the `AuthenticationConfiguration` file

diff --git a/spaces-docs/howtos/self-hosted/proxies-config.md b/spaces-docs/howtos/self-hosted/proxies-config.md
index 3802e4cb0..422e47088 100644
--- a/spaces-docs/howtos/self-hosted/proxies-config.md
+++ b/spaces-docs/howtos/self-hosted/proxies-config.md
@@ -4,11 +4,6 @@ sidebar_position: 20
 description: Configure Upbound within a proxied environment
 ---

-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments in proxied environments. Proxy configuration procedures are consistent across versions.
-
-For version-specific deployment considerations, see the .
-:::

diff --git a/spaces-docs/howtos/self-hosted/query-api.md b/spaces-docs/howtos/self-hosted/query-api.md
index c112e9001..3a01165dc 100644
--- a/spaces-docs/howtos/self-hosted/query-api.md
+++ b/spaces-docs/howtos/self-hosted/query-api.md
@@ -11,14 +11,6 @@ aliases:


-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+). Query API has evolved across versions:
-
-- **Cloud Spaces**: Available since v1.6 (enabled by default)
-- **Self-Hosted**: Available since v1.8 (requires manual enablement)
-
-For details on Query API availability across versions, see the .
-:::

 :::important

diff --git a/spaces-docs/howtos/self-hosted/scaling-resources.md b/spaces-docs/howtos/self-hosted/scaling-resources.md
index 7bb04d2c2..0b3a21257 100644
--- a/spaces-docs/howtos/self-hosted/scaling-resources.md
+++ b/spaces-docs/howtos/self-hosted/scaling-resources.md
@@ -11,11 +11,6 @@ In large workloads or control plane migration, you may performance
 impacting resource constraints. This guide explains how to scale vCluster and `etcd` resources for optimal performance in your self-hosted Space.

-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Scaling procedures are consistent across versions.
-
-For version-specific resource requirements and capacity planning, see the .
-:::

 ## Signs of resource constraints

diff --git a/spaces-docs/howtos/self-hosted/space-observability.md b/spaces-docs/howtos/self-hosted/space-observability.md
index 52f223f5b..e9f71c8d8 100644
--- a/spaces-docs/howtos/self-hosted/space-observability.md
+++ b/spaces-docs/howtos/self-hosted/space-observability.md
@@ -4,14 +4,6 @@ sidebar_position: 30
 description: Configure Space-level observability
 ---

-:::info API Version Information
-This guide applies to **Space v1.6.0 and later** (Self-Hosted only). Space-level observability became GA in v1.14.0.
-
-- **v1.6.0-v1.13.x**: Available as alpha (flag: `features.alpha.observability.enabled=true`)
-- **v1.14.0+**: GA (flag: `observability.enabled=true`)
-
-For details on observability evolution and related API resources, see the . control-plane observability (distinct from space-level), see the [main observability guide](../observability.md).
-:::

 :::important
 This feature is GA since `v1.14.0`, requires Spaces `v1.6.0`, and is off by default. To enable, set `observability.enabled=true` (`features.alpha.observability.enabled=true` before `v1.14.0`) when installing Spaces:
diff --git a/spaces-docs/howtos/self-hosted/spaces-management.md b/spaces-docs/howtos/self-hosted/spaces-management.md
index 3df61c306..a9290acab 100644
--- a/spaces-docs/howtos/self-hosted/spaces-management.md
+++ b/spaces-docs/howtos/self-hosted/spaces-management.md
@@ -4,11 +4,6 @@ sidebar_position: 10
 description: Common operations in Spaces
 ---

-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Spaces management procedures are consistent across versions.
-
-For version compatibility details, see the .
-:::

 ## Spaces management

diff --git a/spaces-docs/howtos/self-hosted/use-argo.md b/spaces-docs/howtos/self-hosted/use-argo.md
index 0862feb13..eff5558db 100644
--- a/spaces-docs/howtos/self-hosted/use-argo.md
+++ b/spaces-docs/howtos/self-hosted/use-argo.md
@@ -10,11 +10,6 @@ aliases:
 ---

-:::info API Version Information
-This guide covers the ArgoCD Plugin, a preview feature available in all supported versions (v1.9-v1.14+) for self-hosted deployments.
-
-For general GitOps guidance, see [Automation and GitOps Overview](/spaces/howtos/automation-and-gitops/overview/).
-:::

 :::important
 This feature is in preview and is off by default. To enable, set `features.alpha.argocdPlugin.enabled=true` when installing Spaces:
diff --git a/spaces-docs/howtos/simulations.md b/spaces-docs/howtos/simulations.md
index 26cb0e657..537906b8d 100644
--- a/spaces-docs/howtos/simulations.md
+++ b/spaces-docs/howtos/simulations.md
@@ -4,11 +4,6 @@ sidebar_position: 100
 description: Use the Up CLI to mock operations before deploying to your environments.
 ---

-:::info API Version Information
-This guide covers Simulations, available in v1.10+ (GA since v1.13). version-specific availability and features, see the .
-
-For API specifications on the Simulations CRD, see the [Spaces API Reference](../../../reference/apis/spaces-api/).
-:::

 :::important
 The Simulations feature is in private preview. more information, [reach out to Upbound][reach-out-to-upbound].
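Several of the self-hosted guides above enable features through Helm flags at install time. As a sketch, the dotted `--set` paths they quote (`features.alpha.argocdPlugin.enabled=true`, `observability.enabled=true`) map onto a values file like the following, assuming the Spaces chart nests keys exactly as the dotted paths suggest:

```yaml
# values.yaml -- equivalent of the dotted --set flags quoted above
observability:
  enabled: true                # GA flag from v1.14.0 onward
features:
  alpha:
    # observability:
    #   enabled: true          # use this alpha flag instead, before v1.14.0
    argocdPlugin:
      enabled: true            # preview Argo CD plugin
```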
diff --git a/spaces_versioned_docs/version-1.13/concepts/control-planes.md b/spaces_versioned_docs/version-1.13/concepts/control-planes.md
index 7066343de..76c6386c8 100644
--- a/spaces_versioned_docs/version-1.13/concepts/control-planes.md
+++ b/spaces_versioned_docs/version-1.13/concepts/control-planes.md
@@ -13,11 +13,6 @@ Control planes in Upbound are fully isolated Crossplane control plane instances

 This lets users focus on building their APIs and operating their control planes, while Upbound handles the rest. Each control plane has its own dedicated API server connecting users to their control plane.

-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+).
-
-For detailed information about Control Plane API specifications and CRD fields, see the [Spaces API Reference](../../../reference/apis/spaces-api/). version compatibility details, see the .
-:::

 ## Control plane architecture

diff --git a/spaces_versioned_docs/version-1.13/howtos/api-connector.md b/spaces_versioned_docs/version-1.13/howtos/api-connector.md
index a14468f52..4db30bac1 100644
--- a/spaces_versioned_docs/version-1.13/howtos/api-connector.md
+++ b/spaces_versioned_docs/version-1.13/howtos/api-connector.md
@@ -6,11 +6,6 @@ aliases:
 - /api-connector
 - /concepts/api-connector
 ---
-:::info API Version Information
-This guide covers API Connector, a preview feature available in all supported versions (v1.9-v1.14+).
-
-For related API specifications and available resources, see the [Spaces API Reference](../../../reference/apis/spaces-api/). version compatibility details, see the .
-:::

 :::warning
 API Connector is currently in **Preview**. The feature is under active
diff --git a/spaces_versioned_docs/version-1.13/howtos/auto-upgrade.md b/spaces_versioned_docs/version-1.13/howtos/auto-upgrade.md
index 249056fb4..edc50e38d 100644
--- a/spaces_versioned_docs/version-1.13/howtos/auto-upgrade.md
+++ b/spaces_versioned_docs/version-1.13/howtos/auto-upgrade.md
@@ -9,11 +9,6 @@ plan: "standard"

 Upbound Spaces can automatically upgrade the version of Upbound Crossplane in managed control plane instances. You can edit the `spec.crossplane.autoUpgrade` field in your `ControlPlane` specification with the available release channels below.

-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+). The `spec.crossplane.autoUpgrade` field has been available since v1.9.
-
-For ControlPlane API specifications, see the [Spaces API Reference](../../../reference/apis/spaces-api/). version compatibility details, see the .
-:::

 | Channel    | Description                       | Example |
 |------------|-----------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------|
diff --git a/spaces_versioned_docs/version-1.13/howtos/automation-and-gitops/overview.md b/spaces_versioned_docs/version-1.13/howtos/automation-and-gitops/overview.md
index 57eeb15fc..7af47c032 100644
--- a/spaces_versioned_docs/version-1.13/howtos/automation-and-gitops/overview.md
+++ b/spaces_versioned_docs/version-1.13/howtos/automation-and-gitops/overview.md
@@ -8,11 +8,6 @@ plan: "business"

 Automating control plane deployments with GitOps enables declarative, version-controlled infrastructure management. This section covers integrating GitOps workflows with Upbound control planes using Argo CD and related tools.

-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+). GitOps patterns are compatible across all versions of Upbound Spaces.
-
-For details on deployment models and their differences, see the [Deployment Modes](../../concepts/deployment-modes.md) concept guide. version-specific features, see the .
-:::

 ## What is GitOps?

diff --git a/spaces_versioned_docs/version-1.13/howtos/backup-and-restore.md b/spaces_versioned_docs/version-1.13/howtos/backup-and-restore.md
index 3b8d026cb..e434552ea 100644
--- a/spaces_versioned_docs/version-1.13/howtos/backup-and-restore.md
+++ b/spaces_versioned_docs/version-1.13/howtos/backup-and-restore.md
@@ -9,20 +9,6 @@ plan: "enterprise"

 Upbound's _Shared Backups_ is a built-in backup and restore feature. Shared Backups lets you configure automatic schedules for taking snapshots of your control planes. You can restore data from these backups by making new control planes. This guide explains how to use Shared Backups for disaster recovery or upgrade scenarios.

-:::info API Version Information & Available Versions
-This guide applies to **all supported versions** (v1.9-v1.15+).
-
-**Select your API version**:
-- [Spaces API v1.15 (Latest)](../../../next/reference/apis/spaces-api/)
-- [Spaces API v1.14](../../../1.14/reference/apis/spaces-api/)
-- [Spaces API v1.13](../../../1.13/reference/apis/spaces-api/)
-- [Spaces API v1.12](../../../1.12/reference/apis/spaces-api/)
-- [Spaces API v1.11](../../../1.11/reference/apis/spaces-api/)
-- [Spaces API v1.10](../../../1.10/reference/apis/spaces-api/)
-- [Spaces API v1.9](../../../1.9/reference/apis/spaces-api/)
-
-For version support policy, see . version compatibility details, see the .
-:::

 ## Benefits

diff --git a/spaces_versioned_docs/version-1.13/howtos/control-plane-topologies.md b/spaces_versioned_docs/version-1.13/howtos/control-plane-topologies.md
index 9020e5a41..11cd5efcf 100644
--- a/spaces_versioned_docs/version-1.13/howtos/control-plane-topologies.md
+++ b/spaces_versioned_docs/version-1.13/howtos/control-plane-topologies.md
@@ -4,11 +4,6 @@ sidebar_position: 15
 description: Configure scheduling of composites to remote control planes
 ---

-:::info API Version Information
-This guide is for the Control Plane Topology feature, which is in **private preview**. interested customers with access to this feature, it applies to v1.12+.
-
-For related API specifications, see the [Spaces API Reference](../../../reference/apis/spaces-api/). details on feature availability, see the .
-:::

 :::important
 This feature is in private preview for select customers in Upbound Spaces. If you're interested in this deployment mode, please [contact us](https://www.upbound.io/support/contact).
diff --git a/spaces_versioned_docs/version-1.13/howtos/ctp-connector.md b/spaces_versioned_docs/version-1.13/howtos/ctp-connector.md
index b2cc48c49..cf4a7b235 100644
--- a/spaces_versioned_docs/version-1.13/howtos/ctp-connector.md
+++ b/spaces_versioned_docs/version-1.13/howtos/ctp-connector.md
@@ -7,11 +7,6 @@ plan: "standard"



-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+). Control Plane Connector functionality is consistent across versions.
-
-For related API specifications and resources, see the [Spaces API Reference](../../../reference/apis/spaces-api/). version compatibility details, see the .
-:::

 Control Plane Connector connects arbitrary Kubernetes application clusters outside the Upbound Spaces to your control planes running in Upbound Spaces.
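Once the connector links an app cluster to a control plane, claims behave like any local Kubernetes object. With a hypothetical `Database` claim kind defined by an XRD on the control plane (the group, kind, and fields here are placeholders, not part of this guide), usage looks like:

```yaml
apiVersion: platform.example.org/v1alpha1   # placeholder XRD group
kind: Database                              # hypothetical claim kind
metadata:
  name: team-db
  namespace: default                        # claims are namespaced on the app cluster
spec:
  size: small                               # placeholder parameter from the XRD schema
```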
diff --git a/spaces_versioned_docs/version-1.13/howtos/debugging-a-ctp.md b/spaces_versioned_docs/version-1.13/howtos/debugging-a-ctp.md
index 521271e40..85a2ca688 100644
--- a/spaces_versioned_docs/version-1.13/howtos/debugging-a-ctp.md
+++ b/spaces_versioned_docs/version-1.13/howtos/debugging-a-ctp.md
@@ -6,11 +6,6 @@ description: A guide for how to debug resources on a control plane running in Up

 This guide provides troubleshooting guidance for how to identify and fix issues on a control plane.

-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+). Debugging methods are consistent across versions.
-
-For related control plane specifications, see the [Spaces API Reference](../../../reference/apis/spaces-api/). version-specific features, see the .
-:::

 ## Start from Upbound Console

diff --git a/spaces_versioned_docs/version-1.13/howtos/mcp-connector-guide.md b/spaces_versioned_docs/version-1.13/howtos/mcp-connector-guide.md
index 8a3866d07..98b64cf15 100644
--- a/spaces_versioned_docs/version-1.13/howtos/mcp-connector-guide.md
+++ b/spaces_versioned_docs/version-1.13/howtos/mcp-connector-guide.md
@@ -7,11 +7,6 @@ description: A tutorial to configure a Space with Argo to declaratively create a

 In this tutorial, you learn how to configure a Kubernetes app cluster to communicate with a control plane in an Upbound self-hosted Space.

-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Control plane connector functionality is consistent across versions.
-
-For related API specifications and resources, see the [Spaces API Reference](../../../../../reference/apis/spaces-api/). version compatibility details, see the .
-:::

 The [control plane connector][control-plane-connector] bridges your Kubernetes application clusters---running outside of Upbound--to your control planes running in Upbound. This allows you to interact with your control plane's API right from the app cluster. The claim APIs you define via `CompositeResourceDefinitions` are available alongside Kubernetes workload APIs like `Pod`. In effect, control plane connector provides the same experience as a locally installed Crossplane.
diff --git a/spaces_versioned_docs/version-1.13/howtos/migrating-to-mcps.md b/spaces_versioned_docs/version-1.13/howtos/migrating-to-mcps.md
index 93b9c5ac2..61c54e7b5 100644
--- a/spaces_versioned_docs/version-1.13/howtos/migrating-to-mcps.md
+++ b/spaces_versioned_docs/version-1.13/howtos/migrating-to-mcps.md
@@ -6,11 +6,6 @@ description: A guide to how to migrate to a control plane in Upbound

 The Upbound migration tool is a [CLI command][cli-command] that helps you migrate your existing Crossplane control plane to a control plane in Upbound. This tool works for migrating from self-managed Crossplane installations as well as between Upbound managed control planes (MCPs).

-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+). Migration procedures are consistent across versions, though you may want to upgrade to a newer version after migration to get the latest features.
-
-For version-specific features and migration considerations, see the . the full compatibility matrix, see the .
-:::

 To migrate a control plane to Upbound, you must:

diff --git a/spaces_versioned_docs/version-1.13/howtos/observability.md b/spaces_versioned_docs/version-1.13/howtos/observability.md
index 8fc5c3278..0ddd1f966 100644
--- a/spaces_versioned_docs/version-1.13/howtos/observability.md
+++ b/spaces_versioned_docs/version-1.13/howtos/observability.md
@@ -22,26 +22,6 @@ Upbound Spaces offers two levels of observability:



-:::info API Version Information & Version Selector
-This guide applies to all supported versions (v1.9-v1.15+). Observability features evolved:
-
-- **v1.9-v1.12**: SharedTelemetryConfig available (alpha until v1.11)
-- **v1.11+**: Observability promoted to stable with logs export support
-- **v1.14+**: Both space-level and control-plane observability GA
-
-**View API Reference for Your Version**:
-| Version | Status | Link |
-|---------|--------|------|
-| v1.15 | Latest | [Spaces API v1.15](../../../next/reference/apis/spaces-api/) |
-| v1.14 | Supported | [Spaces API v1.14](../../../1.14/reference/apis/spaces-api/) |
-| v1.13 | Supported | [Spaces API v1.13](../../../1.13/reference/apis/spaces-api/) |
-| v1.12 | Supported | [Spaces API v1.12](../../../1.12/reference/apis/spaces-api/) |
-| v1.11 | Supported | [Spaces API v1.11](../../../1.11/reference/apis/spaces-api/) |
-| v1.10 | Limited | [Spaces API v1.10](../../../1.10/reference/apis/spaces-api/) |
-| v1.9 | EOL | [Spaces API v1.9](../../../1.9/reference/apis/spaces-api/) |
-
-For version support policy and feature availability, see and .
-:::

 :::important
 **Space-level observability** (available since v1.6.0, GA in v1.14.0):
diff --git a/spaces_versioned_docs/version-1.13/howtos/query-api.md b/spaces_versioned_docs/version-1.13/howtos/query-api.md
index 78163de2f..c9703de55 100644
--- a/spaces_versioned_docs/version-1.13/howtos/query-api.md
+++ b/spaces_versioned_docs/version-1.13/howtos/query-api.md
@@ -9,11 +9,6 @@ description: Use the `up` CLI to query objects and resources

 Upbound's Query API allows users to inspect objects and resources within their control planes. The read-only `up alpha query` and `up alpha get` CLI commands allow you to gather information on your control planes in a fast and efficient package. These commands follow the [`kubectl` conventions][kubectl-conventions] for filtering, sorting, and retrieving information from your Space.

-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+). Query API is available in Cloud Spaces since v1.6 and Self-Hosted since v1.8.
-
-For detailed deployment procedures, see [Deploy Query API infrastructure - Self-Hosted](./self-hosted/query-api.md). version compatibility details, see the .
-:::

diff --git a/spaces_versioned_docs/version-1.13/howtos/secrets-management.md b/spaces_versioned_docs/version-1.13/howtos/secrets-management.md
index 88e730ae5..b901a7dad 100644
--- a/spaces_versioned_docs/version-1.13/howtos/secrets-management.md
+++ b/spaces_versioned_docs/version-1.13/howtos/secrets-management.md
@@ -12,11 +12,6 @@ planes as secrets in an external secret store.

 This feature is a wrapper around the [External Secrets Operator (ESO)][external-secrets-operator-eso] that pulls secrets from external vaults and distributes them across your platform.

-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+). The SharedSecretStore and SharedExternalSecret CRDs have been stable since v1.9.
-
-For API specifications and configuration details, see the [Spaces API Reference](../../../reference/apis/spaces-api/). version compatibility details, see the .
-:::

 ## Benefits

diff --git a/spaces_versioned_docs/version-1.13/howtos/self-hosted/administer-features.md b/spaces_versioned_docs/version-1.13/howtos/self-hosted/administer-features.md
index ce878014e..87923b81a 100644
--- a/spaces_versioned_docs/version-1.13/howtos/self-hosted/administer-features.md
+++ b/spaces_versioned_docs/version-1.13/howtos/self-hosted/administer-features.md
@@ -4,11 +4,6 @@ sidebar_position: 12
 description: Enable and disable features in Spaces
 ---

-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Feature flags and their availability may vary by version.
-
-For detailed feature availability across versions, see the .
-:::

 This guide shows how to enable or disable features in your self-hosted Space.

diff --git a/spaces_versioned_docs/version-1.13/howtos/self-hosted/attach-detach.md b/spaces_versioned_docs/version-1.13/howtos/self-hosted/attach-detach.md
index 1465921cf..d240f44de 100644
--- a/spaces_versioned_docs/version-1.13/howtos/self-hosted/attach-detach.md
+++ b/spaces_versioned_docs/version-1.13/howtos/self-hosted/attach-detach.md
@@ -3,11 +3,6 @@ title: Connect or disconnect a Space
 sidebar_position: 12
 description: Enable and connect self-hosted Spaces to the Upbound console
 ---
-:::info API Version Information
-This guide applies to **Spaces v1.8.0 and later** (Self-Hosted only). Connection to Upbound console requires Query API and RBAC to be enabled.
-
-For version-specific features and requirements, see the . Query API setup details, see [Deploy Query API infrastructure](./query-api.md).
-:::

 :::important
 This feature is in preview. Starting in Spaces `v1.8.0` and later, you must
diff --git a/spaces_versioned_docs/version-1.13/howtos/self-hosted/billing.md b/spaces_versioned_docs/version-1.13/howtos/self-hosted/billing.md
index 145ff9f03..9b3917d15 100644
--- a/spaces_versioned_docs/version-1.13/howtos/self-hosted/billing.md
+++ b/spaces_versioned_docs/version-1.13/howtos/self-hosted/billing.md
@@ -4,11 +4,6 @@ sidebar_position: 50
 description: A guide for how billing works in an Upbound Space
 ---

-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Billing models may evolve between versions; see Capacity Licensing for alternative models.
-
-For version-specific features and capacity-based licensing details, see the . reference specifications, see [Capacity Licensing](/spaces/howtos/self-hosted/capacity-licensing).
-:::

 Spaces are a self-hosting feature of Upbound's [flagship product][flagship-product] for platform teams to deploy control planes in their self-managed environments. You can install Spaces into any Kubernetes cluster in your own cloud account, on-premises data center, or on the edge. The pricing usage-based and requires an Upbound account and subscription. The billing unit is a `Loop`.
diff --git a/spaces_versioned_docs/version-1.13/howtos/self-hosted/configure-ha.md b/spaces_versioned_docs/version-1.13/howtos/self-hosted/configure-ha.md
index ddf36c55e..e3f9f182f 100644
--- a/spaces_versioned_docs/version-1.13/howtos/self-hosted/configure-ha.md
+++ b/spaces_versioned_docs/version-1.13/howtos/self-hosted/configure-ha.md
@@ -12,11 +12,6 @@ production operation at scale.
 Use this guide when you're ready to deploy production scaling, high availability, and monitoring in your Space.

-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments.
-
-For API specifications on ControlPlane resources and configurations, see the [Spaces API Reference](../../../../../reference/apis/spaces-api/). version compatibility details, see the .
-:::

 ## Prerequisites

diff --git a/spaces_versioned_docs/version-1.13/howtos/self-hosted/ctp-audit-logs.md b/spaces_versioned_docs/version-1.13/howtos/self-hosted/ctp-audit-logs.md
index 52f52c776..e387b2873 100644
--- a/spaces_versioned_docs/version-1.13/howtos/self-hosted/ctp-audit-logs.md
+++ b/spaces_versioned_docs/version-1.13/howtos/self-hosted/ctp-audit-logs.md
@@ -11,11 +11,6 @@ updates, and deletions of Crossplane resources.

 Control plane audit logs use observability features to collect audit logs with `SharedTelemetryConfig` and send logs to an OpenTelemetry (`OTEL`) collector.

-:::info API Version Information
-This guide applies to **Spaces v1.14.0 and later** (Self-Hosted only). Audit logging is not available in earlier versions.
-
-For API specifications on observability resources, see the [Spaces API Reference - v1.14](../../../../../1.14/reference/apis/spaces-api/). details on observability evolution across versions, see the .
-:::

 ## Prerequisites

diff --git a/spaces_versioned_docs/version-1.13/howtos/self-hosted/declarative-ctps.md b/spaces_versioned_docs/version-1.13/howtos/self-hosted/declarative-ctps.md
index 2c3e5331b..12447b6fb 100644
--- a/spaces_versioned_docs/version-1.13/howtos/self-hosted/declarative-ctps.md
+++ b/spaces_versioned_docs/version-1.13/howtos/self-hosted/declarative-ctps.md
@@ -7,11 +7,6 @@ description: A tutorial to configure a Space with Argo to declaratively create a

 In this tutorial, you learn how to configure [Argo CD][argo-cd] to communicate with a self-hosted Space. This flow allows you to declaratively create and manage control planes from Git. Argo CD is a continuous delivery tool for Kubernetes that you can use to drive GitOps flows for your control plane infrastructure.

-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments.
-
-For API specifications on ControlPlane resources and their declarative creation, see the [Spaces API Reference](../../../../../reference/apis/spaces-api/). version compatibility details, see the .
-:::

 ## Prerequisites

diff --git a/spaces_versioned_docs/version-1.13/howtos/self-hosted/dr.md b/spaces_versioned_docs/version-1.13/howtos/self-hosted/dr.md
index 67ecbfecf..9f9b9c1f8 100644
--- a/spaces_versioned_docs/version-1.13/howtos/self-hosted/dr.md
+++ b/spaces_versioned_docs/version-1.13/howtos/self-hosted/dr.md
@@ -4,14 +4,6 @@ sidebar_position: 13
 description: Configure Space-wide backups for disaster recovery.
 ---

-:::info API Version Information
-This guide applies to **Spaces v1.9.0 and later** (Self-Hosted only). Space Backups is a alpha feature enabled by default starting in v1.14.0.
-
-- **v1.9.0-v1.13.x**: Alpha feature (requires manual enablement)
-- **v1.14.0+**: GA (enabled by default)
-
-For version-specific features and backup resources, see the . control-plane backups, see [Backup and Restore](../backup-and-restore.md).
-:::

 :::important
 For Connected and Disconnected Spaces, this feature requires Spaces `v1.9.0` and, starting with `v1.14.0`, Spaces enables it by default.
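For a sense of shape, a space-wide backup schedule could be declared along these lines. This is a speculative sketch: the kind, API group, and field names are assumptions rather than schema from this patch, so check the Spaces API reference for your version:

```yaml
apiVersion: spaces.upbound.io/v1alpha1    # assumed API group/version
kind: SpaceBackupSchedule                 # assumed kind for space-wide backups
metadata:
  name: nightly
spec:
  schedule: "0 2 * * *"                   # cron expression; assumed field name
```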
diff --git a/spaces_versioned_docs/version-1.13/howtos/self-hosted/oidc-configuration.md b/spaces_versioned_docs/version-1.13/howtos/self-hosted/oidc-configuration.md
index cbef4dc42..33f775422 100644
--- a/spaces_versioned_docs/version-1.13/howtos/self-hosted/oidc-configuration.md
+++ b/spaces_versioned_docs/version-1.13/howtos/self-hosted/oidc-configuration.md
@@ -15,11 +15,6 @@ This guide walks you through how to create and apply an authentication
 configuration to validate Upbound with an external identity provider. Each
 section focuses on a specific part of the configuration file.

-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments.
-
-For details on authentication and access control across versions, see the . related platform authentication features, see the [Platform manual](../../../../platform/).
-:::

 ## Creating the `AuthenticationConfiguration` file

diff --git a/spaces_versioned_docs/version-1.13/howtos/self-hosted/proxies-config.md b/spaces_versioned_docs/version-1.13/howtos/self-hosted/proxies-config.md
index 3802e4cb0..422e47088 100644
--- a/spaces_versioned_docs/version-1.13/howtos/self-hosted/proxies-config.md
+++ b/spaces_versioned_docs/version-1.13/howtos/self-hosted/proxies-config.md
@@ -4,11 +4,6 @@ sidebar_position: 20
 description: Configure Upbound within a proxied environment
 ---

-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments in proxied environments. Proxy configuration procedures are consistent across versions.
-
-For version-specific deployment considerations, see the .
-:::

diff --git a/spaces_versioned_docs/version-1.13/howtos/self-hosted/query-api.md b/spaces_versioned_docs/version-1.13/howtos/self-hosted/query-api.md
index c112e9001..3a01165dc 100644
--- a/spaces_versioned_docs/version-1.13/howtos/self-hosted/query-api.md
+++ b/spaces_versioned_docs/version-1.13/howtos/self-hosted/query-api.md
@@ -11,14 +11,6 @@ aliases:


-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+). Query API has evolved across versions:
-
-- **Cloud Spaces**: Available since v1.6 (enabled by default)
-- **Self-Hosted**: Available since v1.8 (requires manual enablement)
-
-For details on Query API availability across versions, see the .
-:::

 :::important

diff --git a/spaces_versioned_docs/version-1.13/howtos/self-hosted/scaling-resources.md b/spaces_versioned_docs/version-1.13/howtos/self-hosted/scaling-resources.md
index 7bb04d2c2..0b3a21257 100644
--- a/spaces_versioned_docs/version-1.13/howtos/self-hosted/scaling-resources.md
+++ b/spaces_versioned_docs/version-1.13/howtos/self-hosted/scaling-resources.md
@@ -11,11 +11,6 @@ In large workloads or control plane migration, you may performance
 impacting resource constraints. This guide explains how to scale vCluster and `etcd` resources for optimal performance in your self-hosted Space.

-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Scaling procedures are consistent across versions.
-
-For version-specific resource requirements and capacity planning, see the .
-:::

 ## Signs of resource constraints

diff --git a/spaces_versioned_docs/version-1.13/howtos/self-hosted/space-observability.md b/spaces_versioned_docs/version-1.13/howtos/self-hosted/space-observability.md
index 52f223f5b..e9f71c8d8 100644
--- a/spaces_versioned_docs/version-1.13/howtos/self-hosted/space-observability.md
+++ b/spaces_versioned_docs/version-1.13/howtos/self-hosted/space-observability.md
@@ -4,14 +4,6 @@ sidebar_position: 30
 description: Configure Space-level observability
 ---

-:::info API Version Information
-This guide applies to **Space v1.6.0 and later** (Self-Hosted only). Space-level observability became GA in v1.14.0.
-
-- **v1.6.0-v1.13.x**: Available as alpha (flag: `features.alpha.observability.enabled=true`)
-- **v1.14.0+**: GA (flag: `observability.enabled=true`)
-
-For details on observability evolution and related API resources, see the . control-plane observability (distinct from space-level), see the [main observability guide](../observability.md).
-:::

 :::important
 This feature is GA since `v1.14.0`, requires Spaces `v1.6.0`, and is off by default. To enable, set `observability.enabled=true` (`features.alpha.observability.enabled=true` before `v1.14.0`) when installing Spaces:
diff --git a/spaces_versioned_docs/version-1.13/howtos/self-hosted/spaces-management.md b/spaces_versioned_docs/version-1.13/howtos/self-hosted/spaces-management.md
index 3df61c306..a9290acab 100644
--- a/spaces_versioned_docs/version-1.13/howtos/self-hosted/spaces-management.md
+++ b/spaces_versioned_docs/version-1.13/howtos/self-hosted/spaces-management.md
@@ -4,11 +4,6 @@ sidebar_position: 10
 description: Common operations in Spaces
 ---

-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Spaces management procedures are consistent across versions.
-
-For version compatibility details, see the .
-:::

 ## Spaces management

diff --git a/spaces_versioned_docs/version-1.13/howtos/self-hosted/use-argo.md b/spaces_versioned_docs/version-1.13/howtos/self-hosted/use-argo.md
index d58f7db44..eff5558db 100644
--- a/spaces_versioned_docs/version-1.13/howtos/self-hosted/use-argo.md
+++ b/spaces_versioned_docs/version-1.13/howtos/self-hosted/use-argo.md
@@ -10,11 +10,6 @@ aliases:
 ---

-:::info API Version Information
-This guide covers the ArgoCD Plugin, a preview feature available in all supported versions (v1.9-v1.14+) for self-hosted deployments.
-
-For details on GitOps patterns and related features across versions, see [Automation and GitOps Overview](/spaces/howtos/automation-and-gitops/overview/).
-:::

 :::important
 This feature is in preview and is off by default. To enable, set `features.alpha.argocdPlugin.enabled=true` when installing Spaces:
diff --git a/spaces_versioned_docs/version-1.13/howtos/simulations.md b/spaces_versioned_docs/version-1.13/howtos/simulations.md
index 26cb0e657..537906b8d 100644
--- a/spaces_versioned_docs/version-1.13/howtos/simulations.md
+++ b/spaces_versioned_docs/version-1.13/howtos/simulations.md
@@ -4,11 +4,6 @@ sidebar_position: 100
 description: Use the Up CLI to mock operations before deploying to your environments.
 ---

-:::info API Version Information
-This guide covers Simulations, available in v1.10+ (GA since v1.13). version-specific availability and features, see the .
-
-For API specifications on the Simulations CRD, see the [Spaces API Reference](../../../reference/apis/spaces-api/).
-:::

 :::important
 The Simulations feature is in private preview. more information, [reach out to Upbound][reach-out-to-upbound].
diff --git a/spaces_versioned_docs/version-1.14/concepts/control-planes.md b/spaces_versioned_docs/version-1.14/concepts/control-planes.md
index 7066343de..76c6386c8 100644
--- a/spaces_versioned_docs/version-1.14/concepts/control-planes.md
+++ b/spaces_versioned_docs/version-1.14/concepts/control-planes.md
@@ -13,11 +13,6 @@ Control planes in Upbound are fully isolated Crossplane control plane instances

 This lets users focus on building their APIs and operating their control planes, while Upbound handles the rest. Each control plane has its own dedicated API server connecting users to their control plane.

-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+).
-
-For detailed information about Control Plane API specifications and CRD fields, see the [Spaces API Reference](../../../reference/apis/spaces-api/). version compatibility details, see the .
-:::

 ## Control plane architecture

diff --git a/spaces_versioned_docs/version-1.14/howtos/api-connector.md b/spaces_versioned_docs/version-1.14/howtos/api-connector.md
index a14468f52..4db30bac1 100644
--- a/spaces_versioned_docs/version-1.14/howtos/api-connector.md
+++ b/spaces_versioned_docs/version-1.14/howtos/api-connector.md
@@ -6,11 +6,6 @@ aliases:
 - /api-connector
 - /concepts/api-connector
 ---
-:::info API Version Information
-This guide covers API Connector, a preview feature available in all supported versions (v1.9-v1.14+).
-
-For related API specifications and available resources, see the [Spaces API Reference](../../../reference/apis/spaces-api/). version compatibility details, see the .
-:::

 :::warning
 API Connector is currently in **Preview**. The feature is under active
diff --git a/spaces_versioned_docs/version-1.14/howtos/auto-upgrade.md b/spaces_versioned_docs/version-1.14/howtos/auto-upgrade.md
index 249056fb4..edc50e38d 100644
--- a/spaces_versioned_docs/version-1.14/howtos/auto-upgrade.md
+++ b/spaces_versioned_docs/version-1.14/howtos/auto-upgrade.md
@@ -9,11 +9,6 @@ plan: "standard"

 Upbound Spaces can automatically upgrade the version of Upbound Crossplane in managed control plane instances. You can edit the `spec.crossplane.autoUpgrade` field in your `ControlPlane` specification with the available release channels below.

-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+). The `spec.crossplane.autoUpgrade` field has been available since v1.9.
-
-For ControlPlane API specifications, see the [Spaces API Reference](../../../reference/apis/spaces-api/). version compatibility details, see the .
-:::

 | Channel    | Description                       | Example |
 |------------|-----------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------|
diff --git a/spaces_versioned_docs/version-1.14/howtos/automation-and-gitops/overview.md b/spaces_versioned_docs/version-1.14/howtos/automation-and-gitops/overview.md
index 57eeb15fc..7af47c032 100644
--- a/spaces_versioned_docs/version-1.14/howtos/automation-and-gitops/overview.md
+++ b/spaces_versioned_docs/version-1.14/howtos/automation-and-gitops/overview.md
@@ -8,11 +8,6 @@ plan: "business"

 Automating control plane deployments with GitOps enables declarative, version-controlled infrastructure management. This section covers integrating GitOps workflows with Upbound control planes using Argo CD and related tools.
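What lives in Git is typically the `ControlPlane` manifest itself. A minimal sketch of such a manifest follows; the `apiVersion` may differ between Spaces releases, and `Stable` stands in for one of the `spec.crossplane.autoUpgrade` release channels tabulated in the auto-upgrade guide above:

```yaml
apiVersion: spaces.upbound.io/v1beta1   # may vary by Spaces release
kind: ControlPlane
metadata:
  name: my-control-plane                # placeholder name
spec:
  crossplane:
    autoUpgrade:
      channel: Stable                   # one of the documented release channels
```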
-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+). GitOps patterns are compatible across all versions of Upbound Spaces.
-
-For details on deployment models and their differences, see the [Deployment Modes](../../concepts/deployment-modes.md) concept guide. version-specific features, see the .
-:::

 ## What is GitOps?

diff --git a/spaces_versioned_docs/version-1.14/howtos/backup-and-restore.md b/spaces_versioned_docs/version-1.14/howtos/backup-and-restore.md
index 3b8d026cb..e434552ea 100644
--- a/spaces_versioned_docs/version-1.14/howtos/backup-and-restore.md
+++ b/spaces_versioned_docs/version-1.14/howtos/backup-and-restore.md
@@ -9,20 +9,6 @@ plan: "enterprise"

 Upbound's _Shared Backups_ is a built-in backup and restore feature. Shared Backups lets you configure automatic schedules for taking snapshots of your control planes. You can restore data from these backups by making new control planes. This guide explains how to use Shared Backups for disaster recovery or upgrade scenarios.

-:::info API Version Information & Available Versions
-This guide applies to **all supported versions** (v1.9-v1.15+).
-
-**Select your API version**:
-- [Spaces API v1.15 (Latest)](../../../next/reference/apis/spaces-api/)
-- [Spaces API v1.14](../../../1.14/reference/apis/spaces-api/)
-- [Spaces API v1.13](../../../1.13/reference/apis/spaces-api/)
-- [Spaces API v1.12](../../../1.12/reference/apis/spaces-api/)
-- [Spaces API v1.11](../../../1.11/reference/apis/spaces-api/)
-- [Spaces API v1.10](../../../1.10/reference/apis/spaces-api/)
-- [Spaces API v1.9](../../../1.9/reference/apis/spaces-api/)
-
-For version support policy, see . version compatibility details, see the .
-:::

 ## Benefits

diff --git a/spaces_versioned_docs/version-1.14/howtos/control-plane-topologies.md b/spaces_versioned_docs/version-1.14/howtos/control-plane-topologies.md
index 9020e5a41..11cd5efcf 100644
--- a/spaces_versioned_docs/version-1.14/howtos/control-plane-topologies.md
+++ b/spaces_versioned_docs/version-1.14/howtos/control-plane-topologies.md
@@ -4,11 +4,6 @@ sidebar_position: 15
 description: Configure scheduling of composites to remote control planes
 ---

-:::info API Version Information
-This guide is for the Control Plane Topology feature, which is in **private preview**. interested customers with access to this feature, it applies to v1.12+.
-
-For related API specifications, see the [Spaces API Reference](../../../reference/apis/spaces-api/). details on feature availability, see the .
-:::

 :::important
 This feature is in private preview for select customers in Upbound Spaces. If you're interested in this deployment mode, please [contact us](https://www.upbound.io/support/contact).
diff --git a/spaces_versioned_docs/version-1.14/howtos/ctp-connector.md b/spaces_versioned_docs/version-1.14/howtos/ctp-connector.md
index b2cc48c49..cf4a7b235 100644
--- a/spaces_versioned_docs/version-1.14/howtos/ctp-connector.md
+++ b/spaces_versioned_docs/version-1.14/howtos/ctp-connector.md
@@ -7,11 +7,6 @@ plan: "standard"



-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+). Control Plane Connector functionality is consistent across versions.
-
-For related API specifications and resources, see the [Spaces API Reference](../../../reference/apis/spaces-api/). version compatibility details, see the .
-:::

 Control Plane Connector connects arbitrary Kubernetes application clusters outside the Upbound Spaces to your control planes running in Upbound Spaces.
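The _Shared Backups_ feature covered above is likewise driven declaratively. A control-plane backup schedule might look roughly like this speculative sketch; the kind, group, and selector fields are assumptions, not schema taken from this patch:

```yaml
apiVersion: spaces.upbound.io/v1alpha1    # assumed API group/version
kind: SharedBackupSchedule                # assumed kind
metadata:
  name: weekly
  namespace: default                      # the control plane group
spec:
  schedule: "0 3 * * 0"                   # weekly cron; assumed field name
  controlPlaneSelector:                   # assumed: which control planes to snapshot
    labelSelectors:
      - matchLabels:
          environment: production
```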
diff --git a/spaces_versioned_docs/version-1.14/howtos/debugging-a-ctp.md b/spaces_versioned_docs/version-1.14/howtos/debugging-a-ctp.md
index 521271e40..85a2ca688 100644
--- a/spaces_versioned_docs/version-1.14/howtos/debugging-a-ctp.md
+++ b/spaces_versioned_docs/version-1.14/howtos/debugging-a-ctp.md
@@ -6,11 +6,6 @@ description: A guide for how to debug resources on a control plane running in Up

 This guide provides troubleshooting guidance for how to identify and fix issues on a control plane.

-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+). Debugging methods are consistent across versions.
-
-For related control plane specifications, see the [Spaces API Reference](../../../reference/apis/spaces-api/). version-specific features, see the .
-:::

 ## Start from Upbound Console

diff --git a/spaces_versioned_docs/version-1.14/howtos/mcp-connector-guide.md b/spaces_versioned_docs/version-1.14/howtos/mcp-connector-guide.md
index 8a3866d07..98b64cf15 100644
--- a/spaces_versioned_docs/version-1.14/howtos/mcp-connector-guide.md
+++ b/spaces_versioned_docs/version-1.14/howtos/mcp-connector-guide.md
@@ -7,11 +7,6 @@ description: A tutorial to configure a Space with Argo to declaratively create a

 In this tutorial, you learn how to configure a Kubernetes app cluster to communicate with a control plane in an Upbound self-hosted Space.

-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Control plane connector functionality is consistent across versions.
-
-For related API specifications and resources, see the [Spaces API Reference](../../../../../reference/apis/spaces-api/). version compatibility details, see the .
-:::

 The [control plane connector][control-plane-connector] bridges your Kubernetes application clusters---running outside of Upbound--to your control planes running in Upbound. This allows you to interact with your control plane's API right from the app cluster. The claim APIs you define via `CompositeResourceDefinitions` are available alongside Kubernetes workload APIs like `Pod`. In effect, control plane connector provides the same experience as a locally installed Crossplane.
diff --git a/spaces_versioned_docs/version-1.14/howtos/migrating-to-mcps.md b/spaces_versioned_docs/version-1.14/howtos/migrating-to-mcps.md
index 93b9c5ac2..61c54e7b5 100644
--- a/spaces_versioned_docs/version-1.14/howtos/migrating-to-mcps.md
+++ b/spaces_versioned_docs/version-1.14/howtos/migrating-to-mcps.md
@@ -6,11 +6,6 @@ description: A guide to how to migrate to a control plane in Upbound

 The Upbound migration tool is a [CLI command][cli-command] that helps you migrate your existing Crossplane control plane to a control plane in Upbound. This tool works for migrating from self-managed Crossplane installations as well as between Upbound managed control planes (MCPs).

-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+). Migration procedures are consistent across versions, though you may want to upgrade to a newer version after migration to get the latest features.
-
-For version-specific features and migration considerations, see the . the full compatibility matrix, see the .
-:::

 To migrate a control plane to Upbound, you must:

diff --git a/spaces_versioned_docs/version-1.14/howtos/observability.md b/spaces_versioned_docs/version-1.14/howtos/observability.md
index 8fc5c3278..0ddd1f966 100644
--- a/spaces_versioned_docs/version-1.14/howtos/observability.md
+++ b/spaces_versioned_docs/version-1.14/howtos/observability.md
@@ -22,26 +22,6 @@ Upbound Spaces offers two levels of observability:



-:::info API Version Information & Version Selector
-This guide applies to all supported versions (v1.9-v1.15+). Observability features evolved:
-
-- **v1.9-v1.12**: SharedTelemetryConfig available (alpha until v1.11)
-- **v1.11+**: Observability promoted to stable with logs export support
-- **v1.14+**: Both space-level and control-plane observability GA
-
-**View API Reference for Your Version**:
-| Version | Status | Link |
-|---------|--------|------|
-| v1.15 | Latest | [Spaces API v1.15](../../../next/reference/apis/spaces-api/) |
-| v1.14 | Supported | [Spaces API v1.14](../../../1.14/reference/apis/spaces-api/) |
-| v1.13 | Supported | [Spaces API v1.13](../../../1.13/reference/apis/spaces-api/) |
-| v1.12 | Supported | [Spaces API v1.12](../../../1.12/reference/apis/spaces-api/) |
-| v1.11 | Supported | [Spaces API v1.11](../../../1.11/reference/apis/spaces-api/) |
-| v1.10 | Limited | [Spaces API v1.10](../../../1.10/reference/apis/spaces-api/) |
-| v1.9 | EOL | [Spaces API v1.9](../../../1.9/reference/apis/spaces-api/) |
-
-For version support policy and feature availability, see and .
-:::

 :::important
 **Space-level observability** (available since v1.6.0, GA in v1.14.0):
diff --git a/spaces_versioned_docs/version-1.14/howtos/query-api.md b/spaces_versioned_docs/version-1.14/howtos/query-api.md
index 78163de2f..c9703de55 100644
--- a/spaces_versioned_docs/version-1.14/howtos/query-api.md
+++ b/spaces_versioned_docs/version-1.14/howtos/query-api.md
@@ -9,11 +9,6 @@ description: Use the `up` CLI to query objects and resources

 Upbound's Query API allows users to inspect objects and resources within their control planes. The read-only `up alpha query` and `up alpha get` CLI commands allow you to gather information on your control planes in a fast and efficient package. These commands follow the [`kubectl` conventions][kubectl-conventions] for filtering, sorting, and retrieving information from your Space.

-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+). Query API is available in Cloud Spaces since v1.6 and Self-Hosted since v1.8.
-
-For detailed deployment procedures, see [Deploy Query API infrastructure - Self-Hosted](./self-hosted/query-api.md). version compatibility details, see the .
-:::

diff --git a/spaces_versioned_docs/version-1.14/howtos/secrets-management.md b/spaces_versioned_docs/version-1.14/howtos/secrets-management.md
index 88e730ae5..b901a7dad 100644
--- a/spaces_versioned_docs/version-1.14/howtos/secrets-management.md
+++ b/spaces_versioned_docs/version-1.14/howtos/secrets-management.md
@@ -12,11 +12,6 @@ planes as secrets in an external secret store.

 This feature is a wrapper around the [External Secrets Operator (ESO)][external-secrets-operator-eso] that pulls secrets from external vaults and distributes them across your platform.

-:::info API Version Information
-This guide applies to all supported versions (v1.9-v1.14+). The SharedSecretStore and SharedExternalSecret CRDs have been stable since v1.9.
- -For API specifications and configuration details, see the [Spaces API Reference](../../../reference/apis/spaces-api/). version compatibility details, see the . -::: ## Benefits diff --git a/spaces_versioned_docs/version-1.14/howtos/self-hosted/administer-features.md b/spaces_versioned_docs/version-1.14/howtos/self-hosted/administer-features.md index ce878014e..87923b81a 100644 --- a/spaces_versioned_docs/version-1.14/howtos/self-hosted/administer-features.md +++ b/spaces_versioned_docs/version-1.14/howtos/self-hosted/administer-features.md @@ -4,11 +4,6 @@ sidebar_position: 12 description: Enable and disable features in Spaces --- -:::info API Version Information -This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Feature flags and their availability may vary by version. - -For detailed feature availability across versions, see the . -::: This guide shows how to enable or disable features in your self-hosted Space. diff --git a/spaces_versioned_docs/version-1.14/howtos/self-hosted/attach-detach.md b/spaces_versioned_docs/version-1.14/howtos/self-hosted/attach-detach.md index 1465921cf..d240f44de 100644 --- a/spaces_versioned_docs/version-1.14/howtos/self-hosted/attach-detach.md +++ b/spaces_versioned_docs/version-1.14/howtos/self-hosted/attach-detach.md @@ -3,11 +3,6 @@ title: Connect or disconnect a Space sidebar_position: 12 description: Enable and connect self-hosted Spaces to the Upbound console --- -:::info API Version Information -This guide applies to **Spaces v1.8.0 and later** (Self-Hosted only). Connection to Upbound console requires Query API and RBAC to be enabled. - -For version-specific features and requirements, see the . Query API setup details, see [Deploy Query API infrastructure](./query-api.md). -::: :::important This feature is in preview. Starting in Spaces `v1.8.0` and later, you must diff --git a/spaces_versioned_docs/version-1.14/howtos/self-hosted/billing.md b/spaces_versioned_docs/version-1.14/howtos/self-hosted/billing.md index 145ff9f03..9b3917d15 100644 --- a/spaces_versioned_docs/version-1.14/howtos/self-hosted/billing.md +++ b/spaces_versioned_docs/version-1.14/howtos/self-hosted/billing.md @@ -4,11 +4,6 @@ sidebar_position: 50 description: A guide for how billing works in an Upbound Space --- -:::info API Version Information -This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Billing models may evolve between versions; see Capacity Licensing for alternative models. - -For version-specific features and capacity-based licensing details, see the . reference specifications, see [Capacity Licensing](/spaces/howtos/self-hosted/capacity-licensing). -::: Spaces are a self-hosting feature of Upbound's [flagship product][flagship-product] for platform teams to deploy control planes in their self-managed environments. You can install Spaces into any Kubernetes cluster in your own cloud account, on-premises data center, or on the edge. The pricing is usage-based and requires an Upbound account and subscription. The billing unit is a `Loop`. diff --git a/spaces_versioned_docs/version-1.14/howtos/self-hosted/configure-ha.md b/spaces_versioned_docs/version-1.14/howtos/self-hosted/configure-ha.md index ddf36c55e..e3f9f182f 100644 --- a/spaces_versioned_docs/version-1.14/howtos/self-hosted/configure-ha.md +++ b/spaces_versioned_docs/version-1.14/howtos/self-hosted/configure-ha.md @@ -12,11 +12,6 @@ production operation at scale.
Use this guide when you're ready to deploy production scaling, high availability, and monitoring in your Space. -:::info API Version Information -This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. - -For API specifications on ControlPlane resources and configurations, see the [Spaces API Reference](../../../../../reference/apis/spaces-api/). version compatibility details, see the . -::: ## Prerequisites diff --git a/spaces_versioned_docs/version-1.14/howtos/self-hosted/ctp-audit-logs.md b/spaces_versioned_docs/version-1.14/howtos/self-hosted/ctp-audit-logs.md index 52f52c776..e387b2873 100644 --- a/spaces_versioned_docs/version-1.14/howtos/self-hosted/ctp-audit-logs.md +++ b/spaces_versioned_docs/version-1.14/howtos/self-hosted/ctp-audit-logs.md @@ -11,11 +11,6 @@ updates, and deletions of Crossplane resources. Control plane audit logs use observability features to collect audit logs with `SharedTelemetryConfig` and send logs to an OpenTelemetry (`OTEL`) collector. -:::info API Version Information -This guide applies to **Spaces v1.14.0 and later** (Self-Hosted only). Audit logging is not available in earlier versions. - -For API specifications on observability resources, see the [Spaces API Reference - v1.14](../../../../../1.14/reference/apis/spaces-api/). details on observability evolution across versions, see the . -::: ## Prerequisites diff --git a/spaces_versioned_docs/version-1.14/howtos/self-hosted/declarative-ctps.md b/spaces_versioned_docs/version-1.14/howtos/self-hosted/declarative-ctps.md index 2c3e5331b..12447b6fb 100644 --- a/spaces_versioned_docs/version-1.14/howtos/self-hosted/declarative-ctps.md +++ b/spaces_versioned_docs/version-1.14/howtos/self-hosted/declarative-ctps.md @@ -7,11 +7,6 @@ description: A tutorial to configure a Space with Argo to declaratively create a In this tutorial, you learn how to configure [Argo CD][argo-cd] to communicate with a self-hosted Space. This flow allows you to declaratively create and manage control planes from Git. Argo CD is a continuous delivery tool for Kubernetes that you can use to drive GitOps flows for your control plane infrastructure. -:::info API Version Information -This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. - -For API specifications on ControlPlane resources and their declarative creation, see the [Spaces API Reference](../../../../../reference/apis/spaces-api/). version compatibility details, see the . -::: ## Prerequisites diff --git a/spaces_versioned_docs/version-1.14/howtos/self-hosted/dr.md b/spaces_versioned_docs/version-1.14/howtos/self-hosted/dr.md index 67ecbfecf..9f9b9c1f8 100644 --- a/spaces_versioned_docs/version-1.14/howtos/self-hosted/dr.md +++ b/spaces_versioned_docs/version-1.14/howtos/self-hosted/dr.md @@ -4,14 +4,6 @@ sidebar_position: 13 description: Configure Space-wide backups for disaster recovery. --- -:::info API Version Information -This guide applies to **Spaces v1.9.0 and later** (Self-Hosted only). Space Backups is a alpha feature enabled by default starting in v1.14.0. - -- **v1.9.0-v1.13.x**: Alpha feature (requires manual enablement) -- **v1.14.0+**: GA (enabled by default) - -For version-specific features and backup resources, see the . control-plane backups, see [Backup and Restore](../backup-and-restore.md). -::: :::important For Connected and Disconnected Spaces, this feature requires Spaces `v1.9.0` and, starting with `v1.14.0`, Spaces enables it by default. 
diff --git a/spaces_versioned_docs/version-1.14/howtos/self-hosted/oidc-configuration.md b/spaces_versioned_docs/version-1.14/howtos/self-hosted/oidc-configuration.md index cbef4dc42..33f775422 100644 --- a/spaces_versioned_docs/version-1.14/howtos/self-hosted/oidc-configuration.md +++ b/spaces_versioned_docs/version-1.14/howtos/self-hosted/oidc-configuration.md @@ -15,11 +15,6 @@ This guide walks you through how to create and apply an authentication configuration to validate Upbound with an external identity provider. Each section focuses on a specific part of the configuration file. -:::info API Version Information -This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. - -For details on authentication and access control across versions, see the . related platform authentication features, see the [Platform manual](../../../../platform/). -::: ## Creating the `AuthenticationConfiguration` file diff --git a/spaces_versioned_docs/version-1.14/howtos/self-hosted/proxies-config.md b/spaces_versioned_docs/version-1.14/howtos/self-hosted/proxies-config.md index 3802e4cb0..422e47088 100644 --- a/spaces_versioned_docs/version-1.14/howtos/self-hosted/proxies-config.md +++ b/spaces_versioned_docs/version-1.14/howtos/self-hosted/proxies-config.md @@ -4,11 +4,6 @@ sidebar_position: 20 description: Configure Upbound within a proxied environment --- -:::info API Version Information -This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments in proxied environments. Proxy configuration procedures are consistent across versions. - -For version-specific deployment considerations, see the . -::: diff --git a/spaces_versioned_docs/version-1.14/howtos/self-hosted/query-api.md b/spaces_versioned_docs/version-1.14/howtos/self-hosted/query-api.md index c112e9001..3a01165dc 100644 --- a/spaces_versioned_docs/version-1.14/howtos/self-hosted/query-api.md +++ b/spaces_versioned_docs/version-1.14/howtos/self-hosted/query-api.md @@ -11,14 +11,6 @@ aliases: -:::info API Version Information -This guide applies to all supported versions (v1.9-v1.14+). Query API has evolved across versions: - -- **Cloud Spaces**: Available since v1.6 (enabled by default) - **Self-Hosted**: Available since v1.8 (requires manual enablement) - -For details on Query API availability across versions, see the . -::: :::important diff --git a/spaces_versioned_docs/version-1.14/howtos/self-hosted/scaling-resources.md b/spaces_versioned_docs/version-1.14/howtos/self-hosted/scaling-resources.md index 7bb04d2c2..0b3a21257 100644 --- a/spaces_versioned_docs/version-1.14/howtos/self-hosted/scaling-resources.md +++ b/spaces_versioned_docs/version-1.14/howtos/self-hosted/scaling-resources.md @@ -11,11 +11,6 @@ In large workloads or control plane migration, you may encounter performance-impacting resource constraints. This guide explains how to scale vCluster and `etcd` resources for optimal performance in your self-hosted Space. -:::info API Version Information -This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Scaling procedures are consistent across versions. - -For version-specific resource requirements and capacity planning, see the .
-::: ## Signs of resource constraints diff --git a/spaces_versioned_docs/version-1.14/howtos/self-hosted/space-observability.md b/spaces_versioned_docs/version-1.14/howtos/self-hosted/space-observability.md index 52f223f5b..e9f71c8d8 100644 --- a/spaces_versioned_docs/version-1.14/howtos/self-hosted/space-observability.md +++ b/spaces_versioned_docs/version-1.14/howtos/self-hosted/space-observability.md @@ -4,14 +4,6 @@ sidebar_position: 30 description: Configure Space-level observability --- -:::info API Version Information -This guide applies to **Space v1.6.0 and later** (Self-Hosted only). Space-level observability became GA in v1.14.0. - -- **v1.6.0-v1.13.x**: Available as alpha (flag: `features.alpha.observability.enabled=true`) -- **v1.14.0+**: GA (flag: `observability.enabled=true`) - -For details on observability evolution and related API resources, see the . control-plane observability (distinct from space-level), see the [main observability guide](../observability.md). -::: :::important This feature is GA since `v1.14.0`, requires Spaces `v1.6.0`, and is off by default. To enable, set `observability.enabled=true` (`features.alpha.observability.enabled=true` before `v1.14.0`) when installing Spaces: diff --git a/spaces_versioned_docs/version-1.14/howtos/self-hosted/spaces-management.md b/spaces_versioned_docs/version-1.14/howtos/self-hosted/spaces-management.md index 3df61c306..a9290acab 100644 --- a/spaces_versioned_docs/version-1.14/howtos/self-hosted/spaces-management.md +++ b/spaces_versioned_docs/version-1.14/howtos/self-hosted/spaces-management.md @@ -4,11 +4,6 @@ sidebar_position: 10 description: Common operations in Spaces --- -:::info API Version Information -This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Spaces management procedures are consistent across versions. - -For version compatibility details, see the . -::: ## Spaces management diff --git a/spaces_versioned_docs/version-1.14/howtos/self-hosted/use-argo.md b/spaces_versioned_docs/version-1.14/howtos/self-hosted/use-argo.md index d58f7db44..eff5558db 100644 --- a/spaces_versioned_docs/version-1.14/howtos/self-hosted/use-argo.md +++ b/spaces_versioned_docs/version-1.14/howtos/self-hosted/use-argo.md @@ -10,11 +10,6 @@ aliases: --- -:::info API Version Information -This guide covers the ArgoCD Plugin, a preview feature available in all supported versions (v1.9-v1.14+) for self-hosted deployments. - -For details on GitOps patterns and related features across versions, see [Automation and GitOps Overview](/spaces/howtos/automation-and-gitops/overview/). -::: :::important This feature is in preview and is off by default. To enable, set `features.alpha.argocdPlugin.enabled=true` when installing Spaces: diff --git a/spaces_versioned_docs/version-1.14/howtos/simulations.md b/spaces_versioned_docs/version-1.14/howtos/simulations.md index 26cb0e657..537906b8d 100644 --- a/spaces_versioned_docs/version-1.14/howtos/simulations.md +++ b/spaces_versioned_docs/version-1.14/howtos/simulations.md @@ -4,11 +4,6 @@ sidebar_position: 100 description: Use the Up CLI to mock operations before deploying to your environments. --- -:::info API Version Information -This guide covers Simulations, available in v1.10+ (GA since v1.13). version-specific availability and features, see the . - -For API specifications on the Simulations CRD, see the [Spaces API Reference](../../../reference/apis/spaces-api/). -::: :::important The Simulations feature is in private preview. 
For more information, [reach out to Upbound][reach-out-to-upbound]. diff --git a/spaces_versioned_docs/version-1.15/concepts/control-planes.md b/spaces_versioned_docs/version-1.15/concepts/control-planes.md index 7066343de..76c6386c8 100644 --- a/spaces_versioned_docs/version-1.15/concepts/control-planes.md +++ b/spaces_versioned_docs/version-1.15/concepts/control-planes.md @@ -13,11 +13,6 @@ Control planes in Upbound are fully isolated Crossplane control plane instances This lets users focus on building their APIs and operating their control planes, while Upbound handles the rest. Each control plane has its own dedicated API server connecting users to their control plane. -:::info API Version Information -This guide applies to all supported versions (v1.9-v1.14+). - -For detailed information about Control Plane API specifications and CRD fields, see the [Spaces API Reference](../../../reference/apis/spaces-api/). version compatibility details, see the . -::: ## Control plane architecture diff --git a/spaces_versioned_docs/version-1.15/howtos/api-connector.md b/spaces_versioned_docs/version-1.15/howtos/api-connector.md index a14468f52..4db30bac1 100644 --- a/spaces_versioned_docs/version-1.15/howtos/api-connector.md +++ b/spaces_versioned_docs/version-1.15/howtos/api-connector.md @@ -6,11 +6,6 @@ aliases: - /api-connector - /concepts/api-connector --- -:::info API Version Information -This guide covers API Connector, a preview feature available in all supported versions (v1.9-v1.14+). - -For related API specifications and available resources, see the [Spaces API Reference](../../../reference/apis/spaces-api/). version compatibility details, see the . -::: :::warning API Connector is currently in **Preview**. The feature is under active diff --git a/spaces_versioned_docs/version-1.15/howtos/auto-upgrade.md b/spaces_versioned_docs/version-1.15/howtos/auto-upgrade.md index 249056fb4..edc50e38d 100644 --- a/spaces_versioned_docs/version-1.15/howtos/auto-upgrade.md +++ b/spaces_versioned_docs/version-1.15/howtos/auto-upgrade.md @@ -9,11 +9,6 @@ plan: "standard" Upbound Spaces can automatically upgrade the version of Upbound Crossplane in managed control plane instances. You can edit the `spec.crossplane.autoUpgrade` field in your `ControlPlane` specification with the available release channels below. -:::info API Version Information -This guide applies to all supported versions (v1.9-v1.14+). The `spec.crossplane.autoUpgrade` field has been available since v1.9. - -For ControlPlane API specifications, see the [Spaces API Reference](../../../reference/apis/spaces-api/). version compatibility details, see the . -::: | Channel | Description | Example | |------------|-----------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------| diff --git a/spaces_versioned_docs/version-1.15/howtos/automation-and-gitops/overview.md b/spaces_versioned_docs/version-1.15/howtos/automation-and-gitops/overview.md index 57eeb15fc..7af47c032 100644 --- a/spaces_versioned_docs/version-1.15/howtos/automation-and-gitops/overview.md +++ b/spaces_versioned_docs/version-1.15/howtos/automation-and-gitops/overview.md @@ -8,11 +8,6 @@ plan: "business" Automating control plane deployments with GitOps enables declarative, version-controlled infrastructure management. This section covers integrating GitOps workflows with Upbound control planes using Argo CD and related tools.
-:::info API Version Information -This guide applies to all supported versions (v1.9-v1.14+). GitOps patterns are compatible across all versions of Upbound Spaces. - -For details on deployment models and their differences, see the [Deployment Modes](../../concepts/deployment-modes.md) concept guide. version-specific features, see the . -::: ## What is GitOps? diff --git a/spaces_versioned_docs/version-1.15/howtos/backup-and-restore.md b/spaces_versioned_docs/version-1.15/howtos/backup-and-restore.md index 3b8d026cb..e434552ea 100644 --- a/spaces_versioned_docs/version-1.15/howtos/backup-and-restore.md +++ b/spaces_versioned_docs/version-1.15/howtos/backup-and-restore.md @@ -9,20 +9,6 @@ plan: "enterprise" Upbound's _Shared Backups_ is a built-in backup and restore feature. Shared Backups lets you configure automatic schedules for taking snapshots of your control planes. You can restore data from these backups by making new control planes. This guide explains how to use Shared Backups for disaster recovery or upgrade scenarios. -:::info API Version Information & Available Versions -This guide applies to **all supported versions** (v1.9-v1.15+). - -**Select your API version**: -- [Spaces API v1.15 (Latest)](../../../next/reference/apis/spaces-api/) -- [Spaces API v1.14](../../../1.14/reference/apis/spaces-api/) -- [Spaces API v1.13](../../../1.13/reference/apis/spaces-api/) -- [Spaces API v1.12](../../../1.12/reference/apis/spaces-api/) -- [Spaces API v1.11](../../../1.11/reference/apis/spaces-api/) -- [Spaces API v1.10](../../../1.10/reference/apis/spaces-api/) -- [Spaces API v1.9](../../../1.9/reference/apis/spaces-api/) - -For version support policy, see . version compatibility details, see the . -::: ## Benefits diff --git a/spaces_versioned_docs/version-1.15/howtos/control-plane-topologies.md b/spaces_versioned_docs/version-1.15/howtos/control-plane-topologies.md index 9020e5a41..11cd5efcf 100644 --- a/spaces_versioned_docs/version-1.15/howtos/control-plane-topologies.md +++ b/spaces_versioned_docs/version-1.15/howtos/control-plane-topologies.md @@ -4,11 +4,6 @@ sidebar_position: 15 description: Configure scheduling of composites to remote control planes --- -:::info API Version Information -This guide is for the Control Plane Topology feature, which is in **private preview**. interested customers with access to this feature, it applies to v1.12+. - -For related API specifications, see the [Spaces API Reference](../../../reference/apis/spaces-api/). details on feature availability, see the . -::: :::important This feature is in private preview for select customers in Upbound Spaces. If you're interested in this deployment mode, please [contact us](https://www.upbound.io/support/contact). diff --git a/spaces_versioned_docs/version-1.15/howtos/ctp-connector.md b/spaces_versioned_docs/version-1.15/howtos/ctp-connector.md index b2cc48c49..cf4a7b235 100644 --- a/spaces_versioned_docs/version-1.15/howtos/ctp-connector.md +++ b/spaces_versioned_docs/version-1.15/howtos/ctp-connector.md @@ -7,11 +7,6 @@ plan: "standard" -:::info API Version Information -This guide applies to all supported versions (v1.9-v1.14+). Control Plane Connector functionality is consistent across versions. - -For related API specifications and resources, see the [Spaces API Reference](../../../reference/apis/spaces-api/). version compatibility details, see the . -::: Control Plane Connector connects arbitrary Kubernetes application clusters outside the Upbound Spaces to your control planes running in Upbound Spaces. 
diff --git a/spaces_versioned_docs/version-1.15/howtos/debugging-a-ctp.md b/spaces_versioned_docs/version-1.15/howtos/debugging-a-ctp.md index 521271e40..85a2ca688 100644 --- a/spaces_versioned_docs/version-1.15/howtos/debugging-a-ctp.md +++ b/spaces_versioned_docs/version-1.15/howtos/debugging-a-ctp.md @@ -6,11 +6,6 @@ description: A guide for how to debug resources on a control plane running in Up This guide provides troubleshooting guidance for how to identify and fix issues on a control plane. -:::info API Version Information -This guide applies to all supported versions (v1.9-v1.14+). Debugging methods are consistent across versions. - -For related control plane specifications, see the [Spaces API Reference](../../../reference/apis/spaces-api/). version-specific features, see the . -::: ## Start from Upbound Console diff --git a/spaces_versioned_docs/version-1.15/howtos/mcp-connector-guide.md b/spaces_versioned_docs/version-1.15/howtos/mcp-connector-guide.md index 8a3866d07..98b64cf15 100644 --- a/spaces_versioned_docs/version-1.15/howtos/mcp-connector-guide.md +++ b/spaces_versioned_docs/version-1.15/howtos/mcp-connector-guide.md @@ -7,11 +7,6 @@ description: A tutorial to configure a Space with Argo to declaratively create a In this tutorial, you learn how to configure a Kubernetes app cluster to communicate with a control plane in an Upbound self-hosted Space. -:::info API Version Information -This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Control plane connector functionality is consistent across versions. - -For related API specifications and resources, see the [Spaces API Reference](../../../../../reference/apis/spaces-api/). version compatibility details, see the . -::: The [control plane connector][control-plane-connector] bridges your Kubernetes application clusters---running outside of Upbound--to your control planes running in Upbound. This allows you to interact with your control plane's API right from the app cluster. The claim APIs you define via `CompositeResourceDefinitions` are available alongside Kubernetes workload APIs like `Pod`. In effect, control plane connector provides the same experience as a locally installed Crossplane. diff --git a/spaces_versioned_docs/version-1.15/howtos/migrating-to-mcps.md b/spaces_versioned_docs/version-1.15/howtos/migrating-to-mcps.md index 93b9c5ac2..61c54e7b5 100644 --- a/spaces_versioned_docs/version-1.15/howtos/migrating-to-mcps.md +++ b/spaces_versioned_docs/version-1.15/howtos/migrating-to-mcps.md @@ -6,11 +6,6 @@ description: A guide to how to migrate to a control plane in Upbound The Upbound migration tool is a [CLI command][cli-command] that helps you migrate your existing Crossplane control plane to a control plane in Upbound. This tool works for migrating from self-managed Crossplane installations as well as between Upbound managed control planes (MCPs). -:::info API Version Information -This guide applies to all supported versions (v1.9-v1.14+). Migration procedures are consistent across versions, though you may want to upgrade to a newer version after migration to get the latest features. - -For version-specific features and migration considerations, see the . the full compatibility matrix, see the . 
-::: To migrate a control plane to Upbound, you must: diff --git a/spaces_versioned_docs/version-1.15/howtos/observability.md b/spaces_versioned_docs/version-1.15/howtos/observability.md index 8fc5c3278..0ddd1f966 100644 --- a/spaces_versioned_docs/version-1.15/howtos/observability.md +++ b/spaces_versioned_docs/version-1.15/howtos/observability.md @@ -22,26 +22,6 @@ Upbound Spaces offers two levels of observability: -:::info API Version Information & Version Selector -This guide applies to all supported versions (v1.9-v1.15+). Observability features evolved: - -- **v1.9-v1.12**: SharedTelemetryConfig available (alpha until v1.11) -- **v1.11+**: Observability promoted to stable with logs export support -- **v1.14+**: Both space-level and control-plane observability GA - -**View API Reference for Your Version**: -| Version | Status | Link | -|---------|--------|------| -| v1.15 | Latest | [Spaces API v1.15](../../../next/reference/apis/spaces-api/) | -| v1.14 | Supported | [Spaces API v1.14](../../../1.14/reference/apis/spaces-api/) | -| v1.13 | Supported | [Spaces API v1.13](../../../1.13/reference/apis/spaces-api/) | -| v1.12 | Supported | [Spaces API v1.12](../../../1.12/reference/apis/spaces-api/) | -| v1.11 | Supported | [Spaces API v1.11](../../../1.11/reference/apis/spaces-api/) | -| v1.10 | Limited | [Spaces API v1.10](../../../1.10/reference/apis/spaces-api/) | -| v1.9 | EOL | [Spaces API v1.9](../../../1.9/reference/apis/spaces-api/) | - -For version support policy and feature availability, see and . -::: :::important **Space-level observability** (available since v1.6.0, GA in v1.14.0): diff --git a/spaces_versioned_docs/version-1.15/howtos/query-api.md b/spaces_versioned_docs/version-1.15/howtos/query-api.md index 78163de2f..c9703de55 100644 --- a/spaces_versioned_docs/version-1.15/howtos/query-api.md +++ b/spaces_versioned_docs/version-1.15/howtos/query-api.md @@ -9,11 +9,6 @@ description: Use the `up` CLI to query objects and resources Upbound's Query API allows users to inspect objects and resources within their control planes. The read-only `up alpha query` and `up alpha get` CLI commands allow you to gather information on your control planes in a fast and efficient package. These commands follow the [`kubectl` conventions][kubectl-conventions] for filtering, sorting, and retrieving information from your Space. -:::info API Version Information -This guide applies to all supported versions (v1.9-v1.14+). Query API is available in Cloud Spaces since v1.6 and Self-Hosted since v1.8. - -For detailed deployment procedures, see [Deploy Query API infrastructure - Self-Hosted](./self-hosted/query-api.md). version compatibility details, see the . -::: diff --git a/spaces_versioned_docs/version-1.15/howtos/secrets-management.md b/spaces_versioned_docs/version-1.15/howtos/secrets-management.md index 88e730ae5..b901a7dad 100644 --- a/spaces_versioned_docs/version-1.15/howtos/secrets-management.md +++ b/spaces_versioned_docs/version-1.15/howtos/secrets-management.md @@ -12,11 +12,6 @@ planes as secrets in an external secret store. This feature is a wrapper around the [External Secrets Operator (ESO)][external-secrets-operator-eso] that pulls secrets from external vaults and distributes them across your platform. -:::info API Version Information -This guide applies to all supported versions (v1.9-v1.14+). The SharedSecretStore and SharedExternalSecret CRDs have been stable since v1.9. 
- -For API specifications and configuration details, see the [Spaces API Reference](../../../reference/apis/spaces-api/). version compatibility details, see the . -::: ## Benefits diff --git a/spaces_versioned_docs/version-1.15/howtos/self-hosted/administer-features.md b/spaces_versioned_docs/version-1.15/howtos/self-hosted/administer-features.md index ce878014e..87923b81a 100644 --- a/spaces_versioned_docs/version-1.15/howtos/self-hosted/administer-features.md +++ b/spaces_versioned_docs/version-1.15/howtos/self-hosted/administer-features.md @@ -4,11 +4,6 @@ sidebar_position: 12 description: Enable and disable features in Spaces --- -:::info API Version Information -This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Feature flags and their availability may vary by version. - -For detailed feature availability across versions, see the . -::: This guide shows how to enable or disable features in your self-hosted Space. diff --git a/spaces_versioned_docs/version-1.15/howtos/self-hosted/attach-detach.md b/spaces_versioned_docs/version-1.15/howtos/self-hosted/attach-detach.md index 1465921cf..d240f44de 100644 --- a/spaces_versioned_docs/version-1.15/howtos/self-hosted/attach-detach.md +++ b/spaces_versioned_docs/version-1.15/howtos/self-hosted/attach-detach.md @@ -3,11 +3,6 @@ title: Connect or disconnect a Space sidebar_position: 12 description: Enable and connect self-hosted Spaces to the Upbound console --- -:::info API Version Information -This guide applies to **Spaces v1.8.0 and later** (Self-Hosted only). Connection to Upbound console requires Query API and RBAC to be enabled. - -For version-specific features and requirements, see the . Query API setup details, see [Deploy Query API infrastructure](./query-api.md). -::: :::important This feature is in preview. Starting in Spaces `v1.8.0` and later, you must diff --git a/spaces_versioned_docs/version-1.15/howtos/self-hosted/billing.md b/spaces_versioned_docs/version-1.15/howtos/self-hosted/billing.md index 145ff9f03..9b3917d15 100644 --- a/spaces_versioned_docs/version-1.15/howtos/self-hosted/billing.md +++ b/spaces_versioned_docs/version-1.15/howtos/self-hosted/billing.md @@ -4,11 +4,6 @@ sidebar_position: 50 description: A guide for how billing works in an Upbound Space --- -:::info API Version Information -This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Billing models may evolve between versions; see Capacity Licensing for alternative models. - -For version-specific features and capacity-based licensing details, see the . reference specifications, see [Capacity Licensing](/spaces/howtos/self-hosted/capacity-licensing). -::: Spaces are a self-hosting feature of Upbound's [flagship product][flagship-product] for platform teams to deploy control planes in their self-managed environments. You can install Spaces into any Kubernetes cluster in your own cloud account, on-premises data center, or on the edge. The pricing is usage-based and requires an Upbound account and subscription. The billing unit is a `Loop`. diff --git a/spaces_versioned_docs/version-1.15/howtos/self-hosted/configure-ha.md b/spaces_versioned_docs/version-1.15/howtos/self-hosted/configure-ha.md index ddf36c55e..e3f9f182f 100644 --- a/spaces_versioned_docs/version-1.15/howtos/self-hosted/configure-ha.md +++ b/spaces_versioned_docs/version-1.15/howtos/self-hosted/configure-ha.md @@ -12,11 +12,6 @@ production operation at scale.
Use this guide when you're ready to deploy production scaling, high availability, and monitoring in your Space. -:::info API Version Information -This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. - -For API specifications on ControlPlane resources and configurations, see the [Spaces API Reference](../../../../../reference/apis/spaces-api/). version compatibility details, see the . -::: ## Prerequisites diff --git a/spaces_versioned_docs/version-1.15/howtos/self-hosted/ctp-audit-logs.md b/spaces_versioned_docs/version-1.15/howtos/self-hosted/ctp-audit-logs.md index 52f52c776..e387b2873 100644 --- a/spaces_versioned_docs/version-1.15/howtos/self-hosted/ctp-audit-logs.md +++ b/spaces_versioned_docs/version-1.15/howtos/self-hosted/ctp-audit-logs.md @@ -11,11 +11,6 @@ updates, and deletions of Crossplane resources. Control plane audit logs use observability features to collect audit logs with `SharedTelemetryConfig` and send logs to an OpenTelemetry (`OTEL`) collector. -:::info API Version Information -This guide applies to **Spaces v1.14.0 and later** (Self-Hosted only). Audit logging is not available in earlier versions. - -For API specifications on observability resources, see the [Spaces API Reference - v1.14](../../../../../1.14/reference/apis/spaces-api/). details on observability evolution across versions, see the . -::: ## Prerequisites diff --git a/spaces_versioned_docs/version-1.15/howtos/self-hosted/declarative-ctps.md b/spaces_versioned_docs/version-1.15/howtos/self-hosted/declarative-ctps.md index 2c3e5331b..12447b6fb 100644 --- a/spaces_versioned_docs/version-1.15/howtos/self-hosted/declarative-ctps.md +++ b/spaces_versioned_docs/version-1.15/howtos/self-hosted/declarative-ctps.md @@ -7,11 +7,6 @@ description: A tutorial to configure a Space with Argo to declaratively create a In this tutorial, you learn how to configure [Argo CD][argo-cd] to communicate with a self-hosted Space. This flow allows you to declaratively create and manage control planes from Git. Argo CD is a continuous delivery tool for Kubernetes that you can use to drive GitOps flows for your control plane infrastructure. -:::info API Version Information -This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. - -For API specifications on ControlPlane resources and their declarative creation, see the [Spaces API Reference](../../../../../reference/apis/spaces-api/). version compatibility details, see the . -::: ## Prerequisites diff --git a/spaces_versioned_docs/version-1.15/howtos/self-hosted/dr.md b/spaces_versioned_docs/version-1.15/howtos/self-hosted/dr.md index 67ecbfecf..9f9b9c1f8 100644 --- a/spaces_versioned_docs/version-1.15/howtos/self-hosted/dr.md +++ b/spaces_versioned_docs/version-1.15/howtos/self-hosted/dr.md @@ -4,14 +4,6 @@ sidebar_position: 13 description: Configure Space-wide backups for disaster recovery. --- -:::info API Version Information -This guide applies to **Spaces v1.9.0 and later** (Self-Hosted only). Space Backups is a alpha feature enabled by default starting in v1.14.0. - -- **v1.9.0-v1.13.x**: Alpha feature (requires manual enablement) -- **v1.14.0+**: GA (enabled by default) - -For version-specific features and backup resources, see the . control-plane backups, see [Backup and Restore](../backup-and-restore.md). -::: :::important For Connected and Disconnected Spaces, this feature requires Spaces `v1.9.0` and, starting with `v1.14.0`, Spaces enables it by default. 
diff --git a/spaces_versioned_docs/version-1.15/howtos/self-hosted/oidc-configuration.md b/spaces_versioned_docs/version-1.15/howtos/self-hosted/oidc-configuration.md index cbef4dc42..33f775422 100644 --- a/spaces_versioned_docs/version-1.15/howtos/self-hosted/oidc-configuration.md +++ b/spaces_versioned_docs/version-1.15/howtos/self-hosted/oidc-configuration.md @@ -15,11 +15,6 @@ This guide walks you through how to create and apply an authentication configuration to validate Upbound with an external identity provider. Each section focuses on a specific part of the configuration file. -:::info API Version Information -This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. - -For details on authentication and access control across versions, see the . related platform authentication features, see the [Platform manual](../../../../platform/). -::: ## Creating the `AuthenticationConfiguration` file diff --git a/spaces_versioned_docs/version-1.15/howtos/self-hosted/proxies-config.md b/spaces_versioned_docs/version-1.15/howtos/self-hosted/proxies-config.md index 3802e4cb0..422e47088 100644 --- a/spaces_versioned_docs/version-1.15/howtos/self-hosted/proxies-config.md +++ b/spaces_versioned_docs/version-1.15/howtos/self-hosted/proxies-config.md @@ -4,11 +4,6 @@ sidebar_position: 20 description: Configure Upbound within a proxied environment --- -:::info API Version Information -This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments in proxied environments. Proxy configuration procedures are consistent across versions. - -For version-specific deployment considerations, see the . -::: diff --git a/spaces_versioned_docs/version-1.15/howtos/self-hosted/query-api.md b/spaces_versioned_docs/version-1.15/howtos/self-hosted/query-api.md index c112e9001..3a01165dc 100644 --- a/spaces_versioned_docs/version-1.15/howtos/self-hosted/query-api.md +++ b/spaces_versioned_docs/version-1.15/howtos/self-hosted/query-api.md @@ -11,14 +11,6 @@ aliases: -:::info API Version Information -This guide applies to all supported versions (v1.9-v1.14+). Query API has evolved across versions: - -- **Cloud Spaces**: Available since v1.6 (enabled by default) - **Self-Hosted**: Available since v1.8 (requires manual enablement) - -For details on Query API availability across versions, see the . -::: :::important diff --git a/spaces_versioned_docs/version-1.15/howtos/self-hosted/scaling-resources.md b/spaces_versioned_docs/version-1.15/howtos/self-hosted/scaling-resources.md index 7bb04d2c2..0b3a21257 100644 --- a/spaces_versioned_docs/version-1.15/howtos/self-hosted/scaling-resources.md +++ b/spaces_versioned_docs/version-1.15/howtos/self-hosted/scaling-resources.md @@ -11,11 +11,6 @@ In large workloads or control plane migration, you may encounter performance-impacting resource constraints. This guide explains how to scale vCluster and `etcd` resources for optimal performance in your self-hosted Space. -:::info API Version Information -This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Scaling procedures are consistent across versions. - -For version-specific resource requirements and capacity planning, see the .
-::: ## Signs of resource constraints diff --git a/spaces_versioned_docs/version-1.15/howtos/self-hosted/space-observability.md b/spaces_versioned_docs/version-1.15/howtos/self-hosted/space-observability.md index 52f223f5b..e9f71c8d8 100644 --- a/spaces_versioned_docs/version-1.15/howtos/self-hosted/space-observability.md +++ b/spaces_versioned_docs/version-1.15/howtos/self-hosted/space-observability.md @@ -4,14 +4,6 @@ sidebar_position: 30 description: Configure Space-level observability --- -:::info API Version Information -This guide applies to **Space v1.6.0 and later** (Self-Hosted only). Space-level observability became GA in v1.14.0. - -- **v1.6.0-v1.13.x**: Available as alpha (flag: `features.alpha.observability.enabled=true`) - **v1.14.0+**: GA (flag: `observability.enabled=true`) - -For details on observability evolution and related API resources, see the . control-plane observability (distinct from space-level), see the [main observability guide](../observability.md). -::: :::important This feature is GA since `v1.14.0`, requires Spaces `v1.6.0`, and is off by default. To enable, set `observability.enabled=true` (`features.alpha.observability.enabled=true` before `v1.14.0`) when installing Spaces: diff --git a/spaces_versioned_docs/version-1.15/howtos/self-hosted/spaces-management.md b/spaces_versioned_docs/version-1.15/howtos/self-hosted/spaces-management.md index 3df61c306..a9290acab 100644 --- a/spaces_versioned_docs/version-1.15/howtos/self-hosted/spaces-management.md +++ b/spaces_versioned_docs/version-1.15/howtos/self-hosted/spaces-management.md @@ -4,11 +4,6 @@ sidebar_position: 10 description: Common operations in Spaces --- -:::info API Version Information -This guide applies to all supported versions (v1.9-v1.14+) for self-hosted deployments. Spaces management procedures are consistent across versions. - -For version compatibility details, see the . -::: ## Spaces management diff --git a/spaces_versioned_docs/version-1.15/howtos/self-hosted/use-argo.md b/spaces_versioned_docs/version-1.15/howtos/self-hosted/use-argo.md index 0862feb13..eff5558db 100644 --- a/spaces_versioned_docs/version-1.15/howtos/self-hosted/use-argo.md +++ b/spaces_versioned_docs/version-1.15/howtos/self-hosted/use-argo.md @@ -10,11 +10,6 @@ aliases: --- -:::info API Version Information -This guide covers the ArgoCD Plugin, a preview feature available in all supported versions (v1.9-v1.14+) for self-hosted deployments. - -For general GitOps guidance, see [Automation and GitOps Overview](/spaces/howtos/automation-and-gitops/overview/). -::: :::important This feature is in preview and is off by default. To enable, set `features.alpha.argocdPlugin.enabled=true` when installing Spaces: diff --git a/spaces_versioned_docs/version-1.15/howtos/simulations.md b/spaces_versioned_docs/version-1.15/howtos/simulations.md index 26cb0e657..537906b8d 100644 --- a/spaces_versioned_docs/version-1.15/howtos/simulations.md +++ b/spaces_versioned_docs/version-1.15/howtos/simulations.md @@ -4,11 +4,6 @@ sidebar_position: 100 description: Use the Up CLI to mock operations before deploying to your environments. --- -:::info API Version Information -This guide covers Simulations, available in v1.10+ (GA since v1.13). version-specific availability and features, see the . - -For API specifications on the Simulations CRD, see the [Spaces API Reference](../../../reference/apis/spaces-api/). -::: :::important The Simulations feature is in private preview. For more information, [reach out to Upbound][reach-out-to-upbound].
From 7ac9f64adf1eda8a4f00ab0022011b251787ee1a Mon Sep 17 00:00:00 2001 From: Rae Sharp Date: Wed, 28 Jan 2026 23:25:32 -0500 Subject: [PATCH 11/11] remove older version references --- spaces-docs/overview/index.md | 4 +++- spaces_versioned_docs/version-1.13/overview/index.md | 5 ++++- spaces_versioned_docs/version-1.14/overview/index.md | 5 ++++- spaces_versioned_docs/version-1.15/overview/index.md | 5 ++++- 4 files changed, 15 insertions(+), 4 deletions(-) diff --git a/spaces-docs/overview/index.md b/spaces-docs/overview/index.md index b199ea0b2..d7d9497fb 100644 --- a/spaces-docs/overview/index.md +++ b/spaces-docs/overview/index.md @@ -5,7 +5,9 @@ sidebar_position: 0 # Upbound Spaces -Welcome to the Upbound Spaces documentation. This section contains comprehensive documentation for Spaces API and Spaces operations across all supported versions (v1.9 through v1.15). +Welcome to the Upbound Spaces documentation. This section contains comprehensive +documentation for the Spaces API and Spaces operations across all supported +versions. ## Get Started diff --git a/spaces_versioned_docs/version-1.13/overview/index.md b/spaces_versioned_docs/version-1.13/overview/index.md index 7b79f6e44..143f02bec 100644 --- a/spaces_versioned_docs/version-1.13/overview/index.md +++ b/spaces_versioned_docs/version-1.13/overview/index.md @@ -5,7 +5,10 @@ sidebar_position: 0 # Upbound Spaces -Welcome to the Upbound Spaces documentation. This section contains comprehensive documentation for Spaces API and Spaces operations across all supported versions (v1.9 through v1.15). +Welcome to the Upbound Spaces documentation. This section contains comprehensive +documentation for the Spaces API and Spaces operations across all supported +versions. + ## Get Started diff --git a/spaces_versioned_docs/version-1.14/overview/index.md b/spaces_versioned_docs/version-1.14/overview/index.md index 7b79f6e44..143f02bec 100644 --- a/spaces_versioned_docs/version-1.14/overview/index.md +++ b/spaces_versioned_docs/version-1.14/overview/index.md @@ -5,7 +5,10 @@ sidebar_position: 0 # Upbound Spaces -Welcome to the Upbound Spaces documentation. This section contains comprehensive documentation for Spaces API and Spaces operations across all supported versions (v1.9 through v1.15). +Welcome to the Upbound Spaces documentation. This section contains comprehensive +documentation for the Spaces API and Spaces operations across all supported +versions. + ## Get Started diff --git a/spaces_versioned_docs/version-1.15/overview/index.md b/spaces_versioned_docs/version-1.15/overview/index.md index b199ea0b2..b982c629e 100644 --- a/spaces_versioned_docs/version-1.15/overview/index.md +++ b/spaces_versioned_docs/version-1.15/overview/index.md @@ -5,7 +5,10 @@ sidebar_position: 0 # Upbound Spaces -Welcome to the Upbound Spaces documentation. This section contains comprehensive documentation for Spaces API and Spaces operations across all supported versions (v1.9 through v1.15). +Welcome to the Upbound Spaces documentation. This section contains comprehensive +documentation for the Spaces API and Spaces operations across all supported +versions. + ## Get Started