From 662d1b6056c59391d73e45dd002e1e132c497e35 Mon Sep 17 00:00:00 2001 From: Joe Lanford Date: Fri, 30 Jan 2026 00:58:09 -0500 Subject: [PATCH 1/8] feat(olm): add lifecycle-controller and lifecycle-server Add controller that watches CatalogSources and manages lifecycle-server deployments. The server serves FBC catalog content over HTTP. Includes build targets, container image updates, and RBAC manifests. Co-Authored-By: Claude Opus 4.5 --- Makefile | 10 +- cmd/lifecycle-controller/main.go | 157 +++++ cmd/lifecycle-controller/start.go | 23 + cmd/lifecycle-controller/util.go | 23 + cmd/lifecycle-server/main.go | 125 ++++ cmd/lifecycle-server/start.go | 20 + cmd/lifecycle-server/util.go | 14 + ...lm_08-lifecycle-controller.deployment.yaml | 89 +++ ...0_50_olm_08-lifecycle-controller.rbac.yaml | 77 +++ .../0000_50_olm_09-lifecycle-server.rbac.yaml | 16 + operator-lifecycle-manager.Dockerfile | 2 + pkg/lifecycle-controller/controller.go | 620 ++++++++++++++++++ pkg/lifecycle-server/fbc.go | 83 +++ pkg/lifecycle-server/server.go | 126 ++++ 14 files changed, 1384 insertions(+), 1 deletion(-) create mode 100644 cmd/lifecycle-controller/main.go create mode 100644 cmd/lifecycle-controller/start.go create mode 100644 cmd/lifecycle-controller/util.go create mode 100644 cmd/lifecycle-server/main.go create mode 100644 cmd/lifecycle-server/start.go create mode 100644 cmd/lifecycle-server/util.go create mode 100644 manifests/0000_50_olm_08-lifecycle-controller.deployment.yaml create mode 100644 manifests/0000_50_olm_08-lifecycle-controller.rbac.yaml create mode 100644 manifests/0000_50_olm_09-lifecycle-server.rbac.yaml create mode 100644 pkg/lifecycle-controller/controller.go create mode 100644 pkg/lifecycle-server/fbc.go create mode 100644 pkg/lifecycle-server/server.go diff --git a/Makefile b/Makefile index d9739044e8..66af13bff8 100644 --- a/Makefile +++ b/Makefile @@ -31,6 +31,8 @@ COLLECT_PROFILES_CMD := $(addprefix bin/, collect-profiles) OPM := $(addprefix bin/, opm) OLM_CMDS := $(shell go list -mod=vendor $(OLM_PKG)/cmd/...) 
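+# Illustrative usage (target names defined just below): "make bin/lifecycle-controller bin/lifecycle-server"
+# builds the two new binaries directly; "make build/olm" includes them as well.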
PSM_CMD := $(addprefix bin/, psm) +LIFECYCLE_CONTROLLER_CMD := $(addprefix bin/, lifecycle-controller) +LIFECYCLE_SERVER_CMD := $(addprefix bin/, lifecycle-server) REGISTRY_CMDS := $(addprefix bin/, $(shell ls staging/operator-registry/cmd | grep -v opm)) # Default image tag for build/olm-container and build/registry-container @@ -77,7 +79,7 @@ build/registry: $(MAKE) $(REGISTRY_CMDS) $(OPM) build/olm: - $(MAKE) $(PSM_CMD) $(OLM_CMDS) $(COLLECT_PROFILES_CMD) bin/copy-content + $(MAKE) $(PSM_CMD) $(OLM_CMDS) $(COLLECT_PROFILES_CMD) bin/copy-content $(LIFECYCLE_CONTROLLER_CMD) $(LIFECYCLE_SERVER_CMD) $(OPM): version_flags=-ldflags "-X '$(REGISTRY_PKG)/cmd/opm/version.gitCommit=$(GIT_COMMIT)' -X '$(REGISTRY_PKG)/cmd/opm/version.opmVersion=$(OPM_VERSION)' -X '$(REGISTRY_PKG)/cmd/opm/version.buildDate=$(BUILD_DATE)'" $(OPM): @@ -97,6 +99,12 @@ $(PSM_CMD): FORCE $(COLLECT_PROFILES_CMD): FORCE go build $(GO_BUILD_OPTS) $(GO_BUILD_TAGS) -o $(COLLECT_PROFILES_CMD) $(ROOT_PKG)/cmd/collect-profiles +$(LIFECYCLE_CONTROLLER_CMD): FORCE + go build $(GO_BUILD_OPTS) $(GO_BUILD_TAGS) -o $(LIFECYCLE_CONTROLLER_CMD) $(ROOT_PKG)/cmd/lifecycle-controller + +$(LIFECYCLE_SERVER_CMD): FORCE + go build $(GO_BUILD_OPTS) $(GO_BUILD_TAGS) -o $(LIFECYCLE_SERVER_CMD) $(ROOT_PKG)/cmd/lifecycle-server + .PHONY: cross cross: version_flags=-X '$(REGISTRY_PKG)/cmd/opm/version.gitCommit=$(GIT_COMMIT)' -X '$(REGISTRY_PKG)/cmd/opm/version.opmVersion=$(OPM_VERSION)' -X '$(REGISTRY_PKG)/cmd/opm/version.buildDate=$(BUILD_DATE)' cross: diff --git a/cmd/lifecycle-controller/main.go b/cmd/lifecycle-controller/main.go new file mode 100644 index 0000000000..e2b0924502 --- /dev/null +++ b/cmd/lifecycle-controller/main.go @@ -0,0 +1,157 @@ +package main + +import ( + "fmt" + "net/http" + "os" + "time" + + "github.com/spf13/cobra" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/labels" + _ "k8s.io/client-go/plugin/pkg/client/auth" + + operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + "sigs.k8s.io/controller-runtime/pkg/manager" + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" + + controllers "github.com/openshift/operator-framework-olm/pkg/lifecycle-controller" +) + +const ( + defaultNamespace = "openshift-operator-lifecycle-manager" + defaultMetricsPort = "0" + defaultHealthCheckPort = ":8081" + defaultPprofPort = ":6060" + leaderElectionID = "lifecycle-controller-lock" + defaultCatalogSourceSelector = "olm.operatorframework.io/lifecycle-server=true" + + // Leader election defaults per OpenShift conventions + // https://github.com/openshift/enhancements/blob/master/CONVENTIONS.md#high-availability + defaultLeaseDuration = 137 * time.Second + defaultRenewDeadline = 107 * time.Second + defaultRetryPeriod = 26 * time.Second +) + +func main() { + cmd := newStartCmd() + + if err := cmd.Execute(); err != nil { + fmt.Fprintf(os.Stderr, "encountered an error while executing the binary: %v\n", err) + os.Exit(1) + } +} + +func run(cmd *cobra.Command, args []string) error { + namespace, err := cmd.Flags().GetString("namespace") + if err != nil { + return err + } + disableLeaderElection, err := cmd.Flags().GetBool("disable-leader-election") + if err != nil { + return err + } + healthCheckAddr, err := cmd.Flags().GetString("health") + if err != nil { + return err + } + pprofAddr, err := 
cmd.Flags().GetString("pprof") + if err != nil { + return err + } + metricsAddr, err := cmd.Flags().GetString("metrics") + if err != nil { + return err + } + catalogSourceSelectorStr, err := cmd.Flags().GetString("catalog-source-selector") + if err != nil { + return err + } + + serverImage := os.Getenv("LIFECYCLE_SERVER_IMAGE") + if serverImage == "" { + return fmt.Errorf("LIFECYCLE_SERVER_IMAGE environment variable is required") + } + + ctrl.SetLogger(zap.New(zap.UseDevMode(true))) + setupLog := ctrl.Log.WithName("setup") + + // Parse the catalog source label selector + catalogSourceSelector, err := labels.Parse(catalogSourceSelectorStr) + if err != nil { + setupLog.Error(err, "failed to parse catalog-source-selector", "selector", catalogSourceSelectorStr) + return fmt.Errorf("invalid catalog-source-selector %q: %w", catalogSourceSelectorStr, err) + } + setupLog.Info("using catalog source selector", "selector", catalogSourceSelector.String()) + + restConfig := ctrl.GetConfigOrDie() + + // Leader election timing defaults + leaseDuration := defaultLeaseDuration + renewDeadline := defaultRenewDeadline + retryPeriod := defaultRetryPeriod + + mgr, err := ctrl.NewManager(restConfig, manager.Options{ + Scheme: setupScheme(), + Metrics: metricsserver.Options{BindAddress: metricsAddr}, + LeaderElection: !disableLeaderElection, + LeaderElectionNamespace: namespace, + LeaderElectionID: leaderElectionID, + LeaseDuration: &leaseDuration, + RenewDeadline: &renewDeadline, + RetryPeriod: &retryPeriod, + HealthProbeBindAddress: healthCheckAddr, + PprofBindAddress: pprofAddr, + LeaderElectionReleaseOnCancel: true, + Cache: cache.Options{ + ByObject: map[client.Object]cache.ByObject{ + // Watch all CatalogSources cluster-wide + &operatorsv1alpha1.CatalogSource{}: {}, + // Watch all Pods cluster-wide (for catalog pods) + &corev1.Pod{}: {}, + // Watch the lifecycle-server Deployment + &appsv1.Deployment{}: {}, + }, + }, + }) + if err != nil { + setupLog.Error(err, "failed to setup manager instance") + return err + } + + if err := (&controllers.LifecycleControllerReconciler{ + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controllers").WithName("lifecycle-controller"), + Scheme: mgr.GetScheme(), + ServerImage: serverImage, + CatalogSourceSelector: catalogSourceSelector, + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "lifecycle-controller") + return err + } + + // Add health check endpoint + if err := mgr.AddHealthzCheck("healthz", func(req *http.Request) error { + return nil + }); err != nil { + setupLog.Error(err, "unable to set up health check") + return err + } + + // Set up signal handler context + ctx := ctrl.SetupSignalHandler() + + setupLog.Info("starting manager") + if err := mgr.Start(ctx); err != nil { + setupLog.Error(err, "problem running manager") + return err + } + + return nil +} diff --git a/cmd/lifecycle-controller/start.go b/cmd/lifecycle-controller/start.go new file mode 100644 index 0000000000..f2d0442e9a --- /dev/null +++ b/cmd/lifecycle-controller/start.go @@ -0,0 +1,23 @@ +package main + +import ( + "github.com/spf13/cobra" +) + +func newStartCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "start", + Short: "Start the Lifecycle Controller", + SilenceUsage: true, + RunE: run, + } + + cmd.Flags().String("namespace", defaultNamespace, "namespace where the controller runs") + cmd.Flags().String("health", defaultHealthCheckPort, "health check port") + cmd.Flags().String("pprof", defaultPprofPort, "pprof port") + 
cmd.Flags().String("metrics", defaultMetricsPort, "metrics port") + cmd.Flags().Bool("disable-leader-election", false, "disable leader election") + cmd.Flags().String("catalog-source-selector", defaultCatalogSourceSelector, "label selector for catalog sources to manage (e.g., 'olm.operatorframework.io/lifecycle-server=true')") + + return cmd +} diff --git a/cmd/lifecycle-controller/util.go b/cmd/lifecycle-controller/util.go new file mode 100644 index 0000000000..1a1b911394 --- /dev/null +++ b/cmd/lifecycle-controller/util.go @@ -0,0 +1,23 @@ +package main + +import ( + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + + operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1" +) + +func setupScheme() *runtime.Scheme { + scheme := runtime.NewScheme() + utilruntime.Must(clientgoscheme.AddToScheme(scheme)) + utilruntime.Must(appsv1.AddToScheme(scheme)) + utilruntime.Must(corev1.AddToScheme(scheme)) + utilruntime.Must(rbacv1.AddToScheme(scheme)) + utilruntime.Must(operatorsv1alpha1.AddToScheme(scheme)) + + return scheme +} diff --git a/cmd/lifecycle-server/main.go b/cmd/lifecycle-server/main.go new file mode 100644 index 0000000000..3b6c6b61d6 --- /dev/null +++ b/cmd/lifecycle-server/main.go @@ -0,0 +1,125 @@ +package main + +import ( + "context" + "fmt" + "net/http" + "os" + "os/signal" + "syscall" + "time" + + "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + + server "github.com/openshift/operator-framework-olm/pkg/lifecycle-server" +) + +const ( + defaultFBCPath = "/catalog/configs" + defaultListenAddr = ":8080" + defaultHealthAddr = ":8081" + shutdownTimeout = 10 * time.Second +) + +func main() { + cmd := newStartCmd() + + if err := cmd.Execute(); err != nil { + fmt.Fprintf(os.Stderr, "encountered an error while executing the binary: %v\n", err) + os.Exit(1) + } +} + +func run(cmd *cobra.Command, args []string) error { + fbcPath, err := cmd.Flags().GetString("fbc-path") + if err != nil { + return err + } + listenAddr, err := cmd.Flags().GetString("listen") + if err != nil { + return err + } + healthAddr, err := cmd.Flags().GetString("health") + if err != nil { + return err + } + + log := logrus.New() + log.SetFormatter(&logrus.JSONFormatter{}) + log.Info("starting lifecycle-server") + + // Load lifecycle data from FBC + log.WithField("path", fbcPath).Info("loading lifecycle data from FBC") + data, err := server.LoadLifecycleData(fbcPath) + if err != nil { + log.WithError(err).Warn("failed to load lifecycle data, starting with empty data") + data = make(server.LifecycleIndex) + } + log.WithFields(logrus.Fields{ + "blobCount": server.CountBlobs(data), + "versionCount": len(data), + "versions": server.ListVersions(data), + }).Info("loaded lifecycle data") + + // Create HTTP handler + handler := server.NewHandler(data, log) + + // Create health handler + healthHandler := http.NewServeMux() + healthHandler.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + w.Write([]byte("ok")) + }) + + // Create HTTP servers + apiServer := &http.Server{ + Addr: listenAddr, + Handler: handler, + } + healthServer := &http.Server{ + Addr: healthAddr, + Handler: healthHandler, + } + + // Start servers + errCh := make(chan error, 2) + go func() { + log.WithField("addr", listenAddr).Info("starting API server") + if err := apiServer.ListenAndServe(); err != 
nil && err != http.ErrServerClosed { + errCh <- fmt.Errorf("API server error: %w", err) + } + }() + go func() { + log.WithField("addr", healthAddr).Info("starting health server") + if err := healthServer.ListenAndServe(); err != nil && err != http.ErrServerClosed { + errCh <- fmt.Errorf("health server error: %w", err) + } + }() + + // Wait for shutdown signal + sigCh := make(chan os.Signal, 1) + signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM) + + select { + case sig := <-sigCh: + log.WithField("signal", sig).Info("received shutdown signal") + case err := <-errCh: + log.WithError(err).Error("server error") + return err + } + + // Graceful shutdown + ctx, cancel := context.WithTimeout(context.Background(), shutdownTimeout) + defer cancel() + + log.Info("shutting down servers") + if err := apiServer.Shutdown(ctx); err != nil { + log.WithError(err).Error("API server shutdown error") + } + if err := healthServer.Shutdown(ctx); err != nil { + log.WithError(err).Error("health server shutdown error") + } + + return nil +} diff --git a/cmd/lifecycle-server/start.go b/cmd/lifecycle-server/start.go new file mode 100644 index 0000000000..fea72c5103 --- /dev/null +++ b/cmd/lifecycle-server/start.go @@ -0,0 +1,20 @@ +package main + +import ( + "github.com/spf13/cobra" +) + +func newStartCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "start", + Short: "Start the Lifecycle Server", + SilenceUsage: true, + RunE: run, + } + + cmd.Flags().String("fbc-path", defaultFBCPath, "path to FBC catalog data") + cmd.Flags().String("listen", defaultListenAddr, "address to listen on for HTTP API") + cmd.Flags().String("health", defaultHealthAddr, "address to listen on for health checks") + + return cmd +} diff --git a/cmd/lifecycle-server/util.go b/cmd/lifecycle-server/util.go new file mode 100644 index 0000000000..cd6410f5f2 --- /dev/null +++ b/cmd/lifecycle-server/util.go @@ -0,0 +1,14 @@ +package main + +import ( + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" +) + +func setupScheme() *runtime.Scheme { + scheme := runtime.NewScheme() + utilruntime.Must(clientgoscheme.AddToScheme(scheme)) + + return scheme +} diff --git a/manifests/0000_50_olm_08-lifecycle-controller.deployment.yaml b/manifests/0000_50_olm_08-lifecycle-controller.deployment.yaml new file mode 100644 index 0000000000..9156d9af30 --- /dev/null +++ b/manifests/0000_50_olm_08-lifecycle-controller.deployment.yaml @@ -0,0 +1,89 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: lifecycle-controller + namespace: openshift-operator-lifecycle-manager + labels: + app: lifecycle-controller + annotations: + include.release.openshift.io/self-managed-high-availability: "true" + release.openshift.io/feature-set: "TechPreviewNoUpgrade" + capability.openshift.io/name: "OperatorLifecycleManager" +spec: + strategy: + type: Recreate + replicas: 1 + selector: + matchLabels: + app: lifecycle-controller + template: + metadata: + annotations: + target.workload.openshift.io/management: '{"effect": "PreferredDuringScheduling"}' + openshift.io/required-scc: restricted-v2 + kubectl.kubernetes.io/default-container: lifecycle-controller + labels: + app: lifecycle-controller + spec: + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + serviceAccountName: lifecycle-controller + priorityClassName: "system-cluster-critical" + containers: + - name: lifecycle-controller + securityContext: + allowPrivilegeEscalation: false + 
readOnlyRootFilesystem: true + capabilities: + drop: ["ALL"] + command: + - /bin/lifecycle-controller + args: + - start + - --catalog-source-selector=$(CATALOG_SOURCE_SELECTOR) + image: quay.io/operator-framework/olm@sha256:placeholder + imagePullPolicy: IfNotPresent + env: + - name: CATALOG_SOURCE_SELECTOR + value: "olm.operatorframework.io/lifecycle-server=true" + - name: LIFECYCLE_SERVER_IMAGE + value: quay.io/operator-framework/olm@sha256:placeholder + - name: GOMEMLIMIT + value: "5MiB" + resources: + requests: + cpu: 10m + memory: 10Mi + ports: + - containerPort: 8081 + name: health + livenessProbe: + httpGet: + path: /healthz + port: 8081 + scheme: HTTP + initialDelaySeconds: 30 + readinessProbe: + httpGet: + path: /healthz + port: 8081 + scheme: HTTP + initialDelaySeconds: 30 + terminationMessagePolicy: FallbackToLogsOnError + nodeSelector: + kubernetes.io/os: linux + node-role.kubernetes.io/control-plane: "" + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - effect: NoExecute + key: node.kubernetes.io/unreachable + operator: Exists + tolerationSeconds: 120 + - effect: NoExecute + key: node.kubernetes.io/not-ready + operator: Exists + tolerationSeconds: 120 diff --git a/manifests/0000_50_olm_08-lifecycle-controller.rbac.yaml b/manifests/0000_50_olm_08-lifecycle-controller.rbac.yaml new file mode 100644 index 0000000000..4ef2408f51 --- /dev/null +++ b/manifests/0000_50_olm_08-lifecycle-controller.rbac.yaml @@ -0,0 +1,77 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: lifecycle-controller + namespace: openshift-operator-lifecycle-manager + annotations: + include.release.openshift.io/self-managed-high-availability: "true" + release.openshift.io/feature-set: "TechPreviewNoUpgrade" + capability.openshift.io/name: "OperatorLifecycleManager" +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: operator-lifecycle-manager-lifecycle-controller + annotations: + include.release.openshift.io/self-managed-high-availability: "true" + release.openshift.io/feature-set: "TechPreviewNoUpgrade" + capability.openshift.io/name: "OperatorLifecycleManager" +rules: + # Watch CatalogSources cluster-wide + - apiGroups: ["operators.coreos.com"] + resources: ["catalogsources"] + verbs: ["get", "list", "watch"] + # Watch catalog pods cluster-wide + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list", "watch"] + # Manage lifecycle-server deployments + - apiGroups: ["apps"] + resources: ["deployments"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + # Manage lifecycle-server services + - apiGroups: [""] + resources: ["services"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + # Manage lifecycle-server serviceaccounts + - apiGroups: [""] + resources: ["serviceaccounts"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + # Manage lifecycle-server clusterrolebindings + - apiGroups: ["rbac.authorization.k8s.io"] + resources: ["clusterrolebindings"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + # Required to grant these permissions to lifecycle-server via CRB + - apiGroups: ["authentication.k8s.io"] + resources: ["tokenreviews"] + verbs: ["create"] + - apiGroups: ["authorization.k8s.io"] + resources: ["subjectaccessreviews"] + verbs: ["create"] + # Leader election + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "list", "watch", "create", "update", "delete"] + - apiGroups: [""] + 
resources: ["configmaps"] + verbs: ["get", "list", "watch", "create", "update", "delete"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: operator-lifecycle-manager-lifecycle-controller + annotations: + include.release.openshift.io/self-managed-high-availability: "true" + release.openshift.io/feature-set: "TechPreviewNoUpgrade" + capability.openshift.io/name: "OperatorLifecycleManager" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: operator-lifecycle-manager-lifecycle-controller +subjects: + - kind: ServiceAccount + name: lifecycle-controller + namespace: openshift-operator-lifecycle-manager diff --git a/manifests/0000_50_olm_09-lifecycle-server.rbac.yaml b/manifests/0000_50_olm_09-lifecycle-server.rbac.yaml new file mode 100644 index 0000000000..5417ba31c6 --- /dev/null +++ b/manifests/0000_50_olm_09-lifecycle-server.rbac.yaml @@ -0,0 +1,16 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: operator-lifecycle-manager-lifecycle-server + annotations: + include.release.openshift.io/self-managed-high-availability: "true" + release.openshift.io/feature-set: "TechPreviewNoUpgrade" + capability.openshift.io/name: "OperatorLifecycleManager" +rules: + # Required by kube-rbac-proxy for authn/authz + - apiGroups: ["authentication.k8s.io"] + resources: ["tokenreviews"] + verbs: ["create"] + - apiGroups: ["authorization.k8s.io"] + resources: ["subjectaccessreviews"] + verbs: ["create"] diff --git a/operator-lifecycle-manager.Dockerfile b/operator-lifecycle-manager.Dockerfile index f1fe671ea4..839daf5907 100644 --- a/operator-lifecycle-manager.Dockerfile +++ b/operator-lifecycle-manager.Dockerfile @@ -40,6 +40,8 @@ COPY --from=builder /build/bin/cpb /bin/cpb COPY --from=builder /build/bin/psm /bin/psm COPY --from=builder /build/bin/copy-content /bin/copy-content COPY --from=builder /tmp/build/olmv0-tests-ext.gz /usr/bin/olmv0-tests-ext.gz +COPY --from=builder /build/bin/lifecycle-controller /bin/lifecycle-controller +COPY --from=builder /build/bin/lifecycle-server /bin/lifecycle-server # This image doesn't need to run as root user. USER 1001 diff --git a/pkg/lifecycle-controller/controller.go b/pkg/lifecycle-controller/controller.go new file mode 100644 index 0000000000..059fe58ba3 --- /dev/null +++ b/pkg/lifecycle-controller/controller.go @@ -0,0 +1,620 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controllers + +import ( + "context" + "fmt" + "sort" + "strings" + + "github.com/go-logr/logr" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/utils/ptr" + + operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +const ( + catalogLabelKey = "olm.catalogSource" + fieldManager = "lifecycle-controller" + clusterRoleName = "operator-lifecycle-manager-lifecycle-server" + clusterRoleBindingName = "operator-lifecycle-manager-lifecycle-server" + lifecycleServerLabelKey = "olm.lifecycle-server" + resourceBaseName = "lifecycle-server" +) + +// LifecycleControllerReconciler reconciles CatalogSources and manages lifecycle-server resources +type LifecycleControllerReconciler struct { + client.Client + Log logr.Logger + Scheme *runtime.Scheme + ServerImage string + CatalogSourceSelector labels.Selector +} + +// Reconcile watches CatalogSources and manages lifecycle-server resources per catalog +func (r *LifecycleControllerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + log := r.Log.WithValues("catalogSource", req.NamespacedName) + + log.Info("handling reconciliation request") + defer log.Info("finished reconciliation") + + // Get the CatalogSource + var cs operatorsv1alpha1.CatalogSource + if err := r.Get(ctx, req.NamespacedName, &cs); err != nil { + if errors.IsNotFound(err) { + // CatalogSource was deleted, cleanup resources + if err := r.cleanupResources(ctx, log, req.Namespace, req.Name); err != nil { + return ctrl.Result{}, err + } + // Also reconcile the shared CRB to remove this SA + return ctrl.Result{}, r.reconcileClusterRoleBinding(ctx, log) + } + log.Error(err, "failed to get catalog source") + return ctrl.Result{}, err + } + + // Check if CatalogSource matches our selector + if !r.CatalogSourceSelector.Matches(labels.Set(cs.Labels)) { + // CatalogSource doesn't match, cleanup any existing resources + if err := r.cleanupResources(ctx, log, cs.Namespace, cs.Name); err != nil { + return ctrl.Result{}, err + } + // Also reconcile the shared CRB to remove this SA + return ctrl.Result{}, r.reconcileClusterRoleBinding(ctx, log) + } + + // Get the catalog image ref from running pod + imageRef, nodeName, err := r.getCatalogPodInfo(ctx, &cs) + if err != nil { + log.Error(err, "failed to get catalog pod info") + return ctrl.Result{}, err + } + if imageRef == "" { + log.Info("no valid image ref for catalog source, waiting for pod") + return ctrl.Result{}, nil + } + + // Ensure all resources exist for this CatalogSource + if err := r.ensureResources(ctx, log, &cs, imageRef, nodeName); err != nil { + return ctrl.Result{}, err + } + + // Reconcile the shared ClusterRoleBinding + if err := r.reconcileClusterRoleBinding(ctx, log); err != nil { + return ctrl.Result{}, err + } + + return ctrl.Result{}, nil +} + +// getCatalogPodInfo gets the image digest and node name from the catalog's running pod +func (r *LifecycleControllerReconciler) getCatalogPodInfo(ctx context.Context, cs *operatorsv1alpha1.CatalogSource) (string, string, 
error) { + var pods corev1.PodList + if err := r.List(ctx, &pods, + client.InNamespace(cs.Namespace), + client.MatchingLabels{catalogLabelKey: cs.Name}, + ); err != nil { + return "", "", err + } + + // Find a running pod with a valid digest + for i := range pods.Items { + p := &pods.Items[i] + if p.Status.Phase != corev1.PodRunning { + continue + } + digest := imageID(p) + if digest != "" { + return digest, p.Spec.NodeName, nil + } + } + + return "", "", nil +} + +// ensureResources creates or updates namespace-scoped resources for a CatalogSource +func (r *LifecycleControllerReconciler) ensureResources(ctx context.Context, log logr.Logger, cs *operatorsv1alpha1.CatalogSource, imageRef, nodeName string) error { + name := resourceName(cs.Name) + + // Apply ServiceAccount (in catalog's namespace) + sa := r.buildServiceAccount(name, cs) + if err := r.Patch(ctx, sa, client.Apply, client.FieldOwner(fieldManager), client.ForceOwnership); err != nil { + log.Error(err, "failed to apply serviceaccount") + return err + } + + // Apply Service (in catalog's namespace) + svc := r.buildService(name, cs) + if err := r.Patch(ctx, svc, client.Apply, client.FieldOwner(fieldManager), client.ForceOwnership); err != nil { + log.Error(err, "failed to apply service") + return err + } + + // Apply Deployment (in catalog's namespace) + deploy := r.buildDeployment(name, cs, imageRef, nodeName) + if err := r.Patch(ctx, deploy, client.Apply, client.FieldOwner(fieldManager), client.ForceOwnership); err != nil { + log.Error(err, "failed to apply deployment") + return err + } + + log.Info("applied resources", "name", name, "namespace", cs.Namespace, "imageRef", imageRef, "nodeName", nodeName) + return nil +} + +// reconcileClusterRoleBinding maintains a single CRB with all lifecycle-server ServiceAccounts +func (r *LifecycleControllerReconciler) reconcileClusterRoleBinding(ctx context.Context, log logr.Logger) error { + // List all matching CatalogSources + var allCatalogSources operatorsv1alpha1.CatalogSourceList + if err := r.List(ctx, &allCatalogSources); err != nil { + log.Error(err, "failed to list catalog sources for CRB reconciliation") + return err + } + + // Build subjects list from matching CatalogSources + var subjects []rbacv1.Subject + for i := range allCatalogSources.Items { + cs := &allCatalogSources.Items[i] + if !r.CatalogSourceSelector.Matches(labels.Set(cs.Labels)) { + continue + } + // Check if SA exists (only add if we've created resources for this catalog) + saName := resourceName(cs.Name) + var sa corev1.ServiceAccount + if err := r.Get(ctx, types.NamespacedName{Name: saName, Namespace: cs.Namespace}, &sa); err != nil { + if errors.IsNotFound(err) { + continue // SA doesn't exist yet, skip + } + return err + } + subjects = append(subjects, rbacv1.Subject{ + Kind: "ServiceAccount", + Name: saName, + Namespace: cs.Namespace, + }) + } + + // Sort subjects for deterministic ordering + sort.Slice(subjects, func(i, j int) bool { + if subjects[i].Namespace != subjects[j].Namespace { + return subjects[i].Namespace < subjects[j].Namespace + } + return subjects[i].Name < subjects[j].Name + }) + + // Apply the CRB + crb := &rbacv1.ClusterRoleBinding{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "rbac.authorization.k8s.io/v1", + Kind: "ClusterRoleBinding", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: clusterRoleBindingName, + Labels: map[string]string{ + lifecycleServerLabelKey: "true", + }, + }, + RoleRef: rbacv1.RoleRef{ + APIGroup: "rbac.authorization.k8s.io", + Kind: "ClusterRole", + Name: 
clusterRoleName, + }, + Subjects: subjects, + } + + if err := r.Patch(ctx, crb, client.Apply, client.FieldOwner(fieldManager), client.ForceOwnership); err != nil { + log.Error(err, "failed to apply clusterrolebinding") + return err + } + + log.Info("reconciled clusterrolebinding", "subjectCount", len(subjects)) + return nil +} + +// cleanupResources deletes namespace-scoped resources for a CatalogSource +func (r *LifecycleControllerReconciler) cleanupResources(ctx context.Context, log logr.Logger, csNamespace, csName string) error { + name := resourceName(csName) + log = log.WithValues("resourceName", name, "namespace", csNamespace) + + // Delete Deployment + deploy := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: csNamespace, + }, + } + if err := r.Delete(ctx, deploy); err != nil && !errors.IsNotFound(err) { + log.Error(err, "failed to delete deployment") + return err + } + + // Delete Service + svc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: csNamespace, + }, + } + if err := r.Delete(ctx, svc); err != nil && !errors.IsNotFound(err) { + log.Error(err, "failed to delete service") + return err + } + + // Delete ServiceAccount + sa := &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: csNamespace, + }, + } + if err := r.Delete(ctx, sa); err != nil && !errors.IsNotFound(err) { + log.Error(err, "failed to delete serviceaccount") + return err + } + + log.Info("cleaned up resources") + return nil +} + +// resourceName generates a DNS-compatible name for lifecycle-server resources +func resourceName(csName string) string { + name := fmt.Sprintf("%s-%s", csName, resourceBaseName) + name = strings.ReplaceAll(name, ".", "-") + name = strings.ReplaceAll(name, "_", "-") + if len(name) > 63 { + name = name[:63] + } + return strings.ToLower(name) +} + +// buildServiceAccount creates a ServiceAccount for a lifecycle-server +func (r *LifecycleControllerReconciler) buildServiceAccount(name string, cs *operatorsv1alpha1.CatalogSource) *corev1.ServiceAccount { + return &corev1.ServiceAccount{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "ServiceAccount", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: cs.Namespace, + Labels: map[string]string{ + lifecycleServerLabelKey: "true", + "catalog-name": cs.Name, + }, + }, + } +} + +// buildService creates a Service for a lifecycle-server +func (r *LifecycleControllerReconciler) buildService(name string, cs *operatorsv1alpha1.CatalogSource) *corev1.Service { + return &corev1.Service{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "Service", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: cs.Namespace, + Labels: map[string]string{ + lifecycleServerLabelKey: "true", + "catalog-name": cs.Name, + }, + }, + Spec: corev1.ServiceSpec{ + Selector: map[string]string{ + lifecycleServerLabelKey: "true", + "catalog-name": cs.Name, + }, + Ports: []corev1.ServicePort{ + { + Name: "https", + Port: 8443, + TargetPort: intstr.FromString("https"), + Protocol: corev1.ProtocolTCP, + }, + }, + Type: corev1.ServiceTypeClusterIP, + }, + } +} + +// buildDeployment creates a Deployment for a lifecycle-server +func (r *LifecycleControllerReconciler) buildDeployment(name string, cs *operatorsv1alpha1.CatalogSource, imageRef, nodeName string) *appsv1.Deployment { + podLabels := map[string]string{ + lifecycleServerLabelKey: "true", + "catalog-name": cs.Name, + } + + // Determine the catalog directory inside the image + catalogDir := 
"/configs" // default for standard catalog images + if cs.Spec.GrpcPodConfig != nil && cs.Spec.GrpcPodConfig.ExtractContent != nil && cs.Spec.GrpcPodConfig.ExtractContent.CatalogDir != "" { + catalogDir = cs.Spec.GrpcPodConfig.ExtractContent.CatalogDir + } + + catalogMountPath := fmt.Sprintf("/catalogs/%s/%s", cs.Namespace, cs.Name) + fbcPath := fmt.Sprintf("%s%s", catalogMountPath, catalogDir) + + return &appsv1.Deployment{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "apps/v1", + Kind: "Deployment", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: cs.Namespace, + Labels: podLabels, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: ptr.To(int32(1)), + Strategy: appsv1.DeploymentStrategy{ + Type: appsv1.RollingUpdateDeploymentStrategyType, + RollingUpdate: &appsv1.RollingUpdateDeployment{ + MaxUnavailable: ptr.To(intstr.FromInt(1)), + MaxSurge: ptr.To(intstr.FromInt(1)), + }, + }, + Selector: &metav1.LabelSelector{ + MatchLabels: podLabels, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: podLabels, + Annotations: map[string]string{ + "target.workload.openshift.io/management": `{"effect": "PreferredDuringScheduling"}`, + "openshift.io/required-scc": "restricted-v2", + "kubectl.kubernetes.io/default-container": "lifecycle-server", + }, + }, + Spec: corev1.PodSpec{ + SecurityContext: &corev1.PodSecurityContext{ + RunAsNonRoot: ptr.To(true), + SeccompProfile: &corev1.SeccompProfile{ + Type: corev1.SeccompProfileTypeRuntimeDefault, + }, + }, + ServiceAccountName: name, + PriorityClassName: "system-cluster-critical", + // Prefer scheduling on the same node as the catalog pod + Affinity: &corev1.Affinity{ + NodeAffinity: &corev1.NodeAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []corev1.PreferredSchedulingTerm{ + { + Weight: 100, + Preference: corev1.NodeSelectorTerm{ + MatchExpressions: []corev1.NodeSelectorRequirement{ + { + Key: "kubernetes.io/hostname", + Operator: corev1.NodeSelectorOpIn, + Values: []string{nodeName}, + }, + }, + }, + }, + }, + }, + }, + NodeSelector: map[string]string{ + "kubernetes.io/os": "linux", + }, + Tolerations: []corev1.Toleration{ + { + Key: "node-role.kubernetes.io/master", + Operator: corev1.TolerationOpExists, + Effect: corev1.TaintEffectNoSchedule, + }, + { + Key: "node.kubernetes.io/unreachable", + Operator: corev1.TolerationOpExists, + Effect: corev1.TaintEffectNoExecute, + TolerationSeconds: ptr.To(int64(120)), + }, + { + Key: "node.kubernetes.io/not-ready", + Operator: corev1.TolerationOpExists, + Effect: corev1.TaintEffectNoExecute, + TolerationSeconds: ptr.To(int64(120)), + }, + }, + Containers: []corev1.Container{ + { + Name: "lifecycle-server", + Image: r.ServerImage, + ImagePullPolicy: corev1.PullIfNotPresent, + Command: []string{"/bin/lifecycle-server"}, + Args: []string{"start", fmt.Sprintf("--fbc-path=%s", fbcPath)}, + Env: []corev1.EnvVar{ + { + Name: "GOMEMLIMIT", + Value: "50MiB", + }, + }, + Ports: []corev1.ContainerPort{ + { + Name: "http", + ContainerPort: 8080, + }, + { + Name: "health", + ContainerPort: 8081, + }, + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "catalog", + MountPath: catalogMountPath, + ReadOnly: true, + }, + }, + LivenessProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/healthz", + Port: intstr.FromInt(8081), + Scheme: corev1.URISchemeHTTP, + }, + }, + InitialDelaySeconds: 30, + }, + ReadinessProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: 
"/healthz", + Port: intstr.FromInt(8081), + Scheme: corev1.URISchemeHTTP, + }, + }, + InitialDelaySeconds: 30, + }, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("10m"), + corev1.ResourceMemory: resource.MustParse("50Mi"), + }, + }, + SecurityContext: &corev1.SecurityContext{ + AllowPrivilegeEscalation: ptr.To(false), + ReadOnlyRootFilesystem: ptr.To(true), + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, + }, + TerminationMessagePolicy: corev1.TerminationMessageFallbackToLogsOnError, + }, + { + Name: "kube-rbac-proxy", + Image: "quay.io/brancz/kube-rbac-proxy:v0.18.0", + ImagePullPolicy: corev1.PullIfNotPresent, + Args: []string{ + "--upstream=http://127.0.0.1:8080/", + "--secure-listen-address=0.0.0.0:8443", + }, + Ports: []corev1.ContainerPort{ + { + Name: "https", + ContainerPort: 8443, + }, + }, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("10m"), + corev1.ResourceMemory: resource.MustParse("20Mi"), + }, + }, + SecurityContext: &corev1.SecurityContext{ + AllowPrivilegeEscalation: ptr.To(false), + ReadOnlyRootFilesystem: ptr.To(true), + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, + }, + TerminationMessagePolicy: corev1.TerminationMessageFallbackToLogsOnError, + }, + }, + Volumes: []corev1.Volume{ + { + Name: "catalog", + VolumeSource: corev1.VolumeSource{ + Image: &corev1.ImageVolumeSource{ + Reference: imageRef, + PullPolicy: corev1.PullIfNotPresent, + }, + }, + }, + }, + }, + }, + }, + } +} + +// imageID extracts digest from pod status (handles extract-content mode) +func imageID(pod *corev1.Pod) string { + if len(pod.Status.InitContainerStatuses) == 2 { + // Extract content mode: use init container [1] + return pod.Status.InitContainerStatuses[1].ImageID + } + if len(pod.Status.ContainerStatuses) > 0 { + return pod.Status.ContainerStatuses[0].ImageID + } + return "" +} + +// SetupWithManager sets up the controller with the Manager +func (r *LifecycleControllerReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&operatorsv1alpha1.CatalogSource{}). + // Watch Pods to detect catalog pod changes + Watches(&corev1.Pod{}, handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request { + pod, ok := obj.(*corev1.Pod) + if !ok { + return nil + } + // Check if this is a catalog pod + catalogName, ok := pod.Labels[catalogLabelKey] + if !ok { + return nil + } + // Enqueue the CatalogSource for reconciliation + return []reconcile.Request{ + { + NamespacedName: types.NamespacedName{ + Name: catalogName, + Namespace: pod.Namespace, + }, + }, + } + })). + // Watch lifecycle-server Deployments to detect changes/deletion + Watches(&appsv1.Deployment{}, handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request { + deploy, ok := obj.(*appsv1.Deployment) + if !ok { + return nil + } + // Only watch our deployments + if deploy.Labels[lifecycleServerLabelKey] != "true" { + return nil + } + csName := deploy.Labels["catalog-name"] + if csName == "" { + return nil + } + return []reconcile.Request{ + { + NamespacedName: types.NamespacedName{ + Name: csName, + Namespace: deploy.Namespace, + }, + }, + } + })). 
+		Complete(r)
+}
diff --git a/pkg/lifecycle-server/fbc.go b/pkg/lifecycle-server/fbc.go
new file mode 100644
index 0000000000..5ef0306486
--- /dev/null
+++ b/pkg/lifecycle-server/fbc.go
@@ -0,0 +1,83 @@
+/*
+Copyright 2025.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package server
+
+import (
+	"context"
+	"encoding/json"
+	"os"
+	"regexp"
+	"sync"
+
+	"github.com/operator-framework/operator-registry/alpha/declcfg"
+)
+
+// schemaVersionRegex matches lifecycle schema versions like v1, v1alpha1, v2beta3, etc.
+// Matches: v1, v1alpha1, v1beta1, v200beta300
+// Does not match: 1, v0, v1beta0
+var schemaVersionRegex = regexp.MustCompile(`^io\.openshift\.operators\.lifecycles\.(v[1-9][0-9]*(?:(?:alpha|beta)[1-9][0-9]*)?)$`)
+
+// LifecycleIndex maps schema version -> package name -> raw JSON blob
+type LifecycleIndex map[string]map[string]json.RawMessage
+
+// LoadLifecycleData loads lifecycle blobs from FBC files at the given path
+func LoadLifecycleData(fbcPath string) (LifecycleIndex, error) {
+	result := make(LifecycleIndex)
+	var mu sync.Mutex
+
+	// Check if path exists
+	if _, err := os.Stat(fbcPath); os.IsNotExist(err) {
+		return result, nil
+	}
+
+	root := os.DirFS(fbcPath)
+	err := declcfg.WalkMetasFS(context.Background(), root, func(path string, meta *declcfg.Meta, err error) error {
+		if err != nil {
+			return nil // Skip errors, continue walking
+		}
+		if meta == nil {
+			return nil
+		}
+
+		// Check if schema matches our pattern
+		matches := schemaVersionRegex.FindStringSubmatch(meta.Schema)
+		if matches == nil {
+			return nil
+		}
+		schemaVersion := matches[1] // e.g., "v1alpha1"
+
+		if meta.Package == "" {
+			return nil
+		}
+
+		// Store in index (thread-safe)
+		mu.Lock()
+		if result[schemaVersion] == nil {
+			result[schemaVersion] = make(map[string]json.RawMessage)
+		}
+		result[schemaVersion][meta.Package] = meta.Blob
+		mu.Unlock()
+
+		return nil
+	})
+
+	if err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
diff --git a/pkg/lifecycle-server/server.go b/pkg/lifecycle-server/server.go
new file mode 100644
index 0000000000..69b20b9b66
--- /dev/null
+++ b/pkg/lifecycle-server/server.go
@@ -0,0 +1,126 @@
+/*
+Copyright 2025.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package server
+
+import (
+	"net/http"
+	"regexp"
+
+	"github.com/sirupsen/logrus"
+)
+
+const apiPrefix = "/api/"
+
+// pathRegex matches /api/<version>/lifecycles/<package>.json
+// version pattern: v[1-9][0-9]*((?:alpha|beta)[1-9][0-9]*)?
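+// Example request (the package name is illustrative): GET /api/v1alpha1/lifecycles/example-operator.json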
+// Matches: v1, v1alpha1, v1beta1, v200beta300
+// Does not match: 1, v0, v1beta0
+var pathRegex = regexp.MustCompile(`^/api/(v[1-9][0-9]*(?:(?:alpha|beta)[1-9][0-9]*)?)/lifecycles/([^/]+)\.json$`)
+
+// NewHandler creates a new HTTP handler for the lifecycle API
+func NewHandler(data LifecycleIndex, log *logrus.Logger) http.Handler {
+	mux := http.NewServeMux()
+
+	// Handle GET /api/<version>/lifecycles/<package>.json
+	mux.HandleFunc(apiPrefix, func(w http.ResponseWriter, r *http.Request) {
+		if r.Method != http.MethodGet {
+			http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
+			return
+		}
+
+		// If no lifecycle data is available, return 503 Service Unavailable
+		if len(data) == 0 {
+			log.Debug("no lifecycle data available, returning 503")
+			http.Error(w, "No lifecycle data available", http.StatusServiceUnavailable)
+			return
+		}
+
+		// Parse the path
+		matches := pathRegex.FindStringSubmatch(r.URL.Path)
+		if matches == nil {
+			http.NotFound(w, r)
+			return
+		}
+
+		version := matches[1] // e.g., "v1alpha1"
+		pkg := matches[2]     // package name
+
+		// Look up version in index
+		versionData, ok := data[version]
+		if !ok {
+			log.WithFields(logrus.Fields{
+				"version": version,
+				"package": pkg,
+			}).Debug("version not found")
+			http.NotFound(w, r)
+			return
+		}
+
+		// Look up package in version
+		rawJSON, ok := versionData[pkg]
+		if !ok {
+			log.WithFields(logrus.Fields{
+				"version": version,
+				"package": pkg,
+			}).Debug("package not found")
+			http.NotFound(w, r)
+			return
+		}
+
+		log.WithFields(logrus.Fields{
+			"version": version,
+			"package": pkg,
+		}).Debug("returning lifecycle data")
+
+		w.Header().Set("Content-Type", "application/json")
+		w.Write(rawJSON)
+	})
+
+	// Redirect bare /api requests to /api/ (ServeMux matches "/api" exactly)
+	mux.HandleFunc("/api", func(w http.ResponseWriter, r *http.Request) {
+		if r.Method != http.MethodGet {
+			http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
+			return
+		}
+
+		// Redirect /api to /api/
+		if r.URL.Path == "/api" {
+			http.Redirect(w, r, "/api/", http.StatusMovedPermanently)
+			return
+		}
+	})
+
+	return mux
+}
+
+// CountBlobs returns the total number of blobs in the index
+func CountBlobs(index LifecycleIndex) int {
+	count := 0
+	for _, packages := range index {
+		count += len(packages)
+	}
+	return count
+}
+
+// ListVersions returns the list of versions available in the index
+func ListVersions(index LifecycleIndex) []string {
+	versions := make([]string, 0, len(index))
+	for v := range index {
+		versions = append(versions, v)
+	}
+	return versions
+}
From a0e797e3557a0bc3a7bb1d38a8d1bc33804fb6ca Mon Sep 17 00:00:00 2001
From: Joe Lanford
Date: Fri, 30 Jan 2026 15:37:07 -0500
Subject: [PATCH 2/8] refactor(olm): restructure lifecycle-controller and
 adopt OpenShift conventions

Split the binary into a root command with a start subcommand, switch
logging to klog, serve metrics over TLS with authentication and
authorization filters, read the TLS security profile from the cluster
APIServer resource (exiting to restart when it changes), and scope the
informer caches with label and field selectors.

---
 cmd/lifecycle-controller/main.go              | 148 +------
 cmd/lifecycle-controller/start.go             | 378 +++++++++++++++++-
 cmd/lifecycle-controller/util.go              |   8 +-
 cmd/lifecycle-server/main.go                  | 110 -----
 cmd/lifecycle-server/start.go                 | 215 +++++++++-
 ...lm_08-lifecycle-controller.deployment.yaml |  17 +-
 pkg/lifecycle-controller/controller.go        | 196 +++++----
 pkg/lifecycle-server/fbc.go                   |   7 +-
 pkg/lifecycle-server/server.go                |  66 +--
 9 files changed, 737 insertions(+), 408 deletions(-)

diff --git a/cmd/lifecycle-controller/main.go b/cmd/lifecycle-controller/main.go
index e2b0924502..5cfe101a07 100644
--- a/cmd/lifecycle-controller/main.go
+++ b/cmd/lifecycle-controller/main.go
@@ -2,156 +2,22 @@ package main
 
 import (
 	"fmt"
-	"net/http"
 	"os"
-	"time"
 
 	"github.com/spf13/cobra"
-
-	appsv1 "k8s.io/api/apps/v1"
-	corev1 "k8s.io/api/core/v1"
-	
"k8s.io/apimachinery/pkg/labels" _ "k8s.io/client-go/plugin/pkg/client/auth" - - operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/cache" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/log/zap" - "sigs.k8s.io/controller-runtime/pkg/manager" - metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" - - controllers "github.com/openshift/operator-framework-olm/pkg/lifecycle-controller" -) - -const ( - defaultNamespace = "openshift-operator-lifecycle-manager" - defaultMetricsPort = "0" - defaultHealthCheckPort = ":8081" - defaultPprofPort = ":6060" - leaderElectionID = "lifecycle-controller-lock" - defaultCatalogSourceSelector = "olm.operatorframework.io/lifecycle-server=true" - - // Leader election defaults per OpenShift conventions - // https://github.com/openshift/enhancements/blob/master/CONVENTIONS.md#high-availability - defaultLeaseDuration = 137 * time.Second - defaultRenewDeadline = 107 * time.Second - defaultRetryPeriod = 26 * time.Second ) func main() { - cmd := newStartCmd() - - if err := cmd.Execute(); err != nil { - fmt.Fprintf(os.Stderr, "encountered an error while executing the binary: %v\n", err) - os.Exit(1) + rootCmd := &cobra.Command{ + Use: "lifecycle-controller", + Short: "Lifecycle Controller for OLM", } -} -func run(cmd *cobra.Command, args []string) error { - namespace, err := cmd.Flags().GetString("namespace") - if err != nil { - return err - } - disableLeaderElection, err := cmd.Flags().GetBool("disable-leader-election") - if err != nil { - return err - } - healthCheckAddr, err := cmd.Flags().GetString("health") - if err != nil { - return err - } - pprofAddr, err := cmd.Flags().GetString("pprof") - if err != nil { - return err - } - metricsAddr, err := cmd.Flags().GetString("metrics") - if err != nil { - return err - } - catalogSourceSelectorStr, err := cmd.Flags().GetString("catalog-source-selector") - if err != nil { - return err - } + rootCmd.AddCommand(newStartCmd()) - serverImage := os.Getenv("LIFECYCLE_SERVER_IMAGE") - if serverImage == "" { - return fmt.Errorf("LIFECYCLE_SERVER_IMAGE environment variable is required") - } - - ctrl.SetLogger(zap.New(zap.UseDevMode(true))) - setupLog := ctrl.Log.WithName("setup") - - // Parse the catalog source label selector - catalogSourceSelector, err := labels.Parse(catalogSourceSelectorStr) - if err != nil { - setupLog.Error(err, "failed to parse catalog-source-selector", "selector", catalogSourceSelectorStr) - return fmt.Errorf("invalid catalog-source-selector %q: %w", catalogSourceSelectorStr, err) - } - setupLog.Info("using catalog source selector", "selector", catalogSourceSelector.String()) - - restConfig := ctrl.GetConfigOrDie() - - // Leader election timing defaults - leaseDuration := defaultLeaseDuration - renewDeadline := defaultRenewDeadline - retryPeriod := defaultRetryPeriod - - mgr, err := ctrl.NewManager(restConfig, manager.Options{ - Scheme: setupScheme(), - Metrics: metricsserver.Options{BindAddress: metricsAddr}, - LeaderElection: !disableLeaderElection, - LeaderElectionNamespace: namespace, - LeaderElectionID: leaderElectionID, - LeaseDuration: &leaseDuration, - RenewDeadline: &renewDeadline, - RetryPeriod: &retryPeriod, - HealthProbeBindAddress: healthCheckAddr, - PprofBindAddress: pprofAddr, - LeaderElectionReleaseOnCancel: true, - Cache: cache.Options{ - ByObject: map[client.Object]cache.ByObject{ - // Watch all CatalogSources cluster-wide - 
&operatorsv1alpha1.CatalogSource{}: {}, - // Watch all Pods cluster-wide (for catalog pods) - &corev1.Pod{}: {}, - // Watch the lifecycle-server Deployment - &appsv1.Deployment{}: {}, - }, - }, - }) - if err != nil { - setupLog.Error(err, "failed to setup manager instance") - return err - } - - if err := (&controllers.LifecycleControllerReconciler{ - Client: mgr.GetClient(), - Log: ctrl.Log.WithName("controllers").WithName("lifecycle-controller"), - Scheme: mgr.GetScheme(), - ServerImage: serverImage, - CatalogSourceSelector: catalogSourceSelector, - }).SetupWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "lifecycle-controller") - return err - } - - // Add health check endpoint - if err := mgr.AddHealthzCheck("healthz", func(req *http.Request) error { - return nil - }); err != nil { - setupLog.Error(err, "unable to set up health check") - return err - } - - // Set up signal handler context - ctx := ctrl.SetupSignalHandler() - - setupLog.Info("starting manager") - if err := mgr.Start(ctx); err != nil { - setupLog.Error(err, "problem running manager") - return err + if err := rootCmd.Execute(); err != nil { + fmt.Fprintf(os.Stderr, "encountered an error while executing the binary: %v\n", err) + os.Exit(1) } - - return nil } diff --git a/cmd/lifecycle-controller/start.go b/cmd/lifecycle-controller/start.go index f2d0442e9a..f1d3c74e81 100644 --- a/cmd/lifecycle-controller/start.go +++ b/cmd/lifecycle-controller/start.go @@ -1,7 +1,63 @@ package main import ( + "context" + "crypto/tls" + "fmt" + "net/http" + "os" + "time" + + "github.com/go-logr/logr" "github.com/spf13/cobra" + "k8s.io/klog/v2" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/selection" + "k8s.io/apimachinery/pkg/types" + + configv1 "github.com/openshift/api/config/v1" + operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/manager" + metricsfilters "sigs.k8s.io/controller-runtime/pkg/metrics/filters" + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" + + "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/apiserver" + + controllers "github.com/openshift/operator-framework-olm/pkg/lifecycle-controller" +) + +const ( + defaultMetricsAddr = ":8443" + defaultHealthCheckAddr = "localhost:8081" + leaderElectionID = "lifecycle-controller-lock" + + // Leader election defaults per OpenShift conventions + // https://github.com/openshift/enhancements/blob/master/CONVENTIONS.md#high-availability + defaultLeaseDuration = 137 * time.Second + defaultRenewDeadline = 107 * time.Second + defaultRetryPeriod = 26 * time.Second + + // Name of the cluster-scoped APIServer resource + clusterAPIServerName = "cluster" +) + +var ( + disableLeaderElection bool + healthCheckAddr string + metricsAddr string + catalogSourceLabelSelector string + catalogSourceFieldSelector string ) func newStartCmd() *cobra.Command { @@ -12,12 +68,322 @@ func newStartCmd() *cobra.Command { RunE: run, } - cmd.Flags().String("namespace", defaultNamespace, "namespace where the controller runs") - 
cmd.Flags().String("health", defaultHealthCheckPort, "health check port") - cmd.Flags().String("pprof", defaultPprofPort, "pprof port") - cmd.Flags().String("metrics", defaultMetricsPort, "metrics port") - cmd.Flags().Bool("disable-leader-election", false, "disable leader election") - cmd.Flags().String("catalog-source-selector", defaultCatalogSourceSelector, "label selector for catalog sources to manage (e.g., 'olm.operatorframework.io/lifecycle-server=true')") + cmd.Flags().StringVar(&healthCheckAddr, "health", defaultHealthCheckAddr, "health check address") + cmd.Flags().StringVar(&metricsAddr, "metrics", defaultMetricsAddr, "metrics address") + cmd.Flags().BoolVar(&disableLeaderElection, "disable-leader-election", false, "disable leader election") + cmd.Flags().StringVar(&catalogSourceLabelSelector, "catalog-source-label-selector", "", "label selector for catalog sources to manage (empty means all)") + cmd.Flags().StringVar(&catalogSourceFieldSelector, "catalog-source-field-selector", "", "field selector for catalog sources to manage (empty means all)") return cmd } + +// catalogPodLabelSelector returns a label selector matching pods with olm.catalogSource label +func catalogPodLabelSelector() labels.Selector { + // This call cannot fail: the label key is valid and selection.Exists requires no values. + req, err := labels.NewRequirement("olm.catalogSource", selection.Exists, nil) + if err != nil { + // Panic on impossible error to satisfy static analysis and catch programming errors + panic(fmt.Sprintf("BUG: failed to create label requirement: %v", err)) + } + return labels.NewSelector().Add(*req) +} + +// tlsConfig holds the TLS configuration extracted from the APIServer resource +type tlsConfig struct { + minVersion uint16 + cipherSuites []uint16 + // String representations for passing to lifecycle-server + minVersionString string + cipherSuiteStrings []string +} + +// getInitialTLSConfig reads the APIServer "cluster" resource and extracts TLS settings. +// Falls back to Intermediate profile defaults if the resource doesn't exist. 
+func getInitialTLSConfig(ctx context.Context, c client.Client, log logr.Logger) (*tlsConfig, error) { + var apiServer configv1.APIServer + err := c.Get(ctx, types.NamespacedName{Name: clusterAPIServerName}, &apiServer) + if err != nil { + if errors.IsNotFound(err) { + log.Info("APIServer 'cluster' not found, using Intermediate TLS profile defaults") + minVersion, cipherSuites := apiserver.GetSecurityProfileConfig(nil) + return &tlsConfig{ + minVersion: minVersion, + cipherSuites: cipherSuites, + minVersionString: tlsVersionToString(minVersion), + cipherSuiteStrings: cipherSuiteIDsToNames(cipherSuites), + }, nil + } + return nil, fmt.Errorf("failed to get APIServer 'cluster': %w", err) + } + + minVersion, cipherSuites := apiserver.GetSecurityProfileConfig(apiServer.Spec.TLSSecurityProfile) + cfg := &tlsConfig{ + minVersion: minVersion, + cipherSuites: cipherSuites, + minVersionString: tlsVersionToString(minVersion), + cipherSuiteStrings: cipherSuiteIDsToNames(cipherSuites), + } + + log.Info("loaded TLS configuration from APIServer", + "profile", getTLSProfileName(apiServer.Spec.TLSSecurityProfile), + "minVersion", cfg.minVersionString, + "cipherCount", len(cfg.cipherSuites), + ) + + return cfg, nil +} + +// tlsVersionToString converts a TLS version constant to its string name +func tlsVersionToString(version uint16) string { + switch version { + case tls.VersionTLS10: + return "VersionTLS10" + case tls.VersionTLS11: + return "VersionTLS11" + case tls.VersionTLS12: + return "VersionTLS12" + case tls.VersionTLS13: + return "VersionTLS13" + default: + return "VersionTLS12" + } +} + +// cipherSuiteIDsToNames converts TLS cipher suite IDs to their IANA names +func cipherSuiteIDsToNames(ids []uint16) []string { + names := make([]string, 0, len(ids)) + for _, id := range ids { + if suite := tls.CipherSuiteName(id); suite != "" { + names = append(names, suite) + } + } + return names +} + +// getTLSProfileName returns the TLS profile name for logging +func getTLSProfileName(profile *configv1.TLSSecurityProfile) string { + if profile == nil { + return "Intermediate (default)" + } + if profile.Type == "" { + return "Intermediate (default)" + } + return string(profile.Type) +} + +func run(_ *cobra.Command, _ []string) error { + serverImage := os.Getenv("LIFECYCLE_SERVER_IMAGE") + if serverImage == "" { + return fmt.Errorf("LIFECYCLE_SERVER_IMAGE environment variable is required") + } + + namespace := os.Getenv("NAMESPACE") + if !disableLeaderElection && namespace == "" { + return fmt.Errorf("NAMESPACE environment variable is required when leader election is enabled") + } + + ctrl.SetLogger(klog.NewKlogr()) + setupLog := ctrl.Log.WithName("setup") + + version := os.Getenv("RELEASE_VERSION") + if version == "" { + version = "unknown" + } + setupLog.Info("starting lifecycle-controller", "version", version) + + // Parse the catalog source label selector + labelSelector, err := labels.Parse(catalogSourceLabelSelector) + if err != nil { + setupLog.Error(err, "failed to parse catalog-source-label-selector", "selector", catalogSourceLabelSelector) + return fmt.Errorf("invalid catalog-source-label-selector %q: %w", catalogSourceLabelSelector, err) + } + setupLog.Info("using catalog source label selector", "selector", labelSelector.String()) + + // Parse the catalog source field selector + fieldSelector, err := fields.ParseSelector(catalogSourceFieldSelector) + if err != nil { + setupLog.Error(err, "failed to parse catalog-source-field-selector", "selector", catalogSourceFieldSelector) + return 
fmt.Errorf("invalid catalog-source-field-selector %q: %w", catalogSourceFieldSelector, err) + } + setupLog.Info("using catalog source field selector", "selector", fieldSelector.String()) + + restConfig := ctrl.GetConfigOrDie() + scheme := setupScheme() + + // Create a temporary client to read initial TLS configuration + tempClient, err := client.New(restConfig, client.Options{Scheme: scheme}) + if err != nil { + setupLog.Error(err, "failed to create temporary client for TLS config") + return err + } + + // Get initial TLS configuration from APIServer "cluster" + ctx := context.Background() + initialTLSConfig, err := getInitialTLSConfig(ctx, tempClient, setupLog) + if err != nil { + setupLog.Error(err, "failed to get initial TLS configuration") + return err + } + + // Leader election timing defaults + leaseDuration := defaultLeaseDuration + renewDeadline := defaultRenewDeadline + retryPeriod := defaultRetryPeriod + + mgr, err := ctrl.NewManager(restConfig, manager.Options{ + Scheme: scheme, + Metrics: metricsserver.Options{ + BindAddress: metricsAddr, + SecureServing: true, + FilterProvider: metricsfilters.WithAuthenticationAndAuthorization, + TLSOpts: []func(*tls.Config){ + func(cfg *tls.Config) { + cfg.MinVersion = initialTLSConfig.minVersion + cfg.CipherSuites = initialTLSConfig.cipherSuites + }, + }, + }, + LeaderElection: !disableLeaderElection, + LeaderElectionNamespace: namespace, + LeaderElectionID: leaderElectionID, + LeaseDuration: &leaseDuration, + RenewDeadline: &renewDeadline, + RetryPeriod: &retryPeriod, + HealthProbeBindAddress: healthCheckAddr, + LeaderElectionReleaseOnCancel: true, + Cache: cache.Options{ + ByObject: map[client.Object]cache.ByObject{ + &operatorsv1alpha1.CatalogSource{}: {}, + &corev1.Pod{}: { + Label: catalogPodLabelSelector(), + }, + &appsv1.Deployment{}: { + Label: controllers.LifecycleServerLabelSelector(), + }, + &configv1.APIServer{}: {}, + }, + }, + }) + if err != nil { + setupLog.Error(err, "failed to setup manager instance") + return err + } + + if err := (&controllers.LifecycleControllerReconciler{ + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controllers").WithName("lifecycle-controller"), + Scheme: mgr.GetScheme(), + ServerImage: serverImage, + CatalogSourceLabelSelector: labelSelector, + CatalogSourceFieldSelector: fieldSelector, + TLSMinVersion: initialTLSConfig.minVersionString, + TLSCipherSuites: initialTLSConfig.cipherSuiteStrings, + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "lifecycle-controller") + return err + } + + // Set up APIServer watcher to exit on TLS config change + if err := setupAPIServerWatcher(mgr, initialTLSConfig, setupLog); err != nil { + setupLog.Error(err, "failed to setup APIServer watcher") + return err + } + + // Add health check endpoint (used for both liveness and readiness probes) + if err := mgr.AddHealthzCheck("healthz", func(req *http.Request) error { + return nil + }); err != nil { + setupLog.Error(err, "unable to set up health check") + return err + } + + // Set up signal handler context + signalCtx := ctrl.SetupSignalHandler() + + setupLog.Info("starting manager") + if err := mgr.Start(signalCtx); err != nil { + setupLog.Error(err, "problem running manager") + return err + } + + return nil +} + +// apiServerWatcher watches the APIServer "cluster" resource and exits if TLS config changes +type apiServerWatcher struct { + client client.Client + log logr.Logger + initialMinVer uint16 + initialCiphers []uint16 +} + +func 
setupAPIServerWatcher(mgr manager.Manager, initialCfg *tlsConfig, log logr.Logger) error { + watcher := &apiServerWatcher{ + client: mgr.GetClient(), + log: log.WithName("apiserver-watcher"), + initialMinVer: initialCfg.minVersion, + initialCiphers: initialCfg.cipherSuites, + } + + // Create a controller that watches APIServer and triggers reconcile + return ctrl.NewControllerManagedBy(mgr). + Named("apiserver-tls-watcher"). + WatchesRawSource(source.Kind(mgr.GetCache(), &configv1.APIServer{}, + handler.TypedEnqueueRequestsFromMapFunc(func(ctx context.Context, obj *configv1.APIServer) []reconcile.Request { + if obj.Name == clusterAPIServerName { + return []reconcile.Request{{NamespacedName: types.NamespacedName{Name: clusterAPIServerName}}} + } + return nil + }), + )). + Complete(watcher) +} + +func (w *apiServerWatcher) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { + if req.Name != clusterAPIServerName { + return reconcile.Result{}, nil + } + + var apiServer configv1.APIServer + if err := w.client.Get(ctx, req.NamespacedName, &apiServer); err != nil { + if errors.IsNotFound(err) { + // APIServer deleted - check if we had a non-default config + defaultMin, defaultCiphers := apiserver.GetSecurityProfileConfig(nil) + if w.initialMinVer != defaultMin || !cipherSuitesEqual(w.initialCiphers, defaultCiphers) { + w.log.Info("APIServer 'cluster' deleted and initial config was non-default, exiting to pick up new defaults") + os.Exit(0) + } + return reconcile.Result{}, nil + } + return reconcile.Result{}, err + } + + // Get current TLS config + currentMinVer, currentCiphers := apiserver.GetSecurityProfileConfig(apiServer.Spec.TLSSecurityProfile) + + // Compare with initial config + if w.initialMinVer != currentMinVer || !cipherSuitesEqual(w.initialCiphers, currentCiphers) { + w.log.Info("TLS security profile changed, exiting to pick up new configuration", + "oldMinVersion", tlsVersionToString(w.initialMinVer), + "newMinVersion", tlsVersionToString(currentMinVer), + "oldCipherCount", len(w.initialCiphers), + "newCipherCount", len(currentCiphers), + ) + os.Exit(0) + } + + return reconcile.Result{}, nil +} + +// cipherSuitesEqual compares two cipher suite slices for equality +func cipherSuitesEqual(a, b []uint16) bool { + if len(a) != len(b) { + return false + } + for i := range a { + if a[i] != b[i] { + return false + } + } + return true +} \ No newline at end of file diff --git a/cmd/lifecycle-controller/util.go b/cmd/lifecycle-controller/util.go index 1a1b911394..03e94834eb 100644 --- a/cmd/lifecycle-controller/util.go +++ b/cmd/lifecycle-controller/util.go @@ -1,23 +1,19 @@ package main import ( - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - rbacv1 "k8s.io/api/rbac/v1" "k8s.io/apimachinery/pkg/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime" clientgoscheme "k8s.io/client-go/kubernetes/scheme" + configv1 "github.com/openshift/api/config/v1" operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1" ) func setupScheme() *runtime.Scheme { scheme := runtime.NewScheme() utilruntime.Must(clientgoscheme.AddToScheme(scheme)) - utilruntime.Must(appsv1.AddToScheme(scheme)) - utilruntime.Must(corev1.AddToScheme(scheme)) - utilruntime.Must(rbacv1.AddToScheme(scheme)) utilruntime.Must(operatorsv1alpha1.AddToScheme(scheme)) + utilruntime.Must(configv1.AddToScheme(scheme)) return scheme } diff --git a/cmd/lifecycle-server/main.go b/cmd/lifecycle-server/main.go index 3b6c6b61d6..74596d79bb 100644 --- a/cmd/lifecycle-server/main.go +++ 
b/cmd/lifecycle-server/main.go @@ -1,25 +1,8 @@ package main import ( - "context" "fmt" - "net/http" "os" - "os/signal" - "syscall" - "time" - - "github.com/sirupsen/logrus" - "github.com/spf13/cobra" - - server "github.com/openshift/operator-framework-olm/pkg/lifecycle-server" -) - -const ( - defaultFBCPath = "/catalog/configs" - defaultListenAddr = ":8080" - defaultHealthAddr = ":8081" - shutdownTimeout = 10 * time.Second ) func main() { @@ -30,96 +13,3 @@ func main() { os.Exit(1) } } - -func run(cmd *cobra.Command, args []string) error { - fbcPath, err := cmd.Flags().GetString("fbc-path") - if err != nil { - return err - } - listenAddr, err := cmd.Flags().GetString("listen") - if err != nil { - return err - } - healthAddr, err := cmd.Flags().GetString("health") - if err != nil { - return err - } - - log := logrus.New() - log.SetFormatter(&logrus.JSONFormatter{}) - log.Info("starting lifecycle-server") - - // Load lifecycle data from FBC - log.WithField("path", fbcPath).Info("loading lifecycle data from FBC") - data, err := server.LoadLifecycleData(fbcPath) - if err != nil { - log.WithError(err).Warn("failed to load lifecycle data, starting with empty data") - data = make(server.LifecycleIndex) - } - log.WithFields(logrus.Fields{ - "blobCount": server.CountBlobs(data), - "versionCount": len(data), - "versions": server.ListVersions(data), - }).Info("loaded lifecycle data") - - // Create HTTP handler - handler := server.NewHandler(data, log) - - // Create health handler - healthHandler := http.NewServeMux() - healthHandler.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusOK) - w.Write([]byte("ok")) - }) - - // Create HTTP servers - apiServer := &http.Server{ - Addr: listenAddr, - Handler: handler, - } - healthServer := &http.Server{ - Addr: healthAddr, - Handler: healthHandler, - } - - // Start servers - errCh := make(chan error, 2) - go func() { - log.WithField("addr", listenAddr).Info("starting API server") - if err := apiServer.ListenAndServe(); err != nil && err != http.ErrServerClosed { - errCh <- fmt.Errorf("API server error: %w", err) - } - }() - go func() { - log.WithField("addr", healthAddr).Info("starting health server") - if err := healthServer.ListenAndServe(); err != nil && err != http.ErrServerClosed { - errCh <- fmt.Errorf("health server error: %w", err) - } - }() - - // Wait for shutdown signal - sigCh := make(chan os.Signal, 1) - signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM) - - select { - case sig := <-sigCh: - log.WithField("signal", sig).Info("received shutdown signal") - case err := <-errCh: - log.WithError(err).Error("server error") - return err - } - - // Graceful shutdown - ctx, cancel := context.WithTimeout(context.Background(), shutdownTimeout) - defer cancel() - - log.Info("shutting down servers") - if err := apiServer.Shutdown(ctx); err != nil { - log.WithError(err).Error("API server shutdown error") - } - if err := healthServer.Shutdown(ctx); err != nil { - log.WithError(err).Error("health server shutdown error") - } - - return nil -} diff --git a/cmd/lifecycle-server/start.go b/cmd/lifecycle-server/start.go index fea72c5103..93fc0ed95e 100644 --- a/cmd/lifecycle-server/start.go +++ b/cmd/lifecycle-server/start.go @@ -1,7 +1,47 @@ package main import ( + "context" + "crypto/tls" + "fmt" + "net/http" + "os" + "os/signal" + "syscall" + "time" + "github.com/spf13/cobra" + + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apiserver/pkg/authentication/authenticatorfactory" + 
"k8s.io/apiserver/pkg/authorization/authorizerfactory" + "k8s.io/apiserver/pkg/endpoints/filters" + "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + cliflag "k8s.io/component-base/cli/flag" + "k8s.io/klog/v2" + + server "github.com/openshift/operator-framework-olm/pkg/lifecycle-server" +) + +const ( + defaultFBCPath = "/catalog/configs" + defaultListenAddr = ":8443" + defaultHealthAddr = "localhost:8081" + defaultTLSCertPath = "/var/run/secrets/serving-cert/tls.crt" + defaultTLSKeyPath = "/var/run/secrets/serving-cert/tls.key" + shutdownTimeout = 10 * time.Second +) + +var ( + fbcPath string + listenAddr string + healthAddr string + tlsCertPath string + tlsKeyPath string + tlsMinVersion string + tlsCipherSuites []string ) func newStartCmd() *cobra.Command { @@ -12,9 +52,178 @@ func newStartCmd() *cobra.Command { RunE: run, } - cmd.Flags().String("fbc-path", defaultFBCPath, "path to FBC catalog data") - cmd.Flags().String("listen", defaultListenAddr, "address to listen on for HTTP API") - cmd.Flags().String("health", defaultHealthAddr, "address to listen on for health checks") + cmd.Flags().StringVar(&fbcPath, "fbc-path", defaultFBCPath, "path to FBC catalog data") + cmd.Flags().StringVar(&listenAddr, "listen", defaultListenAddr, "address to listen on for HTTPS API") + cmd.Flags().StringVar(&healthAddr, "health", defaultHealthAddr, "address to listen on for health checks") + cmd.Flags().StringVar(&tlsCertPath, "tls-cert", defaultTLSCertPath, "path to TLS certificate") + cmd.Flags().StringVar(&tlsKeyPath, "tls-key", defaultTLSKeyPath, "path to TLS private key") + cmd.Flags().StringVar(&tlsMinVersion, "tls-min-version", "", "minimum TLS version (VersionTLS12 or VersionTLS13)") + cmd.Flags().StringSliceVar(&tlsCipherSuites, "tls-cipher-suites", nil, "comma-separated list of cipher suites") return cmd } + +func run(_ *cobra.Command, _ []string) error { + log := klog.NewKlogr() + log.Info("starting lifecycle-server") + + // Parse TLS configuration + var tlsMinVersionID uint16 + var err error + if tlsMinVersion != "" { + tlsMinVersionID, err = cliflag.TLSVersion(tlsMinVersion) + if err != nil { + return fmt.Errorf("invalid tls-min-version: %w", err) + } + } + + var tlsCipherSuiteIDs []uint16 + if len(tlsCipherSuites) > 0 { + tlsCipherSuiteIDs, err = cliflag.TLSCipherSuites(tlsCipherSuites) + if err != nil { + return fmt.Errorf("invalid tls-cipher-suites: %w", err) + } + } + + // Create Kubernetes client for authn/authz + config, err := rest.InClusterConfig() + if err != nil { + return fmt.Errorf("failed to get in-cluster config: %w", err) + } + kubeClient, err := kubernetes.NewForConfig(config) + if err != nil { + return fmt.Errorf("failed to create kubernetes client: %w", err) + } + + // Create delegating authenticator (uses TokenReview) + authnConfig := authenticatorfactory.DelegatingAuthenticatorConfig{ + Anonymous: nil, // disable anonymous auth + TokenAccessReviewClient: kubeClient.AuthenticationV1(), + CacheTTL: 2 * time.Minute, + } + authenticator, _, err := authnConfig.New() + if err != nil { + return fmt.Errorf("failed to create authenticator: %w", err) + } + + // Create delegating authorizer (uses SubjectAccessReview) + authzConfig := authorizerfactory.DelegatingAuthorizerConfig{ + SubjectAccessReviewClient: kubeClient.AuthorizationV1(), + AllowCacheTTL: 5 * time.Minute, + DenyCacheTTL: 30 * time.Second, + } + authorizer, err := authzConfig.New() + if err != nil { + return fmt.Errorf("failed to create authorizer: %w", err) + } + + // 
Load lifecycle data from FBC + log.Info("loading lifecycle data from FBC", "path", fbcPath) + data, err := server.LoadLifecycleData(fbcPath) + if err != nil { + log.Error(err, "failed to load lifecycle data, starting with empty data") + data = make(server.LifecycleIndex) + } + log.Info("loaded lifecycle data", + "blobCount", server.CountBlobs(data), + "versionCount", len(data), + "versions", server.ListVersions(data), + ) + + // Create HTTP handler with authn/authz middleware + baseHandler := server.NewHandler(data, log) + + // Wrap with authorization + authorizedHandler := filters.WithAuthorization(baseHandler, authorizer, nil) + + // Wrap with authentication + handler := filters.WithAuthentication( + authorizedHandler, + authenticator, + http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + http.Error(w, "Unauthorized", http.StatusUnauthorized) + }), + nil, + nil, + ) + + // Wrap with request info (required by authorization filter) + requestInfoResolver := &request.RequestInfoFactory{ + APIPrefixes: sets.NewString("api"), + GrouplessAPIPrefixes: sets.NewString("api"), + } + handler = filters.WithRequestInfo(handler, requestInfoResolver) + + // Create health handler (no auth required) + healthHandler := http.NewServeMux() + healthHandler.HandleFunc("GET /healthz", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte("ok")) + }) + + // Load TLS certificate + cert, err := tls.LoadX509KeyPair(tlsCertPath, tlsKeyPath) + if err != nil { + return fmt.Errorf("failed to load TLS certificate: %w", err) + } + + // Create TLS config + tlsConfig := &tls.Config{ + Certificates: []tls.Certificate{cert}, + MinVersion: tlsMinVersionID, + CipherSuites: tlsCipherSuiteIDs, + } + + // Create servers + apiServer := &http.Server{ + Addr: listenAddr, + Handler: handler, + TLSConfig: tlsConfig, + } + healthServer := &http.Server{ + Addr: healthAddr, + Handler: healthHandler, + } + + // Start servers + errCh := make(chan error, 2) + go func() { + log.Info("starting API server (HTTPS)", "addr", listenAddr) + // Cert paths are empty since TLSConfig already has certificates loaded + if err := apiServer.ListenAndServeTLS("", ""); err != nil && err != http.ErrServerClosed { + errCh <- fmt.Errorf("API server error: %w", err) + } + }() + go func() { + log.Info("starting health server", "addr", healthAddr) + if err := healthServer.ListenAndServe(); err != nil && err != http.ErrServerClosed { + errCh <- fmt.Errorf("health server error: %w", err) + } + }() + + // Wait for shutdown signal + sigCh := make(chan os.Signal, 1) + signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM) + + select { + case sig := <-sigCh: + log.Info("received shutdown signal", "signal", sig) + case err := <-errCh: + log.Error(err, "server error") + return err + } + + // Graceful shutdown + ctx, cancel := context.WithTimeout(context.Background(), shutdownTimeout) + defer cancel() + + log.Info("shutting down servers") + if err := apiServer.Shutdown(ctx); err != nil { + log.Error(err, "API server shutdown error") + } + if err := healthServer.Shutdown(ctx); err != nil { + log.Error(err, "health server shutdown error") + } + + return nil +} diff --git a/manifests/0000_50_olm_08-lifecycle-controller.deployment.yaml b/manifests/0000_50_olm_08-lifecycle-controller.deployment.yaml index 9156d9af30..47c7d7d8f7 100644 --- a/manifests/0000_50_olm_08-lifecycle-controller.deployment.yaml +++ b/manifests/0000_50_olm_08-lifecycle-controller.deployment.yaml @@ -42,14 +42,17 @@ spec: - 
/bin/lifecycle-controller args: - start - - --catalog-source-selector=$(CATALOG_SOURCE_SELECTOR) - image: quay.io/operator-framework/olm@sha256:placeholder + image: quay.io/operator-framework/olm@sha256:de396b540b82219812061d0d753440d5655250c621c753ed1dc67d6154741607 imagePullPolicy: IfNotPresent env: - - name: CATALOG_SOURCE_SELECTOR - value: "olm.operatorframework.io/lifecycle-server=true" + - name: RELEASE_VERSION + value: "0.0.1-snapshot" + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace - name: LIFECYCLE_SERVER_IMAGE - value: quay.io/operator-framework/olm@sha256:placeholder + value: quay.io/operator-framework/olm@sha256:de396b540b82219812061d0d753440d5655250c621c753ed1dc67d6154741607 - name: GOMEMLIMIT value: "5MiB" resources: @@ -62,13 +65,13 @@ spec: livenessProbe: httpGet: path: /healthz - port: 8081 + port: health scheme: HTTP initialDelaySeconds: 30 readinessProbe: httpGet: path: /healthz - port: 8081 + port: health scheme: HTTP initialDelaySeconds: 30 terminationMessagePolicy: FallbackToLogsOnError diff --git a/pkg/lifecycle-controller/controller.go b/pkg/lifecycle-controller/controller.go index 059fe58ba3..e0a6e1d62b 100644 --- a/pkg/lifecycle-controller/controller.go +++ b/pkg/lifecycle-controller/controller.go @@ -30,6 +30,7 @@ import ( "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" @@ -44,21 +45,37 @@ import ( ) const ( - catalogLabelKey = "olm.catalogSource" - fieldManager = "lifecycle-controller" - clusterRoleName = "operator-lifecycle-manager-lifecycle-server" - clusterRoleBindingName = "operator-lifecycle-manager-lifecycle-server" - lifecycleServerLabelKey = "olm.lifecycle-server" - resourceBaseName = "lifecycle-server" + catalogLabelKey = "olm.catalogSource" + fieldManager = "lifecycle-controller" + clusterRoleName = "operator-lifecycle-manager-lifecycle-server" + clusterRoleBindingName = "operator-lifecycle-manager-lifecycle-server" + lifecycleServerLabelKey = "olm.lifecycle-server" + lifecycleServerLabelVal = "true" + resourceBaseName = "lifecycle-server" ) // LifecycleControllerReconciler reconciles CatalogSources and manages lifecycle-server resources type LifecycleControllerReconciler struct { client.Client - Log logr.Logger - Scheme *runtime.Scheme - ServerImage string - CatalogSourceSelector labels.Selector + Log logr.Logger + Scheme *runtime.Scheme + ServerImage string + CatalogSourceLabelSelector labels.Selector + CatalogSourceFieldSelector fields.Selector + TLSMinVersion string + TLSCipherSuites []string +} + +// matchesCatalogSource checks if a CatalogSource matches both label and field selectors +func (r *LifecycleControllerReconciler) matchesCatalogSource(cs *operatorsv1alpha1.CatalogSource) bool { + if !r.CatalogSourceLabelSelector.Matches(labels.Set(cs.Labels)) { + return false + } + fieldSet := fields.Set{ + "metadata.name": cs.Name, + "metadata.namespace": cs.Namespace, + } + return r.CatalogSourceFieldSelector.Matches(fieldSet) } // Reconcile watches CatalogSources and manages lifecycle-server resources per catalog @@ -83,8 +100,8 @@ func (r *LifecycleControllerReconciler) Reconcile(ctx context.Context, req ctrl. 
return ctrl.Result{}, err } - // Check if CatalogSource matches our selector - if !r.CatalogSourceSelector.Matches(labels.Set(cs.Labels)) { + // Check if CatalogSource matches our selectors + if !r.matchesCatalogSource(&cs) { // CatalogSource doesn't match, cleanup any existing resources if err := r.cleanupResources(ctx, log, cs.Namespace, cs.Name); err != nil { return ctrl.Result{}, err @@ -184,7 +201,7 @@ func (r *LifecycleControllerReconciler) reconcileClusterRoleBinding(ctx context. var subjects []rbacv1.Subject for i := range allCatalogSources.Items { cs := &allCatalogSources.Items[i] - if !r.CatalogSourceSelector.Matches(labels.Set(cs.Labels)) { + if !r.matchesCatalogSource(cs) { continue } // Check if SA exists (only add if we've created resources for this catalog) @@ -220,7 +237,7 @@ func (r *LifecycleControllerReconciler) reconcileClusterRoleBinding(ctx context. ObjectMeta: metav1.ObjectMeta{ Name: clusterRoleBindingName, Labels: map[string]string{ - lifecycleServerLabelKey: "true", + lifecycleServerLabelKey: lifecycleServerLabelVal, }, }, RoleRef: rbacv1.RoleRef{ @@ -307,7 +324,7 @@ func (r *LifecycleControllerReconciler) buildServiceAccount(name string, cs *ope Name: name, Namespace: cs.Namespace, Labels: map[string]string{ - lifecycleServerLabelKey: "true", + lifecycleServerLabelKey: lifecycleServerLabelVal, "catalog-name": cs.Name, }, }, @@ -325,20 +342,23 @@ func (r *LifecycleControllerReconciler) buildService(name string, cs *operatorsv Name: name, Namespace: cs.Namespace, Labels: map[string]string{ - lifecycleServerLabelKey: "true", + lifecycleServerLabelKey: lifecycleServerLabelVal, "catalog-name": cs.Name, }, + Annotations: map[string]string{ + "service.beta.openshift.io/serving-cert-secret-name": fmt.Sprintf("%s-tls", name), + }, }, Spec: corev1.ServiceSpec{ Selector: map[string]string{ - lifecycleServerLabelKey: "true", + lifecycleServerLabelKey: lifecycleServerLabelVal, "catalog-name": cs.Name, }, Ports: []corev1.ServicePort{ { - Name: "https", + Name: "api", Port: 8443, - TargetPort: intstr.FromString("https"), + TargetPort: intstr.FromString("api"), Protocol: corev1.ProtocolTCP, }, }, @@ -350,7 +370,7 @@ func (r *LifecycleControllerReconciler) buildService(name string, cs *operatorsv // buildDeployment creates a Deployment for a lifecycle-server func (r *LifecycleControllerReconciler) buildDeployment(name string, cs *operatorsv1alpha1.CatalogSource, imageRef, nodeName string) *appsv1.Deployment { podLabels := map[string]string{ - lifecycleServerLabelKey: "true", + lifecycleServerLabelKey: lifecycleServerLabelVal, "catalog-name": cs.Name, } @@ -360,8 +380,8 @@ func (r *LifecycleControllerReconciler) buildDeployment(name string, cs *operato catalogDir = cs.Spec.GrpcPodConfig.ExtractContent.CatalogDir } - catalogMountPath := fmt.Sprintf("/catalogs/%s/%s", cs.Namespace, cs.Name) - fbcPath := fmt.Sprintf("%s%s", catalogMountPath, catalogDir) + const catalogMountPath = "/catalog" + fbcPath := catalogMountPath + catalogDir return &appsv1.Deployment{ TypeMeta: metav1.TypeMeta{ @@ -403,25 +423,8 @@ func (r *LifecycleControllerReconciler) buildDeployment(name string, cs *operato }, ServiceAccountName: name, PriorityClassName: "system-cluster-critical", - // Prefer scheduling on the same node as the catalog pod - Affinity: &corev1.Affinity{ - NodeAffinity: &corev1.NodeAffinity{ - PreferredDuringSchedulingIgnoredDuringExecution: []corev1.PreferredSchedulingTerm{ - { - Weight: 100, - Preference: corev1.NodeSelectorTerm{ - MatchExpressions: []corev1.NodeSelectorRequirement{ - { - 
Key: "kubernetes.io/hostname", - Operator: corev1.NodeSelectorOpIn, - Values: []string{nodeName}, - }, - }, - }, - }, - }, - }, - }, + // Prefer scheduling on the same node as the catalog pod (only if nodeName is known) + Affinity: nodeAffinityForNode(nodeName), NodeSelector: map[string]string{ "kubernetes.io/os": "linux", }, @@ -450,7 +453,7 @@ func (r *LifecycleControllerReconciler) buildDeployment(name string, cs *operato Image: r.ServerImage, ImagePullPolicy: corev1.PullIfNotPresent, Command: []string{"/bin/lifecycle-server"}, - Args: []string{"start", fmt.Sprintf("--fbc-path=%s", fbcPath)}, + Args: r.buildLifecycleServerArgs(fbcPath), Env: []corev1.EnvVar{ { Name: "GOMEMLIMIT", @@ -459,8 +462,8 @@ func (r *LifecycleControllerReconciler) buildDeployment(name string, cs *operato }, Ports: []corev1.ContainerPort{ { - Name: "http", - ContainerPort: 8080, + Name: "api", + ContainerPort: 8443, }, { Name: "health", @@ -473,12 +476,17 @@ func (r *LifecycleControllerReconciler) buildDeployment(name string, cs *operato MountPath: catalogMountPath, ReadOnly: true, }, + { + Name: "serving-cert", + MountPath: "/var/run/secrets/serving-cert", + ReadOnly: true, + }, }, LivenessProbe: &corev1.Probe{ ProbeHandler: corev1.ProbeHandler{ HTTPGet: &corev1.HTTPGetAction{ Path: "/healthz", - Port: intstr.FromInt(8081), + Port: intstr.FromString("health"), Scheme: corev1.URISchemeHTTP, }, }, @@ -488,7 +496,7 @@ func (r *LifecycleControllerReconciler) buildDeployment(name string, cs *operato ProbeHandler: corev1.ProbeHandler{ HTTPGet: &corev1.HTTPGetAction{ Path: "/healthz", - Port: intstr.FromInt(8081), + Port: intstr.FromString("health"), Scheme: corev1.URISchemeHTTP, }, }, @@ -509,35 +517,6 @@ func (r *LifecycleControllerReconciler) buildDeployment(name string, cs *operato }, TerminationMessagePolicy: corev1.TerminationMessageFallbackToLogsOnError, }, - { - Name: "kube-rbac-proxy", - Image: "quay.io/brancz/kube-rbac-proxy:v0.18.0", - ImagePullPolicy: corev1.PullIfNotPresent, - Args: []string{ - "--upstream=http://127.0.0.1:8080/", - "--secure-listen-address=0.0.0.0:8443", - }, - Ports: []corev1.ContainerPort{ - { - Name: "https", - ContainerPort: 8443, - }, - }, - Resources: corev1.ResourceRequirements{ - Requests: corev1.ResourceList{ - corev1.ResourceCPU: resource.MustParse("10m"), - corev1.ResourceMemory: resource.MustParse("20Mi"), - }, - }, - SecurityContext: &corev1.SecurityContext{ - AllowPrivilegeEscalation: ptr.To(false), - ReadOnlyRootFilesystem: ptr.To(true), - Capabilities: &corev1.Capabilities{ - Drop: []corev1.Capability{"ALL"}, - }, - }, - TerminationMessagePolicy: corev1.TerminationMessageFallbackToLogsOnError, - }, }, Volumes: []corev1.Volume{ { @@ -549,6 +528,14 @@ func (r *LifecycleControllerReconciler) buildDeployment(name string, cs *operato }, }, }, + { + Name: "serving-cert", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: fmt.Sprintf("%s-tls", name), + }, + }, + }, }, }, }, @@ -556,18 +543,69 @@ func (r *LifecycleControllerReconciler) buildDeployment(name string, cs *operato } } +// buildLifecycleServerArgs builds the command-line arguments for lifecycle-server +func (r *LifecycleControllerReconciler) buildLifecycleServerArgs(fbcPath string) []string { + args := []string{ + "start", + fmt.Sprintf("--fbc-path=%s", fbcPath), + } + + if r.TLSMinVersion != "" { + args = append(args, fmt.Sprintf("--tls-min-version=%s", r.TLSMinVersion)) + } + + if len(r.TLSCipherSuites) > 0 { + args = append(args, fmt.Sprintf("--tls-cipher-suites=%s", 
strings.Join(r.TLSCipherSuites, ","))) + } + + return args +} + // imageID extracts digest from pod status (handles extract-content mode) func imageID(pod *corev1.Pod) string { - if len(pod.Status.InitContainerStatuses) == 2 { - // Extract content mode: use init container [1] - return pod.Status.InitContainerStatuses[1].ImageID + // In extract-content mode, look for the "extract-content" init container + for i := range pod.Status.InitContainerStatuses { + if pod.Status.InitContainerStatuses[i].Name == "extract-content" { + return pod.Status.InitContainerStatuses[i].ImageID + } } + // Fallback to the first container (standard grpc mode) if len(pod.Status.ContainerStatuses) > 0 { return pod.Status.ContainerStatuses[0].ImageID } return "" } +// nodeAffinityForNode returns a node affinity preferring the given node, or nil if nodeName is empty +func nodeAffinityForNode(nodeName string) *corev1.Affinity { + if nodeName == "" { + return nil + } + return &corev1.Affinity{ + NodeAffinity: &corev1.NodeAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []corev1.PreferredSchedulingTerm{ + { + Weight: 100, + Preference: corev1.NodeSelectorTerm{ + MatchExpressions: []corev1.NodeSelectorRequirement{ + { + Key: "kubernetes.io/hostname", + Operator: corev1.NodeSelectorOpIn, + Values: []string{nodeName}, + }, + }, + }, + }, + }, + }, + } +} + +// LifecycleServerLabelSelector returns a label selector matching lifecycle-server deployments +func LifecycleServerLabelSelector() labels.Selector { + return labels.SelectorFromSet(labels.Set{lifecycleServerLabelKey: lifecycleServerLabelVal}) +} + // SetupWithManager sets up the controller with the Manager func (r *LifecycleControllerReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). @@ -600,7 +638,7 @@ func (r *LifecycleControllerReconciler) SetupWithManager(mgr ctrl.Manager) error return nil } // Only watch our deployments - if deploy.Labels[lifecycleServerLabelKey] != "true" { + if deploy.Labels[lifecycleServerLabelKey] != lifecycleServerLabelVal { return nil } csName := deploy.Labels["catalog-name"] diff --git a/pkg/lifecycle-server/fbc.go b/pkg/lifecycle-server/fbc.go index 5ef0306486..c4548a0e0c 100644 --- a/pkg/lifecycle-server/fbc.go +++ b/pkg/lifecycle-server/fbc.go @@ -26,10 +26,13 @@ import ( "github.com/operator-framework/operator-registry/alpha/declcfg" ) -// schemaVersionRegex matches lifecycle schema versions like v1, v1alpha1, v2beta3, etc. 
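+// Lifecycle blobs are keyed by schema strings of the form
+// "io.openshift.operators.lifecycles.<version>"; the capture group below
+// extracts the version suffix (for example, "v1alpha1" from
+// "io.openshift.operators.lifecycles.v1alpha1").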
+// versionPattern matches API versions like v1, v1alpha1, v2beta3 // Matches: v1, v1alpha1, v1beta1, v200beta300 // Does not match: 1, v0, v1beta0 -var schemaVersionRegex = regexp.MustCompile(`^io\.openshift\.operators\.lifecycles\.(v[1-9][0-9]*(?:(?:alpha|beta)[1-9][0-9]*)?)$`) +const versionPattern = `v[1-9][0-9]*(?:(?:alpha|beta)[1-9][0-9]*)?` + +// schemaVersionRegex matches lifecycle schema versions in FBC blobs +var schemaVersionRegex = regexp.MustCompile(`^io\.openshift\.operators\.lifecycles\.(` + versionPattern + `)$`) // LifecycleIndex maps schema version -> package name -> raw JSON blob type LifecycleIndex map[string]map[string]json.RawMessage diff --git a/pkg/lifecycle-server/server.go b/pkg/lifecycle-server/server.go index 69b20b9b66..7525cf2e8c 100644 --- a/pkg/lifecycle-server/server.go +++ b/pkg/lifecycle-server/server.go @@ -18,54 +18,30 @@ package server import ( "net/http" - "regexp" - "github.com/sirupsen/logrus" + "github.com/go-logr/logr" ) -const apiPrefix = "/api/" - -// pathRegex matches /api//lifecycles/.json -// version pattern: v[1-9][0-9]*((?:alpha|beta)[1-9][0-9]*)? -// Matches: v1, v1alpha1, v1beta1, v200beta300 -// Does not match: 1, v0, v1beta0 -var pathRegex = regexp.MustCompile(`^/api/(v[1-9][0-9]*(?:(?:alpha|beta)[1-9][0-9]*)?)/lifecycles/([^/]+)\.json$`) - // NewHandler creates a new HTTP handler for the lifecycle API -func NewHandler(data LifecycleIndex, log *logrus.Logger) http.Handler { +func NewHandler(data LifecycleIndex, log logr.Logger) http.Handler { mux := http.NewServeMux() - // Handle GET /api//lifecycles/.json - mux.HandleFunc(apiPrefix, func(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodGet { - http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) - return - } + // GET /api/{version}/lifecycles/{package} + mux.HandleFunc("GET /api/{version}/lifecycles/{package}", func(w http.ResponseWriter, r *http.Request) { + version := r.PathValue("version") + pkg := r.PathValue("package") // If no lifecycle data is available, return 503 Service Unavailable if len(data) == 0 { - log.Debug("no lifecycle data available, returning 503") + log.V(1).Info("no lifecycle data available, returning 503") http.Error(w, "No lifecycle data available", http.StatusServiceUnavailable) return } - // Parse the path - matches := pathRegex.FindStringSubmatch(r.URL.Path) - if matches == nil { - http.NotFound(w, r) - return - } - - version := matches[1] // e.g., "v1alpha1" - pkg := matches[2] // package name - // Look up version in index versionData, ok := data[version] if !ok { - log.WithFields(logrus.Fields{ - "version": version, - "package": pkg, - }).Debug("version not found") + log.V(1).Info("version not found", "version", version, "package", pkg) http.NotFound(w, r) return } @@ -73,34 +49,16 @@ func NewHandler(data LifecycleIndex, log *logrus.Logger) http.Handler { // Look up package in version rawJSON, ok := versionData[pkg] if !ok { - log.WithFields(logrus.Fields{ - "version": version, - "package": pkg, - }).Debug("package not found") + log.V(1).Info("package not found", "version", version, "package", pkg) http.NotFound(w, r) return } - log.WithFields(logrus.Fields{ - "version": version, - "package": pkg, - }).Debug("returning lifecycle data") + log.V(1).Info("returning lifecycle data", "version", version, "package", pkg) w.Header().Set("Content-Type", "application/json") - w.Write(rawJSON) - }) - - // List available versions at /api/ - mux.HandleFunc("/api", func(w http.ResponseWriter, r *http.Request) { - if r.Method != 
http.MethodGet { - http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) - return - } - - // Redirect /api to /api/ - if r.URL.Path == "/api" { - http.Redirect(w, r, "/api/", http.StatusMovedPermanently) - return + if _, err := w.Write(rawJSON); err != nil { + log.V(1).Error(err, "failed to write response") } }) From 9df0c5e330e545fa881e641bddb45b480ec01edf Mon Sep 17 00:00:00 2001 From: Joe Lanford Date: Fri, 30 Jan 2026 17:41:08 -0500 Subject: [PATCH 3/8] feat(lifecycle): add dynamic TLS config and NetworkPolicy support - Add TLSConfigProvider for thread-safe dynamic TLS configuration updates - Use tls.Config.GetConfigForClient for metrics server to dynamically pick up TLS profile changes without requiring a controller restart - Watch APIServer "cluster" resource and trigger CatalogSource reconciliation when TLS security profile changes - Use channel source to ensure TLS provider is updated before reconcile requests are processed (avoids race condition) - Add NetworkPolicy for lifecycle-controller (manifest) and lifecycle-server (created dynamically per CatalogSource) - Add RBAC permissions for apiservers and networkpolicies - Add WebhookRetryBackoff to lifecycle-server authn/authz config - Change health endpoints from localhost to all interfaces for Kubernetes probe compatibility - Rename labels: olm.lifecycle-server=true -> app=olm-lifecycle-server, app=lifecycle-controller -> app=olm-lifecycle-controller - Add olm.lifecycle-server/catalog-name label constant Co-Authored-By: Claude Opus 4.5 --- cmd/lifecycle-controller/start.go | 181 +++++++++++++----- cmd/lifecycle-server/start.go | 22 ++- ...lm_08-lifecycle-controller.deployment.yaml | 6 +- ...08-lifecycle-controller.networkpolicy.yaml | 33 ++++ ...0_50_olm_08-lifecycle-controller.rbac.yaml | 8 + pkg/lifecycle-controller/controller.go | 146 ++++++++++---- 6 files changed, 311 insertions(+), 85 deletions(-) create mode 100644 manifests/0000_50_olm_08-lifecycle-controller.networkpolicy.yaml diff --git a/cmd/lifecycle-controller/start.go b/cmd/lifecycle-controller/start.go index f1d3c74e81..b98a1adea1 100644 --- a/cmd/lifecycle-controller/start.go +++ b/cmd/lifecycle-controller/start.go @@ -6,6 +6,7 @@ import ( "fmt" "net/http" "os" + "sync" "time" "github.com/go-logr/logr" @@ -25,6 +26,7 @@ import ( ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/manager" metricsfilters "sigs.k8s.io/controller-runtime/pkg/metrics/filters" @@ -39,7 +41,7 @@ import ( const ( defaultMetricsAddr = ":8443" - defaultHealthCheckAddr = "localhost:8081" + defaultHealthCheckAddr = ":8081" leaderElectionID = "lifecycle-controller-lock" // Leader election defaults per OpenShift conventions @@ -97,6 +99,57 @@ type tlsConfig struct { cipherSuiteStrings []string } +// TLSConfigProvider provides thread-safe access to dynamically updated TLS configuration. +// It implements the controllers.TLSConfigProvider interface. +type TLSConfigProvider struct { + mu sync.RWMutex + config *tlsConfig +} + +// NewTLSConfigProvider creates a new TLSConfigProvider with the given initial config. +func NewTLSConfigProvider(initial *tlsConfig) *TLSConfigProvider { + return &TLSConfigProvider{config: initial} +} + +// Get returns the current TLS configuration.
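+// The returned value must be treated as read-only: Update swaps the pointer
+// rather than mutating the struct in place, so a snapshot stays valid even if
+// the profile changes afterwards. Illustrative read path (hypothetical caller,
+// not part of this patch):
+//
+//	cfg := provider.Get()
+//	_ = cfg.minVersion // safe even if Update runs concurrently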
+func (p *TLSConfigProvider) Get() *tlsConfig { + p.mu.RLock() + defer p.mu.RUnlock() + return p.config +} + +// Update sets a new TLS configuration. +func (p *TLSConfigProvider) Update(cfg *tlsConfig) { + p.mu.Lock() + defer p.mu.Unlock() + p.config = cfg +} + +// GetMinVersion returns the current TLS minimum version string. +func (p *TLSConfigProvider) GetMinVersion() string { + p.mu.RLock() + defer p.mu.RUnlock() + return p.config.minVersionString +} + +// GetCipherSuites returns the current TLS cipher suites. +func (p *TLSConfigProvider) GetCipherSuites() []string { + p.mu.RLock() + defer p.mu.RUnlock() + return p.config.cipherSuiteStrings +} + +// GetConfigForClient returns a TLS config callback for dynamic TLS configuration. +func (p *TLSConfigProvider) GetConfigForClient() func(*tls.ClientHelloInfo) (*tls.Config, error) { + return func(*tls.ClientHelloInfo) (*tls.Config, error) { + cfg := p.Get() + return &tls.Config{ + MinVersion: cfg.minVersion, + CipherSuites: cfg.cipherSuites, + }, nil + } +} + // getInitialTLSConfig reads the APIServer "cluster" resource and extracts TLS settings. // Falls back to Intermediate profile defaults if the resource doesn't exist. func getInitialTLSConfig(ctx context.Context, c client.Client, log logr.Logger) (*tlsConfig, error) { @@ -104,7 +157,7 @@ func getInitialTLSConfig(ctx context.Context, c client.Client, log logr.Logger) err := c.Get(ctx, types.NamespacedName{Name: clusterAPIServerName}, &apiServer) if err != nil { if errors.IsNotFound(err) { - log.Info("APIServer 'cluster' not found, using Intermediate TLS profile defaults") + log.Info("APIServer 'cluster' not found, using TLS profile defaults") minVersion, cipherSuites := apiserver.GetSecurityProfileConfig(nil) return &tlsConfig{ minVersion: minVersion, @@ -225,6 +278,9 @@ func run(_ *cobra.Command, _ []string) error { return err } + // Create a TLS config provider for dynamic updates + tlsProvider := NewTLSConfigProvider(initialTLSConfig) + // Leader election timing defaults leaseDuration := defaultLeaseDuration renewDeadline := defaultRenewDeadline @@ -238,8 +294,8 @@ func run(_ *cobra.Command, _ []string) error { FilterProvider: metricsfilters.WithAuthenticationAndAuthorization, TLSOpts: []func(*tls.Config){ func(cfg *tls.Config) { - cfg.MinVersion = initialTLSConfig.minVersion - cfg.CipherSuites = initialTLSConfig.cipherSuites + // Use GetConfigForClient for dynamic TLS configuration + cfg.GetConfigForClient = tlsProvider.GetConfigForClient() }, }, }, @@ -269,23 +325,23 @@ func run(_ *cobra.Command, _ []string) error { return err } - if err := (&controllers.LifecycleControllerReconciler{ + // Create channel for TLS config change notifications + // The apiServerWatcher sends events to this channel after updating the TLS provider + tlsChangeChan := make(chan event.GenericEvent) + tlsChangeSource := source.Channel(tlsChangeChan, &handler.EnqueueRequestForObject{}) + + reconciler := &controllers.LifecycleControllerReconciler{ Client: mgr.GetClient(), Log: ctrl.Log.WithName("controllers").WithName("lifecycle-controller"), Scheme: mgr.GetScheme(), ServerImage: serverImage, CatalogSourceLabelSelector: labelSelector, CatalogSourceFieldSelector: fieldSelector, - TLSMinVersion: initialTLSConfig.minVersionString, - TLSCipherSuites: initialTLSConfig.cipherSuiteStrings, - }).SetupWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "lifecycle-controller") - return err + TLSConfigProvider: tlsProvider, } - // Set up APIServer watcher to exit on TLS config 
change - if err := setupAPIServerWatcher(mgr, initialTLSConfig, setupLog); err != nil { - setupLog.Error(err, "failed to setup APIServer watcher") + if err := reconciler.SetupWithManager(mgr, tlsChangeSource); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "lifecycle-controller") return err } @@ -297,11 +353,14 @@ func run(_ *cobra.Command, _ []string) error { return err } - // Set up signal handler context - signalCtx := ctrl.SetupSignalHandler() + // Set up APIServer watcher to update TLS config and trigger CatalogSource reconciliation + if err := setupAPIServerWatcher(mgr, tlsProvider, tlsChangeChan, setupLog); err != nil { + setupLog.Error(err, "failed to setup APIServer watcher") + return err + } setupLog.Info("starting manager") - if err := mgr.Start(signalCtx); err != nil { + if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { setupLog.Error(err, "problem running manager") return err } @@ -309,20 +368,20 @@ func run(_ *cobra.Command, _ []string) error { return nil } -// apiServerWatcher watches the APIServer "cluster" resource and exits if TLS config changes +// apiServerWatcher watches the APIServer "cluster" resource and updates TLS config dynamically type apiServerWatcher struct { - client client.Client - log logr.Logger - initialMinVer uint16 - initialCiphers []uint16 + client client.Client + log logr.Logger + tlsProvider *TLSConfigProvider + tlsChangeChan chan<- event.GenericEvent } -func setupAPIServerWatcher(mgr manager.Manager, initialCfg *tlsConfig, log logr.Logger) error { +func setupAPIServerWatcher(mgr manager.Manager, tlsProvider *TLSConfigProvider, tlsChangeChan chan<- event.GenericEvent, log logr.Logger) error { watcher := &apiServerWatcher{ - client: mgr.GetClient(), - log: log.WithName("apiserver-watcher"), - initialMinVer: initialCfg.minVersion, - initialCiphers: initialCfg.cipherSuites, + client: mgr.GetClient(), + log: log.WithName("apiserver-watcher"), + tlsProvider: tlsProvider, + tlsChangeChan: tlsChangeChan, } // Create a controller that watches APIServer and triggers reconcile @@ -344,32 +403,64 @@ func (w *apiServerWatcher) Reconcile(ctx context.Context, req reconcile.Request) return reconcile.Result{}, nil } + var newConfig *tlsConfig + var apiServer configv1.APIServer if err := w.client.Get(ctx, req.NamespacedName, &apiServer); err != nil { if errors.IsNotFound(err) { - // APIServer deleted - check if we had a non-default config - defaultMin, defaultCiphers := apiserver.GetSecurityProfileConfig(nil) - if w.initialMinVer != defaultMin || !cipherSuitesEqual(w.initialCiphers, defaultCiphers) { - w.log.Info("APIServer 'cluster' deleted and initial config was non-default, exiting to pick up new defaults") - os.Exit(0) + // APIServer deleted - use defaults + w.log.Info("APIServer 'cluster' deleted, using TLS profile defaults") + minVersion, cipherSuites := apiserver.GetSecurityProfileConfig(nil) + newConfig = &tlsConfig{ + minVersion: minVersion, + cipherSuites: cipherSuites, + minVersionString: tlsVersionToString(minVersion), + cipherSuiteStrings: cipherSuiteIDsToNames(cipherSuites), } - return reconcile.Result{}, nil + } else { + return reconcile.Result{}, err + } + } else { + // Get current TLS config from APIServer + minVersion, cipherSuites := apiserver.GetSecurityProfileConfig(apiServer.Spec.TLSSecurityProfile) + newConfig = &tlsConfig{ + minVersion: minVersion, + cipherSuites: cipherSuites, + minVersionString: tlsVersionToString(minVersion), + cipherSuiteStrings: cipherSuiteIDsToNames(cipherSuites), } + } + + // 
Check if config changed + currentConfig := w.tlsProvider.Get() + if currentConfig.minVersion == newConfig.minVersion && cipherSuitesEqual(currentConfig.cipherSuites, newConfig.cipherSuites) { + // No change + return reconcile.Result{}, nil + } + + w.log.Info("TLS security profile changed, updating configuration and triggering reconciliation", + "oldMinVersion", currentConfig.minVersionString, + "newMinVersion", newConfig.minVersionString, + "oldCipherCount", len(currentConfig.cipherSuites), + "newCipherCount", len(newConfig.cipherSuites), + ) + + // Update the provider + w.tlsProvider.Update(newConfig) + + // Trigger reconciliation of all CatalogSources to update lifecycle-server deployments + var catalogSources operatorsv1alpha1.CatalogSourceList + if err := w.client.List(ctx, &catalogSources); err != nil { + w.log.Error(err, "failed to list CatalogSources for reconciliation") return reconcile.Result{}, err } - // Get current TLS config - currentMinVer, currentCiphers := apiserver.GetSecurityProfileConfig(apiServer.Spec.TLSSecurityProfile) - - // Compare with initial config - if w.initialMinVer != currentMinVer || !cipherSuitesEqual(w.initialCiphers, currentCiphers) { - w.log.Info("TLS security profile changed, exiting to pick up new configuration", - "oldMinVersion", tlsVersionToString(w.initialMinVer), - "newMinVersion", tlsVersionToString(currentMinVer), - "oldCipherCount", len(w.initialCiphers), - "newCipherCount", len(currentCiphers), - ) - os.Exit(0) + w.log.Info("triggering reconciliation for CatalogSources", "count", len(catalogSources.Items)) + + // Send events to trigger reconciliation + for i := range catalogSources.Items { + cs := &catalogSources.Items[i] + w.tlsChangeChan <- event.GenericEvent{Object: cs} } return reconcile.Result{}, nil @@ -386,4 +477,4 @@ func cipherSuitesEqual(a, b []uint16) bool { } } return true -} \ No newline at end of file +} diff --git a/cmd/lifecycle-server/start.go b/cmd/lifecycle-server/start.go index 93fc0ed95e..cf4de58aab 100644 --- a/cmd/lifecycle-server/start.go +++ b/cmd/lifecycle-server/start.go @@ -13,6 +13,7 @@ import ( "github.com/spf13/cobra" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apiserver/pkg/authentication/authenticatorfactory" "k8s.io/apiserver/pkg/authorization/authorizerfactory" "k8s.io/apiserver/pkg/endpoints/filters" @@ -28,7 +29,7 @@ import ( const ( defaultFBCPath = "/catalog/configs" defaultListenAddr = ":8443" - defaultHealthAddr = "localhost:8081" + defaultHealthAddr = ":8081" defaultTLSCertPath = "/var/run/secrets/serving-cert/tls.crt" defaultTLSKeyPath = "/var/run/secrets/serving-cert/tls.key" shutdownTimeout = 10 * time.Second @@ -97,9 +98,16 @@ func run(_ *cobra.Command, _ []string) error { // Create delegating authenticator (uses TokenReview) authnConfig := authenticatorfactory.DelegatingAuthenticatorConfig{ - Anonymous: nil, // disable anonymous auth - TokenAccessReviewClient: kubeClient.AuthenticationV1(), - CacheTTL: 2 * time.Minute, + Anonymous: nil, // disable anonymous auth + TokenAccessReviewClient: kubeClient.AuthenticationV1(), + TokenAccessReviewTimeout: 10 * time.Second, + CacheTTL: 2 * time.Minute, + WebhookRetryBackoff: &wait.Backoff{ + Duration: 500 * time.Millisecond, + Factor: 1.5, + Jitter: 0.2, + Steps: 5, + }, } authenticator, _, err := authnConfig.New() if err != nil { @@ -111,6 +119,12 @@ func run(_ *cobra.Command, _ []string) error { SubjectAccessReviewClient: kubeClient.AuthorizationV1(), AllowCacheTTL: 5 * time.Minute, DenyCacheTTL: 30 * time.Second, + 
WebhookRetryBackoff: &wait.Backoff{ + Duration: 500 * time.Millisecond, + Factor: 1.5, + Jitter: 0.2, + Steps: 5, + }, } authorizer, err := authzConfig.New() if err != nil { diff --git a/manifests/0000_50_olm_08-lifecycle-controller.deployment.yaml b/manifests/0000_50_olm_08-lifecycle-controller.deployment.yaml index 47c7d7d8f7..5f256a2dd1 100644 --- a/manifests/0000_50_olm_08-lifecycle-controller.deployment.yaml +++ b/manifests/0000_50_olm_08-lifecycle-controller.deployment.yaml @@ -4,7 +4,7 @@ metadata: name: lifecycle-controller namespace: openshift-operator-lifecycle-manager labels: - app: lifecycle-controller + app: olm-lifecycle-controller annotations: include.release.openshift.io/self-managed-high-availability: "true" release.openshift.io/feature-set: "TechPreviewNoUpgrade" @@ -15,7 +15,7 @@ spec: replicas: 1 selector: matchLabels: - app: lifecycle-controller + app: olm-lifecycle-controller template: metadata: annotations: @@ -23,7 +23,7 @@ spec: openshift.io/required-scc: restricted-v2 kubectl.kubernetes.io/default-container: lifecycle-controller labels: - app: lifecycle-controller + app: olm-lifecycle-controller spec: securityContext: runAsNonRoot: true diff --git a/manifests/0000_50_olm_08-lifecycle-controller.networkpolicy.yaml b/manifests/0000_50_olm_08-lifecycle-controller.networkpolicy.yaml new file mode 100644 index 0000000000..0891a2fbd9 --- /dev/null +++ b/manifests/0000_50_olm_08-lifecycle-controller.networkpolicy.yaml @@ -0,0 +1,33 @@ +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: lifecycle-controller + namespace: openshift-operator-lifecycle-manager + annotations: + include.release.openshift.io/self-managed-high-availability: "true" + release.openshift.io/feature-set: "TechPreviewNoUpgrade" + capability.openshift.io/name: "OperatorLifecycleManager" +spec: + podSelector: + matchLabels: + app: olm-lifecycle-controller + ingress: + - ports: + - port: 8443 + protocol: TCP + egress: + - ports: + - port: 6443 + protocol: TCP + - ports: + - port: 53 + protocol: TCP + - port: 53 + protocol: UDP + - port: 5353 + protocol: TCP + - port: 5353 + protocol: UDP + policyTypes: + - Ingress + - Egress \ No newline at end of file diff --git a/manifests/0000_50_olm_08-lifecycle-controller.rbac.yaml b/manifests/0000_50_olm_08-lifecycle-controller.rbac.yaml index 4ef2408f51..f2b68ee99c 100644 --- a/manifests/0000_50_olm_08-lifecycle-controller.rbac.yaml +++ b/manifests/0000_50_olm_08-lifecycle-controller.rbac.yaml @@ -17,6 +17,10 @@ metadata: release.openshift.io/feature-set: "TechPreviewNoUpgrade" capability.openshift.io/name: "OperatorLifecycleManager" rules: + # Read APIServer for TLS security profile configuration + - apiGroups: ["config.openshift.io"] + resources: ["apiservers"] + verbs: ["get", "list", "watch"] # Watch CatalogSources cluster-wide - apiGroups: ["operators.coreos.com"] resources: ["catalogsources"] @@ -37,6 +41,10 @@ rules: - apiGroups: [""] resources: ["serviceaccounts"] verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + # Manage lifecycle-server networkpolicies + - apiGroups: ["networking.k8s.io"] + resources: ["networkpolicies"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] # Manage lifecycle-server clusterrolebindings - apiGroups: ["rbac.authorization.k8s.io"] resources: ["clusterrolebindings"] diff --git a/pkg/lifecycle-controller/controller.go b/pkg/lifecycle-controller/controller.go index e0a6e1d62b..5b6d8eaa5f 100644 --- a/pkg/lifecycle-controller/controller.go +++ 
b/pkg/lifecycle-controller/controller.go @@ -26,6 +26,7 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + networkingv1 "k8s.io/api/networking/v1" rbacv1 "k8s.io/api/rbac/v1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" @@ -42,18 +43,26 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" ) const ( - catalogLabelKey = "olm.catalogSource" - fieldManager = "lifecycle-controller" - clusterRoleName = "operator-lifecycle-manager-lifecycle-server" - clusterRoleBindingName = "operator-lifecycle-manager-lifecycle-server" - lifecycleServerLabelKey = "olm.lifecycle-server" - lifecycleServerLabelVal = "true" - resourceBaseName = "lifecycle-server" + catalogLabelKey = "olm.catalogSource" + catalogNameLabelKey = "olm.lifecycle-server/catalog-name" + fieldManager = "lifecycle-controller" + clusterRoleName = "operator-lifecycle-manager-lifecycle-server" + clusterRoleBindingName = "operator-lifecycle-manager-lifecycle-server" + appLabelKey = "app" + appLabelVal = "olm-lifecycle-server" + resourceBaseName = "lifecycle-server" ) +// TLSConfigProvider provides access to dynamically updated TLS configuration. +type TLSConfigProvider interface { + GetMinVersion() string + GetCipherSuites() []string +} + // LifecycleControllerReconciler reconciles CatalogSources and manages lifecycle-server resources type LifecycleControllerReconciler struct { client.Client @@ -62,8 +71,7 @@ type LifecycleControllerReconciler struct { ServerImage string CatalogSourceLabelSelector labels.Selector CatalogSourceFieldSelector fields.Selector - TLSMinVersion string - TLSCipherSuites []string + TLSConfigProvider TLSConfigProvider } // matchesCatalogSource checks if a CatalogSource matches both label and field selectors @@ -184,6 +192,13 @@ func (r *LifecycleControllerReconciler) ensureResources(ctx context.Context, log return err } + // Apply NetworkPolicy (in catalog's namespace) + np := r.buildNetworkPolicy(name, cs) + if err := r.Patch(ctx, np, client.Apply, client.FieldOwner(fieldManager), client.ForceOwnership); err != nil { + log.Error(err, "failed to apply networkpolicy") + return err + } + log.Info("applied resources", "name", name, "namespace", cs.Namespace, "imageRef", imageRef, "nodeName", nodeName) return nil } @@ -237,7 +252,7 @@ func (r *LifecycleControllerReconciler) reconcileClusterRoleBinding(ctx context. 
ObjectMeta: metav1.ObjectMeta{ Name: clusterRoleBindingName, Labels: map[string]string{ - lifecycleServerLabelKey: lifecycleServerLabelVal, + appLabelKey: appLabelVal, }, }, RoleRef: rbacv1.RoleRef{ @@ -324,8 +339,8 @@ func (r *LifecycleControllerReconciler) buildServiceAccount(name string, cs *ope Name: name, Namespace: cs.Namespace, Labels: map[string]string{ - lifecycleServerLabelKey: lifecycleServerLabelVal, - "catalog-name": cs.Name, + appLabelKey: appLabelVal, + catalogNameLabelKey: cs.Name, }, }, } @@ -342,8 +357,8 @@ func (r *LifecycleControllerReconciler) buildService(name string, cs *operatorsv Name: name, Namespace: cs.Namespace, Labels: map[string]string{ - lifecycleServerLabelKey: lifecycleServerLabelVal, - "catalog-name": cs.Name, + appLabelKey: appLabelVal, + catalogNameLabelKey: cs.Name, }, Annotations: map[string]string{ "service.beta.openshift.io/serving-cert-secret-name": fmt.Sprintf("%s-tls", name), @@ -351,8 +366,8 @@ func (r *LifecycleControllerReconciler) buildService(name string, cs *operatorsv }, Spec: corev1.ServiceSpec{ Selector: map[string]string{ - lifecycleServerLabelKey: lifecycleServerLabelVal, - "catalog-name": cs.Name, + appLabelKey: appLabelVal, + catalogNameLabelKey: cs.Name, }, Ports: []corev1.ServicePort{ { @@ -370,8 +385,8 @@ func (r *LifecycleControllerReconciler) buildService(name string, cs *operatorsv // buildDeployment creates a Deployment for a lifecycle-server func (r *LifecycleControllerReconciler) buildDeployment(name string, cs *operatorsv1alpha1.CatalogSource, imageRef, nodeName string) *appsv1.Deployment { podLabels := map[string]string{ - lifecycleServerLabelKey: lifecycleServerLabelVal, - "catalog-name": cs.Name, + appLabelKey: appLabelVal, + catalogNameLabelKey: cs.Name, } // Determine the catalog directory inside the image @@ -409,8 +424,8 @@ func (r *LifecycleControllerReconciler) buildDeployment(name string, cs *operato ObjectMeta: metav1.ObjectMeta{ Labels: podLabels, Annotations: map[string]string{ - "target.workload.openshift.io/management": `{"effect": "PreferredDuringScheduling"}`, - "openshift.io/required-scc": "restricted-v2", + "target.workload.openshift.io/management": `{"effect": "PreferredDuringScheduling"}`, + "openshift.io/required-scc": "restricted-v2", "kubectl.kubernetes.io/default-container": "lifecycle-server", }, }, @@ -543,6 +558,62 @@ func (r *LifecycleControllerReconciler) buildDeployment(name string, cs *operato } } +// buildNetworkPolicy creates a NetworkPolicy for a lifecycle-server +func (r *LifecycleControllerReconciler) buildNetworkPolicy(name string, cs *operatorsv1alpha1.CatalogSource) *networkingv1.NetworkPolicy { + tcp := corev1.ProtocolTCP + udp := corev1.ProtocolUDP + return &networkingv1.NetworkPolicy{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "networking.k8s.io/v1", + Kind: "NetworkPolicy", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: cs.Namespace, + Labels: map[string]string{ + appLabelKey: appLabelVal, + catalogNameLabelKey: cs.Name, + }, + }, + Spec: networkingv1.NetworkPolicySpec{ + PodSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + appLabelKey: appLabelVal, + catalogNameLabelKey: cs.Name, + }, + }, + Ingress: []networkingv1.NetworkPolicyIngressRule{ + { + Ports: []networkingv1.NetworkPolicyPort{ + {Port: ptr.To(intstr.FromInt32(8443)), Protocol: &tcp}, + }, + }, + }, + Egress: []networkingv1.NetworkPolicyEgressRule{ + { + // API server + Ports: []networkingv1.NetworkPolicyPort{ + {Port: ptr.To(intstr.FromInt32(6443)), Protocol: &tcp}, + }, + }, + { + // 
DNS + Ports: []networkingv1.NetworkPolicyPort{ + {Port: ptr.To(intstr.FromInt32(53)), Protocol: &tcp}, + {Port: ptr.To(intstr.FromInt32(53)), Protocol: &udp}, + {Port: ptr.To(intstr.FromInt32(5353)), Protocol: &tcp}, + {Port: ptr.To(intstr.FromInt32(5353)), Protocol: &udp}, + }, + }, + }, + PolicyTypes: []networkingv1.PolicyType{ + networkingv1.PolicyTypeIngress, + networkingv1.PolicyTypeEgress, + }, + }, + } +} + // buildLifecycleServerArgs builds the command-line arguments for lifecycle-server func (r *LifecycleControllerReconciler) buildLifecycleServerArgs(fbcPath string) []string { args := []string{ @@ -550,12 +621,14 @@ func (r *LifecycleControllerReconciler) buildLifecycleServerArgs(fbcPath string) fmt.Sprintf("--fbc-path=%s", fbcPath), } - if r.TLSMinVersion != "" { - args = append(args, fmt.Sprintf("--tls-min-version=%s", r.TLSMinVersion)) - } + if r.TLSConfigProvider != nil { + if minVersion := r.TLSConfigProvider.GetMinVersion(); minVersion != "" { + args = append(args, fmt.Sprintf("--tls-min-version=%s", minVersion)) + } - if len(r.TLSCipherSuites) > 0 { - args = append(args, fmt.Sprintf("--tls-cipher-suites=%s", strings.Join(r.TLSCipherSuites, ","))) + if cipherSuites := r.TLSConfigProvider.GetCipherSuites(); len(cipherSuites) > 0 { + args = append(args, fmt.Sprintf("--tls-cipher-suites=%s", strings.Join(cipherSuites, ","))) + } } return args @@ -603,12 +676,13 @@ func nodeAffinityForNode(nodeName string) *corev1.Affinity { // LifecycleServerLabelSelector returns a label selector matching lifecycle-server deployments func LifecycleServerLabelSelector() labels.Selector { - return labels.SelectorFromSet(labels.Set{lifecycleServerLabelKey: lifecycleServerLabelVal}) + return labels.SelectorFromSet(labels.Set{appLabelKey: appLabelVal}) } -// SetupWithManager sets up the controller with the Manager -func (r *LifecycleControllerReconciler) SetupWithManager(mgr ctrl.Manager) error { - return ctrl.NewControllerManagedBy(mgr). +// SetupWithManager sets up the controller with the Manager. +// tlsChangeSource is an optional channel source that triggers reconciliation when TLS config changes. +func (r *LifecycleControllerReconciler) SetupWithManager(mgr ctrl.Manager, tlsChangeSource source.Source) error { + builder := ctrl.NewControllerManagedBy(mgr). For(&operatorsv1alpha1.CatalogSource{}). // Watch Pods to detect catalog pod changes Watches(&corev1.Pod{}, handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request { @@ -638,10 +712,10 @@ func (r *LifecycleControllerReconciler) SetupWithManager(mgr ctrl.Manager) error return nil } // Only watch our deployments - if deploy.Labels[lifecycleServerLabelKey] != lifecycleServerLabelVal { + if deploy.Labels[appLabelKey] != appLabelVal { return nil } - csName := deploy.Labels["catalog-name"] + csName := deploy.Labels[catalogNameLabelKey] if csName == "" { return nil } @@ -653,6 +727,12 @@ func (r *LifecycleControllerReconciler) SetupWithManager(mgr ctrl.Manager) error }, }, } - })). 
- Complete(r) + })) + + // Add TLS change source if provided + if tlsChangeSource != nil { + builder = builder.WatchesRawSource(tlsChangeSource) + } + + return builder.Complete(r) } From 9f48196e164bf054819594276a115ca2f6c0cfcb Mon Sep 17 00:00:00 2001 From: Joe Lanford Date: Fri, 30 Jan 2026 21:48:10 -0500 Subject: [PATCH 4/8] go mod tidy --- go.mod | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 4d0d25fdf5..fb1aa84845 100644 --- a/go.mod +++ b/go.mod @@ -24,8 +24,11 @@ require ( gopkg.in/yaml.v2 v2.4.0 k8s.io/api v0.34.3 k8s.io/apimachinery v0.34.3 + k8s.io/apiserver v0.34.3 k8s.io/client-go v0.34.3 k8s.io/code-generator v0.34.3 + k8s.io/component-base v0.34.3 + k8s.io/klog/v2 v2.130.1 k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 sigs.k8s.io/controller-runtime v0.22.4 @@ -222,12 +225,9 @@ require ( gopkg.in/warnings.v0 v0.1.2 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/apiextensions-apiserver v0.34.3 // indirect - k8s.io/apiserver v0.34.3 // indirect k8s.io/cli-runtime v0.33.2 // indirect - k8s.io/component-base v0.34.3 // indirect k8s.io/gengo/v2 v2.0.0-20250604051438-85fd79dbfd9f // indirect k8s.io/klog v1.0.0 // indirect - k8s.io/klog/v2 v2.130.1 // indirect k8s.io/kms v0.34.3 // indirect k8s.io/kube-aggregator v0.34.3 // indirect k8s.io/kubectl v0.33.2 // indirect From decf7eb889a8dd2b6fc47c998332c7ee8349d55a Mon Sep 17 00:00:00 2001 From: Joe Lanford Date: Sat, 31 Jan 2026 07:03:04 -0500 Subject: [PATCH 5/8] move tls stuff to controllers package, other refactoring --- cmd/lifecycle-controller/main.go | 4 +- cmd/lifecycle-controller/start.go | 322 +++---------------------- cmd/lifecycle-controller/util.go | 15 ++ cmd/lifecycle-server/main.go | 13 +- cmd/lifecycle-server/start.go | 277 ++++++++++----------- go.mod | 8 +- pkg/lifecycle-controller/controller.go | 18 +- pkg/lifecycle-controller/tls.go | 131 ++++++++++ pkg/lifecycle-server/fbc.go | 29 +++ pkg/lifecycle-server/server.go | 18 -- 10 files changed, 359 insertions(+), 476 deletions(-) create mode 100644 pkg/lifecycle-controller/tls.go diff --git a/cmd/lifecycle-controller/main.go b/cmd/lifecycle-controller/main.go index 5cfe101a07..04c5ce578a 100644 --- a/cmd/lifecycle-controller/main.go +++ b/cmd/lifecycle-controller/main.go @@ -11,13 +11,13 @@ import ( func main() { rootCmd := &cobra.Command{ Use: "lifecycle-controller", - Short: "Lifecycle Controller for OLM", + Short: "Lifecycle Metadata Controller for OLM", } rootCmd.AddCommand(newStartCmd()) if err := rootCmd.Execute(); err != nil { - fmt.Fprintf(os.Stderr, "encountered an error while executing the binary: %v\n", err) + fmt.Fprintf(os.Stderr, "error running lifecycle-controller: %v\n", err) os.Exit(1) } } diff --git a/cmd/lifecycle-controller/start.go b/cmd/lifecycle-controller/start.go index b98a1adea1..4aefd69ff8 100644 --- a/cmd/lifecycle-controller/start.go +++ b/cmd/lifecycle-controller/start.go @@ -6,23 +6,16 @@ import ( "fmt" "net/http" "os" - "sync" "time" - "github.com/go-logr/logr" + configv1 "github.com/openshift/api/config/v1" + operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1" "github.com/spf13/cobra" - "k8s.io/klog/v2" - appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/selection" - "k8s.io/apimachinery/pkg/types" - - configv1 "github.com/openshift/api/config/v1" - operatorsv1alpha1 
"github.com/operator-framework/api/pkg/operators/v1alpha1" + "k8s.io/klog/v2" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" @@ -31,11 +24,8 @@ import ( "sigs.k8s.io/controller-runtime/pkg/manager" metricsfilters "sigs.k8s.io/controller-runtime/pkg/metrics/filters" metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" - "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" - "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/apiserver" - controllers "github.com/openshift/operator-framework-olm/pkg/lifecycle-controller" ) @@ -49,9 +39,6 @@ const ( defaultLeaseDuration = 137 * time.Second defaultRenewDeadline = 107 * time.Second defaultRetryPeriod = 26 * time.Second - - // Name of the cluster-scoped APIServer resource - clusterAPIServerName = "cluster" ) var ( @@ -79,151 +66,6 @@ func newStartCmd() *cobra.Command { return cmd } -// catalogPodLabelSelector returns a label selector matching pods with olm.catalogSource label -func catalogPodLabelSelector() labels.Selector { - // This call cannot fail: the label key is valid and selection.Exists requires no values. - req, err := labels.NewRequirement("olm.catalogSource", selection.Exists, nil) - if err != nil { - // Panic on impossible error to satisfy static analysis and catch programming errors - panic(fmt.Sprintf("BUG: failed to create label requirement: %v", err)) - } - return labels.NewSelector().Add(*req) -} - -// tlsConfig holds the TLS configuration extracted from the APIServer resource -type tlsConfig struct { - minVersion uint16 - cipherSuites []uint16 - // String representations for passing to lifecycle-server - minVersionString string - cipherSuiteStrings []string -} - -// TLSConfigProvider provides thread-safe access to dynamically updated TLS configuration. -// It implements controllers.TLSConfigProvider interface. -type TLSConfigProvider struct { - mu sync.RWMutex - config *tlsConfig -} - -// NewTLSConfigProvider creates a new TLSConfigProvider with the given initial config. -func NewTLSConfigProvider(initial *tlsConfig) *TLSConfigProvider { - return &TLSConfigProvider{config: initial} -} - -// Get returns the current TLS configuration. -func (p *TLSConfigProvider) Get() *tlsConfig { - p.mu.RLock() - defer p.mu.RUnlock() - return p.config -} - -// Update sets a new TLS configuration. -func (p *TLSConfigProvider) Update(cfg *tlsConfig) { - p.mu.Lock() - defer p.mu.Unlock() - p.config = cfg -} - -// GetMinVersion returns the current TLS minimum version string. -func (p *TLSConfigProvider) GetMinVersion() string { - p.mu.RLock() - defer p.mu.RUnlock() - return p.config.minVersionString -} - -// GetCipherSuites returns the current TLS cipher suites. -func (p *TLSConfigProvider) GetCipherSuites() []string { - p.mu.RLock() - defer p.mu.RUnlock() - return p.config.cipherSuiteStrings -} - -// GetConfigForClient returns a TLS config callback for dynamic TLS configuration. -func (p *TLSConfigProvider) GetConfigForClient() func(*tls.ClientHelloInfo) (*tls.Config, error) { - return func(*tls.ClientHelloInfo) (*tls.Config, error) { - cfg := p.Get() - return &tls.Config{ - MinVersion: cfg.minVersion, - CipherSuites: cfg.cipherSuites, - }, nil - } -} - -// getInitialTLSConfig reads the APIServer "cluster" resource and extracts TLS settings. -// Falls back to Intermediate profile defaults if the resource doesn't exist. 
-func getInitialTLSConfig(ctx context.Context, c client.Client, log logr.Logger) (*tlsConfig, error) { - var apiServer configv1.APIServer - err := c.Get(ctx, types.NamespacedName{Name: clusterAPIServerName}, &apiServer) - if err != nil { - if errors.IsNotFound(err) { - log.Info("APIServer 'cluster' not found, using TLS profile defaults") - minVersion, cipherSuites := apiserver.GetSecurityProfileConfig(nil) - return &tlsConfig{ - minVersion: minVersion, - cipherSuites: cipherSuites, - minVersionString: tlsVersionToString(minVersion), - cipherSuiteStrings: cipherSuiteIDsToNames(cipherSuites), - }, nil - } - return nil, fmt.Errorf("failed to get APIServer 'cluster': %w", err) - } - - minVersion, cipherSuites := apiserver.GetSecurityProfileConfig(apiServer.Spec.TLSSecurityProfile) - cfg := &tlsConfig{ - minVersion: minVersion, - cipherSuites: cipherSuites, - minVersionString: tlsVersionToString(minVersion), - cipherSuiteStrings: cipherSuiteIDsToNames(cipherSuites), - } - - log.Info("loaded TLS configuration from APIServer", - "profile", getTLSProfileName(apiServer.Spec.TLSSecurityProfile), - "minVersion", cfg.minVersionString, - "cipherCount", len(cfg.cipherSuites), - ) - - return cfg, nil -} - -// tlsVersionToString converts a TLS version constant to its string name -func tlsVersionToString(version uint16) string { - switch version { - case tls.VersionTLS10: - return "VersionTLS10" - case tls.VersionTLS11: - return "VersionTLS11" - case tls.VersionTLS12: - return "VersionTLS12" - case tls.VersionTLS13: - return "VersionTLS13" - default: - return "VersionTLS12" - } -} - -// cipherSuiteIDsToNames converts TLS cipher suite IDs to their IANA names -func cipherSuiteIDsToNames(ids []uint16) []string { - names := make([]string, 0, len(ids)) - for _, id := range ids { - if suite := tls.CipherSuiteName(id); suite != "" { - names = append(names, suite) - } - } - return names -} - -// getTLSProfileName returns the TLS profile name for logging -func getTLSProfileName(profile *configv1.TLSSecurityProfile) string { - if profile == nil { - return "Intermediate (default)" - } - if profile.Type == "" { - return "Intermediate (default)" - } - return string(profile.Type) -} - func run(_ *cobra.Command, _ []string) error { serverImage := os.Getenv("LIFECYCLE_SERVER_IMAGE") if serverImage == "" { @@ -272,14 +114,10 @@ func run(_ *cobra.Command, _ []string) error { // Get initial TLS configuration from APIServer "cluster" ctx := context.Background() - initialTLSConfig, err := getInitialTLSConfig(ctx, tempClient, setupLog) - if err != nil { - setupLog.Error(err, "failed to get initial TLS configuration") - return err - } + initialTLSConfig := controllers.GetClusterTLSConfig(ctx, tempClient, setupLog) // Create a TLS config provider for dynamic updates - tlsProvider := controllers.NewTLSConfigProvider(initialTLSConfig) + tlsProvider := controllers.NewTLSConfigProvider(initialTLSConfig) // Leader election timing defaults leaseDuration := defaultLeaseDuration @@ -295,7 +133,10 @@ func run(_ *cobra.Command, _ []string) error { TLSOpts: []func(*tls.Config){ func(cfg *tls.Config) { // Use GetConfigForClient for dynamic TLS configuration - cfg.GetConfigForClient = tlsProvider.GetConfigForClient() + cfg.GetConfigForClient = func(*tls.ClientHelloInfo) (*tls.Config, error) { + // Clone the server's base config so the serving certificate is kept, + // then overlay the dynamically updated minimum version and cipher suites. + dynamic := tlsProvider.Get() + merged := cfg.Clone() + merged.MinVersion = dynamic.MinVersion + merged.CipherSuites = dynamic.CipherSuites + return merged, nil + } }, }, }, @@ -330,6 +171,34 @@ func run(_ *cobra.Command, _ []string) error { tlsChangeChan := make(chan event.GenericEvent) tlsChangeSource := source.Channel(tlsChangeChan,
&handler.EnqueueRequestForObject{}) + tlsProfileLog := ctrl.Log.WithName("controllers").WithName("tlsprofile-controller") + tlsProfileReconciler := controllers.ClusterTLSProfileReconciler{ + Client: mgr.GetClient(), + Log: tlsProfileLog, + TLSProvider: tlsProvider, + OnChange: func(prev, cur *tls.Config) { + // Trigger reconciliation of all CatalogSources to update lifecycle-server deployments + var catalogSources operatorsv1alpha1.CatalogSourceList + if err := mgr.GetClient().List(ctx, &catalogSources); err != nil { + tlsProfileLog.Error(err, "failed to list CatalogSources to requeue for TLS reconfiguration; CatalogSources will not receive new TLS configuration until their next reconciliation") + return + } + + tlsProfileLog.Info("requeueing CatalogSources for TLS reconfiguration", "count", len(catalogSources.Items)) + + // Send events to trigger reconciliation + for i := range catalogSources.Items { + cs := &catalogSources.Items[i] + tlsChangeChan <- event.GenericEvent{Object: cs} + } + }, + } + // Set up TLSProfileReconciler to reconcile TLS profile changes. + if err := tlsProfileReconciler.SetupWithManager(mgr); err != nil { + setupLog.Error(err, "failed to set up TLSProfile watcher") + return err + } + reconciler := &controllers.LifecycleControllerReconciler{ Client: mgr.GetClient(), Log: ctrl.Log.WithName("controllers").WithName("lifecycle-controller"), @@ -353,12 +222,6 @@ func run(_ *cobra.Command, _ []string) error { return err } - // Set up APIServer watcher to update TLS config and trigger CatalogSource reconciliation - if err := setupAPIServerWatcher(mgr, tlsProvider, tlsChangeChan, setupLog); err != nil { - setupLog.Error(err, "failed to setup APIServer watcher") - return err - } - setupLog.Info("starting manager") if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { setupLog.Error(err, "problem running manager") @@ -367,114 +230,3 @@ func run(_ *cobra.Command, _ []string) error { return nil } -
- Complete(watcher) -} - -func (w *apiServerWatcher) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { - if req.Name != clusterAPIServerName { - return reconcile.Result{}, nil - } - - var newConfig *tlsConfig - - var apiServer configv1.APIServer - if err := w.client.Get(ctx, req.NamespacedName, &apiServer); err != nil { - if errors.IsNotFound(err) { - // APIServer deleted - use defaults - w.log.Info("APIServer 'cluster' deleted, using TLS profile defaults") - minVersion, cipherSuites := apiserver.GetSecurityProfileConfig(nil) - newConfig = &tlsConfig{ - minVersion: minVersion, - cipherSuites: cipherSuites, - minVersionString: tlsVersionToString(minVersion), - cipherSuiteStrings: cipherSuiteIDsToNames(cipherSuites), - } - } else { - return reconcile.Result{}, err - } - } else { - // Get current TLS config from APIServer - minVersion, cipherSuites := apiserver.GetSecurityProfileConfig(apiServer.Spec.TLSSecurityProfile) - newConfig = &tlsConfig{ - minVersion: minVersion, - cipherSuites: cipherSuites, - minVersionString: tlsVersionToString(minVersion), - cipherSuiteStrings: cipherSuiteIDsToNames(cipherSuites), - } - } - - // Check if config changed - currentConfig := w.tlsProvider.Get() - if currentConfig.minVersion == newConfig.minVersion && cipherSuitesEqual(currentConfig.cipherSuites, newConfig.cipherSuites) { - // No change - return reconcile.Result{}, nil - } - - w.log.Info("TLS security profile changed, updating configuration and triggering reconciliation", - "oldMinVersion", currentConfig.minVersionString, - "newMinVersion", newConfig.minVersionString, - "oldCipherCount", len(currentConfig.cipherSuites), - "newCipherCount", len(newConfig.cipherSuites), - ) - - // Update the provider - w.tlsProvider.Update(newConfig) - - // Trigger reconciliation of all CatalogSources to update lifecycle-server deployments - var catalogSources operatorsv1alpha1.CatalogSourceList - if err := w.client.List(ctx, &catalogSources); err != nil { - w.log.Error(err, "failed to list CatalogSources for reconciliation") - return reconcile.Result{}, err - } - - w.log.Info("triggering reconciliation for CatalogSources", "count", len(catalogSources.Items)) - - // Send events to trigger reconciliation - for i := range catalogSources.Items { - cs := &catalogSources.Items[i] - w.tlsChangeChan <- event.GenericEvent{Object: cs} - } - - return reconcile.Result{}, nil -} - -// cipherSuitesEqual compares two cipher suite slices for equality -func cipherSuitesEqual(a, b []uint16) bool { - if len(a) != len(b) { - return false - } - for i := range a { - if a[i] != b[i] { - return false - } - } - return true -} diff --git a/cmd/lifecycle-controller/util.go b/cmd/lifecycle-controller/util.go index 03e94834eb..14f0bf76da 100644 --- a/cmd/lifecycle-controller/util.go +++ b/cmd/lifecycle-controller/util.go @@ -1,7 +1,11 @@ package main import ( + "fmt" + + "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/selection" utilruntime "k8s.io/apimachinery/pkg/util/runtime" clientgoscheme "k8s.io/client-go/kubernetes/scheme" @@ -17,3 +21,14 @@ func setupScheme() *runtime.Scheme { return scheme } + +// catalogPodLabelSelector returns a label selector matching pods with olm.catalogSource label +func catalogPodLabelSelector() labels.Selector { + // This call cannot fail: the label key is valid and selection.Exists requires no values. 
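+ // Catalog pods are labeled olm.catalogSource=<catalog-name>, so an Exists + // requirement on that key matches every catalog pod regardless of which catalog owns it.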
+ req, err := labels.NewRequirement("olm.catalogSource", selection.Exists, nil) + if err != nil { + // Panic on impossible error to satisfy static analysis and catch programming errors + panic(fmt.Sprintf("BUG: failed to create label requirement: %v", err)) + } + return labels.NewSelector().Add(*req) +} diff --git a/cmd/lifecycle-server/main.go b/cmd/lifecycle-server/main.go index 74596d79bb..56584328e9 100644 --- a/cmd/lifecycle-server/main.go +++ b/cmd/lifecycle-server/main.go @@ -3,13 +3,20 @@ package main import ( "fmt" "os" + + "github.com/spf13/cobra" ) func main() { - cmd := newStartCmd() + rootCmd := &cobra.Command{ + Use: "lifecycle-server", + Short: "Lifecycle Metadata Server for OLM", + } + + rootCmd.AddCommand(newStartCmd()) - if err := cmd.Execute(); err != nil { - fmt.Fprintf(os.Stderr, "encountered an error while executing the binary: %v\n", err) + if err := rootCmd.Execute(); err != nil { + fmt.Fprintf(os.Stderr, "error running lifecycle-server: %v\n", err) os.Exit(1) } } diff --git a/cmd/lifecycle-server/start.go b/cmd/lifecycle-server/start.go index cf4de58aab..69b3f22ce2 100644 --- a/cmd/lifecycle-server/start.go +++ b/cmd/lifecycle-server/start.go @@ -3,24 +3,18 @@ package main import ( "context" "crypto/tls" + "errors" "fmt" "net/http" - "os" - "os/signal" - "syscall" "time" + "github.com/openshift/library-go/pkg/crypto" "github.com/spf13/cobra" - - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/apiserver/pkg/authentication/authenticatorfactory" - "k8s.io/apiserver/pkg/authorization/authorizerfactory" - "k8s.io/apiserver/pkg/endpoints/filters" - "k8s.io/apiserver/pkg/endpoints/request" - "k8s.io/client-go/kubernetes" + "golang.org/x/sync/errgroup" "k8s.io/client-go/rest" - cliflag "k8s.io/component-base/cli/flag" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/metrics/filters" + "k8s.io/klog/v2" server "github.com/openshift/operator-framework-olm/pkg/lifecycle-server" @@ -36,13 +30,13 @@ const ( ) var ( - fbcPath string - listenAddr string - healthAddr string - tlsCertPath string - tlsKeyPath string - tlsMinVersion string - tlsCipherSuites []string + fbcPath string + listenAddr string + healthAddr string + tlsCertPath string + tlsKeyPath string + tlsMinVersionStr string + tlsCipherSuiteStrs []string ) func newStartCmd() *cobra.Command { @@ -58,77 +52,67 @@ func newStartCmd() *cobra.Command { cmd.Flags().StringVar(&healthAddr, "health", defaultHealthAddr, "address to listen on for health checks") cmd.Flags().StringVar(&tlsCertPath, "tls-cert", defaultTLSCertPath, "path to TLS certificate") cmd.Flags().StringVar(&tlsKeyPath, "tls-key", defaultTLSKeyPath, "path to TLS private key") - cmd.Flags().StringVar(&tlsMinVersion, "tls-min-version", "", "minimum TLS version (VersionTLS12 or VersionTLS13)") - cmd.Flags().StringSliceVar(&tlsCipherSuites, "tls-cipher-suites", nil, "comma-separated list of cipher suites") + cmd.Flags().StringVar(&tlsMinVersionStr, "tls-min-version", "", "minimum TLS version") + cmd.Flags().StringSliceVar(&tlsCipherSuiteStrs, "tls-cipher-suites", nil, "comma-separated list of cipher suites") return cmd } -func run(_ *cobra.Command, _ []string) error { - log := klog.NewKlogr() - log.Info("starting lifecycle-server") +func parseTLSFlags(minVersionStr string, cipherSuiteStrs []string) (*tls.Config, error) { + cert, err := tls.LoadX509KeyPair(tlsCertPath, tlsKeyPath) + if err != nil { + return nil, fmt.Errorf("failed to load TLS certificate: %w", err) + } - // Parse TLS configuration - var 
tlsMinVersionID uint16 - var err error - if tlsMinVersion != "" { - tlsMinVersionID, err = cliflag.TLSVersion(tlsMinVersion) - if err != nil { - return fmt.Errorf("invalid tls-min-version: %w", err) - } + minVersion, err := crypto.TLSVersion(minVersionStr) + if err != nil { + return nil, fmt.Errorf("invalid TLS minimum version %q: %w", minVersionStr, err) } - var tlsCipherSuiteIDs []uint16 - if len(tlsCipherSuites) > 0 { - tlsCipherSuiteIDs, err = cliflag.TLSCipherSuites(tlsCipherSuites) + var ( + cipherSuites []uint16 + cipherSuiteErrs []error + ) + for _, tlsCipherSuiteStr := range cipherSuiteStrs { + tlsCipherSuite, err := crypto.CipherSuite(tlsCipherSuiteStr) if err != nil { - return fmt.Errorf("invalid tls-cipher-suites: %w", err) + cipherSuiteErrs = append(cipherSuiteErrs, err) + } else { + cipherSuites = append(cipherSuites, tlsCipherSuite) } } - - // Create Kubernetes client for authn/authz - config, err := rest.InClusterConfig() - if err != nil { - return fmt.Errorf("failed to get in-cluster config: %w", err) + if len(cipherSuiteErrs) != 0 { + return nil, fmt.Errorf("invalid TLS cipher suites: %w", errors.Join(cipherSuiteErrs...)) } - kubeClient, err := kubernetes.NewForConfig(config) + + return &tls.Config{ + Certificates: []tls.Certificate{cert}, + MinVersion: minVersion, + CipherSuites: cipherSuites, + }, nil +} + +func run(_ *cobra.Command, _ []string) error { + log := klog.NewKlogr() + log.Info("starting lifecycle-server") + + tlsConfig, err := parseTLSFlags(tlsMinVersionStr, tlsCipherSuiteStrs) if err != nil { - return fmt.Errorf("failed to create kubernetes client: %w", err) + return fmt.Errorf("failed to parse tls flags: %w", err) } - // Create delegating authenticator (uses TokenReview) - authnConfig := authenticatorfactory.DelegatingAuthenticatorConfig{ - Anonymous: nil, // disable anonymous auth - TokenAccessReviewClient: kubeClient.AuthenticationV1(), - TokenAccessReviewTimeout: 10 * time.Second, - CacheTTL: 2 * time.Minute, - WebhookRetryBackoff: &wait.Backoff{ - Duration: 500 * time.Millisecond, - Factor: 1.5, - Jitter: 0.2, - Steps: 5, - }, - } - authenticator, _, err := authnConfig.New() + // Create Kubernetes client for authn/authz + restCfg := ctrl.GetConfigOrDie() + httpClient, err := rest.HTTPClientFor(restCfg) if err != nil { - return fmt.Errorf("failed to create authenticator: %w", err) + log.Error(err, "failed to create http client") + return err } - // Create delegating authorizer (uses SubjectAccessReview) - authzConfig := authorizerfactory.DelegatingAuthorizerConfig{ - SubjectAccessReviewClient: kubeClient.AuthorizationV1(), - AllowCacheTTL: 5 * time.Minute, - DenyCacheTTL: 30 * time.Second, - WebhookRetryBackoff: &wait.Backoff{ - Duration: 500 * time.Millisecond, - Factor: 1.5, - Jitter: 0.2, - Steps: 5, - }, - } - authorizer, err := authzConfig.New() + authnzFilter, err := filters.WithAuthenticationAndAuthorization(restCfg, httpClient) if err != nil { - return fmt.Errorf("failed to create authorizer: %w", err) + log.Error(err, "failed to create authentication/authorization filter") + return err } // Load lifecycle data from FBC @@ -139,105 +123,96 @@ func run(_ *cobra.Command, _ []string) error { data = make(server.LifecycleIndex) } log.Info("loaded lifecycle data", - "blobCount", server.CountBlobs(data), - "versionCount", len(data), - "versions", server.ListVersions(data), + "packageCount", data.CountPackages(), + "blobCount", data.CountBlobs(), + "versions", data.ListVersions(), ) - // Create HTTP handler with authn/authz middleware + // Create the HTTP API handler with
authn/authz middleware baseHandler := server.NewHandler(data, log) - - // Wrap with authorization - authorizedHandler := filters.WithAuthorization(baseHandler, authorizer, nil) - - // Wrap with authentication - handler := filters.WithAuthentication( - authorizedHandler, - authenticator, - http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - http.Error(w, "Unauthorized", http.StatusUnauthorized) - }), - nil, - nil, - ) - - // Wrap with request info (required by authorization filter) - requestInfoResolver := &request.RequestInfoFactory{ - APIPrefixes: sets.NewString("api"), - GrouplessAPIPrefixes: sets.NewString("api"), + apiHandler, err := authnzFilter(log, baseHandler) + if err != nil { + log.Error(err, "failed to create api handler") + return err } - handler = filters.WithRequestInfo(handler, requestInfoResolver) - // Create health handler (no auth required) + // Create the health handler (no auth required) healthHandler := http.NewServeMux() healthHandler.HandleFunc("GET /healthz", func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) _, _ = w.Write([]byte("ok")) }) - // Load TLS certificate - cert, err := tls.LoadX509KeyPair(tlsCertPath, tlsKeyPath) - if err != nil { - return fmt.Errorf("failed to load TLS certificate: %w", err) - } - - // Create TLS config - tlsConfig := &tls.Config{ - Certificates: []tls.Certificate{cert}, - MinVersion: tlsMinVersionID, - CipherSuites: tlsCipherSuiteIDs, - } - // Create servers - apiServer := &http.Server{ - Addr: listenAddr, - Handler: handler, - TLSConfig: tlsConfig, + apiServer := cancelableServer{ + Server: &http.Server{ + Addr: listenAddr, + Handler: apiHandler, + TLSConfig: tlsConfig, + }, + ShutdownTimeout: shutdownTimeout, } - healthServer := &http.Server{ - Addr: healthAddr, - Handler: healthHandler, + healthServer := cancelableServer{ + Server: &http.Server{ + Addr: healthAddr, + Handler: healthHandler, + }, + ShutdownTimeout: shutdownTimeout, } - // Start servers - errCh := make(chan error, 2) - go func() { - log.Info("starting API server (HTTPS)", "addr", listenAddr) - // Cert paths are empty since TLSConfig already has certificates loaded - if err := apiServer.ListenAndServeTLS("", ""); err != nil && err != http.ErrServerClosed { - errCh <- fmt.Errorf("API server error: %w", err) + eg, ctx := errgroup.WithContext(ctrl.SetupSignalHandler()) + eg.Go(func() error { + if err := apiServer.ListenAndServeTLS(ctx, "", ""); err != nil { + return fmt.Errorf("api server error: %w", err) } - }() - go func() { - log.Info("starting health server", "addr", healthAddr) - if err := healthServer.ListenAndServe(); err != nil && err != http.ErrServerClosed { - errCh <- fmt.Errorf("health server error: %w", err) + return nil + }) + eg.Go(func() error { + if err := healthServer.ListenAndServe(ctx); err != nil { + return fmt.Errorf("health server error: %w", err) } - }() + return nil + }) + return eg.Wait() +} - // Wait for shutdown signal - sigCh := make(chan os.Signal, 1) - signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM) +type cancelableServer struct { + *http.Server + ShutdownTimeout time.Duration } - select { - case sig := <-sigCh: - log.Info("received shutdown signal", "signal", sig) - case err := <-errCh: - log.Error(err, "server error") - return err - } +func (s *cancelableServer) ListenAndServe(ctx context.Context) error { + return s.listenAndServe(ctx, + func() error { + return s.Server.ListenAndServe() + }, + s.Server.Shutdown, + ) +} +func (s *cancelableServer) ListenAndServeTLS(ctx context.Context, certFile,
keyFile string) error { + return s.listenAndServe(ctx, + func() error { + return s.Server.ListenAndServeTLS(certFile, keyFile) + }, + s.Server.Shutdown, + ) +} - // Graceful shutdown - ctx, cancel := context.WithTimeout(context.Background(), shutdownTimeout) - defer cancel() +func (s *cancelableServer) listenAndServe(ctx context.Context, runFunc func() error, cancelFunc func(context.Context) error) error { + errChan := make(chan error) + go func() { + errChan <- runFunc() + }() - log.Info("shutting down servers") - if err := apiServer.Shutdown(ctx); err != nil { - log.Error(err, "API server shutdown error") - } - if err := healthServer.Shutdown(ctx); err != nil { - log.Error(err, "health server shutdown error") + select { + case err := <-errChan: + return err + case <-ctx.Done(): + shutdownCtx, cancel := context.WithTimeout(context.Background(), s.ShutdownTimeout) + defer cancel() + if err := cancelFunc(shutdownCtx); err != nil { + return err + } + return nil } - - return nil } diff --git a/go.mod b/go.mod index fb1aa84845..4eb9661b4a 100644 --- a/go.mod +++ b/go.mod @@ -13,21 +13,21 @@ require ( github.com/mikefarah/yq/v3 v3.0.0-20201202084205-8846255d1c37 github.com/onsi/ginkgo/v2 v2.27.5 github.com/openshift/api v0.0.0-20251111193948-50e2ece149d7 + github.com/openshift/library-go v0.0.0-20260108135436-db8dbd64c462 github.com/operator-framework/api v0.38.0 github.com/operator-framework/operator-lifecycle-manager v0.0.0-00010101000000-000000000000 github.com/operator-framework/operator-registry v1.62.0 github.com/sirupsen/logrus v1.9.4 github.com/spf13/cobra v1.10.2 github.com/stretchr/testify v1.11.1 + golang.org/x/sync v0.19.0 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.6.0 google.golang.org/protobuf v1.36.11 gopkg.in/yaml.v2 v2.4.0 k8s.io/api v0.34.3 k8s.io/apimachinery v0.34.3 - k8s.io/apiserver v0.34.3 k8s.io/client-go v0.34.3 k8s.io/code-generator v0.34.3 - k8s.io/component-base v0.34.3 k8s.io/klog/v2 v2.130.1 k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 @@ -154,7 +154,6 @@ require ( github.com/opencontainers/image-spec v1.1.1 // indirect github.com/opencontainers/runtime-spec v1.2.1 // indirect github.com/openshift/client-go v0.0.0-20251015124057-db0dee36e235 // indirect - github.com/openshift/library-go v0.0.0-20260108135436-db8dbd64c462 // indirect github.com/otiai10/copy v1.14.1 // indirect github.com/otiai10/mint v1.6.3 // indirect github.com/pkg/errors v0.9.1 // indirect @@ -207,7 +206,6 @@ require ( golang.org/x/mod v0.32.0 // indirect golang.org/x/net v0.49.0 // indirect golang.org/x/oauth2 v0.34.0 // indirect - golang.org/x/sync v0.19.0 // indirect golang.org/x/sys v0.40.0 // indirect golang.org/x/term v0.39.0 // indirect golang.org/x/text v0.33.0 // indirect @@ -225,7 +223,9 @@ require ( gopkg.in/warnings.v0 v0.1.2 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/apiextensions-apiserver v0.34.3 // indirect + k8s.io/apiserver v0.34.3 // indirect k8s.io/cli-runtime v0.33.2 // indirect + k8s.io/component-base v0.34.3 // indirect k8s.io/gengo/v2 v2.0.0-20250604051438-85fd79dbfd9f // indirect k8s.io/klog v1.0.0 // indirect k8s.io/kms v0.34.3 // indirect diff --git a/pkg/lifecycle-controller/controller.go b/pkg/lifecycle-controller/controller.go index 5b6d8eaa5f..6ec9b5997a 100644 --- a/pkg/lifecycle-controller/controller.go +++ b/pkg/lifecycle-controller/controller.go @@ -23,6 +23,7 @@ import ( "strings" "github.com/go-logr/logr" + "github.com/openshift/library-go/pkg/crypto" appsv1 "k8s.io/api/apps/v1" corev1 
"k8s.io/api/core/v1" @@ -57,12 +58,6 @@ const ( resourceBaseName = "lifecycle-server" ) -// TLSConfigProvider provides access to dynamically updated TLS configuration. -type TLSConfigProvider interface { - GetMinVersion() string - GetCipherSuites() []string -} - // LifecycleControllerReconciler reconciles CatalogSources and manages lifecycle-server resources type LifecycleControllerReconciler struct { client.Client @@ -71,7 +66,7 @@ type LifecycleControllerReconciler struct { ServerImage string CatalogSourceLabelSelector labels.Selector CatalogSourceFieldSelector fields.Selector - TLSConfigProvider TLSConfigProvider + TLSConfigProvider *TLSConfigProvider } // matchesCatalogSource checks if a CatalogSource matches both label and field selectors @@ -622,12 +617,9 @@ func (r *LifecycleControllerReconciler) buildLifecycleServerArgs(fbcPath string) } if r.TLSConfigProvider != nil { - if minVersion := r.TLSConfigProvider.GetMinVersion(); minVersion != "" { - args = append(args, fmt.Sprintf("--tls-min-version=%s", minVersion)) - } - - if cipherSuites := r.TLSConfigProvider.GetCipherSuites(); len(cipherSuites) > 0 { - args = append(args, fmt.Sprintf("--tls-cipher-suites=%s", strings.Join(cipherSuites, ","))) + if cfg := r.TLSConfigProvider.Get(); cfg != nil { + args = append(args, fmt.Sprintf("--tls-min-version=%s", crypto.TLSVersionToNameOrDie(cfg.MinVersion))) + args = append(args, fmt.Sprintf("--tls-cipher-suites=%s", strings.Join(crypto.CipherSuitesToNamesOrDie(cfg.CipherSuites), ","))) } } diff --git a/pkg/lifecycle-controller/tls.go b/pkg/lifecycle-controller/tls.go new file mode 100644 index 0000000000..2a77748832 --- /dev/null +++ b/pkg/lifecycle-controller/tls.go @@ -0,0 +1,131 @@ +package controllers + +import ( + "context" + "crypto/tls" + "reflect" + "sync" + + "github.com/go-logr/logr" + configv1 "github.com/openshift/api/config/v1" + "github.com/openshift/library-go/pkg/crypto" + "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/apiserver" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" +) + +const ( + // Name of the cluster-scoped APIServer resource + clusterAPIServerName = "cluster" +) + +// TLSConfig holds the TLS configuration extracted from the APIServer resource +type TLSConfig struct { + minVersion uint16 + cipherSuites []uint16 + // String representations for passing to lifecycle-server + minVersionString string + cipherSuiteStrings []string +} + +// TLSConfigProvider provides thread-safe access to dynamically updated TLS configuration. +// It implements controllers.TLSConfigProvider interface. +type TLSConfigProvider struct { + mu sync.RWMutex + config *tls.Config +} + +// NewTLSConfigProvider creates a new TLSConfigProvider with the given initial config. +func NewTLSConfigProvider(initial *tls.Config) *TLSConfigProvider { + return &TLSConfigProvider{config: initial} +} + +// Get returns the current TLS configuration. +func (p *TLSConfigProvider) Get() *tls.Config { + p.mu.RLock() + defer p.mu.RUnlock() + return p.config +} + +// Update sets a new TLS configuration. +func (p *TLSConfigProvider) Update(cfg *tls.Config) { + p.mu.Lock() + defer p.mu.Unlock() + p.config = cfg +} + +// GetClusterTLSConfig reads the APIServer "cluster" resource and extracts TLS settings. 
+// Falls back to defaults if an error occurs looking up the apiserver config. +func GetClusterTLSConfig(ctx context.Context, cl client.Client, log logr.Logger) *tls.Config { + var ( + apiServer configv1.APIServer + minVersion uint16 + cipherSuites []uint16 + ) + if err := cl.Get(ctx, types.NamespacedName{Name: clusterAPIServerName}, &apiServer); err != nil { + log.Error(err, "failed to lookup APIServer; using default TLS security profile") + minVersion, cipherSuites = apiserver.GetSecurityProfileConfig(nil) + } else { + minVersion, cipherSuites = apiserver.GetSecurityProfileConfig(apiServer.Spec.TLSSecurityProfile) + } + + log.Info("loaded TLS configuration from APIServer", + "minVersion", crypto.TLSVersionToNameOrDie(minVersion), + "cipherSuites", crypto.CipherSuitesToNamesOrDie(cipherSuites), + ) + + return &tls.Config{ + MinVersion: minVersion, + CipherSuites: cipherSuites, + } +} + +// ClusterTLSProfileReconciler watches the APIServer "cluster" resource and updates TLS config dynamically +type ClusterTLSProfileReconciler struct { + Client client.Client + Log logr.Logger + TLSProvider *TLSConfigProvider + OnChange func(prev, cur *tls.Config) +} + +func (r *ClusterTLSProfileReconciler) Reconcile(ctx context.Context, _ reconcile.Request) (reconcile.Result, error) { + // Check if config changed + oldConfig := r.TLSProvider.Get() + newConfig := GetClusterTLSConfig(ctx, r.Client, r.Log) + if reflect.DeepEqual(oldConfig, newConfig) { + // No change + return reconcile.Result{}, nil + } + + r.Log.Info("TLS security profile changed, updating configuration and triggering reconciliation", + "oldMinVersion", crypto.TLSVersionToNameOrDie(oldConfig.MinVersion), + "newMinVersion", crypto.TLSVersionToNameOrDie(newConfig.MinVersion), + "oldCipherSuites", crypto.CipherSuitesToNamesOrDie(oldConfig.CipherSuites), + "newCipherSuites", crypto.CipherSuitesToNamesOrDie(newConfig.CipherSuites), + ) + + // Update the provider and call the OnChange callback + r.TLSProvider.Update(newConfig) + r.OnChange(oldConfig, newConfig) + + return reconcile.Result{}, nil +} + +func (r *ClusterTLSProfileReconciler) SetupWithManager(mgr manager.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + Named("tlsprofile-reconciler"). + WatchesRawSource(source.Kind(mgr.GetCache(), &configv1.APIServer{}, + handler.TypedEnqueueRequestsFromMapFunc(func(ctx context.Context, obj *configv1.APIServer) []reconcile.Request { + if obj.Name == clusterAPIServerName { + return []reconcile.Request{{NamespacedName: types.NamespacedName{Name: clusterAPIServerName}}} + } + return nil + }), + )). 
+ Complete(r) +} diff --git a/pkg/lifecycle-server/fbc.go b/pkg/lifecycle-server/fbc.go index c4548a0e0c..2e0bdf0d14 100644 --- a/pkg/lifecycle-server/fbc.go +++ b/pkg/lifecycle-server/fbc.go @@ -24,6 +24,7 @@ import ( "sync" "github.com/operator-framework/operator-registry/alpha/declcfg" + "k8s.io/apimachinery/pkg/util/sets" ) // versionPattern matches API versions like v1, v1alpha1, v2beta3 @@ -84,3 +85,31 @@ func LoadLifecycleData(fbcPath string) (LifecycleIndex, error) { return result, nil } + +// CountBlobs returns the total number of blobs in the index +func (index LifecycleIndex) CountBlobs() int { + count := 0 + for _, packages := range index { + count += len(packages) + } + return count +} + +func (index LifecycleIndex) CountPackages() int { + pkgs := sets.New[string]() + for _, packages := range index { + for pkg := range packages { + pkgs.Insert(pkg) + } + } + return pkgs.Len() +} + +// ListVersions returns the list of versions available in the index +func (index LifecycleIndex) ListVersions() []string { + versions := make([]string, 0, len(index)) + for v := range index { + versions = append(versions, v) + } + return versions +} diff --git a/pkg/lifecycle-server/server.go b/pkg/lifecycle-server/server.go index 7525cf2e8c..7571ccc017 100644 --- a/pkg/lifecycle-server/server.go +++ b/pkg/lifecycle-server/server.go @@ -64,21 +64,3 @@ func NewHandler(data LifecycleIndex, log logr.Logger) http.Handler { return mux } - -// CountBlobs returns the total number of blobs in the index -func CountBlobs(index LifecycleIndex) int { - count := 0 - for _, packages := range index { - count += len(packages) - } - return count -} - -// ListVersions returns the list of versions available in the index -func ListVersions(index LifecycleIndex) []string { - versions := make([]string, 0, len(index)) - for v := range index { - versions = append(versions, v) - } - return versions -} From 0c80e5dece80aca14052607fa03704b8ca19085e Mon Sep 17 00:00:00 2001 From: Joe Lanford Date: Sat, 31 Jan 2026 21:56:54 -0500 Subject: [PATCH 6/8] moving lifecycle-* manifests to generation script --- ...ntroller.deployment.ibm-cloud-managed.yaml | 93 +++++++ ...lm_08-lifecycle-controller.deployment.yaml | 2 +- ...08-lifecycle-controller.networkpolicy.yaml | 6 +- ...0_50_olm_08-lifecycle-controller.rbac.yaml | 12 +- .../0000_50_olm_09-lifecycle-server.rbac.yaml | 4 +- ...ntroller.deployment.ibm-cloud-managed.yaml | 93 +++++++ ...lm_08-lifecycle-controller.deployment.yaml | 92 +++++++ ...08-lifecycle-controller.networkpolicy.yaml | 35 +++ ...0_50_olm_08-lifecycle-controller.rbac.yaml | 91 +++++++ .../0000_50_olm_09-lifecycle-server.rbac.yaml | 18 ++ microshift-manifests/kustomization.yaml | 4 + scripts/generate_crds_manifests.sh | 227 ++++++++++++++++++ 12 files changed, 670 insertions(+), 7 deletions(-) create mode 100644 manifests/0000_50_olm_08-lifecycle-controller.deployment.ibm-cloud-managed.yaml create mode 100644 microshift-manifests/0000_50_olm_08-lifecycle-controller.deployment.ibm-cloud-managed.yaml create mode 100644 microshift-manifests/0000_50_olm_08-lifecycle-controller.deployment.yaml create mode 100644 microshift-manifests/0000_50_olm_08-lifecycle-controller.networkpolicy.yaml create mode 100644 microshift-manifests/0000_50_olm_08-lifecycle-controller.rbac.yaml create mode 100644 microshift-manifests/0000_50_olm_09-lifecycle-server.rbac.yaml diff --git a/manifests/0000_50_olm_08-lifecycle-controller.deployment.ibm-cloud-managed.yaml 
b/manifests/0000_50_olm_08-lifecycle-controller.deployment.ibm-cloud-managed.yaml new file mode 100644 index 0000000000..098eac94ae --- /dev/null +++ b/manifests/0000_50_olm_08-lifecycle-controller.deployment.ibm-cloud-managed.yaml @@ -0,0 +1,93 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: lifecycle-controller + namespace: openshift-operator-lifecycle-manager + labels: + app: olm-lifecycle-controller + annotations: + release.openshift.io/feature-set: "TechPreviewNoUpgrade" + include.release.openshift.io/ibm-cloud-managed: "true" + capability.openshift.io/name: "OperatorLifecycleManager" + include.release.openshift.io/hypershift: "true" +spec: + strategy: + type: Recreate + replicas: 1 + selector: + matchLabels: + app: olm-lifecycle-controller + template: + metadata: + annotations: + target.workload.openshift.io/management: '{"effect": "PreferredDuringScheduling"}' + openshift.io/required-scc: restricted-v2 + kubectl.kubernetes.io/default-container: lifecycle-controller + labels: + app: olm-lifecycle-controller + spec: + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + serviceAccountName: lifecycle-controller + priorityClassName: "system-cluster-critical" + containers: + - name: lifecycle-controller + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: + drop: ["ALL"] + command: + - /bin/lifecycle-controller + args: + - start + image: quay.io/operator-framework/olm@sha256:de396b540b82219812061d0d753440d5655250c621c753ed1dc67d6154741607 + imagePullPolicy: IfNotPresent + env: + - name: RELEASE_VERSION + value: "0.0.1-snapshot" + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LIFECYCLE_SERVER_IMAGE + value: quay.io/operator-framework/olm@sha256:de396b540b82219812061d0d753440d5655250c621c753ed1dc67d6154741607 + - name: GOMEMLIMIT + value: "5MiB" + resources: + requests: + cpu: 10m + memory: 10Mi + ports: + - containerPort: 8081 + name: health + livenessProbe: + httpGet: + path: /healthz + port: health + scheme: HTTP + initialDelaySeconds: 30 + readinessProbe: + httpGet: + path: /healthz + port: health + scheme: HTTP + initialDelaySeconds: 30 + terminationMessagePolicy: FallbackToLogsOnError + nodeSelector: + kubernetes.io/os: linux + node-role.kubernetes.io/control-plane: "" + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - effect: NoExecute + key: node.kubernetes.io/unreachable + operator: Exists + tolerationSeconds: 120 + - effect: NoExecute + key: node.kubernetes.io/not-ready + operator: Exists + tolerationSeconds: 120 diff --git a/manifests/0000_50_olm_08-lifecycle-controller.deployment.yaml b/manifests/0000_50_olm_08-lifecycle-controller.deployment.yaml index 5f256a2dd1..7bf49b23c0 100644 --- a/manifests/0000_50_olm_08-lifecycle-controller.deployment.yaml +++ b/manifests/0000_50_olm_08-lifecycle-controller.deployment.yaml @@ -6,8 +6,8 @@ metadata: labels: app: olm-lifecycle-controller annotations: - include.release.openshift.io/self-managed-high-availability: "true" release.openshift.io/feature-set: "TechPreviewNoUpgrade" + include.release.openshift.io/self-managed-high-availability: "true" capability.openshift.io/name: "OperatorLifecycleManager" spec: strategy: diff --git a/manifests/0000_50_olm_08-lifecycle-controller.networkpolicy.yaml b/manifests/0000_50_olm_08-lifecycle-controller.networkpolicy.yaml index 0891a2fbd9..c08803d707 100644 --- a/manifests/0000_50_olm_08-lifecycle-controller.networkpolicy.yaml 
+++ b/manifests/0000_50_olm_08-lifecycle-controller.networkpolicy.yaml @@ -4,9 +4,11 @@ metadata: name: lifecycle-controller namespace: openshift-operator-lifecycle-manager annotations: - include.release.openshift.io/self-managed-high-availability: "true" release.openshift.io/feature-set: "TechPreviewNoUpgrade" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" capability.openshift.io/name: "OperatorLifecycleManager" + include.release.openshift.io/hypershift: "true" spec: podSelector: matchLabels: @@ -30,4 +32,4 @@ spec: protocol: UDP policyTypes: - Ingress - - Egress \ No newline at end of file + - Egress diff --git a/manifests/0000_50_olm_08-lifecycle-controller.rbac.yaml b/manifests/0000_50_olm_08-lifecycle-controller.rbac.yaml index f2b68ee99c..24da8ffaf7 100644 --- a/manifests/0000_50_olm_08-lifecycle-controller.rbac.yaml +++ b/manifests/0000_50_olm_08-lifecycle-controller.rbac.yaml @@ -4,18 +4,22 @@ metadata: name: lifecycle-controller namespace: openshift-operator-lifecycle-manager annotations: - include.release.openshift.io/self-managed-high-availability: "true" release.openshift.io/feature-set: "TechPreviewNoUpgrade" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" capability.openshift.io/name: "OperatorLifecycleManager" + include.release.openshift.io/hypershift: "true" --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: operator-lifecycle-manager-lifecycle-controller annotations: - include.release.openshift.io/self-managed-high-availability: "true" release.openshift.io/feature-set: "TechPreviewNoUpgrade" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" capability.openshift.io/name: "OperatorLifecycleManager" + include.release.openshift.io/hypershift: "true" rules: # Read APIServer for TLS security profile configuration - apiGroups: ["config.openshift.io"] @@ -72,9 +76,11 @@ kind: ClusterRoleBinding metadata: name: operator-lifecycle-manager-lifecycle-controller annotations: - include.release.openshift.io/self-managed-high-availability: "true" release.openshift.io/feature-set: "TechPreviewNoUpgrade" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" capability.openshift.io/name: "OperatorLifecycleManager" + include.release.openshift.io/hypershift: "true" roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole diff --git a/manifests/0000_50_olm_09-lifecycle-server.rbac.yaml b/manifests/0000_50_olm_09-lifecycle-server.rbac.yaml index 5417ba31c6..d848837106 100644 --- a/manifests/0000_50_olm_09-lifecycle-server.rbac.yaml +++ b/manifests/0000_50_olm_09-lifecycle-server.rbac.yaml @@ -3,9 +3,11 @@ kind: ClusterRole metadata: name: operator-lifecycle-manager-lifecycle-server annotations: - include.release.openshift.io/self-managed-high-availability: "true" release.openshift.io/feature-set: "TechPreviewNoUpgrade" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" capability.openshift.io/name: "OperatorLifecycleManager" + include.release.openshift.io/hypershift: "true" rules: # Required by kube-rbac-proxy for authn/authz - apiGroups: ["authentication.k8s.io"] diff --git a/microshift-manifests/0000_50_olm_08-lifecycle-controller.deployment.ibm-cloud-managed.yaml 
b/microshift-manifests/0000_50_olm_08-lifecycle-controller.deployment.ibm-cloud-managed.yaml new file mode 100644 index 0000000000..098eac94ae --- /dev/null +++ b/microshift-manifests/0000_50_olm_08-lifecycle-controller.deployment.ibm-cloud-managed.yaml @@ -0,0 +1,93 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: lifecycle-controller + namespace: openshift-operator-lifecycle-manager + labels: + app: olm-lifecycle-controller + annotations: + release.openshift.io/feature-set: "TechPreviewNoUpgrade" + include.release.openshift.io/ibm-cloud-managed: "true" + capability.openshift.io/name: "OperatorLifecycleManager" + include.release.openshift.io/hypershift: "true" +spec: + strategy: + type: Recreate + replicas: 1 + selector: + matchLabels: + app: olm-lifecycle-controller + template: + metadata: + annotations: + target.workload.openshift.io/management: '{"effect": "PreferredDuringScheduling"}' + openshift.io/required-scc: restricted-v2 + kubectl.kubernetes.io/default-container: lifecycle-controller + labels: + app: olm-lifecycle-controller + spec: + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + serviceAccountName: lifecycle-controller + priorityClassName: "system-cluster-critical" + containers: + - name: lifecycle-controller + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: + drop: ["ALL"] + command: + - /bin/lifecycle-controller + args: + - start + image: quay.io/operator-framework/olm@sha256:de396b540b82219812061d0d753440d5655250c621c753ed1dc67d6154741607 + imagePullPolicy: IfNotPresent + env: + - name: RELEASE_VERSION + value: "0.0.1-snapshot" + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LIFECYCLE_SERVER_IMAGE + value: quay.io/operator-framework/olm@sha256:de396b540b82219812061d0d753440d5655250c621c753ed1dc67d6154741607 + - name: GOMEMLIMIT + value: "5MiB" + resources: + requests: + cpu: 10m + memory: 10Mi + ports: + - containerPort: 8081 + name: health + livenessProbe: + httpGet: + path: /healthz + port: health + scheme: HTTP + initialDelaySeconds: 30 + readinessProbe: + httpGet: + path: /healthz + port: health + scheme: HTTP + initialDelaySeconds: 30 + terminationMessagePolicy: FallbackToLogsOnError + nodeSelector: + kubernetes.io/os: linux + node-role.kubernetes.io/control-plane: "" + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - effect: NoExecute + key: node.kubernetes.io/unreachable + operator: Exists + tolerationSeconds: 120 + - effect: NoExecute + key: node.kubernetes.io/not-ready + operator: Exists + tolerationSeconds: 120 diff --git a/microshift-manifests/0000_50_olm_08-lifecycle-controller.deployment.yaml b/microshift-manifests/0000_50_olm_08-lifecycle-controller.deployment.yaml new file mode 100644 index 0000000000..7bf49b23c0 --- /dev/null +++ b/microshift-manifests/0000_50_olm_08-lifecycle-controller.deployment.yaml @@ -0,0 +1,92 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: lifecycle-controller + namespace: openshift-operator-lifecycle-manager + labels: + app: olm-lifecycle-controller + annotations: + release.openshift.io/feature-set: "TechPreviewNoUpgrade" + include.release.openshift.io/self-managed-high-availability: "true" + capability.openshift.io/name: "OperatorLifecycleManager" +spec: + strategy: + type: Recreate + replicas: 1 + selector: + matchLabels: + app: olm-lifecycle-controller + template: + metadata: + annotations: + target.workload.openshift.io/management: 
'{"effect": "PreferredDuringScheduling"}' + openshift.io/required-scc: restricted-v2 + kubectl.kubernetes.io/default-container: lifecycle-controller + labels: + app: olm-lifecycle-controller + spec: + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + serviceAccountName: lifecycle-controller + priorityClassName: "system-cluster-critical" + containers: + - name: lifecycle-controller + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: + drop: ["ALL"] + command: + - /bin/lifecycle-controller + args: + - start + image: quay.io/operator-framework/olm@sha256:de396b540b82219812061d0d753440d5655250c621c753ed1dc67d6154741607 + imagePullPolicy: IfNotPresent + env: + - name: RELEASE_VERSION + value: "0.0.1-snapshot" + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LIFECYCLE_SERVER_IMAGE + value: quay.io/operator-framework/olm@sha256:de396b540b82219812061d0d753440d5655250c621c753ed1dc67d6154741607 + - name: GOMEMLIMIT + value: "5MiB" + resources: + requests: + cpu: 10m + memory: 10Mi + ports: + - containerPort: 8081 + name: health + livenessProbe: + httpGet: + path: /healthz + port: health + scheme: HTTP + initialDelaySeconds: 30 + readinessProbe: + httpGet: + path: /healthz + port: health + scheme: HTTP + initialDelaySeconds: 30 + terminationMessagePolicy: FallbackToLogsOnError + nodeSelector: + kubernetes.io/os: linux + node-role.kubernetes.io/control-plane: "" + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - effect: NoExecute + key: node.kubernetes.io/unreachable + operator: Exists + tolerationSeconds: 120 + - effect: NoExecute + key: node.kubernetes.io/not-ready + operator: Exists + tolerationSeconds: 120 diff --git a/microshift-manifests/0000_50_olm_08-lifecycle-controller.networkpolicy.yaml b/microshift-manifests/0000_50_olm_08-lifecycle-controller.networkpolicy.yaml new file mode 100644 index 0000000000..c08803d707 --- /dev/null +++ b/microshift-manifests/0000_50_olm_08-lifecycle-controller.networkpolicy.yaml @@ -0,0 +1,35 @@ +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: lifecycle-controller + namespace: openshift-operator-lifecycle-manager + annotations: + release.openshift.io/feature-set: "TechPreviewNoUpgrade" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + capability.openshift.io/name: "OperatorLifecycleManager" + include.release.openshift.io/hypershift: "true" +spec: + podSelector: + matchLabels: + app: olm-lifecycle-controller + ingress: + - ports: + - port: 8443 + protocol: TCP + egress: + - ports: + - port: 6443 + protocol: TCP + - ports: + - port: 53 + protocol: TCP + - port: 53 + protocol: UDP + - port: 5353 + protocol: TCP + - port: 5353 + protocol: UDP + policyTypes: + - Ingress + - Egress diff --git a/microshift-manifests/0000_50_olm_08-lifecycle-controller.rbac.yaml b/microshift-manifests/0000_50_olm_08-lifecycle-controller.rbac.yaml new file mode 100644 index 0000000000..24da8ffaf7 --- /dev/null +++ b/microshift-manifests/0000_50_olm_08-lifecycle-controller.rbac.yaml @@ -0,0 +1,91 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: lifecycle-controller + namespace: openshift-operator-lifecycle-manager + annotations: + release.openshift.io/feature-set: "TechPreviewNoUpgrade" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + 
capability.openshift.io/name: "OperatorLifecycleManager" + include.release.openshift.io/hypershift: "true" +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: operator-lifecycle-manager-lifecycle-controller + annotations: + release.openshift.io/feature-set: "TechPreviewNoUpgrade" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + capability.openshift.io/name: "OperatorLifecycleManager" + include.release.openshift.io/hypershift: "true" +rules: + # Read APIServer for TLS security profile configuration + - apiGroups: ["config.openshift.io"] + resources: ["apiservers"] + verbs: ["get", "list", "watch"] + # Watch CatalogSources cluster-wide + - apiGroups: ["operators.coreos.com"] + resources: ["catalogsources"] + verbs: ["get", "list", "watch"] + # Watch catalog pods cluster-wide + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list", "watch"] + # Manage lifecycle-server deployments + - apiGroups: ["apps"] + resources: ["deployments"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + # Manage lifecycle-server services + - apiGroups: [""] + resources: ["services"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + # Manage lifecycle-server serviceaccounts + - apiGroups: [""] + resources: ["serviceaccounts"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + # Manage lifecycle-server networkpolicies + - apiGroups: ["networking.k8s.io"] + resources: ["networkpolicies"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + # Manage lifecycle-server clusterrolebindings + - apiGroups: ["rbac.authorization.k8s.io"] + resources: ["clusterrolebindings"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + # Required to grant these permissions to lifecycle-server via CRB + - apiGroups: ["authentication.k8s.io"] + resources: ["tokenreviews"] + verbs: ["create"] + - apiGroups: ["authorization.k8s.io"] + resources: ["subjectaccessreviews"] + verbs: ["create"] + # Leader election + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "list", "watch", "create", "update", "delete"] + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "list", "watch", "create", "update", "delete"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: operator-lifecycle-manager-lifecycle-controller + annotations: + release.openshift.io/feature-set: "TechPreviewNoUpgrade" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + capability.openshift.io/name: "OperatorLifecycleManager" + include.release.openshift.io/hypershift: "true" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: operator-lifecycle-manager-lifecycle-controller +subjects: + - kind: ServiceAccount + name: lifecycle-controller + namespace: openshift-operator-lifecycle-manager diff --git a/microshift-manifests/0000_50_olm_09-lifecycle-server.rbac.yaml b/microshift-manifests/0000_50_olm_09-lifecycle-server.rbac.yaml new file mode 100644 index 0000000000..d848837106 --- /dev/null +++ b/microshift-manifests/0000_50_olm_09-lifecycle-server.rbac.yaml @@ -0,0 +1,18 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: operator-lifecycle-manager-lifecycle-server + 
annotations: + release.openshift.io/feature-set: "TechPreviewNoUpgrade" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + capability.openshift.io/name: "OperatorLifecycleManager" + include.release.openshift.io/hypershift: "true" +rules: + # Required by kube-rbac-proxy for authn/authz + - apiGroups: ["authentication.k8s.io"] + resources: ["tokenreviews"] + verbs: ["create"] + - apiGroups: ["authorization.k8s.io"] + resources: ["subjectaccessreviews"] + verbs: ["create"] diff --git a/microshift-manifests/kustomization.yaml b/microshift-manifests/kustomization.yaml index 206174302a..1fa6690acd 100644 --- a/microshift-manifests/kustomization.yaml +++ b/microshift-manifests/kustomization.yaml @@ -20,6 +20,10 @@ resources: - 0000_50_olm_03-services.yaml - 0000_50_olm_07-olm-operator.deployment.yaml - 0000_50_olm_08-catalog-operator.deployment.yaml + - 0000_50_olm_08-lifecycle-controller.deployment.yaml + - 0000_50_olm_08-lifecycle-controller.networkpolicy.yaml + - 0000_50_olm_08-lifecycle-controller.rbac.yaml - 0000_50_olm_09-aggregated.clusterrole.yaml + - 0000_50_olm_09-lifecycle-server.rbac.yaml - 0000_50_olm_13-operatorgroup-default.yaml - 0000_50_olm_15-csv-viewer.rbac.yaml diff --git a/scripts/generate_crds_manifests.sh b/scripts/generate_crds_manifests.sh index 743e520639..68402e5180 100755 --- a/scripts/generate_crds_manifests.sh +++ b/scripts/generate_crds_manifests.sh @@ -549,6 +549,233 @@ subjects: name: system:authenticated EOF +cat << EOF > manifests/0000_50_olm_08-lifecycle-controller.deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: lifecycle-controller + namespace: openshift-operator-lifecycle-manager + labels: + app: olm-lifecycle-controller + annotations: + release.openshift.io/feature-set: "TechPreviewNoUpgrade" +spec: + strategy: + type: Recreate + replicas: 1 + selector: + matchLabels: + app: olm-lifecycle-controller + template: + metadata: + annotations: + target.workload.openshift.io/management: '{"effect": "PreferredDuringScheduling"}' + openshift.io/required-scc: restricted-v2 + kubectl.kubernetes.io/default-container: lifecycle-controller + labels: + app: olm-lifecycle-controller + spec: + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + serviceAccountName: lifecycle-controller + priorityClassName: "system-cluster-critical" + containers: + - name: lifecycle-controller + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: + drop: ["ALL"] + command: + - /bin/lifecycle-controller + args: + - start + image: quay.io/operator-framework/olm@sha256:de396b540b82219812061d0d753440d5655250c621c753ed1dc67d6154741607 + imagePullPolicy: IfNotPresent + env: + - name: RELEASE_VERSION + value: "0.0.1-snapshot" + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LIFECYCLE_SERVER_IMAGE + value: quay.io/operator-framework/olm@sha256:de396b540b82219812061d0d753440d5655250c621c753ed1dc67d6154741607 + - name: GOMEMLIMIT + value: "5MiB" + resources: + requests: + cpu: 10m + memory: 10Mi + ports: + - containerPort: 8081 + name: health + livenessProbe: + httpGet: + path: /healthz + port: health + scheme: HTTP + initialDelaySeconds: 30 + readinessProbe: + httpGet: + path: /healthz + port: health + scheme: HTTP + initialDelaySeconds: 30 + terminationMessagePolicy: FallbackToLogsOnError + nodeSelector: + kubernetes.io/os: linux + node-role.kubernetes.io/control-plane: "" + tolerations: 
+ - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - effect: NoExecute + key: node.kubernetes.io/unreachable + operator: Exists + tolerationSeconds: 120 + - effect: NoExecute + key: node.kubernetes.io/not-ready + operator: Exists + tolerationSeconds: 120 +EOF + +cat << EOF > manifests/0000_50_olm_08-lifecycle-controller.networkpolicy.yaml +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: lifecycle-controller + namespace: openshift-operator-lifecycle-manager + annotations: + release.openshift.io/feature-set: "TechPreviewNoUpgrade" +spec: + podSelector: + matchLabels: + app: olm-lifecycle-controller + ingress: + - ports: + - port: 8443 + protocol: TCP + egress: + - ports: + - port: 6443 + protocol: TCP + - ports: + - port: 53 + protocol: TCP + - port: 53 + protocol: UDP + - port: 5353 + protocol: TCP + - port: 5353 + protocol: UDP + policyTypes: + - Ingress + - Egress +EOF + +cat << EOF > manifests/0000_50_olm_08-lifecycle-controller.rbac.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: lifecycle-controller + namespace: openshift-operator-lifecycle-manager + annotations: + release.openshift.io/feature-set: "TechPreviewNoUpgrade" +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: operator-lifecycle-manager-lifecycle-controller + annotations: + release.openshift.io/feature-set: "TechPreviewNoUpgrade" +rules: + # Read APIServer for TLS security profile configuration + - apiGroups: ["config.openshift.io"] + resources: ["apiservers"] + verbs: ["get", "list", "watch"] + # Watch CatalogSources cluster-wide + - apiGroups: ["operators.coreos.com"] + resources: ["catalogsources"] + verbs: ["get", "list", "watch"] + # Watch catalog pods cluster-wide + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list", "watch"] + # Manage lifecycle-server deployments + - apiGroups: ["apps"] + resources: ["deployments"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + # Manage lifecycle-server services + - apiGroups: [""] + resources: ["services"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + # Manage lifecycle-server serviceaccounts + - apiGroups: [""] + resources: ["serviceaccounts"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + # Manage lifecycle-server networkpolicies + - apiGroups: ["networking.k8s.io"] + resources: ["networkpolicies"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + # Manage lifecycle-server clusterrolebindings + - apiGroups: ["rbac.authorization.k8s.io"] + resources: ["clusterrolebindings"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + # Required to grant these permissions to lifecycle-server via CRB + - apiGroups: ["authentication.k8s.io"] + resources: ["tokenreviews"] + verbs: ["create"] + - apiGroups: ["authorization.k8s.io"] + resources: ["subjectaccessreviews"] + verbs: ["create"] + # Leader election + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "list", "watch", "create", "update", "delete"] + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "list", "watch", "create", "update", "delete"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: operator-lifecycle-manager-lifecycle-controller + annotations: + release.openshift.io/feature-set: "TechPreviewNoUpgrade" +roleRef: + apiGroup: 
rbac.authorization.k8s.io + kind: ClusterRole + name: operator-lifecycle-manager-lifecycle-controller +subjects: + - kind: ServiceAccount + name: lifecycle-controller + namespace: openshift-operator-lifecycle-manager +EOF + +cat << EOF > manifests/0000_50_olm_09-lifecycle-server.rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: operator-lifecycle-manager-lifecycle-server + annotations: + release.openshift.io/feature-set: "TechPreviewNoUpgrade" +rules: + # Required by kube-rbac-proxy for authn/authz + - apiGroups: ["authentication.k8s.io"] + resources: ["tokenreviews"] + verbs: ["create"] + - apiGroups: ["authorization.k8s.io"] + resources: ["subjectaccessreviews"] + verbs: ["create"] +EOF + + add_ibm_managed_cloud_annotations "${ROOT_DIR}/manifests" hypershift_manifests_dir="${ROOT_DIR}/manifests" From 679322eef42e4384b4d547a6e3e4d5f5a5022d14 Mon Sep 17 00:00:00 2001 From: Joe Lanford Date: Sat, 31 Jan 2026 22:48:42 -0500 Subject: [PATCH 7/8] simplify downstream CRD manifest handling --- manifests/0000_50_olm_00-catalogsources.crd.yaml | 6 ++---- .../0000_50_olm_00-clusterserviceversions.crd.yaml | 3 ++- manifests/0000_50_olm_00-installplans.crd.yaml | 2 +- manifests/0000_50_olm_00-olmconfigs.crd.yaml | 2 +- .../0000_50_olm_00-operatorconditions.crd.yaml | 2 +- manifests/0000_50_olm_00-operatorgroups.crd.yaml | 2 +- manifests/0000_50_olm_00-operators.crd.yaml | 2 +- manifests/0000_50_olm_00-subscriptions.crd.yaml | 2 +- .../0000_50_olm_00-catalogsources.crd.yaml | 6 ++---- .../0000_50_olm_00-clusterserviceversions.crd.yaml | 3 ++- .../0000_50_olm_00-installplans.crd.yaml | 2 +- .../0000_50_olm_00-olmconfigs.crd.yaml | 2 +- .../0000_50_olm_00-operatorconditions.crd.yaml | 2 +- .../0000_50_olm_00-operatorgroups.crd.yaml | 2 +- .../0000_50_olm_00-operators.crd.yaml | 2 +- .../0000_50_olm_00-subscriptions.crd.yaml | 2 +- scripts/generate_crds_manifests.sh | 14 +++----------- 17 files changed, 23 insertions(+), 33 deletions(-) diff --git a/manifests/0000_50_olm_00-catalogsources.crd.yaml b/manifests/0000_50_olm_00-catalogsources.crd.yaml index e0bde39811..5882e5b768 100644 --- a/manifests/0000_50_olm_00-catalogsources.crd.yaml +++ b/manifests/0000_50_olm_00-catalogsources.crd.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.19.0 + controller-gen.kubebuilder.io/version: v0.20.0 include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" capability.openshift.io/name: "OperatorLifecycleManager" @@ -1094,9 +1094,7 @@ spec: publisher: type: string runAsRoot: - description: |- - RunAsRoot allows admins to indicate that they wish to run the CatalogSource pod in a privileged - pod as root. This should only be enabled when running older catalog images which could not be run as non-root. + description: RunAsRoot allows admins to indicate that they wish to run the CatalogSource pod in a privileged pod as root. This should only be enabled when running older catalog images which could not be run as non-root. 
type: boolean secrets: description: |- diff --git a/manifests/0000_50_olm_00-clusterserviceversions.crd.yaml b/manifests/0000_50_olm_00-clusterserviceversions.crd.yaml index b2ed5a0f71..f737a45d13 100644 --- a/manifests/0000_50_olm_00-clusterserviceversions.crd.yaml +++ b/manifests/0000_50_olm_00-clusterserviceversions.crd.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.19.0 + controller-gen.kubebuilder.io/version: v0.20.0 include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" capability.openshift.io/name: "OperatorLifecycleManager" @@ -4710,6 +4710,7 @@ spec: ip: description: IP address of the host file entry. type: string + default: "" x-kubernetes-list-map-keys: - ip x-kubernetes-list-type: map diff --git a/manifests/0000_50_olm_00-installplans.crd.yaml b/manifests/0000_50_olm_00-installplans.crd.yaml index 08f6701336..6f3936c69e 100644 --- a/manifests/0000_50_olm_00-installplans.crd.yaml +++ b/manifests/0000_50_olm_00-installplans.crd.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.19.0 + controller-gen.kubebuilder.io/version: v0.20.0 include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" capability.openshift.io/name: "OperatorLifecycleManager" diff --git a/manifests/0000_50_olm_00-olmconfigs.crd.yaml b/manifests/0000_50_olm_00-olmconfigs.crd.yaml index ec2291246b..90af18a959 100644 --- a/manifests/0000_50_olm_00-olmconfigs.crd.yaml +++ b/manifests/0000_50_olm_00-olmconfigs.crd.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.19.0 + controller-gen.kubebuilder.io/version: v0.20.0 include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" capability.openshift.io/name: "OperatorLifecycleManager" diff --git a/manifests/0000_50_olm_00-operatorconditions.crd.yaml b/manifests/0000_50_olm_00-operatorconditions.crd.yaml index 2f5a208669..797a47b740 100644 --- a/manifests/0000_50_olm_00-operatorconditions.crd.yaml +++ b/manifests/0000_50_olm_00-operatorconditions.crd.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.19.0 + controller-gen.kubebuilder.io/version: v0.20.0 include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" capability.openshift.io/name: "OperatorLifecycleManager" diff --git a/manifests/0000_50_olm_00-operatorgroups.crd.yaml b/manifests/0000_50_olm_00-operatorgroups.crd.yaml index acf2160ddc..1409d8aa27 100644 --- a/manifests/0000_50_olm_00-operatorgroups.crd.yaml +++ b/manifests/0000_50_olm_00-operatorgroups.crd.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.19.0 + controller-gen.kubebuilder.io/version: v0.20.0 include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" capability.openshift.io/name: "OperatorLifecycleManager" diff --git a/manifests/0000_50_olm_00-operators.crd.yaml b/manifests/0000_50_olm_00-operators.crd.yaml index 
c571a3264a..f1c29977d1 100644 --- a/manifests/0000_50_olm_00-operators.crd.yaml +++ b/manifests/0000_50_olm_00-operators.crd.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.19.0 + controller-gen.kubebuilder.io/version: v0.20.0 include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" capability.openshift.io/name: "OperatorLifecycleManager" diff --git a/manifests/0000_50_olm_00-subscriptions.crd.yaml b/manifests/0000_50_olm_00-subscriptions.crd.yaml index 87ce80f720..bb718c89a8 100644 --- a/manifests/0000_50_olm_00-subscriptions.crd.yaml +++ b/manifests/0000_50_olm_00-subscriptions.crd.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.19.0 + controller-gen.kubebuilder.io/version: v0.20.0 include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" capability.openshift.io/name: "OperatorLifecycleManager" diff --git a/microshift-manifests/0000_50_olm_00-catalogsources.crd.yaml b/microshift-manifests/0000_50_olm_00-catalogsources.crd.yaml index e0bde39811..5882e5b768 100644 --- a/microshift-manifests/0000_50_olm_00-catalogsources.crd.yaml +++ b/microshift-manifests/0000_50_olm_00-catalogsources.crd.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.19.0 + controller-gen.kubebuilder.io/version: v0.20.0 include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" capability.openshift.io/name: "OperatorLifecycleManager" @@ -1094,9 +1094,7 @@ spec: publisher: type: string runAsRoot: - description: |- - RunAsRoot allows admins to indicate that they wish to run the CatalogSource pod in a privileged - pod as root. This should only be enabled when running older catalog images which could not be run as non-root. + description: RunAsRoot allows admins to indicate that they wish to run the CatalogSource pod in a privileged pod as root. This should only be enabled when running older catalog images which could not be run as non-root. type: boolean secrets: description: |- diff --git a/microshift-manifests/0000_50_olm_00-clusterserviceversions.crd.yaml b/microshift-manifests/0000_50_olm_00-clusterserviceversions.crd.yaml index b2ed5a0f71..f737a45d13 100644 --- a/microshift-manifests/0000_50_olm_00-clusterserviceversions.crd.yaml +++ b/microshift-manifests/0000_50_olm_00-clusterserviceversions.crd.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.19.0 + controller-gen.kubebuilder.io/version: v0.20.0 include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" capability.openshift.io/name: "OperatorLifecycleManager" @@ -4710,6 +4710,7 @@ spec: ip: description: IP address of the host file entry. 
type: string + default: "" x-kubernetes-list-map-keys: - ip x-kubernetes-list-type: map diff --git a/microshift-manifests/0000_50_olm_00-installplans.crd.yaml b/microshift-manifests/0000_50_olm_00-installplans.crd.yaml index 08f6701336..6f3936c69e 100644 --- a/microshift-manifests/0000_50_olm_00-installplans.crd.yaml +++ b/microshift-manifests/0000_50_olm_00-installplans.crd.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.19.0 + controller-gen.kubebuilder.io/version: v0.20.0 include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" capability.openshift.io/name: "OperatorLifecycleManager" diff --git a/microshift-manifests/0000_50_olm_00-olmconfigs.crd.yaml b/microshift-manifests/0000_50_olm_00-olmconfigs.crd.yaml index ec2291246b..90af18a959 100644 --- a/microshift-manifests/0000_50_olm_00-olmconfigs.crd.yaml +++ b/microshift-manifests/0000_50_olm_00-olmconfigs.crd.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.19.0 + controller-gen.kubebuilder.io/version: v0.20.0 include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" capability.openshift.io/name: "OperatorLifecycleManager" diff --git a/microshift-manifests/0000_50_olm_00-operatorconditions.crd.yaml b/microshift-manifests/0000_50_olm_00-operatorconditions.crd.yaml index 2f5a208669..797a47b740 100644 --- a/microshift-manifests/0000_50_olm_00-operatorconditions.crd.yaml +++ b/microshift-manifests/0000_50_olm_00-operatorconditions.crd.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.19.0 + controller-gen.kubebuilder.io/version: v0.20.0 include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" capability.openshift.io/name: "OperatorLifecycleManager" diff --git a/microshift-manifests/0000_50_olm_00-operatorgroups.crd.yaml b/microshift-manifests/0000_50_olm_00-operatorgroups.crd.yaml index acf2160ddc..1409d8aa27 100644 --- a/microshift-manifests/0000_50_olm_00-operatorgroups.crd.yaml +++ b/microshift-manifests/0000_50_olm_00-operatorgroups.crd.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.19.0 + controller-gen.kubebuilder.io/version: v0.20.0 include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" capability.openshift.io/name: "OperatorLifecycleManager" diff --git a/microshift-manifests/0000_50_olm_00-operators.crd.yaml b/microshift-manifests/0000_50_olm_00-operators.crd.yaml index c571a3264a..f1c29977d1 100644 --- a/microshift-manifests/0000_50_olm_00-operators.crd.yaml +++ b/microshift-manifests/0000_50_olm_00-operators.crd.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.19.0 + controller-gen.kubebuilder.io/version: v0.20.0 include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" capability.openshift.io/name: "OperatorLifecycleManager" diff --git 
a/microshift-manifests/0000_50_olm_00-subscriptions.crd.yaml b/microshift-manifests/0000_50_olm_00-subscriptions.crd.yaml
index 87ce80f720..bb718c89a8 100644
--- a/microshift-manifests/0000_50_olm_00-subscriptions.crd.yaml
+++ b/microshift-manifests/0000_50_olm_00-subscriptions.crd.yaml
@@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    controller-gen.kubebuilder.io/version: v0.19.0
+    controller-gen.kubebuilder.io/version: v0.20.0
     include.release.openshift.io/ibm-cloud-managed: "true"
     include.release.openshift.io/self-managed-high-availability: "true"
     capability.openshift.io/name: "OperatorLifecycleManager"
diff --git a/scripts/generate_crds_manifests.sh b/scripts/generate_crds_manifests.sh
index 68402e5180..5e1c25ed4f 100755
--- a/scripts/generate_crds_manifests.sh
+++ b/scripts/generate_crds_manifests.sh
@@ -18,13 +18,11 @@ export GOFLAGS="-mod=vendor"
 source .bingo/variables.env
 
 YQ="go run ./vendor/github.com/mikefarah/yq/v3/"
-CONTROLLER_GEN="go run ./vendor/sigs.k8s.io/controller-tools/cmd/controller-gen"
 
 ver=${OLM_VERSION:-"0.0.0-dev"}
 tmpdir="$(mktemp -p . -d 2>/dev/null || mktemp -d ./tmpdir.XXXXXXX)"
 chartdir="${tmpdir}/chart"
 crddir="${chartdir}/crds"
-crdsrcdir="${tmpdir}/operators"
 
 SED="sed"
 if ! command -v ${SED} &> /dev/null; then
@@ -44,21 +42,15 @@ fi
 
 cp -R "${ROOT_DIR}/staging/operator-lifecycle-manager/deploy/chart/" "${chartdir}"
 cp "${ROOT_DIR}"/values*.yaml "${tmpdir}"
-cp -R "${ROOT_DIR}/staging/api/pkg/operators/" ${crdsrcdir}
 rm -rf ./manifests/* ${crddir}/*
 trap "rm -rf ${tmpdir}" EXIT
 
-${CONTROLLER_GEN} crd:crdVersions=v1 output:crd:dir=${crddir} paths=${crdsrcdir}/...
-${CONTROLLER_GEN} schemapatch:manifests=${crddir} output:dir=${crddir} paths=${crdsrcdir}/...
-
-${YQ} w --inplace ${crddir}/operators.coreos.com_clusterserviceversions.yaml spec.versions[0].schema.openAPIV3Schema.properties.spec.properties.install.properties.spec.properties.deployments.items.properties.spec.properties.template.properties.spec.properties.containers.items.properties.ports.items.properties.protocol.default TCP
-${YQ} w --inplace ${crddir}/operators.coreos.com_clusterserviceversions.yaml spec.versions[0].schema.openAPIV3Schema.properties.spec.properties.install.properties.spec.properties.deployments.items.properties.spec.properties.template.properties.spec.properties.initContainers.items.properties.ports.items.properties.protocol.default TCP
-${YQ} w --inplace ${crddir}/operators.coreos.com_clusterserviceversions.yaml spec.versions[0].schema.openAPIV3Schema.properties.spec.properties.install.properties.spec.properties.deployments.items.properties.spec.properties.template.properties.metadata.x-kubernetes-preserve-unknown-fields true
-${YQ} d --inplace ${crddir}/operators.coreos.com_operatorconditions.yaml 'spec.versions[*].schema.openAPIV3Schema.properties.spec.properties.overrides.items.required(.==lastTransitionTime)'
+# Copy upstream CRDs directly instead of regenerating with controller-gen
+cp "${ROOT_DIR}"/staging/api/crds/*.yaml "${crddir}/"
 
+# Rename CRD files to match OpenShift manifest naming convention
 for f in ${crddir}/*.yaml ; do
-	${YQ} d --inplace $f status
 	mv -v "$f" "${crddir}/0000_50_olm_00-$(basename $f | ${SED} 's/^.*_\([^.]\+\)\.yaml/\1.crd.yaml/')"
 done
 
From 65648e6244008e6a7ef92698306432fac24931cd Mon Sep 17 00:00:00 2001
From: Joe Lanford
Date: Sun, 1 Feb 2026 03:37:42 -0500
Subject: [PATCH 8/8] restrict CatalogSource watch to redhat-operators via
 field selector; omit cipher-suite flag for TLS 1.3

---
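Two changes here. The new --catalog-source-field-selector argument scopes
the controller's CatalogSource watch to the redhat-operators catalog in
openshift-marketplace. The controller.go change stops forwarding
--tls-cipher-suites when the minimum TLS version is 1.3: Go's crypto/tls
fixes the TLS 1.3 suites at the runtime level and ignores
Config.CipherSuites for TLS 1.3 connections, so the flag would be
meaningless there and, depending on how the server validates its flags,
could be rejected outright. Below is a minimal, self-contained sketch of
the gating; the buildArgs helper and its flag formatting are illustrative
stand-ins, not the patched code, which renders version and suite names via
the crypto helpers seen in the diff (TLSVersionToNameOrDie,
CipherSuitesToNamesOrDie).

    package main

    import (
        "crypto/tls"
        "fmt"
        "strings"
    )

    // buildArgs emits the cipher-suite flag only when suites are actually
    // configurable, i.e. when the minimum TLS version is 1.2 or lower.
    // (An unset MinVersion of 0 also passes the gate, matching the patch.)
    func buildArgs(cfg *tls.Config) []string {
        args := []string{fmt.Sprintf("--tls-min-version=0x%04x", cfg.MinVersion)}
        if cfg.MinVersion <= tls.VersionTLS12 {
            names := make([]string, 0, len(cfg.CipherSuites))
            for _, id := range cfg.CipherSuites {
                names = append(names, tls.CipherSuiteName(id))
            }
            args = append(args, "--tls-cipher-suites="+strings.Join(names, ","))
        }
        return args
    }

    func main() {
        modern := &tls.Config{MinVersion: tls.VersionTLS13}
        intermediate := &tls.Config{
            MinVersion:   tls.VersionTLS12,
            CipherSuites: []uint16{tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256},
        }
        fmt.Println(buildArgs(modern))       // no cipher-suite flag at TLS 1.3
        fmt.Println(buildArgs(intermediate)) // includes --tls-cipher-suites=...
    }
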
...08-lifecycle-controller.deployment.ibm-cloud-managed.yaml | 1 + .../0000_50_olm_08-lifecycle-controller.deployment.yaml | 1 + ...08-lifecycle-controller.deployment.ibm-cloud-managed.yaml | 1 + .../0000_50_olm_08-lifecycle-controller.deployment.yaml | 1 + pkg/lifecycle-controller/controller.go | 5 ++++- scripts/generate_crds_manifests.sh | 1 + 6 files changed, 9 insertions(+), 1 deletion(-) diff --git a/manifests/0000_50_olm_08-lifecycle-controller.deployment.ibm-cloud-managed.yaml b/manifests/0000_50_olm_08-lifecycle-controller.deployment.ibm-cloud-managed.yaml index 098eac94ae..0f3d1fdc50 100644 --- a/manifests/0000_50_olm_08-lifecycle-controller.deployment.ibm-cloud-managed.yaml +++ b/manifests/0000_50_olm_08-lifecycle-controller.deployment.ibm-cloud-managed.yaml @@ -43,6 +43,7 @@ spec: - /bin/lifecycle-controller args: - start + - --catalog-source-field-selector=metadata.namespace=openshift-marketplace,metadata.name=redhat-operators image: quay.io/operator-framework/olm@sha256:de396b540b82219812061d0d753440d5655250c621c753ed1dc67d6154741607 imagePullPolicy: IfNotPresent env: diff --git a/manifests/0000_50_olm_08-lifecycle-controller.deployment.yaml b/manifests/0000_50_olm_08-lifecycle-controller.deployment.yaml index 7bf49b23c0..cb7b28e352 100644 --- a/manifests/0000_50_olm_08-lifecycle-controller.deployment.yaml +++ b/manifests/0000_50_olm_08-lifecycle-controller.deployment.yaml @@ -42,6 +42,7 @@ spec: - /bin/lifecycle-controller args: - start + - --catalog-source-field-selector=metadata.namespace=openshift-marketplace,metadata.name=redhat-operators image: quay.io/operator-framework/olm@sha256:de396b540b82219812061d0d753440d5655250c621c753ed1dc67d6154741607 imagePullPolicy: IfNotPresent env: diff --git a/microshift-manifests/0000_50_olm_08-lifecycle-controller.deployment.ibm-cloud-managed.yaml b/microshift-manifests/0000_50_olm_08-lifecycle-controller.deployment.ibm-cloud-managed.yaml index 098eac94ae..0f3d1fdc50 100644 --- a/microshift-manifests/0000_50_olm_08-lifecycle-controller.deployment.ibm-cloud-managed.yaml +++ b/microshift-manifests/0000_50_olm_08-lifecycle-controller.deployment.ibm-cloud-managed.yaml @@ -43,6 +43,7 @@ spec: - /bin/lifecycle-controller args: - start + - --catalog-source-field-selector=metadata.namespace=openshift-marketplace,metadata.name=redhat-operators image: quay.io/operator-framework/olm@sha256:de396b540b82219812061d0d753440d5655250c621c753ed1dc67d6154741607 imagePullPolicy: IfNotPresent env: diff --git a/microshift-manifests/0000_50_olm_08-lifecycle-controller.deployment.yaml b/microshift-manifests/0000_50_olm_08-lifecycle-controller.deployment.yaml index 7bf49b23c0..cb7b28e352 100644 --- a/microshift-manifests/0000_50_olm_08-lifecycle-controller.deployment.yaml +++ b/microshift-manifests/0000_50_olm_08-lifecycle-controller.deployment.yaml @@ -42,6 +42,7 @@ spec: - /bin/lifecycle-controller args: - start + - --catalog-source-field-selector=metadata.namespace=openshift-marketplace,metadata.name=redhat-operators image: quay.io/operator-framework/olm@sha256:de396b540b82219812061d0d753440d5655250c621c753ed1dc67d6154741607 imagePullPolicy: IfNotPresent env: diff --git a/pkg/lifecycle-controller/controller.go b/pkg/lifecycle-controller/controller.go index 6ec9b5997a..27f861e145 100644 --- a/pkg/lifecycle-controller/controller.go +++ b/pkg/lifecycle-controller/controller.go @@ -18,6 +18,7 @@ package controllers import ( "context" + "crypto/tls" "fmt" "sort" "strings" @@ -619,7 +620,9 @@ func (r *LifecycleControllerReconciler) buildLifecycleServerArgs(fbcPath 
string) if r.TLSConfigProvider != nil { if cfg := r.TLSConfigProvider.Get(); cfg != nil { args = append(args, fmt.Sprintf("--tls-min-version=%s", crypto.TLSVersionToNameOrDie(cfg.MinVersion))) - args = append(args, fmt.Sprintf("--tls-cipher-suites=%s", strings.Join(crypto.CipherSuitesToNamesOrDie(cfg.CipherSuites), ","))) + if cfg.MinVersion <= tls.VersionTLS12 { + args = append(args, fmt.Sprintf("--tls-cipher-suites=%s", strings.Join(crypto.CipherSuitesToNamesOrDie(cfg.CipherSuites), ","))) + } } } diff --git a/scripts/generate_crds_manifests.sh b/scripts/generate_crds_manifests.sh index 5e1c25ed4f..a0a5518a28 100755 --- a/scripts/generate_crds_manifests.sh +++ b/scripts/generate_crds_manifests.sh @@ -584,6 +584,7 @@ spec: - /bin/lifecycle-controller args: - start + - --catalog-source-field-selector=metadata.namespace=openshift-marketplace,metadata.name=redhat-operators image: quay.io/operator-framework/olm@sha256:de396b540b82219812061d0d753440d5655250c621c753ed1dc67d6154741607 imagePullPolicy: IfNotPresent env: