diff --git a/Makefile b/Makefile
index d9739044e8..66af13bff8 100644
--- a/Makefile
+++ b/Makefile
@@ -31,6 +31,8 @@ COLLECT_PROFILES_CMD := $(addprefix bin/, collect-profiles)
 OPM := $(addprefix bin/, opm)
 OLM_CMDS := $(shell go list -mod=vendor $(OLM_PKG)/cmd/...)
 PSM_CMD := $(addprefix bin/, psm)
+LIFECYCLE_CONTROLLER_CMD := $(addprefix bin/, lifecycle-controller)
+LIFECYCLE_SERVER_CMD := $(addprefix bin/, lifecycle-server)
 REGISTRY_CMDS := $(addprefix bin/, $(shell ls staging/operator-registry/cmd | grep -v opm))
 
 # Default image tag for build/olm-container and build/registry-container
@@ -77,7 +79,7 @@ build/registry:
 	$(MAKE) $(REGISTRY_CMDS) $(OPM)
 
 build/olm:
-	$(MAKE) $(PSM_CMD) $(OLM_CMDS) $(COLLECT_PROFILES_CMD) bin/copy-content
+	$(MAKE) $(PSM_CMD) $(OLM_CMDS) $(COLLECT_PROFILES_CMD) bin/copy-content $(LIFECYCLE_CONTROLLER_CMD) $(LIFECYCLE_SERVER_CMD)
 
 $(OPM): version_flags=-ldflags "-X '$(REGISTRY_PKG)/cmd/opm/version.gitCommit=$(GIT_COMMIT)' -X '$(REGISTRY_PKG)/cmd/opm/version.opmVersion=$(OPM_VERSION)' -X '$(REGISTRY_PKG)/cmd/opm/version.buildDate=$(BUILD_DATE)'"
 $(OPM):
@@ -97,6 +99,12 @@ $(PSM_CMD): FORCE
 $(COLLECT_PROFILES_CMD): FORCE
 	go build $(GO_BUILD_OPTS) $(GO_BUILD_TAGS) -o $(COLLECT_PROFILES_CMD) $(ROOT_PKG)/cmd/collect-profiles
 
+$(LIFECYCLE_CONTROLLER_CMD): FORCE
+	go build $(GO_BUILD_OPTS) $(GO_BUILD_TAGS) -o $(LIFECYCLE_CONTROLLER_CMD) $(ROOT_PKG)/cmd/lifecycle-controller
+
+$(LIFECYCLE_SERVER_CMD): FORCE
+	go build $(GO_BUILD_OPTS) $(GO_BUILD_TAGS) -o $(LIFECYCLE_SERVER_CMD) $(ROOT_PKG)/cmd/lifecycle-server
+
 .PHONY: cross
 cross: version_flags=-X '$(REGISTRY_PKG)/cmd/opm/version.gitCommit=$(GIT_COMMIT)' -X '$(REGISTRY_PKG)/cmd/opm/version.opmVersion=$(OPM_VERSION)' -X '$(REGISTRY_PKG)/cmd/opm/version.buildDate=$(BUILD_DATE)'
 cross:
diff --git a/cmd/lifecycle-controller/main.go b/cmd/lifecycle-controller/main.go
new file mode 100644
index 0000000000..04c5ce578a
--- /dev/null
+++ b/cmd/lifecycle-controller/main.go
@@ -0,0 +1,23 @@
+package main
+
+import (
+	"fmt"
+	"os"
+
+	"github.com/spf13/cobra"
+	_ "k8s.io/client-go/plugin/pkg/client/auth"
+)
+
+func main() {
+	rootCmd := &cobra.Command{
+		Use:   "lifecycle-controller",
+		Short: "Lifecycle Metadata Controller for OLM",
+	}
+
+	rootCmd.AddCommand(newStartCmd())
+
+	if err := rootCmd.Execute(); err != nil {
+		fmt.Fprintf(os.Stderr, "error running lifecycle-controller: %v\n", err)
+		os.Exit(1)
+	}
+}
"github.com/openshift/operator-framework-olm/pkg/lifecycle-controller" +) + +const ( + defaultMetricsAddr = ":8443" + defaultHealthCheckAddr = ":8081" + leaderElectionID = "lifecycle-controller-lock" + + // Leader election defaults per OpenShift conventions + // https://github.com/openshift/enhancements/blob/master/CONVENTIONS.md#high-availability + defaultLeaseDuration = 137 * time.Second + defaultRenewDeadline = 107 * time.Second + defaultRetryPeriod = 26 * time.Second +) + +var ( + disableLeaderElection bool + healthCheckAddr string + metricsAddr string + catalogSourceLabelSelector string + catalogSourceFieldSelector string +) + +func newStartCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "start", + Short: "Start the Lifecycle Controller", + SilenceUsage: true, + RunE: run, + } + + cmd.Flags().StringVar(&healthCheckAddr, "health", defaultHealthCheckAddr, "health check address") + cmd.Flags().StringVar(&metricsAddr, "metrics", defaultMetricsAddr, "metrics address") + cmd.Flags().BoolVar(&disableLeaderElection, "disable-leader-election", false, "disable leader election") + cmd.Flags().StringVar(&catalogSourceLabelSelector, "catalog-source-label-selector", "", "label selector for catalog sources to manage (empty means all)") + cmd.Flags().StringVar(&catalogSourceFieldSelector, "catalog-source-field-selector", "", "field selector for catalog sources to manage (empty means all)") + + return cmd +} + +func run(_ *cobra.Command, _ []string) error { + serverImage := os.Getenv("LIFECYCLE_SERVER_IMAGE") + if serverImage == "" { + return fmt.Errorf("LIFECYCLE_SERVER_IMAGE environment variable is required") + } + + namespace := os.Getenv("NAMESPACE") + if !disableLeaderElection && namespace == "" { + return fmt.Errorf("NAMESPACE environment variable is required when leader election is enabled") + } + + ctrl.SetLogger(klog.NewKlogr()) + setupLog := ctrl.Log.WithName("setup") + + version := os.Getenv("RELEASE_VERSION") + if version == "" { + version = "unknown" + } + setupLog.Info("starting lifecycle-controller", "version", version) + + // Parse the catalog source label selector + labelSelector, err := labels.Parse(catalogSourceLabelSelector) + if err != nil { + setupLog.Error(err, "failed to parse catalog-source-label-selector", "selector", catalogSourceLabelSelector) + return fmt.Errorf("invalid catalog-source-label-selector %q: %w", catalogSourceLabelSelector, err) + } + setupLog.Info("using catalog source label selector", "selector", labelSelector.String()) + + // Parse the catalog source field selector + fieldSelector, err := fields.ParseSelector(catalogSourceFieldSelector) + if err != nil { + setupLog.Error(err, "failed to parse catalog-source-field-selector", "selector", catalogSourceFieldSelector) + return fmt.Errorf("invalid catalog-source-field-selector %q: %w", catalogSourceFieldSelector, err) + } + setupLog.Info("using catalog source field selector", "selector", fieldSelector.String()) + + restConfig := ctrl.GetConfigOrDie() + scheme := setupScheme() + + // Create a temporary client to read initial TLS configuration + tempClient, err := client.New(restConfig, client.Options{Scheme: scheme}) + if err != nil { + setupLog.Error(err, "failed to create temporary client for TLS config") + return err + } + + // Get initial TLS configuration from APIServer "cluster" + ctx := context.Background() + initialTLSConfig := controllers.GetClusterTLSConfig(ctx, tempClient, setupLog) + + // Create a TLS config provider for dynamic updates + tlsProvider := 
controllers.NewTLSConfigProvider(initialTLSConfig) + + // Leader election timing defaults + leaseDuration := defaultLeaseDuration + renewDeadline := defaultRenewDeadline + retryPeriod := defaultRetryPeriod + + mgr, err := ctrl.NewManager(restConfig, manager.Options{ + Scheme: scheme, + Metrics: metricsserver.Options{ + BindAddress: metricsAddr, + SecureServing: true, + FilterProvider: metricsfilters.WithAuthenticationAndAuthorization, + TLSOpts: []func(*tls.Config){ + func(cfg *tls.Config) { + // Use GetConfigForClient for dynamic TLS configuration + cfg.GetConfigForClient = func(info *tls.ClientHelloInfo) (*tls.Config, error) { + cfg := tlsProvider.Get() + return cfg.Clone(), nil + } + }, + }, + }, + LeaderElection: !disableLeaderElection, + LeaderElectionNamespace: namespace, + LeaderElectionID: leaderElectionID, + LeaseDuration: &leaseDuration, + RenewDeadline: &renewDeadline, + RetryPeriod: &retryPeriod, + HealthProbeBindAddress: healthCheckAddr, + LeaderElectionReleaseOnCancel: true, + Cache: cache.Options{ + ByObject: map[client.Object]cache.ByObject{ + &operatorsv1alpha1.CatalogSource{}: {}, + &corev1.Pod{}: { + Label: catalogPodLabelSelector(), + }, + &appsv1.Deployment{}: { + Label: controllers.LifecycleServerLabelSelector(), + }, + &configv1.APIServer{}: {}, + }, + }, + }) + if err != nil { + setupLog.Error(err, "failed to setup manager instance") + return err + } + + // Create channel for TLS config change notifications + // The apiServerWatcher sends events to this channel after updating the TLS provider + tlsChangeChan := make(chan event.GenericEvent) + tlsChangeSource := source.Channel(tlsChangeChan, &handler.EnqueueRequestForObject{}) + + tlsProfileLog := ctrl.Log.WithName("controllers").WithName("tlsprofile-controller") + tlsProfileReconciler := controllers.ClusterTLSProfileReconciler{ + Client: mgr.GetClient(), + Log: tlsProfileLog, + TLSProvider: tlsProvider, + OnChange: func(prev, cur *tls.Config) { + // Trigger reconciliation of all CatalogSources to update lifecycle-server deployments + var catalogSources operatorsv1alpha1.CatalogSourceList + if err := mgr.GetClient().List(ctx, &catalogSources); err != nil { + tlsProfileLog.Error(err, "failed to list CatalogSources to requeue for TLS reconfiguration; CatalogSources will not receive new TLS configuration until their next reconciliation") + return + } + + tlsProfileLog.Info("requeueing CatalogSources TLS reconfiguration", "count", len(catalogSources.Items)) + + // Send events to trigger reconciliation + for i := range catalogSources.Items { + cs := &catalogSources.Items[i] + tlsChangeChan <- event.GenericEvent{Object: cs} + } + }, + } + // Set up TLSProfileReconciler to reconcile TLS profile changes. 
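The manager above wires TLS dynamically: because `GetConfigForClient` is set, Go's TLS stack invokes it on every client handshake, so a swapped-in config takes effect without restarting the metrics listener. A minimal standalone sketch of the same pattern, assuming a provider guarded by a mutex (the actual `TLSConfigProvider` lives in pkg/lifecycle-controller and its internals are not shown in this patch):

package main

import (
	"crypto/tls"
	"sync"
)

// provider holds the currently active TLS settings behind a mutex so the
// serving goroutine and a reconciler can swap configs safely.
type provider struct {
	mu  sync.RWMutex
	cfg *tls.Config
}

func (p *provider) Get() *tls.Config {
	p.mu.RLock()
	defer p.mu.RUnlock()
	return p.cfg
}

func (p *provider) Set(cfg *tls.Config) {
	p.mu.Lock()
	defer p.mu.Unlock()
	p.cfg = cfg
}

// serverConfig returns a static outer config whose GetConfigForClient hook
// defers to the provider, so each new handshake sees the latest profile.
func serverConfig(p *provider) *tls.Config {
	return &tls.Config{
		GetConfigForClient: func(_ *tls.ClientHelloInfo) (*tls.Config, error) {
			return p.Get().Clone(), nil
		},
	}
}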
diff --git a/cmd/lifecycle-controller/util.go b/cmd/lifecycle-controller/util.go
new file mode 100644
index 0000000000..14f0bf76da
--- /dev/null
+++ b/cmd/lifecycle-controller/util.go
@@ -0,0 +1,34 @@
+package main
+
+import (
+	"fmt"
+
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/selection"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
+
+	configv1 "github.com/openshift/api/config/v1"
+	operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1"
+)
+
+func setupScheme() *runtime.Scheme {
+	scheme := runtime.NewScheme()
+	utilruntime.Must(clientgoscheme.AddToScheme(scheme))
+	utilruntime.Must(operatorsv1alpha1.AddToScheme(scheme))
+	utilruntime.Must(configv1.AddToScheme(scheme))
+
+	return scheme
+}
+
+// catalogPodLabelSelector returns a label selector matching pods with the olm.catalogSource label
+func catalogPodLabelSelector() labels.Selector {
+	// This call cannot fail: the label key is valid and selection.Exists requires no values.
+	req, err := labels.NewRequirement("olm.catalogSource", selection.Exists, nil)
+	if err != nil {
+		// Panic on impossible error to satisfy static analysis and catch programming errors
+		panic(fmt.Sprintf("BUG: failed to create label requirement: %v", err))
+	}
+	return labels.NewSelector().Add(*req)
+}
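For illustration, the `Exists` requirement built above matches any pod carrying the `olm.catalogSource` key regardless of its value; a quick standalone check of that behavior:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	sel := catalogPodLabelSelector() // from util.go above

	// Matches: the key is present (the value is irrelevant for Exists).
	fmt.Println(sel.Matches(labels.Set{"olm.catalogSource": "redhat-operators"})) // true

	// Does not match: the key is absent.
	fmt.Println(sel.Matches(labels.Set{"app": "olm-lifecycle-server"})) // false
}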
diff --git a/cmd/lifecycle-server/main.go b/cmd/lifecycle-server/main.go
new file mode 100644
index 0000000000..56584328e9
--- /dev/null
+++ b/cmd/lifecycle-server/main.go
@@ -0,0 +1,22 @@
+package main
+
+import (
+	"fmt"
+	"os"
+
+	"github.com/spf13/cobra"
+)
+
+func main() {
+	rootCmd := &cobra.Command{
+		Use:   "lifecycle-server",
+		Short: "Lifecycle Metadata Server for OLM",
+	}
+
+	rootCmd.AddCommand(newStartCmd())
+
+	if err := rootCmd.Execute(); err != nil {
+		fmt.Fprintf(os.Stderr, "error running lifecycle-server: %v\n", err)
+		os.Exit(1)
+	}
+}
diff --git a/cmd/lifecycle-server/start.go b/cmd/lifecycle-server/start.go
new file mode 100644
index 0000000000..69b3f22ce2
--- /dev/null
+++ b/cmd/lifecycle-server/start.go
@@ -0,0 +1,218 @@
+package main
+
+import (
+	"context"
+	"crypto/tls"
+	"errors"
+	"fmt"
+	"net/http"
+	"time"
+
+	"github.com/openshift/library-go/pkg/crypto"
+	"github.com/spf13/cobra"
+	"golang.org/x/sync/errgroup"
+	"k8s.io/client-go/rest"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/metrics/filters"
+
+	"k8s.io/klog/v2"
+
+	server "github.com/openshift/operator-framework-olm/pkg/lifecycle-server"
+)
+
+const (
+	defaultFBCPath     = "/catalog/configs"
+	defaultListenAddr  = ":8443"
+	defaultHealthAddr  = ":8081"
+	defaultTLSCertPath = "/var/run/secrets/serving-cert/tls.crt"
+	defaultTLSKeyPath  = "/var/run/secrets/serving-cert/tls.key"
+	shutdownTimeout    = 10 * time.Second
+)
+
+var (
+	fbcPath            string
+	listenAddr         string
+	healthAddr         string
+	tlsCertPath        string
+	tlsKeyPath         string
+	tlsMinVersionStr   string
+	tlsCipherSuiteStrs []string
+)
+
+func newStartCmd() *cobra.Command {
+	cmd := &cobra.Command{
+		Use:          "start",
+		Short:        "Start the Lifecycle Server",
+		SilenceUsage: true,
+		RunE:         run,
+	}
+
+	cmd.Flags().StringVar(&fbcPath, "fbc-path", defaultFBCPath, "path to FBC catalog data")
+	cmd.Flags().StringVar(&listenAddr, "listen", defaultListenAddr, "address to listen on for HTTPS API")
+	cmd.Flags().StringVar(&healthAddr, "health", defaultHealthAddr, "address to listen on for health checks")
+	cmd.Flags().StringVar(&tlsCertPath, "tls-cert", defaultTLSCertPath, "path to TLS certificate")
+	cmd.Flags().StringVar(&tlsKeyPath, "tls-key", defaultTLSKeyPath, "path to TLS private key")
+	cmd.Flags().StringVar(&tlsMinVersionStr, "tls-min-version", "", "minimum TLS version")
+	cmd.Flags().StringSliceVar(&tlsCipherSuiteStrs, "tls-cipher-suites", nil, "comma-separated list of cipher suites")
+
+	return cmd
+}
+
+func parseTLSFlags(minVersionStr string, cipherSuiteStrs []string) (*tls.Config, error) {
+	cert, err := tls.LoadX509KeyPair(tlsCertPath, tlsKeyPath)
+	if err != nil {
+		return nil, fmt.Errorf("failed to load TLS certificate: %w", err)
+	}
+
+	minVersion, err := crypto.TLSVersion(minVersionStr)
+	if err != nil {
+		return nil, fmt.Errorf("invalid TLS minimum version: %s", minVersionStr)
+	}
+
+	var (
+		cipherSuites    []uint16
+		cipherSuiteErrs []error
+	)
+	for _, tlsCipherSuiteStr := range cipherSuiteStrs {
+		tlsCipherSuite, err := crypto.CipherSuite(tlsCipherSuiteStr)
+		if err != nil {
+			cipherSuiteErrs = append(cipherSuiteErrs, err)
+		} else {
+			cipherSuites = append(cipherSuites, tlsCipherSuite)
+		}
+	}
+	if len(cipherSuiteErrs) != 0 {
+		return nil, fmt.Errorf("invalid TLS cipher suites: %w", errors.Join(cipherSuiteErrs...))
+	}
+
+	return &tls.Config{
+		Certificates: []tls.Certificate{cert},
+		MinVersion:   minVersion,
+		CipherSuites: cipherSuites,
+	}, nil
+}
+
+func run(_ *cobra.Command, _ []string) error {
+	log := klog.NewKlogr()
+	log.Info("starting lifecycle-server")
+
+	tlsConfig, err := parseTLSFlags(tlsMinVersionStr, tlsCipherSuiteStrs)
+	if err != nil {
+		return fmt.Errorf("failed to parse tls flags: %w", err)
+	}
+
+	// Create Kubernetes client for authn/authz
+	restCfg := ctrl.GetConfigOrDie()
+	httpClient, err := rest.HTTPClientFor(restCfg)
+	if err != nil {
+		log.Error(err, "failed to create http client")
+		return err
+	}
+
+	authnzFilter, err := filters.WithAuthenticationAndAuthorization(restCfg, httpClient)
+	if err != nil {
+		log.Error(err, "failed to create authorization filter")
+		return err
+	}
+
+	// Load lifecycle data from FBC
+	log.Info("loading lifecycle data from FBC", "path", fbcPath)
+	data, err := server.LoadLifecycleData(fbcPath)
+	if err != nil {
+		log.Error(err, "failed to load lifecycle data, starting with empty data")
+		data = make(server.LifecycleIndex)
+	}
+	log.Info("loaded lifecycle data",
+		"packageCount", data.CountPackages(),
+		"blobCount", data.CountBlobs(),
+		"versions", data.ListVersions(),
+	)
+
+	// Create the HTTP API handler with authn/authz middleware
+	baseHandler := server.NewHandler(data, log)
+	apiHandler, err := authnzFilter(log, baseHandler)
+	if err != nil {
+		log.Error(err, "failed to create api handler")
+		return err
+	}
+
+	// Create the health handler (no auth required)
+	healthHandler := http.NewServeMux()
+	healthHandler.HandleFunc("GET /healthz", func(w http.ResponseWriter, r *http.Request) {
+		w.WriteHeader(http.StatusOK)
+		_, _ = w.Write([]byte("ok"))
+	})
+
+	// Create servers
+	apiServer := cancelableServer{
+		Server: &http.Server{
+			Addr:      listenAddr,
+			Handler:   apiHandler,
+			TLSConfig: tlsConfig,
+		},
+		ShutdownTimeout: shutdownTimeout,
+	}
+	healthServer := cancelableServer{
+		Server: &http.Server{
+			Addr:    healthAddr,
+			Handler: healthHandler,
+		},
+		ShutdownTimeout: shutdownTimeout,
+	}
+
+	eg, ctx := errgroup.WithContext(ctrl.SetupSignalHandler())
+	eg.Go(func() error {
+		if err := apiServer.ListenAndServeTLS(ctx, "", ""); err != nil {
+			return fmt.Errorf("api server error: %w", err)
+		}
+		return nil
+	})
+	eg.Go(func() error {
+		if err := healthServer.ListenAndServe(ctx); err != nil {
+			return fmt.Errorf("health server error: %w", err)
+		}
+		return nil
+	})
+	return eg.Wait()
+}
+
+type cancelableServer struct {
+	*http.Server
+	ShutdownTimeout time.Duration
+}
+
+func (s *cancelableServer) ListenAndServe(ctx context.Context) error {
+	return s.listenAndServe(ctx,
+		func() error {
+			return s.Server.ListenAndServe()
+		},
+		s.Server.Shutdown,
+	)
+}
+
+func (s *cancelableServer) ListenAndServeTLS(ctx context.Context, certFile, keyFile string) error {
+	return s.listenAndServe(ctx,
+		func() error {
+			return s.Server.ListenAndServeTLS(certFile, keyFile)
+		},
+		s.Server.Shutdown,
+	)
+}
+
+func (s *cancelableServer) listenAndServe(ctx context.Context, runFunc func() error, cancelFunc func(context.Context) error) error {
+	errChan := make(chan error)
+	go func() {
+		errChan <- runFunc()
+	}()
+
+	select {
+	case err := <-errChan:
+		return err
+	case <-ctx.Done():
+		shutdownCtx, cancel := context.WithTimeout(context.Background(), s.ShutdownTimeout)
+		defer cancel()
+		if err := cancelFunc(shutdownCtx); err != nil {
+			return err
+		}
+		return nil
+	}
+}
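cancelableServer turns the blocking net/http serve loop into a context-aware call: serving runs in a goroutine while the select waits for either a serve error or context cancellation, in which case the server gets a bounded graceful shutdown. A minimal usage sketch under those semantics:

package main

import (
	"context"
	"errors"
	"fmt"
	"net/http"
	"time"
)

func example() error {
	srv := cancelableServer{
		Server:          &http.Server{Addr: ":8080", Handler: http.NotFoundHandler()},
		ShutdownTimeout: 10 * time.Second,
	}

	// Cancel after one minute to demonstrate the shutdown path.
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()

	// A context-triggered shutdown returns nil; http.ErrServerClosed is only
	// surfaced if the serve loop exits before the context is cancelled.
	if err := srv.ListenAndServe(ctx); err != nil && !errors.Is(err, http.ErrServerClosed) {
		return fmt.Errorf("serve: %w", err)
	}
	return nil
}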
diff --git a/cmd/lifecycle-server/util.go b/cmd/lifecycle-server/util.go
new file mode 100644
index 0000000000..cd6410f5f2
--- /dev/null
+++ b/cmd/lifecycle-server/util.go
@@ -0,0 +1,14 @@
+package main
+
+import (
+	"k8s.io/apimachinery/pkg/runtime"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
+)
+
+func setupScheme() *runtime.Scheme {
+	scheme := runtime.NewScheme()
+	utilruntime.Must(clientgoscheme.AddToScheme(scheme))
+
+	return scheme
+}
diff --git a/go.mod b/go.mod
index 4d0d25fdf5..4eb9661b4a 100644
--- a/go.mod
+++ b/go.mod
@@ -13,12 +13,14 @@ require (
 	github.com/mikefarah/yq/v3 v3.0.0-20201202084205-8846255d1c37
 	github.com/onsi/ginkgo/v2 v2.27.5
 	github.com/openshift/api v0.0.0-20251111193948-50e2ece149d7
+	github.com/openshift/library-go v0.0.0-20260108135436-db8dbd64c462
 	github.com/operator-framework/api v0.38.0
 	github.com/operator-framework/operator-lifecycle-manager v0.0.0-00010101000000-000000000000
 	github.com/operator-framework/operator-registry v1.62.0
 	github.com/sirupsen/logrus v1.9.4
 	github.com/spf13/cobra v1.10.2
 	github.com/stretchr/testify v1.11.1
+	golang.org/x/sync v0.19.0
 	google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.6.0
 	google.golang.org/protobuf v1.36.11
 	gopkg.in/yaml.v2 v2.4.0
@@ -26,6 +28,7 @@
 	k8s.io/apimachinery v0.34.3
 	k8s.io/client-go v0.34.3
 	k8s.io/code-generator v0.34.3
+	k8s.io/klog/v2 v2.130.1
 	k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912
 	k8s.io/utils v0.0.0-20251002143259-bc988d571ff4
 	sigs.k8s.io/controller-runtime v0.22.4
@@ -151,7 +154,6 @@
 	github.com/opencontainers/image-spec v1.1.1 // indirect
 	github.com/opencontainers/runtime-spec v1.2.1 // indirect
 	github.com/openshift/client-go v0.0.0-20251015124057-db0dee36e235 // indirect
-	github.com/openshift/library-go v0.0.0-20260108135436-db8dbd64c462 // indirect
 	github.com/otiai10/copy v1.14.1 // indirect
 	github.com/otiai10/mint v1.6.3 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
@@ -204,7 +206,6 @@
 	golang.org/x/mod v0.32.0 // indirect
 	golang.org/x/net v0.49.0 // indirect
 	golang.org/x/oauth2 v0.34.0 // indirect
-	golang.org/x/sync v0.19.0 // indirect
 	golang.org/x/sys v0.40.0 // indirect
 	golang.org/x/term v0.39.0 // indirect
 	golang.org/x/text v0.33.0 // indirect
@@ -227,7 +228,6 @@
 	k8s.io/component-base v0.34.3 // indirect
 	k8s.io/gengo/v2 v2.0.0-20250604051438-85fd79dbfd9f // indirect
 	k8s.io/klog v1.0.0 // indirect
-	k8s.io/klog/v2 v2.130.1 // indirect
 	k8s.io/kms v0.34.3 // indirect
 	k8s.io/kube-aggregator v0.34.3 // indirect
 	k8s.io/kubectl v0.33.2 // indirect
diff --git a/manifests/0000_50_olm_00-catalogsources.crd.yaml b/manifests/0000_50_olm_00-catalogsources.crd.yaml
index e0bde39811..5882e5b768 100644
--- a/manifests/0000_50_olm_00-catalogsources.crd.yaml
+++ b/manifests/0000_50_olm_00-catalogsources.crd.yaml
@@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    controller-gen.kubebuilder.io/version: v0.19.0
+    controller-gen.kubebuilder.io/version: v0.20.0
     include.release.openshift.io/ibm-cloud-managed: "true"
     include.release.openshift.io/self-managed-high-availability: "true"
     capability.openshift.io/name: "OperatorLifecycleManager"
@@ -1094,9 +1094,7 @@ spec:
               publisher:
                 type: string
               runAsRoot:
-                description: |-
-                  RunAsRoot allows admins to indicate that they wish to run the CatalogSource pod in a privileged
-                  pod as root. This should only be enabled when running older catalog images which could not be run as non-root.
+                description: RunAsRoot allows admins to indicate that they wish to run the CatalogSource pod in a privileged pod as root. This should only be enabled when running older catalog images which could not be run as non-root.
                 type: boolean
               secrets:
                 description: |-
diff --git a/manifests/0000_50_olm_00-clusterserviceversions.crd.yaml b/manifests/0000_50_olm_00-clusterserviceversions.crd.yaml
index b2ed5a0f71..f737a45d13 100644
--- a/manifests/0000_50_olm_00-clusterserviceversions.crd.yaml
+++ b/manifests/0000_50_olm_00-clusterserviceversions.crd.yaml
@@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    controller-gen.kubebuilder.io/version: v0.19.0
+    controller-gen.kubebuilder.io/version: v0.20.0
     include.release.openshift.io/ibm-cloud-managed: "true"
     include.release.openshift.io/self-managed-high-availability: "true"
     capability.openshift.io/name: "OperatorLifecycleManager"
@@ -4710,6 +4710,7 @@ spec:
                           ip:
                             description: IP address of the host file entry.
                             type: string
+                            default: ""
                         x-kubernetes-list-map-keys:
                         - ip
                         x-kubernetes-list-type: map
diff --git a/manifests/0000_50_olm_00-installplans.crd.yaml b/manifests/0000_50_olm_00-installplans.crd.yaml
index 08f6701336..6f3936c69e 100644
--- a/manifests/0000_50_olm_00-installplans.crd.yaml
+++ b/manifests/0000_50_olm_00-installplans.crd.yaml
@@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    controller-gen.kubebuilder.io/version: v0.19.0
+    controller-gen.kubebuilder.io/version: v0.20.0
     include.release.openshift.io/ibm-cloud-managed: "true"
     include.release.openshift.io/self-managed-high-availability: "true"
     capability.openshift.io/name: "OperatorLifecycleManager"
diff --git a/manifests/0000_50_olm_00-olmconfigs.crd.yaml b/manifests/0000_50_olm_00-olmconfigs.crd.yaml
index ec2291246b..90af18a959 100644
--- a/manifests/0000_50_olm_00-olmconfigs.crd.yaml
+++ b/manifests/0000_50_olm_00-olmconfigs.crd.yaml
@@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    controller-gen.kubebuilder.io/version: v0.19.0
+    controller-gen.kubebuilder.io/version: v0.20.0
     include.release.openshift.io/ibm-cloud-managed: "true"
     include.release.openshift.io/self-managed-high-availability: "true"
     capability.openshift.io/name: "OperatorLifecycleManager"
diff --git a/manifests/0000_50_olm_00-operatorconditions.crd.yaml b/manifests/0000_50_olm_00-operatorconditions.crd.yaml
index 2f5a208669..797a47b740 100644
--- a/manifests/0000_50_olm_00-operatorconditions.crd.yaml
+++ b/manifests/0000_50_olm_00-operatorconditions.crd.yaml
@@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    controller-gen.kubebuilder.io/version: v0.19.0
+    controller-gen.kubebuilder.io/version: v0.20.0
     include.release.openshift.io/ibm-cloud-managed: "true"
     include.release.openshift.io/self-managed-high-availability: "true"
     capability.openshift.io/name: "OperatorLifecycleManager"
diff --git a/manifests/0000_50_olm_00-operatorgroups.crd.yaml b/manifests/0000_50_olm_00-operatorgroups.crd.yaml
index acf2160ddc..1409d8aa27 100644
--- a/manifests/0000_50_olm_00-operatorgroups.crd.yaml
+++ b/manifests/0000_50_olm_00-operatorgroups.crd.yaml
@@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    controller-gen.kubebuilder.io/version: v0.19.0
+    controller-gen.kubebuilder.io/version: v0.20.0
     include.release.openshift.io/ibm-cloud-managed: "true"
     include.release.openshift.io/self-managed-high-availability: "true"
     capability.openshift.io/name: "OperatorLifecycleManager"
diff --git a/manifests/0000_50_olm_00-operators.crd.yaml b/manifests/0000_50_olm_00-operators.crd.yaml
index c571a3264a..f1c29977d1 100644
--- a/manifests/0000_50_olm_00-operators.crd.yaml
+++ b/manifests/0000_50_olm_00-operators.crd.yaml
@@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    controller-gen.kubebuilder.io/version: v0.19.0
+    controller-gen.kubebuilder.io/version: v0.20.0
     include.release.openshift.io/ibm-cloud-managed: "true"
     include.release.openshift.io/self-managed-high-availability: "true"
     capability.openshift.io/name: "OperatorLifecycleManager"
diff --git a/manifests/0000_50_olm_00-subscriptions.crd.yaml b/manifests/0000_50_olm_00-subscriptions.crd.yaml
index 87ce80f720..bb718c89a8 100644
--- a/manifests/0000_50_olm_00-subscriptions.crd.yaml
+++ b/manifests/0000_50_olm_00-subscriptions.crd.yaml
@@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    controller-gen.kubebuilder.io/version: v0.19.0
+    controller-gen.kubebuilder.io/version: v0.20.0
     include.release.openshift.io/ibm-cloud-managed: "true"
     include.release.openshift.io/self-managed-high-availability: "true"
     capability.openshift.io/name: "OperatorLifecycleManager"
diff --git a/manifests/0000_50_olm_08-lifecycle-controller.deployment.ibm-cloud-managed.yaml b/manifests/0000_50_olm_08-lifecycle-controller.deployment.ibm-cloud-managed.yaml
new file mode 100644
index 0000000000..0f3d1fdc50
--- /dev/null
+++ b/manifests/0000_50_olm_08-lifecycle-controller.deployment.ibm-cloud-managed.yaml
@@ -0,0 +1,94 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: lifecycle-controller
+  namespace: openshift-operator-lifecycle-manager
+  labels:
+    app: olm-lifecycle-controller
+  annotations:
+    release.openshift.io/feature-set: "TechPreviewNoUpgrade"
+    include.release.openshift.io/ibm-cloud-managed: "true"
+    capability.openshift.io/name: "OperatorLifecycleManager"
+    include.release.openshift.io/hypershift: "true"
+spec:
+  strategy:
+    type: Recreate
+  replicas: 1
+  selector:
+    matchLabels:
+      app: olm-lifecycle-controller
+  template:
+    metadata:
+      annotations:
+        target.workload.openshift.io/management: '{"effect": "PreferredDuringScheduling"}'
+        openshift.io/required-scc: restricted-v2
+        kubectl.kubernetes.io/default-container: lifecycle-controller
+      labels:
+        app: olm-lifecycle-controller
+    spec:
+      securityContext:
+        runAsNonRoot: true
+        seccompProfile:
+          type: RuntimeDefault
+      serviceAccountName: lifecycle-controller
+      priorityClassName: "system-cluster-critical"
+      containers:
+        - name: lifecycle-controller
+          securityContext:
+            allowPrivilegeEscalation: false
+            readOnlyRootFilesystem: true
+            capabilities:
+              drop: ["ALL"]
+          command:
+            - /bin/lifecycle-controller
+          args:
+            - start
+            - --catalog-source-field-selector=metadata.namespace=openshift-marketplace,metadata.name=redhat-operators
+          image: quay.io/operator-framework/olm@sha256:de396b540b82219812061d0d753440d5655250c621c753ed1dc67d6154741607
+          imagePullPolicy: IfNotPresent
+          env:
+            - name: RELEASE_VERSION
+              value: "0.0.1-snapshot"
+            - name: NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
+            - name: LIFECYCLE_SERVER_IMAGE
+              value: quay.io/operator-framework/olm@sha256:de396b540b82219812061d0d753440d5655250c621c753ed1dc67d6154741607
+            - name: GOMEMLIMIT
+              value: "5MiB"
+          resources:
+            requests:
+              cpu: 10m
+              memory: 10Mi
+          ports:
+            - containerPort: 8081
+              name: health
+          livenessProbe:
+            httpGet:
+              path: /healthz
+              port: health
+              scheme: HTTP
+            initialDelaySeconds: 30
+          readinessProbe:
+            httpGet:
+              path: /healthz
+              port: health
+              scheme: HTTP
+            initialDelaySeconds: 30
+          terminationMessagePolicy: FallbackToLogsOnError
+      nodeSelector:
+        kubernetes.io/os: linux
+        node-role.kubernetes.io/control-plane: ""
+      tolerations:
+        - effect: NoSchedule
+          key: node-role.kubernetes.io/master
+          operator: Exists
+        - effect: NoExecute
+          key: node.kubernetes.io/unreachable
+          operator: Exists
+          tolerationSeconds: 120
+        - effect: NoExecute
+          key: node.kubernetes.io/not-ready
+          operator: Exists
+          tolerationSeconds: 120
diff --git a/manifests/0000_50_olm_08-lifecycle-controller.deployment.yaml b/manifests/0000_50_olm_08-lifecycle-controller.deployment.yaml
new file mode 100644
index 0000000000..cb7b28e352
--- /dev/null
+++ b/manifests/0000_50_olm_08-lifecycle-controller.deployment.yaml
@@ -0,0 +1,93 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: lifecycle-controller
+  namespace: openshift-operator-lifecycle-manager
+  labels:
+    app: olm-lifecycle-controller
+  annotations:
+    release.openshift.io/feature-set: "TechPreviewNoUpgrade"
+    include.release.openshift.io/self-managed-high-availability: "true"
+    capability.openshift.io/name: "OperatorLifecycleManager"
+spec:
+  strategy:
+    type: Recreate
+  replicas: 1
+  selector:
+    matchLabels:
+      app: olm-lifecycle-controller
+  template:
+    metadata:
+      annotations:
+        target.workload.openshift.io/management: '{"effect": "PreferredDuringScheduling"}'
+        openshift.io/required-scc: restricted-v2
+        kubectl.kubernetes.io/default-container: lifecycle-controller
+      labels:
+        app: olm-lifecycle-controller
+    spec:
+      securityContext:
+        runAsNonRoot: true
+        seccompProfile:
+          type: RuntimeDefault
+      serviceAccountName: lifecycle-controller
+      priorityClassName: "system-cluster-critical"
+      containers:
+        - name: lifecycle-controller
+          securityContext:
+            allowPrivilegeEscalation: false
+            readOnlyRootFilesystem: true
+            capabilities:
+              drop: ["ALL"]
+          command:
+            - /bin/lifecycle-controller
+          args:
+            - start
+            - --catalog-source-field-selector=metadata.namespace=openshift-marketplace,metadata.name=redhat-operators
+          image: quay.io/operator-framework/olm@sha256:de396b540b82219812061d0d753440d5655250c621c753ed1dc67d6154741607
+          imagePullPolicy: IfNotPresent
+          env:
+            - name: RELEASE_VERSION
+              value: "0.0.1-snapshot"
+            - name: NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
+            - name: LIFECYCLE_SERVER_IMAGE
+              value: quay.io/operator-framework/olm@sha256:de396b540b82219812061d0d753440d5655250c621c753ed1dc67d6154741607
+            - name: GOMEMLIMIT
+              value: "5MiB"
+          resources:
+            requests:
+              cpu: 10m
+              memory: 10Mi
+          ports:
+            - containerPort: 8081
+              name: health
+          livenessProbe:
+            httpGet:
+              path: /healthz
+              port: health
+              scheme: HTTP
+            initialDelaySeconds: 30
+          readinessProbe:
+            httpGet:
+              path: /healthz
+              port: health
+              scheme: HTTP
+            initialDelaySeconds: 30
+          terminationMessagePolicy: FallbackToLogsOnError
+      nodeSelector:
+        kubernetes.io/os: linux
+        node-role.kubernetes.io/control-plane: ""
+      tolerations:
+        - effect: NoSchedule
+          key: node-role.kubernetes.io/master
+          operator: Exists
+        - effect: NoExecute
+          key: node.kubernetes.io/unreachable
+          operator: Exists
+          tolerationSeconds: 120
+        - effect: NoExecute
+          key: node.kubernetes.io/not-ready
+          operator: Exists
+          tolerationSeconds: 120
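The --catalog-source-field-selector argument above narrows reconciliation to a single CatalogSource; the controller parses it with fields.ParseSelector and evaluates it against each object's name and namespace. A standalone illustration of that matching:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/fields"
)

func main() {
	sel, err := fields.ParseSelector("metadata.namespace=openshift-marketplace,metadata.name=redhat-operators")
	if err != nil {
		panic(err)
	}

	// Mirrors matchesCatalogSource in pkg/lifecycle-controller/controller.go.
	match := sel.Matches(fields.Set{
		"metadata.name":      "redhat-operators",
		"metadata.namespace": "openshift-marketplace",
	})
	fmt.Println(match) // true
}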
diff --git a/manifests/0000_50_olm_08-lifecycle-controller.networkpolicy.yaml b/manifests/0000_50_olm_08-lifecycle-controller.networkpolicy.yaml
new file mode 100644
index 0000000000..c08803d707
--- /dev/null
+++ b/manifests/0000_50_olm_08-lifecycle-controller.networkpolicy.yaml
@@ -0,0 +1,35 @@
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+  name: lifecycle-controller
+  namespace: openshift-operator-lifecycle-manager
+  annotations:
+    release.openshift.io/feature-set: "TechPreviewNoUpgrade"
+    include.release.openshift.io/ibm-cloud-managed: "true"
+    include.release.openshift.io/self-managed-high-availability: "true"
+    capability.openshift.io/name: "OperatorLifecycleManager"
+    include.release.openshift.io/hypershift: "true"
+spec:
+  podSelector:
+    matchLabels:
+      app: olm-lifecycle-controller
+  ingress:
+    - ports:
+        - port: 8443
+          protocol: TCP
+  egress:
+    - ports:
+        - port: 6443
+          protocol: TCP
+    - ports:
+        - port: 53
+          protocol: TCP
+        - port: 53
+          protocol: UDP
+        - port: 5353
+          protocol: TCP
+        - port: 5353
+          protocol: UDP
+  policyTypes:
+    - Ingress
+    - Egress
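For reference, the port roles in the policy above (ingress 8443 follows from defaultMetricsAddr in the controller code earlier in this patch; the remaining roles are assumed from standard OpenShift wiring): ingress 8443 admits scrapes of the controller's secure metrics endpoint, egress 6443 reaches the kube-apiserver, and 53/5353 over TCP and UDP cover cluster DNS.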
apiGroups: ["authentication.k8s.io"] + resources: ["tokenreviews"] + verbs: ["create"] + - apiGroups: ["authorization.k8s.io"] + resources: ["subjectaccessreviews"] + verbs: ["create"] + # Leader election + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "list", "watch", "create", "update", "delete"] + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "list", "watch", "create", "update", "delete"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: operator-lifecycle-manager-lifecycle-controller + annotations: + release.openshift.io/feature-set: "TechPreviewNoUpgrade" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + capability.openshift.io/name: "OperatorLifecycleManager" + include.release.openshift.io/hypershift: "true" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: operator-lifecycle-manager-lifecycle-controller +subjects: + - kind: ServiceAccount + name: lifecycle-controller + namespace: openshift-operator-lifecycle-manager diff --git a/manifests/0000_50_olm_09-lifecycle-server.rbac.yaml b/manifests/0000_50_olm_09-lifecycle-server.rbac.yaml new file mode 100644 index 0000000000..d848837106 --- /dev/null +++ b/manifests/0000_50_olm_09-lifecycle-server.rbac.yaml @@ -0,0 +1,18 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: operator-lifecycle-manager-lifecycle-server + annotations: + release.openshift.io/feature-set: "TechPreviewNoUpgrade" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + capability.openshift.io/name: "OperatorLifecycleManager" + include.release.openshift.io/hypershift: "true" +rules: + # Required by kube-rbac-proxy for authn/authz + - apiGroups: ["authentication.k8s.io"] + resources: ["tokenreviews"] + verbs: ["create"] + - apiGroups: ["authorization.k8s.io"] + resources: ["subjectaccessreviews"] + verbs: ["create"] diff --git a/microshift-manifests/0000_50_olm_00-catalogsources.crd.yaml b/microshift-manifests/0000_50_olm_00-catalogsources.crd.yaml index e0bde39811..5882e5b768 100644 --- a/microshift-manifests/0000_50_olm_00-catalogsources.crd.yaml +++ b/microshift-manifests/0000_50_olm_00-catalogsources.crd.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.19.0 + controller-gen.kubebuilder.io/version: v0.20.0 include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" capability.openshift.io/name: "OperatorLifecycleManager" @@ -1094,9 +1094,7 @@ spec: publisher: type: string runAsRoot: - description: |- - RunAsRoot allows admins to indicate that they wish to run the CatalogSource pod in a privileged - pod as root. This should only be enabled when running older catalog images which could not be run as non-root. + description: RunAsRoot allows admins to indicate that they wish to run the CatalogSource pod in a privileged pod as root. This should only be enabled when running older catalog images which could not be run as non-root. 
diff --git a/microshift-manifests/0000_50_olm_00-catalogsources.crd.yaml b/microshift-manifests/0000_50_olm_00-catalogsources.crd.yaml
index e0bde39811..5882e5b768 100644
--- a/microshift-manifests/0000_50_olm_00-catalogsources.crd.yaml
+++ b/microshift-manifests/0000_50_olm_00-catalogsources.crd.yaml
@@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    controller-gen.kubebuilder.io/version: v0.19.0
+    controller-gen.kubebuilder.io/version: v0.20.0
     include.release.openshift.io/ibm-cloud-managed: "true"
     include.release.openshift.io/self-managed-high-availability: "true"
     capability.openshift.io/name: "OperatorLifecycleManager"
@@ -1094,9 +1094,7 @@ spec:
               publisher:
                 type: string
              runAsRoot:
-                description: |-
-                  RunAsRoot allows admins to indicate that they wish to run the CatalogSource pod in a privileged
-                  pod as root. This should only be enabled when running older catalog images which could not be run as non-root.
+                description: RunAsRoot allows admins to indicate that they wish to run the CatalogSource pod in a privileged pod as root. This should only be enabled when running older catalog images which could not be run as non-root.
                 type: boolean
               secrets:
                 description: |-
diff --git a/microshift-manifests/0000_50_olm_00-clusterserviceversions.crd.yaml b/microshift-manifests/0000_50_olm_00-clusterserviceversions.crd.yaml
index b2ed5a0f71..f737a45d13 100644
--- a/microshift-manifests/0000_50_olm_00-clusterserviceversions.crd.yaml
+++ b/microshift-manifests/0000_50_olm_00-clusterserviceversions.crd.yaml
@@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    controller-gen.kubebuilder.io/version: v0.19.0
+    controller-gen.kubebuilder.io/version: v0.20.0
     include.release.openshift.io/ibm-cloud-managed: "true"
     include.release.openshift.io/self-managed-high-availability: "true"
     capability.openshift.io/name: "OperatorLifecycleManager"
@@ -4710,6 +4710,7 @@ spec:
                           ip:
                             description: IP address of the host file entry.
                             type: string
+                            default: ""
                         x-kubernetes-list-map-keys:
                         - ip
                         x-kubernetes-list-type: map
diff --git a/microshift-manifests/0000_50_olm_00-installplans.crd.yaml b/microshift-manifests/0000_50_olm_00-installplans.crd.yaml
index 08f6701336..6f3936c69e 100644
--- a/microshift-manifests/0000_50_olm_00-installplans.crd.yaml
+++ b/microshift-manifests/0000_50_olm_00-installplans.crd.yaml
@@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    controller-gen.kubebuilder.io/version: v0.19.0
+    controller-gen.kubebuilder.io/version: v0.20.0
     include.release.openshift.io/ibm-cloud-managed: "true"
     include.release.openshift.io/self-managed-high-availability: "true"
     capability.openshift.io/name: "OperatorLifecycleManager"
diff --git a/microshift-manifests/0000_50_olm_00-olmconfigs.crd.yaml b/microshift-manifests/0000_50_olm_00-olmconfigs.crd.yaml
index ec2291246b..90af18a959 100644
--- a/microshift-manifests/0000_50_olm_00-olmconfigs.crd.yaml
+++ b/microshift-manifests/0000_50_olm_00-olmconfigs.crd.yaml
@@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    controller-gen.kubebuilder.io/version: v0.19.0
+    controller-gen.kubebuilder.io/version: v0.20.0
     include.release.openshift.io/ibm-cloud-managed: "true"
     include.release.openshift.io/self-managed-high-availability: "true"
     capability.openshift.io/name: "OperatorLifecycleManager"
diff --git a/microshift-manifests/0000_50_olm_00-operatorconditions.crd.yaml b/microshift-manifests/0000_50_olm_00-operatorconditions.crd.yaml
index 2f5a208669..797a47b740 100644
--- a/microshift-manifests/0000_50_olm_00-operatorconditions.crd.yaml
+++ b/microshift-manifests/0000_50_olm_00-operatorconditions.crd.yaml
@@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    controller-gen.kubebuilder.io/version: v0.19.0
+    controller-gen.kubebuilder.io/version: v0.20.0
     include.release.openshift.io/ibm-cloud-managed: "true"
     include.release.openshift.io/self-managed-high-availability: "true"
     capability.openshift.io/name: "OperatorLifecycleManager"
diff --git a/microshift-manifests/0000_50_olm_00-operatorgroups.crd.yaml b/microshift-manifests/0000_50_olm_00-operatorgroups.crd.yaml
index acf2160ddc..1409d8aa27 100644
--- a/microshift-manifests/0000_50_olm_00-operatorgroups.crd.yaml
+++ b/microshift-manifests/0000_50_olm_00-operatorgroups.crd.yaml
@@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    controller-gen.kubebuilder.io/version: v0.19.0
+    controller-gen.kubebuilder.io/version: v0.20.0
     include.release.openshift.io/ibm-cloud-managed: "true"
     include.release.openshift.io/self-managed-high-availability: "true"
     capability.openshift.io/name: "OperatorLifecycleManager"
diff --git a/microshift-manifests/0000_50_olm_00-operators.crd.yaml b/microshift-manifests/0000_50_olm_00-operators.crd.yaml
index c571a3264a..f1c29977d1 100644
--- a/microshift-manifests/0000_50_olm_00-operators.crd.yaml
+++ b/microshift-manifests/0000_50_olm_00-operators.crd.yaml
@@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    controller-gen.kubebuilder.io/version: v0.19.0
+    controller-gen.kubebuilder.io/version: v0.20.0
     include.release.openshift.io/ibm-cloud-managed: "true"
     include.release.openshift.io/self-managed-high-availability: "true"
     capability.openshift.io/name: "OperatorLifecycleManager"
diff --git a/microshift-manifests/0000_50_olm_00-subscriptions.crd.yaml b/microshift-manifests/0000_50_olm_00-subscriptions.crd.yaml
index 87ce80f720..bb718c89a8 100644
--- a/microshift-manifests/0000_50_olm_00-subscriptions.crd.yaml
+++ b/microshift-manifests/0000_50_olm_00-subscriptions.crd.yaml
@@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    controller-gen.kubebuilder.io/version: v0.19.0
+    controller-gen.kubebuilder.io/version: v0.20.0
     include.release.openshift.io/ibm-cloud-managed: "true"
     include.release.openshift.io/self-managed-high-availability: "true"
     capability.openshift.io/name: "OperatorLifecycleManager"
diff --git a/microshift-manifests/0000_50_olm_08-lifecycle-controller.deployment.ibm-cloud-managed.yaml b/microshift-manifests/0000_50_olm_08-lifecycle-controller.deployment.ibm-cloud-managed.yaml
new file mode 100644
index 0000000000..0f3d1fdc50
--- /dev/null
+++ b/microshift-manifests/0000_50_olm_08-lifecycle-controller.deployment.ibm-cloud-managed.yaml
@@ -0,0 +1,94 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: lifecycle-controller
+  namespace: openshift-operator-lifecycle-manager
+  labels:
+    app: olm-lifecycle-controller
+  annotations:
+    release.openshift.io/feature-set: "TechPreviewNoUpgrade"
+    include.release.openshift.io/ibm-cloud-managed: "true"
+    capability.openshift.io/name: "OperatorLifecycleManager"
+    include.release.openshift.io/hypershift: "true"
+spec:
+  strategy:
+    type: Recreate
+  replicas: 1
+  selector:
+    matchLabels:
+      app: olm-lifecycle-controller
+  template:
+    metadata:
+      annotations:
+        target.workload.openshift.io/management: '{"effect": "PreferredDuringScheduling"}'
+        openshift.io/required-scc: restricted-v2
+        kubectl.kubernetes.io/default-container: lifecycle-controller
+      labels:
+        app: olm-lifecycle-controller
+    spec:
+      securityContext:
+        runAsNonRoot: true
+        seccompProfile:
+          type: RuntimeDefault
+      serviceAccountName: lifecycle-controller
+      priorityClassName: "system-cluster-critical"
+      containers:
+        - name: lifecycle-controller
+          securityContext:
+            allowPrivilegeEscalation: false
+            readOnlyRootFilesystem: true
+            capabilities:
+              drop: ["ALL"]
+          command:
+            - /bin/lifecycle-controller
+          args:
+            - start
+            - --catalog-source-field-selector=metadata.namespace=openshift-marketplace,metadata.name=redhat-operators
+          image: quay.io/operator-framework/olm@sha256:de396b540b82219812061d0d753440d5655250c621c753ed1dc67d6154741607
+          imagePullPolicy: IfNotPresent
+          env:
+            - name: RELEASE_VERSION
+              value: "0.0.1-snapshot"
+            - name: NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
+            - name: LIFECYCLE_SERVER_IMAGE
+              value: quay.io/operator-framework/olm@sha256:de396b540b82219812061d0d753440d5655250c621c753ed1dc67d6154741607
+            - name: GOMEMLIMIT
+              value: "5MiB"
+          resources:
+            requests:
+              cpu: 10m
+              memory: 10Mi
+          ports:
+            - containerPort: 8081
+              name: health
+          livenessProbe:
+            httpGet:
+              path: /healthz
+              port: health
+              scheme: HTTP
+            initialDelaySeconds: 30
+          readinessProbe:
+            httpGet:
+              path: /healthz
+              port: health
+              scheme: HTTP
+            initialDelaySeconds: 30
+          terminationMessagePolicy: FallbackToLogsOnError
+      nodeSelector:
+        kubernetes.io/os: linux
+        node-role.kubernetes.io/control-plane: ""
+      tolerations:
+        - effect: NoSchedule
+          key: node-role.kubernetes.io/master
+          operator: Exists
+        - effect: NoExecute
+          key: node.kubernetes.io/unreachable
+          operator: Exists
+          tolerationSeconds: 120
+        - effect: NoExecute
+          key: node.kubernetes.io/not-ready
+          operator: Exists
+          tolerationSeconds: 120
diff --git a/microshift-manifests/0000_50_olm_08-lifecycle-controller.deployment.yaml b/microshift-manifests/0000_50_olm_08-lifecycle-controller.deployment.yaml
new file mode 100644
index 0000000000..cb7b28e352
--- /dev/null
+++ b/microshift-manifests/0000_50_olm_08-lifecycle-controller.deployment.yaml
@@ -0,0 +1,93 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: lifecycle-controller
+  namespace: openshift-operator-lifecycle-manager
+  labels:
+    app: olm-lifecycle-controller
+  annotations:
+    release.openshift.io/feature-set: "TechPreviewNoUpgrade"
+    include.release.openshift.io/self-managed-high-availability: "true"
+    capability.openshift.io/name: "OperatorLifecycleManager"
+spec:
+  strategy:
+    type: Recreate
+  replicas: 1
+  selector:
+    matchLabels:
+      app: olm-lifecycle-controller
+  template:
+    metadata:
+      annotations:
+        target.workload.openshift.io/management: '{"effect": "PreferredDuringScheduling"}'
+        openshift.io/required-scc: restricted-v2
+        kubectl.kubernetes.io/default-container: lifecycle-controller
+      labels:
+        app: olm-lifecycle-controller
+    spec:
+      securityContext:
+        runAsNonRoot: true
+        seccompProfile:
+          type: RuntimeDefault
+      serviceAccountName: lifecycle-controller
+      priorityClassName: "system-cluster-critical"
+      containers:
+        - name: lifecycle-controller
+          securityContext:
+            allowPrivilegeEscalation: false
+            readOnlyRootFilesystem: true
+            capabilities:
+              drop: ["ALL"]
+          command:
+            - /bin/lifecycle-controller
+          args:
+            - start
+            - --catalog-source-field-selector=metadata.namespace=openshift-marketplace,metadata.name=redhat-operators
+          image: quay.io/operator-framework/olm@sha256:de396b540b82219812061d0d753440d5655250c621c753ed1dc67d6154741607
+          imagePullPolicy: IfNotPresent
+          env:
+            - name: RELEASE_VERSION
+              value: "0.0.1-snapshot"
+            - name: NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
+            - name: LIFECYCLE_SERVER_IMAGE
+              value: quay.io/operator-framework/olm@sha256:de396b540b82219812061d0d753440d5655250c621c753ed1dc67d6154741607
+            - name: GOMEMLIMIT
+              value: "5MiB"
+          resources:
+            requests:
+              cpu: 10m
+              memory: 10Mi
+          ports:
+            - containerPort: 8081
+              name: health
+          livenessProbe:
+            httpGet:
+              path: /healthz
+              port: health
+              scheme: HTTP
+            initialDelaySeconds: 30
+          readinessProbe:
+            httpGet:
+              path: /healthz
+              port: health
+              scheme: HTTP
+            initialDelaySeconds: 30
+          terminationMessagePolicy: FallbackToLogsOnError
+      nodeSelector:
+        kubernetes.io/os: linux
+        node-role.kubernetes.io/control-plane: ""
+      tolerations:
+        - effect: NoSchedule
+          key: node-role.kubernetes.io/master
+          operator: Exists
+        - effect: NoExecute
+          key: node.kubernetes.io/unreachable
+          operator: Exists
+          tolerationSeconds: 120
+        - effect: NoExecute
+          key: node.kubernetes.io/not-ready
+          operator: Exists
+          tolerationSeconds: 120
diff --git a/microshift-manifests/0000_50_olm_08-lifecycle-controller.networkpolicy.yaml b/microshift-manifests/0000_50_olm_08-lifecycle-controller.networkpolicy.yaml
new file mode 100644
index 0000000000..c08803d707
--- /dev/null
+++ b/microshift-manifests/0000_50_olm_08-lifecycle-controller.networkpolicy.yaml
@@ -0,0 +1,35 @@
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+  name: lifecycle-controller
+  namespace: openshift-operator-lifecycle-manager
+  annotations:
+    release.openshift.io/feature-set: "TechPreviewNoUpgrade"
+    include.release.openshift.io/ibm-cloud-managed: "true"
+    include.release.openshift.io/self-managed-high-availability: "true"
+    capability.openshift.io/name: "OperatorLifecycleManager"
+    include.release.openshift.io/hypershift: "true"
+spec:
+  podSelector:
+    matchLabels:
+      app: olm-lifecycle-controller
+  ingress:
+    - ports:
+        - port: 8443
+          protocol: TCP
+  egress:
+    - ports:
+        - port: 6443
+          protocol: TCP
+    - ports:
+        - port: 53
+          protocol: TCP
+        - port: 53
+          protocol: UDP
+        - port: 5353
+          protocol: TCP
+        - port: 5353
+          protocol: UDP
+  policyTypes:
+    - Ingress
+    - Egress
diff --git a/microshift-manifests/0000_50_olm_08-lifecycle-controller.rbac.yaml b/microshift-manifests/0000_50_olm_08-lifecycle-controller.rbac.yaml
new file mode 100644
index 0000000000..24da8ffaf7
--- /dev/null
+++ b/microshift-manifests/0000_50_olm_08-lifecycle-controller.rbac.yaml
@@ -0,0 +1,91 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: lifecycle-controller
+  namespace: openshift-operator-lifecycle-manager
+  annotations:
+    release.openshift.io/feature-set: "TechPreviewNoUpgrade"
+    include.release.openshift.io/ibm-cloud-managed: "true"
+    include.release.openshift.io/self-managed-high-availability: "true"
+    capability.openshift.io/name: "OperatorLifecycleManager"
+    include.release.openshift.io/hypershift: "true"
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: operator-lifecycle-manager-lifecycle-controller
+  annotations:
+    release.openshift.io/feature-set: "TechPreviewNoUpgrade"
+    include.release.openshift.io/ibm-cloud-managed: "true"
+    include.release.openshift.io/self-managed-high-availability: "true"
+    capability.openshift.io/name: "OperatorLifecycleManager"
+    include.release.openshift.io/hypershift: "true"
+rules:
+  # Read APIServer for TLS security profile configuration
+  - apiGroups: ["config.openshift.io"]
+    resources: ["apiservers"]
+    verbs: ["get", "list", "watch"]
+  # Watch CatalogSources cluster-wide
+  - apiGroups: ["operators.coreos.com"]
+    resources: ["catalogsources"]
+    verbs: ["get", "list", "watch"]
+  # Watch catalog pods cluster-wide
+  - apiGroups: [""]
+    resources: ["pods"]
+    verbs: ["get", "list", "watch"]
+  # Manage lifecycle-server deployments
+  - apiGroups: ["apps"]
+    resources: ["deployments"]
+    verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
+  # Manage lifecycle-server services
+  - apiGroups: [""]
+    resources: ["services"]
+    verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
+  # Manage lifecycle-server serviceaccounts
+  - apiGroups: [""]
+    resources: ["serviceaccounts"]
+    verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
+  # Manage lifecycle-server networkpolicies
+  - apiGroups: ["networking.k8s.io"]
+    resources: ["networkpolicies"]
+    verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
+  # Manage lifecycle-server clusterrolebindings
+  - apiGroups: ["rbac.authorization.k8s.io"]
+    resources: ["clusterrolebindings"]
+    verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
+  # Required to grant these permissions to lifecycle-server via CRB
+  - apiGroups: ["authentication.k8s.io"]
+    resources: ["tokenreviews"]
+    verbs: ["create"]
+  - apiGroups: ["authorization.k8s.io"]
+    resources: ["subjectaccessreviews"]
+    verbs: ["create"]
+  # Leader election
+  - apiGroups: ["coordination.k8s.io"]
+    resources: ["leases"]
+    verbs: ["get", "list", "watch", "create", "update", "delete"]
+  - apiGroups: [""]
+    resources: ["configmaps"]
+    verbs: ["get", "list", "watch", "create", "update", "delete"]
+  - apiGroups: [""]
+    resources: ["events"]
+    verbs: ["create", "patch"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: operator-lifecycle-manager-lifecycle-controller
+  annotations:
+    release.openshift.io/feature-set: "TechPreviewNoUpgrade"
+    include.release.openshift.io/ibm-cloud-managed: "true"
+    include.release.openshift.io/self-managed-high-availability: "true"
+    capability.openshift.io/name: "OperatorLifecycleManager"
+    include.release.openshift.io/hypershift: "true"
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: operator-lifecycle-manager-lifecycle-controller
+subjects:
+  - kind: ServiceAccount
+    name: lifecycle-controller
+    namespace: openshift-operator-lifecycle-manager
diff --git a/microshift-manifests/0000_50_olm_09-lifecycle-server.rbac.yaml b/microshift-manifests/0000_50_olm_09-lifecycle-server.rbac.yaml
new file mode 100644
index 0000000000..d848837106
--- /dev/null
+++ b/microshift-manifests/0000_50_olm_09-lifecycle-server.rbac.yaml
@@ -0,0 +1,18 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: operator-lifecycle-manager-lifecycle-server
+  annotations:
+    release.openshift.io/feature-set: "TechPreviewNoUpgrade"
+    include.release.openshift.io/ibm-cloud-managed: "true"
+    include.release.openshift.io/self-managed-high-availability: "true"
+    capability.openshift.io/name: "OperatorLifecycleManager"
+    include.release.openshift.io/hypershift: "true"
+rules:
+  # Required by kube-rbac-proxy for authn/authz
+  - apiGroups: ["authentication.k8s.io"]
+    resources: ["tokenreviews"]
+    verbs: ["create"]
+  - apiGroups: ["authorization.k8s.io"]
+    resources: ["subjectaccessreviews"]
+    verbs: ["create"]
diff --git a/microshift-manifests/kustomization.yaml b/microshift-manifests/kustomization.yaml
index 206174302a..1fa6690acd 100644
--- a/microshift-manifests/kustomization.yaml
+++ b/microshift-manifests/kustomization.yaml
@@ -20,6 +20,10 @@ resources:
   - 0000_50_olm_03-services.yaml
   - 0000_50_olm_07-olm-operator.deployment.yaml
   - 0000_50_olm_08-catalog-operator.deployment.yaml
+  - 0000_50_olm_08-lifecycle-controller.deployment.yaml
+  - 0000_50_olm_08-lifecycle-controller.networkpolicy.yaml
+  - 0000_50_olm_08-lifecycle-controller.rbac.yaml
   - 0000_50_olm_09-aggregated.clusterrole.yaml
+  - 0000_50_olm_09-lifecycle-server.rbac.yaml
   - 0000_50_olm_13-operatorgroup-default.yaml
   - 0000_50_olm_15-csv-viewer.rbac.yaml
--from=builder /tmp/build/olmv0-tests-ext.gz /usr/bin/olmv0-tests-ext.gz +COPY --from=builder /build/bin/lifecycle-controller /bin/lifecycle-controller +COPY --from=builder /build/bin/lifecycle-server /bin/lifecycle-server # This image doesn't need to run as root user. USER 1001 diff --git a/pkg/lifecycle-controller/controller.go b/pkg/lifecycle-controller/controller.go new file mode 100644 index 0000000000..27f861e145 --- /dev/null +++ b/pkg/lifecycle-controller/controller.go @@ -0,0 +1,733 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "context" + "crypto/tls" + "fmt" + "sort" + "strings" + + "github.com/go-logr/logr" + "github.com/openshift/library-go/pkg/crypto" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + networkingv1 "k8s.io/api/networking/v1" + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/utils/ptr" + + operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" +) + +const ( + catalogLabelKey = "olm.catalogSource" + catalogNameLabelKey = "olm.lifecycle-server/catalog-name" + fieldManager = "lifecycle-controller" + clusterRoleName = "operator-lifecycle-manager-lifecycle-server" + clusterRoleBindingName = "operator-lifecycle-manager-lifecycle-server" + appLabelKey = "app" + appLabelVal = "olm-lifecycle-server" + resourceBaseName = "lifecycle-server" +) + +// LifecycleControllerReconciler reconciles CatalogSources and manages lifecycle-server resources +type LifecycleControllerReconciler struct { + client.Client + Log logr.Logger + Scheme *runtime.Scheme + ServerImage string + CatalogSourceLabelSelector labels.Selector + CatalogSourceFieldSelector fields.Selector + TLSConfigProvider *TLSConfigProvider +} + +// matchesCatalogSource checks if a CatalogSource matches both label and field selectors +func (r *LifecycleControllerReconciler) matchesCatalogSource(cs *operatorsv1alpha1.CatalogSource) bool { + if !r.CatalogSourceLabelSelector.Matches(labels.Set(cs.Labels)) { + return false + } + fieldSet := fields.Set{ + "metadata.name": cs.Name, + "metadata.namespace": cs.Namespace, + } + return r.CatalogSourceFieldSelector.Matches(fieldSet) +} + +// Reconcile watches CatalogSources and manages lifecycle-server resources per catalog +func (r *LifecycleControllerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + log := r.Log.WithValues("catalogSource", req.NamespacedName) + + log.Info("handling reconciliation request") + defer log.Info("finished reconciliation") + 
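+	// Reconciliation has three outcomes:
+	//  1. The CatalogSource is gone or no longer matches the selectors:
+	//     tear down the per-catalog resources and drop its ServiceAccount
+	//     from the shared ClusterRoleBinding.
+	//  2. The CatalogSource matches but its catalog pod is not running yet:
+	//     return without requeueing; the Pod watch re-enqueues this
+	//     CatalogSource once the pod reports a usable image digest.
+	//  3. The CatalogSource matches and has a running pod: apply the
+	//     per-catalog resources and refresh the shared ClusterRoleBinding.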
+ // Get the CatalogSource + var cs operatorsv1alpha1.CatalogSource + if err := r.Get(ctx, req.NamespacedName, &cs); err != nil { + if errors.IsNotFound(err) { + // CatalogSource was deleted, cleanup resources + if err := r.cleanupResources(ctx, log, req.Namespace, req.Name); err != nil { + return ctrl.Result{}, err + } + // Also reconcile the shared CRB to remove this SA + return ctrl.Result{}, r.reconcileClusterRoleBinding(ctx, log) + } + log.Error(err, "failed to get catalog source") + return ctrl.Result{}, err + } + + // Check if CatalogSource matches our selectors + if !r.matchesCatalogSource(&cs) { + // CatalogSource doesn't match, cleanup any existing resources + if err := r.cleanupResources(ctx, log, cs.Namespace, cs.Name); err != nil { + return ctrl.Result{}, err + } + // Also reconcile the shared CRB to remove this SA + return ctrl.Result{}, r.reconcileClusterRoleBinding(ctx, log) + } + + // Get the catalog image ref from running pod + imageRef, nodeName, err := r.getCatalogPodInfo(ctx, &cs) + if err != nil { + log.Error(err, "failed to get catalog pod info") + return ctrl.Result{}, err + } + if imageRef == "" { + log.Info("no valid image ref for catalog source, waiting for pod") + return ctrl.Result{}, nil + } + + // Ensure all resources exist for this CatalogSource + if err := r.ensureResources(ctx, log, &cs, imageRef, nodeName); err != nil { + return ctrl.Result{}, err + } + + // Reconcile the shared ClusterRoleBinding + if err := r.reconcileClusterRoleBinding(ctx, log); err != nil { + return ctrl.Result{}, err + } + + return ctrl.Result{}, nil +} + +// getCatalogPodInfo gets the image digest and node name from the catalog's running pod +func (r *LifecycleControllerReconciler) getCatalogPodInfo(ctx context.Context, cs *operatorsv1alpha1.CatalogSource) (string, string, error) { + var pods corev1.PodList + if err := r.List(ctx, &pods, + client.InNamespace(cs.Namespace), + client.MatchingLabels{catalogLabelKey: cs.Name}, + ); err != nil { + return "", "", err + } + + // Find a running pod with a valid digest + for i := range pods.Items { + p := &pods.Items[i] + if p.Status.Phase != corev1.PodRunning { + continue + } + digest := imageID(p) + if digest != "" { + return digest, p.Spec.NodeName, nil + } + } + + return "", "", nil +} + +// ensureResources creates or updates namespace-scoped resources for a CatalogSource +func (r *LifecycleControllerReconciler) ensureResources(ctx context.Context, log logr.Logger, cs *operatorsv1alpha1.CatalogSource, imageRef, nodeName string) error { + name := resourceName(cs.Name) + + // Apply ServiceAccount (in catalog's namespace) + sa := r.buildServiceAccount(name, cs) + if err := r.Patch(ctx, sa, client.Apply, client.FieldOwner(fieldManager), client.ForceOwnership); err != nil { + log.Error(err, "failed to apply serviceaccount") + return err + } + + // Apply Service (in catalog's namespace) + svc := r.buildService(name, cs) + if err := r.Patch(ctx, svc, client.Apply, client.FieldOwner(fieldManager), client.ForceOwnership); err != nil { + log.Error(err, "failed to apply service") + return err + } + + // Apply Deployment (in catalog's namespace) + deploy := r.buildDeployment(name, cs, imageRef, nodeName) + if err := r.Patch(ctx, deploy, client.Apply, client.FieldOwner(fieldManager), client.ForceOwnership); err != nil { + log.Error(err, "failed to apply deployment") + return err + } + + // Apply NetworkPolicy (in catalog's namespace) + np := r.buildNetworkPolicy(name, cs) + if err := r.Patch(ctx, np, client.Apply, client.FieldOwner(fieldManager), 
client.ForceOwnership); err != nil {
+		log.Error(err, "failed to apply networkpolicy")
+		return err
+	}
+
+	log.Info("applied resources", "name", name, "namespace", cs.Namespace, "imageRef", imageRef, "nodeName", nodeName)
+	return nil
+}
+
+// reconcileClusterRoleBinding maintains a single CRB with all lifecycle-server ServiceAccounts
+func (r *LifecycleControllerReconciler) reconcileClusterRoleBinding(ctx context.Context, log logr.Logger) error {
+	// List all matching CatalogSources
+	var allCatalogSources operatorsv1alpha1.CatalogSourceList
+	if err := r.List(ctx, &allCatalogSources); err != nil {
+		log.Error(err, "failed to list catalog sources for CRB reconciliation")
+		return err
+	}
+
+	// Build subjects list from matching CatalogSources
+	var subjects []rbacv1.Subject
+	for i := range allCatalogSources.Items {
+		cs := &allCatalogSources.Items[i]
+		if !r.matchesCatalogSource(cs) {
+			continue
+		}
+		// Check if SA exists (only add if we've created resources for this catalog)
+		saName := resourceName(cs.Name)
+		var sa corev1.ServiceAccount
+		if err := r.Get(ctx, types.NamespacedName{Name: saName, Namespace: cs.Namespace}, &sa); err != nil {
+			if errors.IsNotFound(err) {
+				continue // SA doesn't exist yet, skip
+			}
+			return err
+		}
+		subjects = append(subjects, rbacv1.Subject{
+			Kind:      "ServiceAccount",
+			Name:      saName,
+			Namespace: cs.Namespace,
+		})
+	}
+
+	// Sort subjects for deterministic ordering
+	sort.Slice(subjects, func(i, j int) bool {
+		if subjects[i].Namespace != subjects[j].Namespace {
+			return subjects[i].Namespace < subjects[j].Namespace
+		}
+		return subjects[i].Name < subjects[j].Name
+	})
+
+	// Apply the CRB
+	crb := &rbacv1.ClusterRoleBinding{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: "rbac.authorization.k8s.io/v1",
+			Kind:       "ClusterRoleBinding",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name: clusterRoleBindingName,
+			Labels: map[string]string{
+				appLabelKey: appLabelVal,
+			},
+		},
+		RoleRef: rbacv1.RoleRef{
+			APIGroup: "rbac.authorization.k8s.io",
+			Kind:     "ClusterRole",
+			Name:     clusterRoleName,
+		},
+		Subjects: subjects,
+	}
+
+	if err := r.Patch(ctx, crb, client.Apply, client.FieldOwner(fieldManager), client.ForceOwnership); err != nil {
+		log.Error(err, "failed to apply clusterrolebinding")
+		return err
+	}
+
+	log.Info("reconciled clusterrolebinding", "subjectCount", len(subjects))
+	return nil
+}
+
+// cleanupResources deletes the namespace-scoped resources created for a CatalogSource
+func (r *LifecycleControllerReconciler) cleanupResources(ctx context.Context, log logr.Logger, csNamespace, csName string) error {
+	name := resourceName(csName)
+	log = log.WithValues("resourceName", name, "namespace", csNamespace)
+
+	// Delete Deployment
+	deploy := &appsv1.Deployment{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      name,
+			Namespace: csNamespace,
+		},
+	}
+	if err := r.Delete(ctx, deploy); err != nil && !errors.IsNotFound(err) {
+		log.Error(err, "failed to delete deployment")
+		return err
+	}
+
+	// Delete Service
+	svc := &corev1.Service{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      name,
+			Namespace: csNamespace,
+		},
+	}
+	if err := r.Delete(ctx, svc); err != nil && !errors.IsNotFound(err) {
+		log.Error(err, "failed to delete service")
+		return err
+	}
+
+	// Delete NetworkPolicy (applied by ensureResources alongside the other per-catalog resources)
+	np := &networkingv1.NetworkPolicy{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      name,
+			Namespace: csNamespace,
+		},
+	}
+	if err := r.Delete(ctx, np); err != nil && !errors.IsNotFound(err) {
+		log.Error(err, "failed to delete networkpolicy")
+		return err
+	}
+
+	// Delete ServiceAccount
+	sa := &corev1.ServiceAccount{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      name,
+			Namespace: csNamespace,
+		},
+	}
+	if err := r.Delete(ctx, sa); err != nil && !errors.IsNotFound(err) {
+		log.Error(err, "failed to delete serviceaccount")
+		return err
+	}
+
+	log.Info("cleaned up resources")
+	return nil
+}
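+
+// A note on the apply pattern used in ensureResources above: the controller
+// never does read-modify-write on the per-catalog objects; it re-applies
+// fully specified manifests via server-side apply, letting the API server
+// merge changes and track field ownership. A minimal sketch of the same call
+// shape, using a hypothetical ConfigMap rather than the real resources:
+//
+//	cm := &corev1.ConfigMap{
+//		// TypeMeta must be populated for server-side apply patches.
+//		TypeMeta:   metav1.TypeMeta{APIVersion: "v1", Kind: "ConfigMap"},
+//		ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "default"},
+//		Data:       map[string]string{"key": "value"},
+//	}
+//	err := r.Patch(ctx, cm, client.Apply,
+//		client.FieldOwner(fieldManager), client.ForceOwnership)
+//
+// client.ForceOwnership resolves field conflicts in this manager's favor,
+// which keeps the objects converged even if another actor edits them.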
+ +// resourceName generates a DNS-compatible name for lifecycle-server resources +func resourceName(csName string) string { + name := fmt.Sprintf("%s-%s", csName, resourceBaseName) + name = strings.ReplaceAll(name, ".", "-") + name = strings.ReplaceAll(name, "_", "-") + if len(name) > 63 { + name = name[:63] + } + return strings.ToLower(name) +} + +// buildServiceAccount creates a ServiceAccount for a lifecycle-server +func (r *LifecycleControllerReconciler) buildServiceAccount(name string, cs *operatorsv1alpha1.CatalogSource) *corev1.ServiceAccount { + return &corev1.ServiceAccount{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "ServiceAccount", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: cs.Namespace, + Labels: map[string]string{ + appLabelKey: appLabelVal, + catalogNameLabelKey: cs.Name, + }, + }, + } +} + +// buildService creates a Service for a lifecycle-server +func (r *LifecycleControllerReconciler) buildService(name string, cs *operatorsv1alpha1.CatalogSource) *corev1.Service { + return &corev1.Service{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "Service", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: cs.Namespace, + Labels: map[string]string{ + appLabelKey: appLabelVal, + catalogNameLabelKey: cs.Name, + }, + Annotations: map[string]string{ + "service.beta.openshift.io/serving-cert-secret-name": fmt.Sprintf("%s-tls", name), + }, + }, + Spec: corev1.ServiceSpec{ + Selector: map[string]string{ + appLabelKey: appLabelVal, + catalogNameLabelKey: cs.Name, + }, + Ports: []corev1.ServicePort{ + { + Name: "api", + Port: 8443, + TargetPort: intstr.FromString("api"), + Protocol: corev1.ProtocolTCP, + }, + }, + Type: corev1.ServiceTypeClusterIP, + }, + } +} + +// buildDeployment creates a Deployment for a lifecycle-server +func (r *LifecycleControllerReconciler) buildDeployment(name string, cs *operatorsv1alpha1.CatalogSource, imageRef, nodeName string) *appsv1.Deployment { + podLabels := map[string]string{ + appLabelKey: appLabelVal, + catalogNameLabelKey: cs.Name, + } + + // Determine the catalog directory inside the image + catalogDir := "/configs" // default for standard catalog images + if cs.Spec.GrpcPodConfig != nil && cs.Spec.GrpcPodConfig.ExtractContent != nil && cs.Spec.GrpcPodConfig.ExtractContent.CatalogDir != "" { + catalogDir = cs.Spec.GrpcPodConfig.ExtractContent.CatalogDir + } + + const catalogMountPath = "/catalog" + fbcPath := catalogMountPath + catalogDir + + return &appsv1.Deployment{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "apps/v1", + Kind: "Deployment", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: cs.Namespace, + Labels: podLabels, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: ptr.To(int32(1)), + Strategy: appsv1.DeploymentStrategy{ + Type: appsv1.RollingUpdateDeploymentStrategyType, + RollingUpdate: &appsv1.RollingUpdateDeployment{ + MaxUnavailable: ptr.To(intstr.FromInt(1)), + MaxSurge: ptr.To(intstr.FromInt(1)), + }, + }, + Selector: &metav1.LabelSelector{ + MatchLabels: podLabels, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: podLabels, + Annotations: map[string]string{ + "target.workload.openshift.io/management": `{"effect": "PreferredDuringScheduling"}`, + "openshift.io/required-scc": "restricted-v2", + "kubectl.kubernetes.io/default-container": "lifecycle-server", + }, + }, + Spec: corev1.PodSpec{ + SecurityContext: &corev1.PodSecurityContext{ + RunAsNonRoot: ptr.To(true), + SeccompProfile: &corev1.SeccompProfile{ + Type: 
corev1.SeccompProfileTypeRuntimeDefault, + }, + }, + ServiceAccountName: name, + PriorityClassName: "system-cluster-critical", + // Prefer scheduling on the same node as the catalog pod (only if nodeName is known) + Affinity: nodeAffinityForNode(nodeName), + NodeSelector: map[string]string{ + "kubernetes.io/os": "linux", + }, + Tolerations: []corev1.Toleration{ + { + Key: "node-role.kubernetes.io/master", + Operator: corev1.TolerationOpExists, + Effect: corev1.TaintEffectNoSchedule, + }, + { + Key: "node.kubernetes.io/unreachable", + Operator: corev1.TolerationOpExists, + Effect: corev1.TaintEffectNoExecute, + TolerationSeconds: ptr.To(int64(120)), + }, + { + Key: "node.kubernetes.io/not-ready", + Operator: corev1.TolerationOpExists, + Effect: corev1.TaintEffectNoExecute, + TolerationSeconds: ptr.To(int64(120)), + }, + }, + Containers: []corev1.Container{ + { + Name: "lifecycle-server", + Image: r.ServerImage, + ImagePullPolicy: corev1.PullIfNotPresent, + Command: []string{"/bin/lifecycle-server"}, + Args: r.buildLifecycleServerArgs(fbcPath), + Env: []corev1.EnvVar{ + { + Name: "GOMEMLIMIT", + Value: "50MiB", + }, + }, + Ports: []corev1.ContainerPort{ + { + Name: "api", + ContainerPort: 8443, + }, + { + Name: "health", + ContainerPort: 8081, + }, + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: "catalog", + MountPath: catalogMountPath, + ReadOnly: true, + }, + { + Name: "serving-cert", + MountPath: "/var/run/secrets/serving-cert", + ReadOnly: true, + }, + }, + LivenessProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/healthz", + Port: intstr.FromString("health"), + Scheme: corev1.URISchemeHTTP, + }, + }, + InitialDelaySeconds: 30, + }, + ReadinessProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/healthz", + Port: intstr.FromString("health"), + Scheme: corev1.URISchemeHTTP, + }, + }, + InitialDelaySeconds: 30, + }, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("10m"), + corev1.ResourceMemory: resource.MustParse("50Mi"), + }, + }, + SecurityContext: &corev1.SecurityContext{ + AllowPrivilegeEscalation: ptr.To(false), + ReadOnlyRootFilesystem: ptr.To(true), + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, + }, + TerminationMessagePolicy: corev1.TerminationMessageFallbackToLogsOnError, + }, + }, + Volumes: []corev1.Volume{ + { + Name: "catalog", + VolumeSource: corev1.VolumeSource{ + Image: &corev1.ImageVolumeSource{ + Reference: imageRef, + PullPolicy: corev1.PullIfNotPresent, + }, + }, + }, + { + Name: "serving-cert", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: fmt.Sprintf("%s-tls", name), + }, + }, + }, + }, + }, + }, + }, + } +} + +// buildNetworkPolicy creates a NetworkPolicy for a lifecycle-server +func (r *LifecycleControllerReconciler) buildNetworkPolicy(name string, cs *operatorsv1alpha1.CatalogSource) *networkingv1.NetworkPolicy { + tcp := corev1.ProtocolTCP + udp := corev1.ProtocolUDP + return &networkingv1.NetworkPolicy{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "networking.k8s.io/v1", + Kind: "NetworkPolicy", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: cs.Namespace, + Labels: map[string]string{ + appLabelKey: appLabelVal, + catalogNameLabelKey: cs.Name, + }, + }, + Spec: networkingv1.NetworkPolicySpec{ + PodSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + appLabelKey: appLabelVal, + 
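+					// Both labels are required so the policy selects only this
+					// catalog's lifecycle-server pod, not every lifecycle-server.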
catalogNameLabelKey: cs.Name, + }, + }, + Ingress: []networkingv1.NetworkPolicyIngressRule{ + { + Ports: []networkingv1.NetworkPolicyPort{ + {Port: ptr.To(intstr.FromInt32(8443)), Protocol: &tcp}, + }, + }, + }, + Egress: []networkingv1.NetworkPolicyEgressRule{ + { + // API server + Ports: []networkingv1.NetworkPolicyPort{ + {Port: ptr.To(intstr.FromInt32(6443)), Protocol: &tcp}, + }, + }, + { + // DNS + Ports: []networkingv1.NetworkPolicyPort{ + {Port: ptr.To(intstr.FromInt32(53)), Protocol: &tcp}, + {Port: ptr.To(intstr.FromInt32(53)), Protocol: &udp}, + {Port: ptr.To(intstr.FromInt32(5353)), Protocol: &tcp}, + {Port: ptr.To(intstr.FromInt32(5353)), Protocol: &udp}, + }, + }, + }, + PolicyTypes: []networkingv1.PolicyType{ + networkingv1.PolicyTypeIngress, + networkingv1.PolicyTypeEgress, + }, + }, + } +} + +// buildLifecycleServerArgs builds the command-line arguments for lifecycle-server +func (r *LifecycleControllerReconciler) buildLifecycleServerArgs(fbcPath string) []string { + args := []string{ + "start", + fmt.Sprintf("--fbc-path=%s", fbcPath), + } + + if r.TLSConfigProvider != nil { + if cfg := r.TLSConfigProvider.Get(); cfg != nil { + args = append(args, fmt.Sprintf("--tls-min-version=%s", crypto.TLSVersionToNameOrDie(cfg.MinVersion))) + if cfg.MinVersion <= tls.VersionTLS12 { + args = append(args, fmt.Sprintf("--tls-cipher-suites=%s", strings.Join(crypto.CipherSuitesToNamesOrDie(cfg.CipherSuites), ","))) + } + } + } + + return args +} + +// imageID extracts digest from pod status (handles extract-content mode) +func imageID(pod *corev1.Pod) string { + // In extract-content mode, look for the "extract-content" init container + for i := range pod.Status.InitContainerStatuses { + if pod.Status.InitContainerStatuses[i].Name == "extract-content" { + return pod.Status.InitContainerStatuses[i].ImageID + } + } + // Fallback to the first container (standard grpc mode) + if len(pod.Status.ContainerStatuses) > 0 { + return pod.Status.ContainerStatuses[0].ImageID + } + return "" +} + +// nodeAffinityForNode returns a node affinity preferring the given node, or nil if nodeName is empty +func nodeAffinityForNode(nodeName string) *corev1.Affinity { + if nodeName == "" { + return nil + } + return &corev1.Affinity{ + NodeAffinity: &corev1.NodeAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []corev1.PreferredSchedulingTerm{ + { + Weight: 100, + Preference: corev1.NodeSelectorTerm{ + MatchExpressions: []corev1.NodeSelectorRequirement{ + { + Key: "kubernetes.io/hostname", + Operator: corev1.NodeSelectorOpIn, + Values: []string{nodeName}, + }, + }, + }, + }, + }, + }, + } +} + +// LifecycleServerLabelSelector returns a label selector matching lifecycle-server deployments +func LifecycleServerLabelSelector() labels.Selector { + return labels.SelectorFromSet(labels.Set{appLabelKey: appLabelVal}) +} + +// SetupWithManager sets up the controller with the Manager. +// tlsChangeSource is an optional channel source that triggers reconciliation when TLS config changes. +func (r *LifecycleControllerReconciler) SetupWithManager(mgr ctrl.Manager, tlsChangeSource source.Source) error { + builder := ctrl.NewControllerManagedBy(mgr). + For(&operatorsv1alpha1.CatalogSource{}). 
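+		// Primary trigger: any change to a CatalogSource enqueues it directly.
+		// The two Watches below cover indirect drift: catalog pod churn (new
+		// image digests, rescheduling) and edits or deletion of the managed
+		// lifecycle-server Deployments.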
+ // Watch Pods to detect catalog pod changes + Watches(&corev1.Pod{}, handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request { + pod, ok := obj.(*corev1.Pod) + if !ok { + return nil + } + // Check if this is a catalog pod + catalogName, ok := pod.Labels[catalogLabelKey] + if !ok { + return nil + } + // Enqueue the CatalogSource for reconciliation + return []reconcile.Request{ + { + NamespacedName: types.NamespacedName{ + Name: catalogName, + Namespace: pod.Namespace, + }, + }, + } + })). + // Watch lifecycle-server Deployments to detect changes/deletion + Watches(&appsv1.Deployment{}, handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request { + deploy, ok := obj.(*appsv1.Deployment) + if !ok { + return nil + } + // Only watch our deployments + if deploy.Labels[appLabelKey] != appLabelVal { + return nil + } + csName := deploy.Labels[catalogNameLabelKey] + if csName == "" { + return nil + } + return []reconcile.Request{ + { + NamespacedName: types.NamespacedName{ + Name: csName, + Namespace: deploy.Namespace, + }, + }, + } + })) + + // Add TLS change source if provided + if tlsChangeSource != nil { + builder = builder.WatchesRawSource(tlsChangeSource) + } + + return builder.Complete(r) +} diff --git a/pkg/lifecycle-controller/tls.go b/pkg/lifecycle-controller/tls.go new file mode 100644 index 0000000000..2a77748832 --- /dev/null +++ b/pkg/lifecycle-controller/tls.go @@ -0,0 +1,131 @@ +package controllers + +import ( + "context" + "crypto/tls" + "reflect" + "sync" + + "github.com/go-logr/logr" + configv1 "github.com/openshift/api/config/v1" + "github.com/openshift/library-go/pkg/crypto" + "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/apiserver" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" +) + +const ( + // Name of the cluster-scoped APIServer resource + clusterAPIServerName = "cluster" +) + +// TLSConfig holds the TLS configuration extracted from the APIServer resource +type TLSConfig struct { + minVersion uint16 + cipherSuites []uint16 + // String representations for passing to lifecycle-server + minVersionString string + cipherSuiteStrings []string +} + +// TLSConfigProvider provides thread-safe access to dynamically updated TLS configuration. +// It implements controllers.TLSConfigProvider interface. +type TLSConfigProvider struct { + mu sync.RWMutex + config *tls.Config +} + +// NewTLSConfigProvider creates a new TLSConfigProvider with the given initial config. +func NewTLSConfigProvider(initial *tls.Config) *TLSConfigProvider { + return &TLSConfigProvider{config: initial} +} + +// Get returns the current TLS configuration. +func (p *TLSConfigProvider) Get() *tls.Config { + p.mu.RLock() + defer p.mu.RUnlock() + return p.config +} + +// Update sets a new TLS configuration. +func (p *TLSConfigProvider) Update(cfg *tls.Config) { + p.mu.Lock() + defer p.mu.Unlock() + p.config = cfg +} + +// GetClusterTLSConfig reads the APIServer "cluster" resource and extracts TLS settings. +// Falls back to defaults if an error occurs looking up the apiserver config. 
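+// For example, the default Intermediate profile maps to MinVersion TLS 1.2
+// plus the Mozilla intermediate cipher suites, while a Modern profile maps to
+// MinVersion TLS 1.3, for which Go ignores the CipherSuites field entirely.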
+func GetClusterTLSConfig(ctx context.Context, cl client.Client, log logr.Logger) *tls.Config { + var ( + apiServer configv1.APIServer + minVersion uint16 + cipherSuites []uint16 + ) + if err := cl.Get(ctx, types.NamespacedName{Name: clusterAPIServerName}, &apiServer); err != nil { + log.Error(err, "failed to lookup APIServer; using default TLS security profile") + minVersion, cipherSuites = apiserver.GetSecurityProfileConfig(nil) + } else { + minVersion, cipherSuites = apiserver.GetSecurityProfileConfig(apiServer.Spec.TLSSecurityProfile) + } + + log.Info("loaded TLS configuration from APIServer", + "minVersion", crypto.TLSVersionToNameOrDie(minVersion), + "cipherSuites", crypto.CipherSuitesToNamesOrDie(cipherSuites), + ) + + return &tls.Config{ + MinVersion: minVersion, + CipherSuites: cipherSuites, + } +} + +// ClusterTLSProfileReconciler watches the APIServer "cluster" resource and updates TLS config dynamically +type ClusterTLSProfileReconciler struct { + Client client.Client + Log logr.Logger + TLSProvider *TLSConfigProvider + OnChange func(prev, cur *tls.Config) +} + +func (r *ClusterTLSProfileReconciler) Reconcile(ctx context.Context, _ reconcile.Request) (reconcile.Result, error) { + // Check if config changed + oldConfig := r.TLSProvider.Get() + newConfig := GetClusterTLSConfig(ctx, r.Client, r.Log) + if reflect.DeepEqual(oldConfig, newConfig) { + // No change + return reconcile.Result{}, nil + } + + r.Log.Info("TLS security profile changed, updating configuration and triggering reconciliation", + "oldMinVersion", crypto.TLSVersionToNameOrDie(oldConfig.MinVersion), + "newMinVersion", crypto.TLSVersionToNameOrDie(newConfig.MinVersion), + "oldCipherSuites", crypto.CipherSuitesToNamesOrDie(oldConfig.CipherSuites), + "newCipherSuites", crypto.CipherSuitesToNamesOrDie(newConfig.CipherSuites), + ) + + // Update the provider and call the OnChange callback + r.TLSProvider.Update(newConfig) + r.OnChange(oldConfig, newConfig) + + return reconcile.Result{}, nil +} + +func (r *ClusterTLSProfileReconciler) SetupWithManager(mgr manager.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + Named("tlsprofile-reconciler"). + WatchesRawSource(source.Kind(mgr.GetCache(), &configv1.APIServer{}, + handler.TypedEnqueueRequestsFromMapFunc(func(ctx context.Context, obj *configv1.APIServer) []reconcile.Request { + if obj.Name == clusterAPIServerName { + return []reconcile.Request{{NamespacedName: types.NamespacedName{Name: clusterAPIServerName}}} + } + return nil + }), + )). + Complete(r) +} diff --git a/pkg/lifecycle-server/fbc.go b/pkg/lifecycle-server/fbc.go new file mode 100644 index 0000000000..2e0bdf0d14 --- /dev/null +++ b/pkg/lifecycle-server/fbc.go @@ -0,0 +1,115 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package server + +import ( + "context" + "encoding/json" + "os" + "regexp" + "sync" + + "github.com/operator-framework/operator-registry/alpha/declcfg" + "k8s.io/apimachinery/pkg/util/sets" +) + +// versionPattern matches API versions like v1, v1alpha1, v2beta3 +// Matches: v1, v1alpha1, v1beta1, v200beta300 +// Does not match: 1, v0, v1beta0 +const versionPattern = `v[1-9][0-9]*(?:(?:alpha|beta)[1-9][0-9]*)?` + +// schemaVersionRegex matches lifecycle schema versions in FBC blobs +var schemaVersionRegex = regexp.MustCompile(`^io\.openshift\.operators\.lifecycles\.(` + versionPattern + `)$`) + +// LifecycleIndex maps schema version -> package name -> raw JSON blob +type LifecycleIndex map[string]map[string]json.RawMessage + +// LoadLifecycleData loads lifecycle blobs from FBC files at the given path +func LoadLifecycleData(fbcPath string) (LifecycleIndex, error) { + result := make(LifecycleIndex) + var mu sync.Mutex + + // Check if path exists + if _, err := os.Stat(fbcPath); os.IsNotExist(err) { + return result, nil + } + + root := os.DirFS(fbcPath) + err := declcfg.WalkMetasFS(context.Background(), root, func(path string, meta *declcfg.Meta, err error) error { + if err != nil { + return nil // Skip errors, continue walking + } + if meta == nil { + return nil + } + + // Check if schema matches our pattern + matches := schemaVersionRegex.FindStringSubmatch(meta.Schema) + if matches == nil { + return nil + } + schemaVersion := matches[1] // e.g., "v1alpha1" + + if meta.Package == "" { + return nil + } + + // Store in index (thread-safe) + mu.Lock() + if result[schemaVersion] == nil { + result[schemaVersion] = make(map[string]json.RawMessage) + } + result[schemaVersion][meta.Package] = meta.Blob + mu.Unlock() + + return nil + }) + + if err != nil { + return nil, err + } + + return result, nil +} + +// CountBlobs returns the total number of blobs in the index +func (index LifecycleIndex) CountBlobs() int { + count := 0 + for _, packages := range index { + count += len(packages) + } + return count +} + +func (index LifecycleIndex) CountPackages() int { + pkgs := sets.New[string]() + for _, packages := range index { + for pkg := range packages { + pkgs.Insert(pkg) + } + } + return pkgs.Len() +} + +// ListVersions returns the list of versions available in the index +func (index LifecycleIndex) ListVersions() []string { + versions := make([]string, 0, len(index)) + for v := range index { + versions = append(versions, v) + } + return versions +} diff --git a/pkg/lifecycle-server/server.go b/pkg/lifecycle-server/server.go new file mode 100644 index 0000000000..7571ccc017 --- /dev/null +++ b/pkg/lifecycle-server/server.go @@ -0,0 +1,66 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package server + +import ( + "net/http" + + "github.com/go-logr/logr" +) + +// NewHandler creates a new HTTP handler for the lifecycle API +func NewHandler(data LifecycleIndex, log logr.Logger) http.Handler { + mux := http.NewServeMux() + + // GET /api/{version}/lifecycles/{package} + mux.HandleFunc("GET /api/{version}/lifecycles/{package}", func(w http.ResponseWriter, r *http.Request) { + version := r.PathValue("version") + pkg := r.PathValue("package") + + // If no lifecycle data is available, return 503 Service Unavailable + if len(data) == 0 { + log.V(1).Info("no lifecycle data available, returning 503") + http.Error(w, "No lifecycle data available", http.StatusServiceUnavailable) + return + } + + // Look up version in index + versionData, ok := data[version] + if !ok { + log.V(1).Info("version not found", "version", version, "package", pkg) + http.NotFound(w, r) + return + } + + // Look up package in version + rawJSON, ok := versionData[pkg] + if !ok { + log.V(1).Info("package not found", "version", version, "package", pkg) + http.NotFound(w, r) + return + } + + log.V(1).Info("returning lifecycle data", "version", version, "package", pkg) + + w.Header().Set("Content-Type", "application/json") + if _, err := w.Write(rawJSON); err != nil { + log.V(1).Error(err, "failed to write response") + } + }) + + return mux +} diff --git a/scripts/generate_crds_manifests.sh b/scripts/generate_crds_manifests.sh index 743e520639..a0a5518a28 100755 --- a/scripts/generate_crds_manifests.sh +++ b/scripts/generate_crds_manifests.sh @@ -18,13 +18,11 @@ export GOFLAGS="-mod=vendor" source .bingo/variables.env YQ="go run ./vendor/github.com/mikefarah/yq/v3/" -CONTROLLER_GEN="go run ./vendor/sigs.k8s.io/controller-tools/cmd/controller-gen" ver=${OLM_VERSION:-"0.0.0-dev"} tmpdir="$(mktemp -p . -d 2>/dev/null || mktemp -d ./tmpdir.XXXXXXX)" chartdir="${tmpdir}/chart" crddir="${chartdir}/crds" -crdsrcdir="${tmpdir}/operators" SED="sed" if ! command -v ${SED} &> /dev/null; then @@ -44,21 +42,15 @@ fi cp -R "${ROOT_DIR}/staging/operator-lifecycle-manager/deploy/chart/" "${chartdir}" cp "${ROOT_DIR}"/values*.yaml "${tmpdir}" -cp -R "${ROOT_DIR}/staging/api/pkg/operators/" ${crdsrcdir} rm -rf ./manifests/* ${crddir}/* trap "rm -rf ${tmpdir}" EXIT -${CONTROLLER_GEN} crd:crdVersions=v1 output:crd:dir=${crddir} paths=${crdsrcdir}/... -${CONTROLLER_GEN} schemapatch:manifests=${crddir} output:dir=${crddir} paths=${crdsrcdir}/... 
- -${YQ} w --inplace ${crddir}/operators.coreos.com_clusterserviceversions.yaml spec.versions[0].schema.openAPIV3Schema.properties.spec.properties.install.properties.spec.properties.deployments.items.properties.spec.properties.template.properties.spec.properties.containers.items.properties.ports.items.properties.protocol.default TCP -${YQ} w --inplace ${crddir}/operators.coreos.com_clusterserviceversions.yaml spec.versions[0].schema.openAPIV3Schema.properties.spec.properties.install.properties.spec.properties.deployments.items.properties.spec.properties.template.properties.spec.properties.initContainers.items.properties.ports.items.properties.protocol.default TCP -${YQ} w --inplace ${crddir}/operators.coreos.com_clusterserviceversions.yaml spec.versions[0].schema.openAPIV3Schema.properties.spec.properties.install.properties.spec.properties.deployments.items.properties.spec.properties.template.properties.metadata.x-kubernetes-preserve-unknown-fields true -${YQ} d --inplace ${crddir}/operators.coreos.com_operatorconditions.yaml 'spec.versions[*].schema.openAPIV3Schema.properties.spec.properties.overrides.items.required(.==lastTransitionTime)' +# Copy upstream CRDs directly instead of regenerating with controller-gen +cp "${ROOT_DIR}"/staging/api/crds/*.yaml "${crddir}/" +# Rename CRD files to match OpenShift manifest naming convention for f in ${crddir}/*.yaml ; do - ${YQ} d --inplace $f status mv -v "$f" "${crddir}/0000_50_olm_00-$(basename $f | ${SED} 's/^.*_\([^.]\+\)\.yaml/\1.crd.yaml/')" done @@ -549,6 +541,234 @@ subjects: name: system:authenticated EOF +cat << EOF > manifests/0000_50_olm_08-lifecycle-controller.deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: lifecycle-controller + namespace: openshift-operator-lifecycle-manager + labels: + app: olm-lifecycle-controller + annotations: + release.openshift.io/feature-set: "TechPreviewNoUpgrade" +spec: + strategy: + type: Recreate + replicas: 1 + selector: + matchLabels: + app: olm-lifecycle-controller + template: + metadata: + annotations: + target.workload.openshift.io/management: '{"effect": "PreferredDuringScheduling"}' + openshift.io/required-scc: restricted-v2 + kubectl.kubernetes.io/default-container: lifecycle-controller + labels: + app: olm-lifecycle-controller + spec: + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + serviceAccountName: lifecycle-controller + priorityClassName: "system-cluster-critical" + containers: + - name: lifecycle-controller + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: + drop: ["ALL"] + command: + - /bin/lifecycle-controller + args: + - start + - --catalog-source-field-selector=metadata.namespace=openshift-marketplace,metadata.name=redhat-operators + image: quay.io/operator-framework/olm@sha256:de396b540b82219812061d0d753440d5655250c621c753ed1dc67d6154741607 + imagePullPolicy: IfNotPresent + env: + - name: RELEASE_VERSION + value: "0.0.1-snapshot" + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LIFECYCLE_SERVER_IMAGE + value: quay.io/operator-framework/olm@sha256:de396b540b82219812061d0d753440d5655250c621c753ed1dc67d6154741607 + - name: GOMEMLIMIT + value: "5MiB" + resources: + requests: + cpu: 10m + memory: 10Mi + ports: + - containerPort: 8081 + name: health + livenessProbe: + httpGet: + path: /healthz + port: health + scheme: HTTP + initialDelaySeconds: 30 + readinessProbe: + httpGet: + path: /healthz + port: health + scheme: HTTP + initialDelaySeconds: 
30 + terminationMessagePolicy: FallbackToLogsOnError + nodeSelector: + kubernetes.io/os: linux + node-role.kubernetes.io/control-plane: "" + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - effect: NoExecute + key: node.kubernetes.io/unreachable + operator: Exists + tolerationSeconds: 120 + - effect: NoExecute + key: node.kubernetes.io/not-ready + operator: Exists + tolerationSeconds: 120 +EOF + +cat << EOF > manifests/0000_50_olm_08-lifecycle-controller.networkpolicy.yaml +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: lifecycle-controller + namespace: openshift-operator-lifecycle-manager + annotations: + release.openshift.io/feature-set: "TechPreviewNoUpgrade" +spec: + podSelector: + matchLabels: + app: olm-lifecycle-controller + ingress: + - ports: + - port: 8443 + protocol: TCP + egress: + - ports: + - port: 6443 + protocol: TCP + - ports: + - port: 53 + protocol: TCP + - port: 53 + protocol: UDP + - port: 5353 + protocol: TCP + - port: 5353 + protocol: UDP + policyTypes: + - Ingress + - Egress +EOF + +cat << EOF > manifests/0000_50_olm_08-lifecycle-controller.rbac.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: lifecycle-controller + namespace: openshift-operator-lifecycle-manager + annotations: + release.openshift.io/feature-set: "TechPreviewNoUpgrade" +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: operator-lifecycle-manager-lifecycle-controller + annotations: + release.openshift.io/feature-set: "TechPreviewNoUpgrade" +rules: + # Read APIServer for TLS security profile configuration + - apiGroups: ["config.openshift.io"] + resources: ["apiservers"] + verbs: ["get", "list", "watch"] + # Watch CatalogSources cluster-wide + - apiGroups: ["operators.coreos.com"] + resources: ["catalogsources"] + verbs: ["get", "list", "watch"] + # Watch catalog pods cluster-wide + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list", "watch"] + # Manage lifecycle-server deployments + - apiGroups: ["apps"] + resources: ["deployments"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + # Manage lifecycle-server services + - apiGroups: [""] + resources: ["services"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + # Manage lifecycle-server serviceaccounts + - apiGroups: [""] + resources: ["serviceaccounts"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + # Manage lifecycle-server networkpolicies + - apiGroups: ["networking.k8s.io"] + resources: ["networkpolicies"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + # Manage lifecycle-server clusterrolebindings + - apiGroups: ["rbac.authorization.k8s.io"] + resources: ["clusterrolebindings"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + # Required to grant these permissions to lifecycle-server via CRB + - apiGroups: ["authentication.k8s.io"] + resources: ["tokenreviews"] + verbs: ["create"] + - apiGroups: ["authorization.k8s.io"] + resources: ["subjectaccessreviews"] + verbs: ["create"] + # Leader election + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "list", "watch", "create", "update", "delete"] + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "list", "watch", "create", "update", "delete"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + 
name: operator-lifecycle-manager-lifecycle-controller + annotations: + release.openshift.io/feature-set: "TechPreviewNoUpgrade" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: operator-lifecycle-manager-lifecycle-controller +subjects: + - kind: ServiceAccount + name: lifecycle-controller + namespace: openshift-operator-lifecycle-manager +EOF + +cat << EOF > manifests/0000_50_olm_09-lifecycle-server.rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: operator-lifecycle-manager-lifecycle-server + annotations: + release.openshift.io/feature-set: "TechPreviewNoUpgrade" +rules: + # Required by kube-rbac-proxy for authn/authz + - apiGroups: ["authentication.k8s.io"] + resources: ["tokenreviews"] + verbs: ["create"] + - apiGroups: ["authorization.k8s.io"] + resources: ["subjectaccessreviews"] + verbs: ["create"] +EOF + + add_ibm_managed_cloud_annotations "${ROOT_DIR}/manifests" hypershift_manifests_dir="${ROOT_DIR}/manifests"