-
Notifications
You must be signed in to change notification settings - Fork 8
HYPERFLEET-569 - feat: Implement maestro DSL loader #41
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: main
Are you sure you want to change the base?
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -76,7 +76,31 @@ spec: | |
| # Resources with valid K8s manifests | ||
| resources: | ||
| - name: "maestro" | ||
| transport: | ||
| client: "maestro" | ||
| maestro: | ||
| targetCluster: cluster1 | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. `manifestwork.ref` here should be a required field and should have validation. |
||
| manifest: | ||
| apiVersion: v1 | ||
| kind: Namespace | ||
| metadata: | ||
| name: "maestro-{{ .clusterId }}" | ||
| labels: | ||
| hyperfleet.io/cluster-id: "{{ .clusterId }}" | ||
| hyperfleet.io/cluster-name: "{{ .clusterName }}" | ||
| annotations: | ||
| hyperfleet.io/generation: "{{ .generationSpec }}" | ||
| discovery: | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Discovery belongs at the manifest level, not the manifestwork level.
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. The discovery section can help us locate the manifest attributes via JSONPath. |
||
| namespace: "*" # Cluster-scoped resource (Namespace) | ||
| bySelectors: | ||
| labelSelector: | ||
| hyperfleet.io/cluster-id: "{{ .clusterId }}" | ||
| hyperfleet.io/cluster-name: "{{ .clusterName }}" | ||
|
|
||
| - name: "clusterNamespace" | ||
| transport: | ||
| client: "kubernetes" | ||
| manifest: | ||
| apiVersion: v1 | ||
| kind: Namespace | ||
|
|
@@ -98,6 +122,8 @@ spec: | |
| # in the namespace created above | ||
| # it will require a service account to be created in that namespace as well as a role and rolebinding | ||
| - name: "jobServiceAccount" | ||
| transport: | ||
| client: "kubernetes" | ||
| manifest: | ||
| ref: "/etc/adapter/job-serviceaccount.yaml" | ||
| discovery: | ||
|
|
@@ -107,6 +133,8 @@ spec: | |
| hyperfleet.io/cluster-id: "{{ .clusterId }}" | ||
|
|
||
| - name: "job_role" | ||
| transport: | ||
| client: "kubernetes" | ||
| manifest: | ||
| ref: "/etc/adapter/job-role.yaml" | ||
| discovery: | ||
|
|
@@ -118,6 +146,8 @@ spec: | |
| hyperfleet.io/resource-type: "role" | ||
|
|
||
| - name: "job_rolebinding" | ||
| transport: | ||
| client: "kubernetes" | ||
| manifest: | ||
| ref: "/etc/adapter/job-rolebinding.yaml" | ||
| discovery: | ||
|
|
@@ -129,6 +159,8 @@ spec: | |
| hyperfleet.io/resource-type: "rolebinding" | ||
|
|
||
| - name: "jobNamespace" | ||
| transport: | ||
| client: "kubernetes" | ||
| manifest: | ||
| ref: "/etc/adapter/job.yaml" | ||
| discovery: | ||
|
|
@@ -143,6 +175,8 @@ spec: | |
| # and using the same service account as the adapter | ||
|
|
||
| - name: "deploymentNamespace" | ||
| transport: | ||
| client: "kubernetes" | ||
| manifest: | ||
| ref: "/etc/adapter/deployment.yaml" | ||
| discovery: | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -13,6 +13,7 @@ import ( | |
| "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/executor" | ||
| "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/hyperfleet_api" | ||
| "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/k8s_client" | ||
| "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/maestro_client" | ||
| "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/health" | ||
| "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/logger" | ||
| "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/otel" | ||
|
|
@@ -285,14 +286,32 @@ func runServe() error { | |
| return fmt.Errorf("failed to create Kubernetes client: %w", err) | ||
| } | ||
|
|
||
| // Create Maestro client if configured | ||
| var maestroClient maestro_client.ManifestWorkClient | ||
| if config.Spec.Clients.Maestro != nil { | ||
| log.Info(ctx, "Creating Maestro client...") | ||
| maestroClient, err = createMaestroClient(ctx, config.Spec.Clients.Maestro, log) | ||
| if err != nil { | ||
| errCtx := logger.WithErrorField(ctx, err) | ||
| log.Errorf(errCtx, "Failed to create Maestro client") | ||
| return fmt.Errorf("failed to create Maestro client: %w", err) | ||
| } | ||
| log.Info(ctx, "Maestro client created successfully") | ||
| } | ||
|
|
||
| // Create the executor using the builder pattern | ||
| log.Info(ctx, "Creating event executor...") | ||
| exec, err := executor.NewBuilder(). | ||
| execBuilder := executor.NewBuilder(). | ||
| WithConfig(config). | ||
| WithAPIClient(apiClient). | ||
| WithK8sClient(k8sClient). | ||
| WithLogger(log). | ||
| Build() | ||
| WithLogger(log) | ||
|
|
||
| if maestroClient != nil { | ||
| execBuilder = execBuilder.WithMaestroClient(maestroClient) | ||
| } | ||
|
|
||
| exec, err := execBuilder.Build() | ||
| if err != nil { | ||
| errCtx := logger.WithErrorField(ctx, err) | ||
| log.Errorf(errCtx, "Failed to create executor") | ||
|
|
@@ -494,3 +513,31 @@ func createK8sClient(ctx context.Context, k8sConfig config_loader.KubernetesConf | |
| } | ||
| return k8s_client.NewClient(ctx, clientConfig, log) | ||
| } | ||
|
|
||
| // createMaestroClient creates a Maestro client from the config | ||
| func createMaestroClient(ctx context.Context, maestroConfig *config_loader.MaestroClientConfig, log logger.Logger) (*maestro_client.Client, error) { | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. How about move the createXXXClient functions to another file to simplify main.go file? |
||
| clientConfig := &maestro_client.Config{ | ||
| MaestroServerAddr: maestroConfig.HTTPServerAddress, | ||
| GRPCServerAddr: maestroConfig.GRPCServerAddress, | ||
| SourceID: maestroConfig.SourceID, | ||
| Insecure: maestroConfig.Insecure, | ||
| } | ||
|
|
||
| // Parse timeout if specified | ||
| if maestroConfig.Timeout != "" { | ||
| timeout, err := time.ParseDuration(maestroConfig.Timeout) | ||
| if err != nil { | ||
| return nil, fmt.Errorf("invalid maestro timeout %q: %w", maestroConfig.Timeout, err) | ||
| } | ||
| clientConfig.HTTPTimeout = timeout | ||
| } | ||
|
|
||
| // Configure TLS if auth type is "tls" | ||
| if maestroConfig.Auth.Type == "tls" && maestroConfig.Auth.TLSConfig != nil { | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. This could lead to a confusing scenario where a user sets the auth type to "tls" but omits tlsConfig, and TLS is then silently skipped instead of being applied. Consider returning an explicit error when the configuration is inconsistent: if maestroConfig.Auth.Type == "tls" {
if maestroConfig.Auth.TLSConfig == nil {
return nil, fmt.Errorf("maestro auth type is 'tls' but tlsConfig is not provided")
}
clientConfig.CAFile = maestroConfig.Auth.TLSConfig.CAFile
clientConfig.ClientCertFile = maestroConfig.Auth.TLSConfig.CertFile
clientConfig.ClientKeyFile = maestroConfig.Auth.TLSConfig.KeyFile
} |
||
| clientConfig.CAFile = maestroConfig.Auth.TLSConfig.CAFile | ||
| clientConfig.ClientCertFile = maestroConfig.Auth.TLSConfig.CertFile | ||
| clientConfig.ClientKeyFile = maestroConfig.Auth.TLSConfig.KeyFile | ||
| } | ||
|
|
||
| return maestro_client.NewMaestroClient(ctx, clientConfig, log) | ||
| } | ||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
This kind of configuration loses the manifestwork ref. Also, a manifestwork should be able to contain multiple manifests.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
And we should group related manifests together in the same manifestwork, because the number of manifests cannot be changed once the manifestwork has been created.