diff --git a/testdata/adapter-configs/cl-maestro/adapter-config.yaml b/testdata/adapter-configs/cl-maestro/adapter-config.yaml
new file mode 100644
index 0000000..8c6c004
--- /dev/null
+++ b/testdata/adapter-configs/cl-maestro/adapter-config.yaml
@@ -0,0 +1,69 @@
+# Example HyperFleet Adapter deployment configuration
+apiVersion: hyperfleet.redhat.com/v1alpha1
+kind: AdapterConfig
+metadata:
+  name: cl-maestro
+  labels:
+    hyperfleet.io/adapter-type: cl-maestro
+    hyperfleet.io/component: adapter
+spec:
+  adapter:
+    version: "0.1.0"
+
+    # Log the full merged configuration after load (default: false)
+    debugConfig: true
+
+  clients:
+    hyperfleetApi:
+      baseUrl: CHANGE_ME
+      version: v1
+      timeout: 2s
+      retryAttempts: 3
+      retryBackoff: exponential
+
+  broker:
+    # These values are overridden at deploy time via env vars from Helm values
+    subscriptionId: CHANGE_ME
+    topic: CHANGE_ME
+
+  maestro:
+    grpcServerAddress: "maestro-grpc.maestro.svc.cluster.local:8090"
+
+    # HTTP(S) server address for REST API operations (optional)
+    # Environment variable: HYPERFLEET_MAESTRO_HTTP_SERVER_ADDRESS
+    httpServerAddress: "http://maestro.maestro.svc.cluster.local:8000"
+
+    # Source identifier for CloudEvents routing (must be unique across adapters)
+    # Environment variable: HYPERFLEET_MAESTRO_SOURCE_ID
+    sourceId: "cl-maestro"
+
+    # Client identifier (defaults to sourceId if not specified)
+    # Environment variable: HYPERFLEET_MAESTRO_CLIENT_ID
+    clientId: "cl-maestro-client"
+    insecure: true
+
+    # Authentication configuration
+    #auth:
+    #  type: "tls" # TLS certificate-based mTLS
+    #
+    #  tlsConfig:
+    #    # gRPC TLS configuration
+    #    # Certificate paths (mounted from Kubernetes secrets)
+    #    # Environment variable: HYPERFLEET_MAESTRO_CA_FILE
+    #    caFile: "/etc/maestro/certs/grpc/ca.crt"
+    #
+    #    # Environment variable: HYPERFLEET_MAESTRO_CERT_FILE
+    #    certFile: "/etc/maestro/certs/grpc/client.crt"
+    #
+    #    # Environment variable: HYPERFLEET_MAESTRO_KEY_FILE
+    #    keyFile: "/etc/maestro/certs/grpc/client.key"
+    #
+    #    # Server name for TLS verification
+    #    # Environment variable: HYPERFLEET_MAESTRO_SERVER_NAME
+    #    serverName: "maestro-grpc.maestro.svc.cluster.local"
+    #
+    #    # HTTP API TLS configuration (may use different CA than gRPC)
+    #    # If not set, falls back to caFile for backwards compatibility
+    #    # Environment variable: HYPERFLEET_MAESTRO_HTTP_CA_FILE
+    #    httpCaFile: "/etc/maestro/certs/https/ca.crt"
+
diff --git a/testdata/adapter-configs/cl-maestro/adapter-task-config.yaml b/testdata/adapter-configs/cl-maestro/adapter-task-config.yaml
new file mode 100644
index 0000000..2740c08
--- /dev/null
+++ b/testdata/adapter-configs/cl-maestro/adapter-task-config.yaml
@@ -0,0 +1,347 @@
+# Example HyperFleet Adapter task configuration
+apiVersion: hyperfleet.redhat.com/v1alpha1
+kind: AdapterTaskConfig
+metadata:
+  name: cl-maestro
+  labels:
+    hyperfleet.io/adapter-type: cl-maestro
+    hyperfleet.io/component: adapter
+spec:
+  # Parameters with all required variables
+  params:
+
+    - name: "clusterId"
+      source: "event.id"
+      type: "string"
+      required: true
+
+    - name: "generation"
+      source: "event.generation"
+      type: "int"
+      required: true
+
+    - name: "namespace"
+      source: "env.NAMESPACE"
+      type: "string"
+
+
+  # Preconditions with valid operators and CEL expressions
+  preconditions:
+    - name: "clusterStatus"
+      apiCall:
+        method: "GET"
+        url: "/clusters/{{ .clusterId }}"
+        timeout: 10s
+        retryAttempts: 3
+        retryBackoff: "exponential"
+      capture:
+        - name: "clusterName"
+          field: "name"
+        - name: "generation"
+          field: "generation"
+        - name: "timestamp"
+          field: "created_time"
+        - name: "readyConditionStatus"
+          expression: |
+            status.conditions.filter(c, c.type == "Ready").size() > 0
+              ? status.conditions.filter(c, c.type == "Ready")[0].status
+              : "False"
+        - name: "placementClusterName"
+          expression: "\"cluster1\"" # TBC coming from placement adapter
+          description: "Unique identifier for the target maestro"
+
+
+  # Structured conditions with valid operators
+  conditions:
+    - field: "readyConditionStatus"
+      operator: "equals"
+      value: "False"
+
+    - name: "validationCheck"
+      # Valid CEL expression
+      expression: |
+        readyConditionStatus == "False"
+
+  # Resources with valid K8s manifests
+  resources:
+    - name: "resource0"
+      transport:
+        client: "maestro"
+        maestro:
+          targetCluster: "{{ .placementClusterName }}"
+
+      # ManifestWork is a kind of manifest that can be used to create resources on the cluster.
+      # It is a collection of resources that are created together.
+      manifest:
+        apiVersion: work.open-cluster-management.io/v1
+        kind: ManifestWork
+        metadata:
+          # ManifestWork name - must be unique within consumer namespace
+          name: "{{ .clusterId }}-{{ .metadata.name }}"
+
+          # Labels for identification, filtering, and management
+          labels:
+            # HyperFleet tracking labels
+            hyperfleet.io/cluster-id: "{{ .clusterId }}"
+            hyperfleet.io/adapter: "{{ .metadata.name }}"
+            hyperfleet.io/component: "infrastructure"
+            hyperfleet.io/generation: "{{ .generation }}"
+            hyperfleet.io/resource-group: "cluster-setup"
+
+            # Maestro-specific labels
+            maestro.io/source-id: "{{ .metadata.name }}"
+            maestro.io/resource-type: "manifestwork"
+            maestro.io/priority: "normal"
+
+            # Standard Kubernetes application labels
+            app.kubernetes.io/name: "aro-hcp-cluster"
+            app.kubernetes.io/instance: "{{ .clusterId }}"
+            app.kubernetes.io/version: "v1.0.0"
+            app.kubernetes.io/component: "infrastructure"
+            app.kubernetes.io/part-of: "hyperfleet"
+            app.kubernetes.io/managed-by: "cl-maestro"
+            app.kubernetes.io/created-by: "{{ .metadata.name }}"
+
+          # Annotations for metadata and operational information
+          annotations:
+            # Tracking and lifecycle
+            hyperfleet.io/created-by: "cl-maestro-framework"
+            hyperfleet.io/managed-by: "{{ .metadata.name }}"
+            hyperfleet.io/generation: "{{ .generation }}"
+            hyperfleet.io/cluster-name: "{{ .clusterId }}"
+            hyperfleet.io/deployment-time: "{{ .timestamp }}"
+
+            # Maestro-specific annotations
+            maestro.io/applied-time: "{{ .timestamp }}"
+            maestro.io/source-adapter: "{{ .metadata.name }}"
+
+            # Documentation
+            description: "Complete cluster setup including namespace, configuration, and RBAC"
+
+        # ManifestWork specification
+        spec:
+          # ============================================================================
+          # Workload - Contains the Kubernetes manifests to deploy
+          # ============================================================================
+          workload:
+            # Kubernetes manifests array - injected by framework from business logic config
+            manifests:
+              - apiVersion: v1
+                kind: Namespace
+                metadata:
+                  name: "{{ .clusterId | lower }}-{{ .metadata.name }}-namespace"
+                  labels:
+                    app.kubernetes.io/component: adapter-task-config
+                    app.kubernetes.io/instance: "{{ .metadata.name }}"
+                    app.kubernetes.io/name: cl-maestro
+                    app.kubernetes.io/transport: maestro
+                  annotations:
+                    hyperfleet.io/generation: "{{ .generation }}"
+              - apiVersion: v1
+                kind: ConfigMap
+                data:
+                  cluster_id: "{{ .clusterId }}"
+                  cluster_name: "{{ .clusterName }}"
+                metadata:
+                  name: "{{ .clusterId | lower }}-{{ .metadata.name }}-configmap"
+                  namespace: "{{ .clusterId | lower }}-{{ .metadata.name }}-namespace"
+                  labels:
+                    app.kubernetes.io/component: adapter-task-config
+                    app.kubernetes.io/instance: "{{ .metadata.name }}"
+                    app.kubernetes.io/name: cl-maestro
+                    app.kubernetes.io/version: "1.0.0"
+                    app.kubernetes.io/transport: maestro
+                  annotations:
+                    hyperfleet.io/generation: "{{ .generation }}"
+
+          # ============================================================================
+          # Delete Options - How resources should be removed
+          # ============================================================================
+          deleteOption:
+            # Propagation policy for resource deletion
+            # - "Foreground": Wait for dependent resources to be deleted first
+            # - "Background": Delete immediately, let cluster handle dependents
+            # - "Orphan": Leave resources on cluster when ManifestWork is deleted
+            propagationPolicy: "Foreground"
+
+            # Grace period for graceful deletion (seconds)
+            gracePeriodSeconds: 30
+
+          # ============================================================================
+          # Manifest Configurations - Per-resource settings for update and feedback
+          # ============================================================================
+          manifestConfigs:
+            - resourceIdentifier:
+                group: "" # Core API group (empty for v1 resources)
+                resource: "namespaces" # Resource type
+                name: "{{ .clusterId | lower }}-{{ .metadata.name }}-namespace" # Specific resource name
+              updateStrategy:
+                type: "ServerSideApply" # Use server-side apply for namespaces
+              feedbackRules:
+                - type: "JSONPaths" # Use JSON path expressions for status feedback
+                  jsonPaths:
+                    - name: "phase"
+                      path: ".status.phase"
+            # ========================================================================
+            # Configuration for ConfigMap resources
+            # ========================================================================
+            - resourceIdentifier:
+                group: "" # Core API group (empty for v1 resources)
+                resource: "configmaps" # Resource type
+                name: "{{ .clusterId | lower }}-{{ .metadata.name }}-configmap" # Specific resource name
+                namespace: "{{ .clusterId | lower }}-{{ .metadata.name }}-namespace"
+              updateStrategy:
+                type: "ServerSideApply" # Use server-side apply for configmaps
+                serverSideApply:
+                  fieldManager: "cl-maestro" # Field manager name for conflict resolution
+                  force: false # Don't force conflicts (fail on conflicts)
+              feedbackRules:
+                - type: "JSONPaths" # Use JSON path expressions for status feedback
+                  jsonPaths:
+                    - name: "data"
+                      path: ".data"
+                    - name: "resourceVersion"
+                      path: ".metadata.resourceVersion"
+      # Discover the ResourceBundle (ManifestWork) by name from Maestro
+      discovery:
+        byName: "{{ .clusterId }}-{{ .metadata.name }}"
+
+      # Discover nested resources deployed by the ManifestWork
+      nestedDiscoveries:
+        - name: "namespace0"
+          discovery:
+            byName: "{{ .clusterId | lower }}-{{ .metadata.name }}-namespace"
+        - name: "configmap0"
+          discovery:
+            byName: "{{ .clusterId | lower }}-{{ .metadata.name }}-configmap"
+
+  post:
+    payloads:
+      - name: "statusPayload"
+        build:
+          adapter: "{{ .metadata.name }}"
+          conditions:
+            # Applied: Check if ManifestWork exists and has type="Applied", status="True"
+            - type: "Applied"
+              status:
+                expression: |
+                  has(resources.resource0) && has(resources.resource0.status) && has(resources.resource0.status.conditions) && resources.resource0.status.conditions.filter(c, has(c.type) && c.type == "Applied").size() > 0 ? resources.resource0.status.conditions.filter(c, c.type == "Applied")[0].status : "False"
+              reason:
+                expression: |
+                  has(resources.resource0) && has(resources.resource0.status) && has(resources.resource0.status.conditions) && resources.resource0.status.conditions.filter(c, has(c.type) && c.type == "Applied").size() > 0 ? resources.resource0.status.conditions.filter(c, c.type == "Applied")[0].reason : "ManifestWorkNotDiscovered"
+              message:
+                expression: |
+                  has(resources.resource0) && has(resources.resource0.status) && has(resources.resource0.status.conditions) && resources.resource0.status.conditions.filter(c, has(c.type) && c.type == "Applied").size() > 0 ? resources.resource0.status.conditions.filter(c, c.type == "Applied")[0].message : "ManifestWork not discovered from Maestro or no Applied condition"
+
+            # Available: Check if nested discovered manifests are available on the spoke cluster
+            # Each nested discovery is enriched with top-level "conditions" from status.resourceStatus.manifests[]
+            - type: "Available"
+              status:
+                expression: |
+                  has(resources.namespace0) && has(resources.namespace0.conditions)
+                  && resources.namespace0.conditions.exists(c, has(c.type) && c.type == "Available" && has(c.status) && c.status == "True")
+                  && has(resources.configmap0) && has(resources.configmap0.conditions)
+                  && resources.configmap0.conditions.exists(c, c.type == "Available" && has(c.status) && c.status == "True")
+                  ? "True"
+                  : "False"
+              reason:
+                expression: |
+                  !(has(resources.namespace0) && has(resources.namespace0.conditions))
+                  ? "NamespaceNotDiscovered"
+                  : !resources.namespace0.conditions.exists(c, has(c.type) && c.type == "Available" && has(c.status) && c.status == "True")
+                  ? "NamespaceNotAvailable"
+                  : !(has(resources.configmap0) && has(resources.configmap0.conditions))
+                  ? "ConfigMapNotDiscovered"
+                  : !resources.configmap0.conditions.exists(c, c.type == "Available" && has(c.status) && c.status == "True")
+                  ? "ConfigMapNotAvailable"
+                  : "AllResourcesAvailable"
+              message:
+                expression: |
+                  !(has(resources.namespace0) && has(resources.namespace0.conditions))
+                  ? "Namespace not discovered from ManifestWork"
+                  : !resources.namespace0.conditions.exists(c, has(c.type) && c.type == "Available" && has(c.status) && c.status == "True")
+                  ? "Namespace not yet available on spoke cluster"
+                  : !(has(resources.configmap0) && has(resources.configmap0.conditions))
+                  ? "ConfigMap not discovered from ManifestWork"
+                  : !resources.configmap0.conditions.exists(c, c.type == "Available" && has(c.status) && c.status == "True")
+                  ? "ConfigMap not yet available on spoke cluster"
+                  : "All manifests (namespace, configmap) are available on spoke cluster"
+
+            # Health: Adapter execution status — surfaces errors from any phase
+            - type: "Health"
+              status:
+                expression: |
+                  adapter.?executionStatus.orValue("") == "success"
+                  && !adapter.?resourcesSkipped.orValue(false)
+                  ? "True"
+                  : "False"
+              reason:
+                expression: |
+                  adapter.?executionStatus.orValue("") != "success"
+                  ? "ExecutionFailed:" + adapter.?executionError.?phase.orValue("unknown")
+                  : adapter.?resourcesSkipped.orValue(false)
+                  ? "ResourcesSkipped"
+                  : "Healthy"
+              message:
+                expression: |
+                  adapter.?executionStatus.orValue("") != "success"
+                  ? "Adapter failed at phase ["
+                    + adapter.?executionError.?phase.orValue("unknown")
+                    + "] step ["
+                    + adapter.?executionError.?step.orValue("unknown")
+                    + "]: "
+                    + adapter.?executionError.?message.orValue(adapter.?errorMessage.orValue("no details"))
+                  : adapter.?resourcesSkipped.orValue(false)
+                  ? "Resources skipped: " + adapter.?skipReason.orValue("unknown reason")
+                  : "Adapter execution completed successfully"
+
+          observed_generation:
+            expression: "generation"
+          observed_time: "{{ now | date \"2006-01-02T15:04:05Z07:00\" }}"
+
+          # Extract data from discovered ManifestWork from Maestro
+          data:
+            manifestwork:
+              name:
+                expression: |
+                  has(resources.resource0) && has(resources.resource0.metadata)
+                  ? resources.resource0.metadata.name
+                  : ""
+              consumer:
+                expression: |
+                  has(resources.resource0) && has(resources.resource0.metadata)
+                  ? resources.resource0.metadata.namespace
+                  : placementClusterName
+            configmap:
+              name:
+                expression: |
+                  has(resources.configmap0) && has(resources.configmap0.metadata)
+                  ? resources.configmap0.metadata.name
+                  : ""
+              clusterId:
+                expression: |
+                  has(resources.configmap0) && has(resources.configmap0.data) && has(resources.configmap0.data.cluster_id)
+                  ? resources.configmap0.data.cluster_id
+                  : clusterId
+            namespace:
+              name:
+                expression: |
+                  has(resources.namespace0) && has(resources.namespace0.metadata)
+                  ? resources.namespace0.metadata.name
+                  : ""
+              phase:
+                expression: |
+                  has(resources.namespace0) && has(resources.namespace0.statusFeedback) && has(resources.namespace0.statusFeedback.values)
+                  && resources.namespace0.statusFeedback.values.exists(v, has(v.name) && v.name == "phase" && has(v.fieldValue))
+                  ? resources.namespace0.statusFeedback.values.filter(v, v.name == "phase")[0].fieldValue.string
+                  : "Unknown"
+
+    postActions:
+      - name: "reportClusterStatus"
+        apiCall:
+          method: "POST"
+          url: "/clusters/{{ .clusterId }}/statuses"
+          headers:
+            - name: "Content-Type"
+              value: "application/json"
+          body: "{{ .statusPayload }}"
diff --git a/testdata/adapter-configs/cl-maestro/values.yaml b/testdata/adapter-configs/cl-maestro/values.yaml
new file mode 100644
index 0000000..5a5397b
--- /dev/null
+++ b/testdata/adapter-configs/cl-maestro/values.yaml
@@ -0,0 +1,31 @@
+adapterConfig:
+  create: true
+  files:
+    adapter-config.yaml: cl-maestro/adapter-config.yaml
+  log:
+    level: debug
+
+adapterTaskConfig:
+  create: true
+  files:
+    task-config.yaml: cl-maestro/adapter-task-config.yaml
+
+broker:
+  create: true
+  googlepubsub:
+    projectId: CHANGE_ME
+    subscriptionId: CHANGE_ME
+    topic: CHANGE_ME
+    deadLetterTopic: CHANGE_ME
+
+image:
+  registry: CHANGE_ME
+  repository: hyperfleet-adapter
+  pullPolicy: Always
+  tag: latest
+
+rbac:
+  resources:
+    - namespaces
+    - configmaps
+    - configmaps/status