From 247d44d97d1ab5cf7d4fb42739d4795d7ceeb9ad Mon Sep 17 00:00:00 2001
From: Reham Tarek
Date: Thu, 1 Aug 2024 10:42:22 +0100
Subject: [PATCH] Add Hyperpod cluster tests

---
 generator/test_case_generator.go           |   4 +
 terraform/eks/daemon/hyperpod/main.tf      | 482 ++++++++++++++++++
 terraform/eks/daemon/hyperpod/providers.tf |  17 +
 terraform/eks/daemon/hyperpod/variables.tf |  37 ++
 terraform/setup/main.tf                    |   2 +-
 test/hyperpod/hyperpod_metrics_test.go     |  67 +++
 test/hyperpod/hyperpod_test.go             |  79 +++
 test/hyperpod/resources/config.json        |  16 +
 .../test_schemas/node_hyperpod.json        |  28 +
 .../eks_resources/util.go                  |   3 +
 10 files changed, 734 insertions(+), 1 deletion(-)
 create mode 100644 terraform/eks/daemon/hyperpod/main.tf
 create mode 100644 terraform/eks/daemon/hyperpod/providers.tf
 create mode 100644 terraform/eks/daemon/hyperpod/variables.tf
 create mode 100644 test/hyperpod/hyperpod_metrics_test.go
 create mode 100644 test/hyperpod/hyperpod_test.go
 create mode 100644 test/hyperpod/resources/config.json
 create mode 100644 test/metric_value_benchmark/eks_resources/test_schemas/node_hyperpod.json

diff --git a/generator/test_case_generator.go b/generator/test_case_generator.go
index f7f042267..53d2161f2 100644
--- a/generator/test_case_generator.go
+++ b/generator/test_case_generator.go
@@ -223,6 +223,10 @@ var testTypeToTestConfig = map[string][]testConfig{
 			testDir: "./test/gpu", terraformDir: "terraform/eks/daemon/gpu",
 			targets: map[string]map[string]struct{}{"arc": {"amd64": {}}},
 		},
+		{
+			testDir: "./test/hyperpod", terraformDir: "terraform/eks/daemon/hyperpod",
+			targets: map[string]map[string]struct{}{"arc": {"amd64": {}}},
+		},
 	},
 	"eks_deployment": {
 		{testDir: "./test/metric_value_benchmark"},
diff --git a/terraform/eks/daemon/hyperpod/main.tf b/terraform/eks/daemon/hyperpod/main.tf
new file mode 100644
index 000000000..a810d8c75
--- /dev/null
+++ b/terraform/eks/daemon/hyperpod/main.tf
@@ -0,0 +1,482 @@
+// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: MIT
+
+module "common" {
+  source             = "../../../common"
+  cwagent_image_repo = var.cwagent_image_repo
+  cwagent_image_tag  = var.cwagent_image_tag
+}
+
+module "basic_components" {
+  source = "../../../basic_components"
+
+  region = var.region
+}
+
+data "aws_eks_cluster_auth" "this" {
+  name = aws_eks_cluster.this.name
+}
+
+resource "aws_eks_cluster" "this" {
+  name     = "cwagent-eks-integ-${module.common.testing_id}"
+  role_arn = module.basic_components.role_arn
+  version  = var.k8s_version
+  enabled_cluster_log_types = [
+    "api",
+    "audit",
+    "authenticator",
+    "controllerManager",
+    "scheduler"
+  ]
+  vpc_config {
+    subnet_ids         = module.basic_components.public_subnet_ids
+    security_group_ids = [module.basic_components.security_group]
+  }
+}
+
+# EKS Node Groups
+resource "aws_eks_node_group" "this" {
+  cluster_name    = aws_eks_cluster.this.name
+  node_group_name = "cwagent-eks-integ-node"
+  node_role_arn   = aws_iam_role.node_role.arn
+  subnet_ids      = module.basic_components.public_subnet_ids
+
+  scaling_config {
+    desired_size = 1
+    max_size     = 1
+    min_size     = 1
+  }
+
+  ami_type       = "AL2_x86_64"
+  capacity_type  = "ON_DEMAND"
+  disk_size      = 20
+  instance_types = ["t3.medium"]
+
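+  # These labels imitate a SageMaker HyperPod node (an ml.* instance type plus
+  # a sagemaker.amazonaws.com/node-health-status value) so the agent's HyperPod
+  # handling can be exercised without provisioning a real HyperPod cluster.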
+  labels = { "beta.kubernetes.io/instance-type" : "ml.t3.medium", "sagemaker.amazonaws.com/node-health-status" : "Schedulable", }
+
+  depends_on = [
+    aws_iam_role_policy_attachment.node_AmazonEC2ContainerRegistryReadOnly,
+    aws_iam_role_policy_attachment.node_AmazonEKS_CNI_Policy,
+    aws_iam_role_policy_attachment.node_AmazonEKSWorkerNodePolicy,
+    aws_iam_role_policy_attachment.node_CloudWatchAgentServerPolicy
+  ]
+}
+
+# EKS Node IAM Role
+resource "aws_iam_role" "node_role" {
+  name = "cwagent-eks-Worker-Role-${module.common.testing_id}"
+  assume_role_policy = jsonencode({
+    Version = "2012-10-17",
+    Statement = [
+      {
+        Effect = "Allow",
+        Principal = {
+          Service = "ec2.amazonaws.com"
+        },
+        Action = "sts:AssumeRole"
+      }
+    ]
+  })
+
+}
+
+resource "aws_iam_role_policy_attachment" "node_AmazonEKSWorkerNodePolicy" {
+  policy_arn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"
+  role       = aws_iam_role.node_role.name
+}
+
+resource "aws_iam_role_policy_attachment" "node_AmazonEKS_CNI_Policy" {
+  policy_arn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"
+  role       = aws_iam_role.node_role.name
+}
+
+resource "aws_iam_role_policy_attachment" "node_AmazonEC2ContainerRegistryReadOnly" {
+  policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
+  role       = aws_iam_role.node_role.name
+}
+
+resource "aws_iam_role_policy_attachment" "node_CloudWatchAgentServerPolicy" {
+  policy_arn = "arn:aws:iam::aws:policy/CloudWatchAgentServerPolicy"
+  role       = aws_iam_role.node_role.name
+}
+
+# TODO: these security groups should be created once and then reused
+# EKS Cluster Security Group
+resource "aws_security_group" "eks_cluster_sg" {
+  name        = "cwagent-eks-cluster-sg-${module.common.testing_id}"
+  description = "Cluster communication with worker nodes"
+  vpc_id      = module.basic_components.vpc_id
+}
+
+resource "aws_security_group_rule" "cluster_inbound" {
+  description              = "Allow worker nodes to communicate with the cluster API Server"
+  from_port                = 443
+  protocol                 = "tcp"
+  security_group_id        = aws_security_group.eks_cluster_sg.id
+  source_security_group_id = aws_security_group.eks_nodes_sg.id
+  to_port                  = 443
+  type                     = "ingress"
+}
+
+resource "aws_security_group_rule" "cluster_outbound" {
+  description              = "Allow cluster API Server to communicate with the worker nodes"
+  from_port                = 1024
+  protocol                 = "tcp"
+  security_group_id        = aws_security_group.eks_cluster_sg.id
+  source_security_group_id = aws_security_group.eks_nodes_sg.id
+  to_port                  = 65535
+  type                     = "egress"
+}
+
+
+# EKS Node Security Group
+resource "aws_security_group" "eks_nodes_sg" {
+  name        = "cwagent-eks-node-sg-${module.common.testing_id}"
+  description = "Security group for all nodes in the cluster"
+  vpc_id      = module.basic_components.vpc_id
+
+  egress {
+    from_port   = 0
+    to_port     = 0
+    protocol    = "-1"
+    cidr_blocks = ["0.0.0.0/0"]
+  }
+}
+
+resource "aws_security_group_rule" "nodes_internal" {
+  description              = "Allow nodes to communicate with each other"
+  from_port                = 0
+  protocol                 = "-1"
+  security_group_id        = aws_security_group.eks_nodes_sg.id
+  source_security_group_id = aws_security_group.eks_nodes_sg.id
+  to_port                  = 65535
+  type                     = "ingress"
+}
+
+resource "aws_security_group_rule" "nodes_cluster_inbound" {
+  description              = "Allow worker Kubelets and pods to receive communication from the cluster control plane"
+  from_port                = 1025
+  protocol                 = "tcp"
+  security_group_id        = aws_security_group.eks_nodes_sg.id
+  source_security_group_id = aws_security_group.eks_cluster_sg.id
+  to_port                  = 65535
+  type                     = "ingress"
+}
+
+resource "kubernetes_namespace" "namespace" {
+  metadata {
+    name = "amazon-cloudwatch"
+  }
+}
+
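+# The agent daemonset below mounts the host's container runtime sockets plus
+# /rootfs, /sys, and /dev/disk read-only; the agent reads these host paths to
+# produce Container Insights metrics for the node.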
+# TODO: how do we support different deployment types? Should they be in separate terraform
+# files, and spawn separate tests?
+resource "kubernetes_daemonset" "service" {
+  depends_on = [
+    kubernetes_namespace.namespace,
+    kubernetes_config_map.cwagentconfig,
+    kubernetes_service_account.cwagentservice,
+    aws_eks_node_group.this
+  ]
+  metadata {
+    name      = "cloudwatch-agent"
+    namespace = "amazon-cloudwatch"
+  }
+  spec {
+    selector {
+      match_labels = {
+        "name" : "cloudwatch-agent"
+      }
+    }
+    template {
+      metadata {
+        labels = {
+          "name" : "cloudwatch-agent"
+        }
+      }
+      spec {
+        node_selector = {
+          "kubernetes.io/os" : "linux"
+        }
+        container {
+          name              = "cwagent"
+          image             = "${var.cwagent_image_repo}:${var.cwagent_image_tag}"
+          image_pull_policy = "Always"
+          resources {
+            limits = {
+              "cpu" : "200m",
+              "memory" : "200Mi"
+            }
+            requests = {
+              "cpu" : "200m",
+              "memory" : "200Mi"
+            }
+          }
+          port {
+            container_port = 25888
+            host_port      = 25888
+            protocol       = "UDP"
+          }
+          env {
+            name = "HOST_IP"
+            value_from {
+              field_ref {
+                field_path = "status.hostIP"
+              }
+            }
+          }
+          env {
+            name = "HOST_NAME"
+            value_from {
+              field_ref {
+                field_path = "spec.nodeName"
+              }
+            }
+          }
+          env {
+            name = "K8S_NAMESPACE"
+            value_from {
+              field_ref {
+                field_path = "metadata.namespace"
+              }
+            }
+          }
+          volume_mount {
+            mount_path = "/etc/cwagentconfig"
+            name       = "cwagentconfig"
+          }
+          volume_mount {
+            mount_path = "/rootfs"
+            name       = "rootfs"
+            read_only  = true
+          }
+          volume_mount {
+            mount_path = "/var/run/docker.sock"
+            name       = "dockersock"
+            read_only  = true
+          }
+          volume_mount {
+            mount_path = "/var/lib/docker"
+            name       = "varlibdocker"
+            read_only  = true
+          }
+          volume_mount {
+            mount_path = "/run/containerd/containerd.sock"
+            name       = "containerdsock"
+            read_only  = true
+          }
+          volume_mount {
+            mount_path = "/sys"
+            name       = "sys"
+            read_only  = true
+          }
+          volume_mount {
+            mount_path = "/dev/disk"
+            name       = "devdisk"
+            read_only  = true
+          }
+        }
+        volume {
+          name = "cwagentconfig"
+          config_map {
+            name = "cwagentconfig"
+          }
+        }
+        volume {
+          name = "rootfs"
+          host_path {
+            path = "/"
+          }
+        }
+        volume {
+          name = "dockersock"
+          host_path {
+            path = "/var/run/docker.sock"
+          }
+        }
+        volume {
+          name = "varlibdocker"
+          host_path {
+            path = "/var/lib/docker"
+          }
+        }
+        volume {
+          name = "containerdsock"
+          host_path {
+            path = "/run/containerd/containerd.sock"
+          }
+        }
+        volume {
+          name = "sys"
+          host_path {
+            path = "/sys"
+          }
+        }
+        volume {
+          name = "devdisk"
+          host_path {
+            path = "/dev/disk"
+          }
+        }
+
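+        # Sidecar that pipes a hand-built EMF payload into the agent's UDP
+        # listener on port 25888 once a minute, so EMF ingestion can be
+        # verified alongside the HyperPod node health metrics.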
"containerdsock" + host_path { + path = "/run/containerd/containerd.sock" + } + } + volume { + name = "sys" + host_path { + path = "/sys" + } + } + volume { + name = "devdisk" + host_path { + path = "/dev/disk" + } + } + + container { + name = "hyperpod-eks-testing" + image = "alpine/socat:latest" + image_pull_policy = "Always" + resources { + limits = { + "cpu" : "50m", + "memory" : "50Mi" + } + requests = { + "cpu" : "50m", + "memory" : "50Mi" + } + } + + command = [ + "/bin/sh", + "-c", + "while true; do CURRENT_TIME=\"$(date +%s%3N)\"; TIMESTAMP=\"$(($CURRENT_TIME *1000))\"; echo '{\"_aws\":{\"Timestamp\":'\"$${TIMESTAMP}\"',\"LogGroupName\":\"EMFEKSLogGroup\",\"CloudWatchMetrics\":[{\"Namespace\":\"EMFEKSNameSpace\",\"Dimensions\":[[\"Type\",\"ClusterName\"]],\"Metrics\":[{\"Name\":\"EMFCounter\",\"Unit\":\"Count\"}]}]},\"Type\":\"Counter\",\"EMFCounter\":5, \"ClusterName\": \"${aws_eks_cluster.this.name}\"}' | socat -v -t 0 - UDP:0.0.0.0:25888; sleep 60; done" + ] + env { + name = "HOST_IP" + value_from { + field_ref { + field_path = "status.hostIP" + } + } + } + env { + name = "HOST_NAME" + value_from { + field_ref { + field_path = "spec.nodeName" + } + } + } + env { + name = "K8S_NAMESPACE" + value_from { + field_ref { + field_path = "metadata.namespace" + } + } + } + volume_mount { + mount_path = "/etc/cwagentconfig" + name = "cwagentconfig" + } + } + service_account_name = "cloudwatch-agent" + termination_grace_period_seconds = 60 + } + } + } +} + +########################################## +# Template Files +########################################## +locals { + cwagent_config = fileexists("../../../../${var.test_dir}/resources/config.json") ? "../../../../${var.test_dir}/resources/config.json" : "../default_resources/default_amazon_cloudwatch_agent.json" +} + +data "template_file" "cwagent_config" { + template = file(local.cwagent_config) + vars = { + } +} + +resource "kubernetes_config_map" "cwagentconfig" { + depends_on = [ + kubernetes_namespace.namespace, + kubernetes_service_account.cwagentservice + ] + metadata { + name = "cwagentconfig" + namespace = "amazon-cloudwatch" + } + data = { + "cwagentconfig.json" : data.template_file.cwagent_config.rendered + } +} + +resource "kubernetes_service_account" "cwagentservice" { + depends_on = [kubernetes_namespace.namespace] + metadata { + name = "cloudwatch-agent" + namespace = "amazon-cloudwatch" + } +} + +resource "kubernetes_cluster_role" "clusterrole" { + depends_on = [kubernetes_namespace.namespace] + metadata { + name = "cloudwatch-agent-role" + } + rule { + verbs = ["list", "watch"] + resources = ["pods", "nodes", "endpoints"] + api_groups = [""] + } + rule { + verbs = ["list", "watch"] + resources = ["replicasets"] + api_groups = ["apps"] + } + rule { + verbs = ["list", "watch"] + resources = ["jobs"] + api_groups = ["batch"] + } + rule { + verbs = ["get"] + resources = ["nodes/proxy"] + api_groups = [""] + } + rule { + verbs = ["create"] + resources = ["nodes/stats", "configmaps", "events"] + api_groups = [""] + } + rule { + verbs = ["get", "update"] + resource_names = ["cwagent-clusterleader"] + resources = ["configmaps"] + api_groups = [""] + } +} + +resource "kubernetes_cluster_role_binding" "rolebinding" { + depends_on = [kubernetes_namespace.namespace] + metadata { + name = "cloudwatch-agent-role-binding" + } + role_ref { + api_group = "rbac.authorization.k8s.io" + kind = "ClusterRole" + name = "cloudwatch-agent-role" + } + subject { + kind = "ServiceAccount" + name = "cloudwatch-agent" + namespace = 
"amazon-cloudwatch" + } +} + +resource "null_resource" "validator" { + depends_on = [ + aws_eks_node_group.this, + kubernetes_daemonset.service, + kubernetes_cluster_role_binding.rolebinding, + kubernetes_service_account.cwagentservice, + ] + provisioner "local-exec" { + command = <<-EOT + echo "Validating HyperPod Metrics" + cd ../../../.. + go test -timeout 120m ${var.test_dir} -eksClusterName=${aws_eks_cluster.this.name} -computeType=EKS -v -eksDeploymentStrategy=DAEMON + EOT + } +} \ No newline at end of file diff --git a/terraform/eks/daemon/hyperpod/providers.tf b/terraform/eks/daemon/hyperpod/providers.tf new file mode 100644 index 000000000..9bd2885f5 --- /dev/null +++ b/terraform/eks/daemon/hyperpod/providers.tf @@ -0,0 +1,17 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +provider "aws" { + region = var.region +} + +provider "kubernetes" { + exec { + api_version = "client.authentication.k8s.io/v1beta1" + command = "aws" + args = ["eks", "get-token", "--cluster-name", aws_eks_cluster.this.name] + } + host = aws_eks_cluster.this.endpoint + cluster_ca_certificate = base64decode(aws_eks_cluster.this.certificate_authority.0.data) + token = data.aws_eks_cluster_auth.this.token +} \ No newline at end of file diff --git a/terraform/eks/daemon/hyperpod/variables.tf b/terraform/eks/daemon/hyperpod/variables.tf new file mode 100644 index 000000000..a2a619230 --- /dev/null +++ b/terraform/eks/daemon/hyperpod/variables.tf @@ -0,0 +1,37 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: MIT + +variable "region" { + type = string + default = "us-west-2" +} + +variable "test_dir" { + type = string + default = "./test/hyperpod" +} + +variable "cwagent_image_repo" { + type = string + default = "public.ecr.aws/cloudwatch-agent/cloudwatch-agent" +} + +variable "cwagent_image_tag" { + type = string + default = "latest" +} + +variable "k8s_version" { + type = string + default = "1.28" +} + +variable "ami_type" { + type = string + default = "AL2_x86_64" +} + +variable "instance_type" { + type = string + default = "t3a.medium" +} \ No newline at end of file diff --git a/terraform/setup/main.tf b/terraform/setup/main.tf index f9f41d21d..9c6ed221d 100644 --- a/terraform/setup/main.tf +++ b/terraform/setup/main.tf @@ -90,4 +90,4 @@ resource "aws_ec2_host" "dedicated_host" { instance_type = each.value availability_zone = "${var.region}b" auto_placement = "on" -} +} \ No newline at end of file diff --git a/test/hyperpod/hyperpod_metrics_test.go b/test/hyperpod/hyperpod_metrics_test.go new file mode 100644 index 000000000..3ab25a1df --- /dev/null +++ b/test/hyperpod/hyperpod_metrics_test.go @@ -0,0 +1,67 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+var expectedDimsToMetrics = map[string][]string{
+    "ClusterName": {
+        SchedulableMetric, UnschedulableMetric, UnschedulablePendingRebootMetric, UnschedulablePendingReplacementMetric,
+    },
+    "ClusterName-InstanceId-NodeName": {
+        SchedulableMetric, UnschedulableMetric, UnschedulablePendingRebootMetric, UnschedulablePendingReplacementMetric,
+    },
+}
+
+type AwsHyperPodTestRunner struct {
+    test_runner.BaseTestRunner
+    testName string
+    env      *environment.MetaData
+}
+
+var _ test_runner.ITestRunner = (*AwsHyperPodTestRunner)(nil)
+
+func (t *AwsHyperPodTestRunner) Validate() status.TestGroupResult {
+    var testResults []status.TestResult
+    testResults = append(testResults, metric.ValidateMetrics(t.env, awsHyperPodMetricIndicator, expectedDimsToMetrics)...)
+    testResults = append(testResults, metric.ValidateLogs(t.env))
+    return status.TestGroupResult{
+        Name:        t.GetTestName(),
+        TestResults: testResults,
+    }
+}
+
+func (t *AwsHyperPodTestRunner) GetTestName() string {
+    return t.testName
+}
+
+func (t *AwsHyperPodTestRunner) GetAgentConfigFileName() string {
+    return ""
+}
+
+func (t *AwsHyperPodTestRunner) GetAgentRunDuration() time.Duration {
+    return 20 * time.Minute
+}
+
+func (t *AwsHyperPodTestRunner) GetMeasuredMetrics() []string {
+    return nil
+}
diff --git a/test/hyperpod/hyperpod_test.go b/test/hyperpod/hyperpod_test.go
new file mode 100644
index 000000000..a00372fdb
--- /dev/null
+++ b/test/hyperpod/hyperpod_test.go
@@ -0,0 +1,79 @@
+// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+// SPDX-License-Identifier: MIT
+
+//go:build !windows
+
+package hyperpod
+
+import (
+    "fmt"
+    "log"
+    "testing"
+
+    "github.com/stretchr/testify/suite"
+
+    "github.com/aws/amazon-cloudwatch-agent-test/environment"
+    "github.com/aws/amazon-cloudwatch-agent-test/environment/computetype"
+    "github.com/aws/amazon-cloudwatch-agent-test/test/metric/dimension"
+    "github.com/aws/amazon-cloudwatch-agent-test/test/status"
+    "github.com/aws/amazon-cloudwatch-agent-test/test/test_runner"
+)
+
+type HyperPodTestSuite struct {
+    suite.Suite
+    test_runner.TestSuite
+}
+
+func (suite *HyperPodTestSuite) SetupSuite() {
+    fmt.Println(">>>> Starting AWS HyperPod Cluster Container Insights TestSuite")
+}
+
+func (suite *HyperPodTestSuite) TearDownSuite() {
+    suite.Result.Print()
+    fmt.Println(">>>> Finished AWS HyperPod Cluster Container Insights TestSuite")
+}
+
+func init() {
+    environment.RegisterEnvironmentMetaDataFlags()
+}
+
+var (
+    eksTestRunners []*test_runner.EKSTestRunner
+)
+
+func getEksTestRunners(env *environment.MetaData) []*test_runner.EKSTestRunner {
+    if eksTestRunners == nil {
+        factory := dimension.GetDimensionFactory(*env)
+
+        eksTestRunners = []*test_runner.EKSTestRunner{
+            {
+                Runner: &AwsHyperPodTestRunner{test_runner.BaseTestRunner{DimensionFactory: factory}, "EKS_AWS_HYPERPOD", env},
+                Env:    *env,
+            },
+        }
+    }
+    return eksTestRunners
+}
+
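+// TestAllInSuite is gated on the compute type: it only runs the HyperPod
+// runners on EKS and silently no-ops for any other environment.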
+func (suite *HyperPodTestSuite) TestAllInSuite() {
+    env := environment.GetEnvironmentMetaData()
+    switch env.ComputeType {
+    case computetype.EKS:
+        log.Println("Environment compute type is EKS")
+        for _, testRunner := range getEksTestRunners(env) {
+            testRunner.Run(suite, env)
+        }
+    default:
+        return
+    }
+
+    suite.Assert().Equal(status.SUCCESSFUL, suite.Result.GetStatus(), "AWS HyperPod Test Suite Failed")
+}
+
+func (suite *HyperPodTestSuite) AddToSuiteResult(r status.TestGroupResult) {
+    suite.Result.TestGroupResults = append(suite.Result.TestGroupResults, r)
+}
+
+func TestAWSHyperPodSuite(t *testing.T) {
+    suite.Run(t, new(HyperPodTestSuite))
+}
diff --git a/test/hyperpod/resources/config.json b/test/hyperpod/resources/config.json
new file mode 100644
index 000000000..6f37e43ed
--- /dev/null
+++ b/test/hyperpod/resources/config.json
@@ -0,0 +1,16 @@
+{
+  "agent": {
+    "metrics_collection_interval": 15,
+    "run_as_user": "root",
+    "debug": true,
+    "logfile": ""
+  },
+  "logs": {
+    "metrics_collected": {
+      "kubernetes": {
+        "enhanced_container_insights": true
+      }
+    },
+    "force_flush_interval": 5
+  }
+}
\ No newline at end of file
diff --git a/test/metric_value_benchmark/eks_resources/test_schemas/node_hyperpod.json b/test/metric_value_benchmark/eks_resources/test_schemas/node_hyperpod.json
new file mode 100644
index 000000000..253d58b44
--- /dev/null
+++ b/test/metric_value_benchmark/eks_resources/test_schemas/node_hyperpod.json
@@ -0,0 +1,28 @@
+{
+  "$schema": "http://json-schema.org/draft-04/schema#",
+  "title": "structured log schema",
+  "description": "json schema for the cloudwatch agent k8s structured log",
+  "type": "object",
+  "properties": {
+    "CloudWatchMetrics": {},
+    "ClusterName": {},
+    "Type": {},
+    "Timestamp": {},
+    "Version": {},
+    "InstanceId": {},
+    "NodeName": {},
+    "hyper_pod_node_health_status_unschedulable_pending_replacement": {},
+    "hyper_pod_node_health_status_unschedulable_pending_reboot": {},
+    "hyper_pod_node_health_status_schedulable": {},
+    "hyper_pod_node_health_status_unschedulable": {}
+  },
+  "required": [
+    "Type",
+    "Timestamp",
+    "ClusterName",
+    "InstanceId",
+    "NodeName",
+    "Version",
+    "CloudWatchMetrics"
+  ]
+}
\ No newline at end of file
diff --git a/test/metric_value_benchmark/eks_resources/util.go b/test/metric_value_benchmark/eks_resources/util.go
index 3ea59a1a0..f65e6316a 100644
--- a/test/metric_value_benchmark/eks_resources/util.go
+++ b/test/metric_value_benchmark/eks_resources/util.go
@@ -46,6 +46,8 @@ var (
     eksNodeGpuSchema string
     //go:embed test_schemas/cluster_gpu.json
     eksClusterGpuSchema string
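+    // JSON schema for the HyperPod node health structured log, wired into
+    // EksClusterValidationMap below (see test/hyperpod).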
"CloudWatchMetrics" + ] +} \ No newline at end of file diff --git a/test/metric_value_benchmark/eks_resources/util.go b/test/metric_value_benchmark/eks_resources/util.go index 3ea59a1a0..f65e6316a 100644 --- a/test/metric_value_benchmark/eks_resources/util.go +++ b/test/metric_value_benchmark/eks_resources/util.go @@ -46,6 +46,8 @@ var ( eksNodeGpuSchema string //go:embed test_schemas/cluster_gpu.json eksClusterGpuSchema string + //go:embed test_schemas/node_hyperpod.json + eksHyperPodNodeSchema string EksClusterValidationMap = map[string]string{ "Cluster": eksClusterSchema, @@ -66,6 +68,7 @@ var ( "PodGPU": eksPodGpuSchema, "NodeGPU": eksNodeGpuSchema, "ClusterGPU": eksClusterGpuSchema, + "HyperPodNode": eksHyperPodNodeSchema, } )