From 89c404eb6c1a3f28617f5bc2a30e20eb86efbd7d Mon Sep 17 00:00:00 2001 From: "zoe.chou" Date: Wed, 26 Jun 2024 14:24:16 +0800 Subject: [PATCH] feat(cluster-autoscaler) upgrade to 1.27 --- cluster-autoscaler/Dockerfile | 21 + .../cloudprovider/aws/aws_manager.go | 5 + .../cloudprovider/builder/builder_all.go | 4 + .../cloudprovider/cloud_provider.go | 2 + .../spotinst/aws_ec2_instance_types.go | 4540 ++++++++++++ .../spotinst/spotinst_cloud_provider.go | 159 + .../spotinst/spotinst_cloud_provider_test.go | 411 ++ .../spotinst/spotinst_manager.go | 503 ++ .../spotinst/spotinst_node_group.go | 232 + cluster-autoscaler/go.mod | 6 +- cluster-autoscaler/go.sum | 11 +- .../scripts/gpu-deploy-tmpl.yaml | 26 + cluster-autoscaler/scripts/test-ca.sh | 74 + .../utils/gpumemory/gpumemory.go | 41 + .../utils/gpumemory/gpumemory_test.go | 83 + .../utils/mpscontext/mpscontext.go | 6 + .../spotinst/spotinst-sdk-go/LICENSE | 201 + .../spotinst/spotinst-sdk-go/NOTICE.md | 14 + .../service/elastigroup/elastigroup.go | 53 + .../service/elastigroup/providers/aws/aws.go | 5601 +++++++++++++++ .../elastigroup/providers/aws/service.go | 65 + .../service/elastigroup/providers/aws/tag.go | 31 + .../elastigroup/providers/azure/v3/azure.go | 997 +++ .../elastigroup/providers/azure/v3/service.go | 36 + .../service/elastigroup/providers/gcp/gcp.go | 2184 ++++++ .../elastigroup/providers/gcp/service.go | 38 + .../service/elastigroup/providers/gcp/tag.go | 31 + .../spotinst-sdk-go/spotinst/client/client.go | 93 + .../spotinst/client/request.go | 119 + .../spotinst/client/response.go | 110 + .../spotinst-sdk-go/spotinst/config.go | 183 + .../spotinst/credentials/credentials.go | 67 + .../spotinst/credentials/provider.go | 44 + .../spotinst/credentials/provider_chain.go | 117 + .../spotinst/credentials/provider_env.go | 56 + .../spotinst/credentials/provider_file.go | 207 + .../spotinst/credentials/provider_static.go | 38 + .../spotinst/featureflag/featureflag.go | 119 + 
.../spotinst/featureflag/features.go | 24 + .../spotinst-sdk-go/spotinst/log/log.go | 25 + .../spotinst/session/session.go | 22 + .../spotinst-sdk-go/spotinst/types.go | 357 + .../spotinst/util/jsonutil/json.go | 237 + .../spotinst/util/stringutil/stringutil.go | 69 + .../spotinst/util/uritemplates/LICENSE | 18 + .../util/uritemplates/uritemplates.go | 360 + .../spotinst/util/uritemplates/utils.go | 9 + .../spotinst/util/useragent/useragent.go | 50 + .../spotinst-sdk-go/spotinst/version.go | 7 + .../vendor/github.com/stretchr/objx/README.md | 8 +- .../github.com/stretchr/objx/Taskfile.yml | 5 +- .../github.com/stretchr/objx/accessors.go | 24 +- .../github.com/stretchr/objx/conversions.go | 4 +- .../vendor/github.com/stretchr/objx/doc.go | 44 +- .../vendor/github.com/stretchr/objx/map.go | 9 +- .../testify/assert/assertion_compare.go | 28 +- .../assert/assertion_compare_can_convert.go | 16 - .../assert/assertion_compare_legacy.go | 16 - .../testify/assert/assertion_format.go | 32 +- .../testify/assert/assertion_forward.go | 59 +- .../stretchr/testify/assert/assertions.go | 207 +- .../testify/assert/http_assertions.go | 27 +- .../github.com/stretchr/testify/mock/mock.go | 129 +- .../stretchr/testify/require/require.go | 65 +- .../testify/require/require_forward.go | 59 +- .../stretchr/testify/suite/suite.go | 27 +- .../vendor/gopkg.in/ini.v1/.editorconfig | 12 + .../vendor/gopkg.in/ini.v1/.gitignore | 7 + .../vendor/gopkg.in/ini.v1/.golangci.yml | 27 + .../vendor/gopkg.in/ini.v1/LICENSE | 191 + .../vendor/gopkg.in/ini.v1/Makefile | 15 + .../vendor/gopkg.in/ini.v1/README.md | 43 + .../vendor/gopkg.in/ini.v1/codecov.yml | 16 + .../vendor/gopkg.in/ini.v1/data_source.go | 76 + .../vendor/gopkg.in/ini.v1/deprecated.go | 22 + .../vendor/gopkg.in/ini.v1/error.go | 49 + .../vendor/gopkg.in/ini.v1/file.go | 541 ++ .../vendor/gopkg.in/ini.v1/helper.go | 24 + .../vendor/gopkg.in/ini.v1/ini.go | 176 + .../vendor/gopkg.in/ini.v1/key.go | 837 +++ 
.../vendor/gopkg.in/ini.v1/parser.go | 520 ++ .../vendor/gopkg.in/ini.v1/section.go | 256 + .../vendor/gopkg.in/ini.v1/struct.go | 747 ++ cluster-autoscaler/vendor/modules.txt | 27 +- cluster-autoscaler/visenze.md | 0 patch/1.27.patch | 6240 +++++++++++++++++ 86 files changed, 27975 insertions(+), 316 deletions(-) create mode 100644 cluster-autoscaler/Dockerfile create mode 100644 cluster-autoscaler/cloudprovider/spotinst/aws_ec2_instance_types.go create mode 100644 cluster-autoscaler/cloudprovider/spotinst/spotinst_cloud_provider.go create mode 100644 cluster-autoscaler/cloudprovider/spotinst/spotinst_cloud_provider_test.go create mode 100644 cluster-autoscaler/cloudprovider/spotinst/spotinst_manager.go create mode 100644 cluster-autoscaler/cloudprovider/spotinst/spotinst_node_group.go create mode 100644 cluster-autoscaler/scripts/gpu-deploy-tmpl.yaml create mode 100644 cluster-autoscaler/scripts/test-ca.sh create mode 100644 cluster-autoscaler/utils/gpumemory/gpumemory.go create mode 100644 cluster-autoscaler/utils/gpumemory/gpumemory_test.go create mode 100644 cluster-autoscaler/utils/mpscontext/mpscontext.go create mode 100644 cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/LICENSE create mode 100644 cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/NOTICE.md create mode 100644 cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/elastigroup.go create mode 100644 cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/aws/aws.go create mode 100644 cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/aws/service.go create mode 100644 cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/aws/tag.go create mode 100644 cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/azure/v3/azure.go create mode 100644 
cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/azure/v3/service.go create mode 100644 cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/gcp/gcp.go create mode 100644 cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/gcp/service.go create mode 100644 cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/gcp/tag.go create mode 100644 cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/client/client.go create mode 100644 cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/client/request.go create mode 100644 cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/client/response.go create mode 100644 cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/config.go create mode 100644 cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/credentials/credentials.go create mode 100644 cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/credentials/provider.go create mode 100644 cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/credentials/provider_chain.go create mode 100644 cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/credentials/provider_env.go create mode 100644 cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/credentials/provider_file.go create mode 100644 cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/credentials/provider_static.go create mode 100644 cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/featureflag/featureflag.go create mode 100644 cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/featureflag/features.go create mode 100644 cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/log/log.go create mode 100644 
cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/session/session.go create mode 100644 cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/types.go create mode 100644 cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/jsonutil/json.go create mode 100644 cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/stringutil/stringutil.go create mode 100644 cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/uritemplates/LICENSE create mode 100644 cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/uritemplates/uritemplates.go create mode 100644 cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/uritemplates/utils.go create mode 100644 cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/useragent/useragent.go create mode 100644 cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/version.go delete mode 100644 cluster-autoscaler/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go delete mode 100644 cluster-autoscaler/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go create mode 100644 cluster-autoscaler/vendor/gopkg.in/ini.v1/.editorconfig create mode 100644 cluster-autoscaler/vendor/gopkg.in/ini.v1/.gitignore create mode 100644 cluster-autoscaler/vendor/gopkg.in/ini.v1/.golangci.yml create mode 100644 cluster-autoscaler/vendor/gopkg.in/ini.v1/LICENSE create mode 100644 cluster-autoscaler/vendor/gopkg.in/ini.v1/Makefile create mode 100644 cluster-autoscaler/vendor/gopkg.in/ini.v1/README.md create mode 100644 cluster-autoscaler/vendor/gopkg.in/ini.v1/codecov.yml create mode 100644 cluster-autoscaler/vendor/gopkg.in/ini.v1/data_source.go create mode 100644 cluster-autoscaler/vendor/gopkg.in/ini.v1/deprecated.go create mode 100644 cluster-autoscaler/vendor/gopkg.in/ini.v1/error.go create mode 100644 
cluster-autoscaler/vendor/gopkg.in/ini.v1/file.go create mode 100644 cluster-autoscaler/vendor/gopkg.in/ini.v1/helper.go create mode 100644 cluster-autoscaler/vendor/gopkg.in/ini.v1/ini.go create mode 100644 cluster-autoscaler/vendor/gopkg.in/ini.v1/key.go create mode 100644 cluster-autoscaler/vendor/gopkg.in/ini.v1/parser.go create mode 100644 cluster-autoscaler/vendor/gopkg.in/ini.v1/section.go create mode 100644 cluster-autoscaler/vendor/gopkg.in/ini.v1/struct.go create mode 100644 cluster-autoscaler/visenze.md create mode 100644 patch/1.27.patch diff --git a/cluster-autoscaler/Dockerfile b/cluster-autoscaler/Dockerfile new file mode 100644 index 000000000000..6b4ae3a2af62 --- /dev/null +++ b/cluster-autoscaler/Dockerfile @@ -0,0 +1,21 @@ +FROM golang:1.19.13 AS build + +ARG TARGETARCH +ENV GOPATH /gopath/ +ENV PATH $GOPATH/bin:$PATH +ENV GO111MODULE auto +ENV GOARCH ${TARGETARCH} + +RUN apt-get update && apt-get --yes install libseccomp-dev +RUN go version +RUN go get github.com/tools/godep +RUN godep version + +WORKDIR /gopath/src/k8s.io/autoscaler/cluster-autoscaler +ADD . . 
+RUN CGO_ENABLED=0 GOOS=linux go build -o cluster-autoscaler --ldflags "-s" + +FROM alpine +COPY --from=build /gopath/src/k8s.io/autoscaler/cluster-autoscaler/cluster-autoscaler / + +CMD ["./cluster-autoscaler"] \ No newline at end of file diff --git a/cluster-autoscaler/cloudprovider/aws/aws_manager.go b/cluster-autoscaler/cloudprovider/aws/aws_manager.go index f45716359487..d489a11cc449 100644 --- a/cluster-autoscaler/cloudprovider/aws/aws_manager.go +++ b/cluster-autoscaler/cloudprovider/aws/aws_manager.go @@ -39,6 +39,9 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/aws/aws-sdk-go/service/eks" "k8s.io/autoscaler/cluster-autoscaler/config" "k8s.io/autoscaler/cluster-autoscaler/utils/gpu" + "k8s.io/autoscaler/cluster-autoscaler/utils/gpumemory" + "k8s.io/autoscaler/cluster-autoscaler/utils/mpscontext" + klog "k8s.io/klog/v2" ) const ( @@ -270,6 +273,8 @@ func (m *AwsManager) buildNodeFromTemplate(asg *asg, template *asgTemplate) (*ap node.Status.Capacity[apiv1.ResourceCPU] = *resource.NewQuantity(template.InstanceType.VCPU, resource.DecimalSI) node.Status.Capacity[gpu.ResourceNvidiaGPU] = *resource.NewQuantity(template.InstanceType.GPU, resource.DecimalSI) node.Status.Capacity[apiv1.ResourceMemory] = *resource.NewQuantity(template.InstanceType.MemoryMb*1024*1024, resource.DecimalSI) + node.Status.Capacity[gpumemory.ResourceVisenzeGPUMemory] = *resource.NewQuantity(template.InstanceType.GPUMemory, resource.DecimalSI) + node.Status.Capacity[mpscontext.ResourceVisenzeMPSContext] = *resource.NewQuantity(template.InstanceType.MPSContext, resource.DecimalSI) m.updateCapacityWithRequirementsOverrides(&node.Status.Capacity, asg.MixedInstancesPolicy) diff --git a/cluster-autoscaler/cloudprovider/builder/builder_all.go b/cluster-autoscaler/cloudprovider/builder/builder_all.go index c8a2677ac355..dddf6578d46c 100644 --- a/cluster-autoscaler/cloudprovider/builder/builder_all.go +++ b/cluster-autoscaler/cloudprovider/builder/builder_all.go @@ -48,6 +48,7 @@ 
import ( "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/scaleway" + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/spotinst" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/tencentcloud" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/vultr" "k8s.io/autoscaler/cluster-autoscaler/config" ) @@ -80,6 +81,7 @@ var AvailableCloudProviders = []string{ cloudprovider.CivoProviderName, cloudprovider.ScalewayProviderName, cloudprovider.RancherProviderName, + cloudprovider.SpotinstProviderName, } // DefaultCloudProvider is GCE. @@ -141,6 +143,8 @@ func buildCloudProvider(opts config.AutoscalingOptions, do cloudprovider.NodeGro return scaleway.BuildScaleway(opts, do, rl) case cloudprovider.RancherProviderName: return rancher.BuildRancher(opts, do, rl) + case cloudprovider.SpotinstProviderName: + return spotinst.BuildSpotinst(opts, do, rl) } return nil } diff --git a/cluster-autoscaler/cloudprovider/cloud_provider.go b/cluster-autoscaler/cloudprovider/cloud_provider.go index 5b482857cab0..34f96f8712fc 100644 --- a/cluster-autoscaler/cloudprovider/cloud_provider.go +++ b/cluster-autoscaler/cloudprovider/cloud_provider.go @@ -84,6 +84,8 @@ const ( CivoProviderName = "civo" // RancherProviderName gets the provider name of rancher RancherProviderName = "rancher" + // SpotinstProviderName gets the provider name of spotinst + SpotinstProviderName = "spotinst" ) // GpuConfig contains the label, type and the resource name for a GPU. diff --git a/cluster-autoscaler/cloudprovider/spotinst/aws_ec2_instance_types.go b/cluster-autoscaler/cloudprovider/spotinst/aws_ec2_instance_types.go new file mode 100644 index 000000000000..5cf2902af657 --- /dev/null +++ b/cluster-autoscaler/cloudprovider/spotinst/aws_ec2_instance_types.go @@ -0,0 +1,4540 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was generated by go generate; DO NOT EDIT + +package aws + +// InstanceType is spec of EC2 instance +type InstanceType struct { + InstanceType string + VCPU int64 + MemoryMb int64 + GPU int64 + Architecture string + GPUMemory int64 + MPSContext int64 +} + +// StaticListLastUpdateTime is a string declaring the last time the static list was updated. +var StaticListLastUpdateTime = "2023-02-14" + +// InstanceTypes is a map of ec2 resources +var InstanceTypes = map[string]*InstanceType{ + "a1.2xlarge": { + InstanceType: "a1.2xlarge", + VCPU: 8, + MemoryMb: 16384, + GPU: 0, + Architecture: "arm64", + }, + "a1.4xlarge": { + InstanceType: "a1.4xlarge", + VCPU: 16, + MemoryMb: 32768, + GPU: 0, + Architecture: "arm64", + }, + "a1.large": { + InstanceType: "a1.large", + VCPU: 2, + MemoryMb: 4096, + GPU: 0, + Architecture: "arm64", + }, + "a1.medium": { + InstanceType: "a1.medium", + VCPU: 1, + MemoryMb: 2048, + GPU: 0, + Architecture: "arm64", + }, + "a1.metal": { + InstanceType: "a1.metal", + VCPU: 16, + MemoryMb: 32768, + GPU: 0, + Architecture: "arm64", + }, + "a1.xlarge": { + InstanceType: "a1.xlarge", + VCPU: 4, + MemoryMb: 8192, + GPU: 0, + Architecture: "arm64", + }, + "c1.medium": { + InstanceType: "c1.medium", + VCPU: 2, + MemoryMb: 1740, + GPU: 0, + Architecture: "amd64", + }, + "c1.xlarge": { + InstanceType: "c1.xlarge", + VCPU: 8, + MemoryMb: 7168, + GPU: 0, + Architecture: "amd64", + }, + "c3.2xlarge": { + InstanceType: "c3.2xlarge", + VCPU: 8, + MemoryMb: 15360, + GPU: 0, + Architecture: "amd64", + }, + "c3.4xlarge": { + InstanceType: "c3.4xlarge", 
+ VCPU: 16, + MemoryMb: 30720, + GPU: 0, + Architecture: "amd64", + }, + "c3.8xlarge": { + InstanceType: "c3.8xlarge", + VCPU: 32, + MemoryMb: 61440, + GPU: 0, + Architecture: "amd64", + }, + "c3.large": { + InstanceType: "c3.large", + VCPU: 2, + MemoryMb: 3840, + GPU: 0, + Architecture: "amd64", + }, + "c3.xlarge": { + InstanceType: "c3.xlarge", + VCPU: 4, + MemoryMb: 7680, + GPU: 0, + Architecture: "amd64", + }, + "c4.2xlarge": { + InstanceType: "c4.2xlarge", + VCPU: 8, + MemoryMb: 15360, + GPU: 0, + Architecture: "amd64", + }, + "c4.4xlarge": { + InstanceType: "c4.4xlarge", + VCPU: 16, + MemoryMb: 30720, + GPU: 0, + Architecture: "amd64", + }, + "c4.8xlarge": { + InstanceType: "c4.8xlarge", + VCPU: 36, + MemoryMb: 61440, + GPU: 0, + Architecture: "amd64", + }, + "c4.large": { + InstanceType: "c4.large", + VCPU: 2, + MemoryMb: 3840, + GPU: 0, + Architecture: "amd64", + }, + "c4.xlarge": { + InstanceType: "c4.xlarge", + VCPU: 4, + MemoryMb: 7680, + GPU: 0, + Architecture: "amd64", + }, + "c5.12xlarge": { + InstanceType: "c5.12xlarge", + VCPU: 48, + MemoryMb: 98304, + GPU: 0, + Architecture: "amd64", + }, + "c5.18xlarge": { + InstanceType: "c5.18xlarge", + VCPU: 72, + MemoryMb: 147456, + GPU: 0, + Architecture: "amd64", + }, + "c5.24xlarge": { + InstanceType: "c5.24xlarge", + VCPU: 96, + MemoryMb: 196608, + GPU: 0, + Architecture: "amd64", + }, + "c5.2xlarge": { + InstanceType: "c5.2xlarge", + VCPU: 8, + MemoryMb: 16384, + GPU: 0, + Architecture: "amd64", + }, + "c5.4xlarge": { + InstanceType: "c5.4xlarge", + VCPU: 16, + MemoryMb: 32768, + GPU: 0, + Architecture: "amd64", + }, + "c5.9xlarge": { + InstanceType: "c5.9xlarge", + VCPU: 36, + MemoryMb: 73728, + GPU: 0, + Architecture: "amd64", + }, + "c5.large": { + InstanceType: "c5.large", + VCPU: 2, + MemoryMb: 4096, + GPU: 0, + Architecture: "amd64", + }, + "c5.metal": { + InstanceType: "c5.metal", + VCPU: 96, + MemoryMb: 196608, + GPU: 0, + Architecture: "amd64", + }, + "c5.xlarge": { + InstanceType: "c5.xlarge", + 
VCPU: 4, + MemoryMb: 8192, + GPU: 0, + Architecture: "amd64", + }, + "c5a.12xlarge": { + InstanceType: "c5a.12xlarge", + VCPU: 48, + MemoryMb: 98304, + GPU: 0, + Architecture: "amd64", + }, + "c5a.16xlarge": { + InstanceType: "c5a.16xlarge", + VCPU: 64, + MemoryMb: 131072, + GPU: 0, + Architecture: "amd64", + }, + "c5a.24xlarge": { + InstanceType: "c5a.24xlarge", + VCPU: 96, + MemoryMb: 196608, + GPU: 0, + Architecture: "amd64", + }, + "c5a.2xlarge": { + InstanceType: "c5a.2xlarge", + VCPU: 8, + MemoryMb: 16384, + GPU: 0, + Architecture: "amd64", + }, + "c5a.4xlarge": { + InstanceType: "c5a.4xlarge", + VCPU: 16, + MemoryMb: 32768, + GPU: 0, + Architecture: "amd64", + }, + "c5a.8xlarge": { + InstanceType: "c5a.8xlarge", + VCPU: 32, + MemoryMb: 65536, + GPU: 0, + Architecture: "amd64", + }, + "c5a.large": { + InstanceType: "c5a.large", + VCPU: 2, + MemoryMb: 4096, + GPU: 0, + Architecture: "amd64", + }, + "c5a.xlarge": { + InstanceType: "c5a.xlarge", + VCPU: 4, + MemoryMb: 8192, + GPU: 0, + Architecture: "amd64", + }, + "c5ad.12xlarge": { + InstanceType: "c5ad.12xlarge", + VCPU: 48, + MemoryMb: 98304, + GPU: 0, + Architecture: "amd64", + }, + "c5ad.16xlarge": { + InstanceType: "c5ad.16xlarge", + VCPU: 64, + MemoryMb: 131072, + GPU: 0, + Architecture: "amd64", + }, + "c5ad.24xlarge": { + InstanceType: "c5ad.24xlarge", + VCPU: 96, + MemoryMb: 196608, + GPU: 0, + Architecture: "amd64", + }, + "c5ad.2xlarge": { + InstanceType: "c5ad.2xlarge", + VCPU: 8, + MemoryMb: 16384, + GPU: 0, + Architecture: "amd64", + }, + "c5ad.4xlarge": { + InstanceType: "c5ad.4xlarge", + VCPU: 16, + MemoryMb: 32768, + GPU: 0, + Architecture: "amd64", + }, + "c5ad.8xlarge": { + InstanceType: "c5ad.8xlarge", + VCPU: 32, + MemoryMb: 65536, + GPU: 0, + Architecture: "amd64", + }, + "c5ad.large": { + InstanceType: "c5ad.large", + VCPU: 2, + MemoryMb: 4096, + GPU: 0, + Architecture: "amd64", + }, + "c5ad.xlarge": { + InstanceType: "c5ad.xlarge", + VCPU: 4, + MemoryMb: 8192, + GPU: 0, + Architecture: 
"amd64", + }, + "c5d.12xlarge": { + InstanceType: "c5d.12xlarge", + VCPU: 48, + MemoryMb: 98304, + GPU: 0, + Architecture: "amd64", + }, + "c5d.18xlarge": { + InstanceType: "c5d.18xlarge", + VCPU: 72, + MemoryMb: 147456, + GPU: 0, + Architecture: "amd64", + }, + "c5d.24xlarge": { + InstanceType: "c5d.24xlarge", + VCPU: 96, + MemoryMb: 196608, + GPU: 0, + Architecture: "amd64", + }, + "c5d.2xlarge": { + InstanceType: "c5d.2xlarge", + VCPU: 8, + MemoryMb: 16384, + GPU: 0, + Architecture: "amd64", + }, + "c5d.4xlarge": { + InstanceType: "c5d.4xlarge", + VCPU: 16, + MemoryMb: 32768, + GPU: 0, + Architecture: "amd64", + }, + "c5d.9xlarge": { + InstanceType: "c5d.9xlarge", + VCPU: 36, + MemoryMb: 73728, + GPU: 0, + Architecture: "amd64", + }, + "c5d.large": { + InstanceType: "c5d.large", + VCPU: 2, + MemoryMb: 4096, + GPU: 0, + Architecture: "amd64", + }, + "c5d.metal": { + InstanceType: "c5d.metal", + VCPU: 96, + MemoryMb: 196608, + GPU: 0, + Architecture: "amd64", + }, + "c5d.xlarge": { + InstanceType: "c5d.xlarge", + VCPU: 4, + MemoryMb: 8192, + GPU: 0, + Architecture: "amd64", + }, + "c5n.18xlarge": { + InstanceType: "c5n.18xlarge", + VCPU: 72, + MemoryMb: 196608, + GPU: 0, + Architecture: "amd64", + }, + "c5n.2xlarge": { + InstanceType: "c5n.2xlarge", + VCPU: 8, + MemoryMb: 21504, + GPU: 0, + Architecture: "amd64", + }, + "c5n.4xlarge": { + InstanceType: "c5n.4xlarge", + VCPU: 16, + MemoryMb: 43008, + GPU: 0, + Architecture: "amd64", + }, + "c5n.9xlarge": { + InstanceType: "c5n.9xlarge", + VCPU: 36, + MemoryMb: 98304, + GPU: 0, + Architecture: "amd64", + }, + "c5n.large": { + InstanceType: "c5n.large", + VCPU: 2, + MemoryMb: 5376, + GPU: 0, + Architecture: "amd64", + }, + "c5n.metal": { + InstanceType: "c5n.metal", + VCPU: 72, + MemoryMb: 196608, + GPU: 0, + Architecture: "amd64", + }, + "c5n.xlarge": { + InstanceType: "c5n.xlarge", + VCPU: 4, + MemoryMb: 10752, + GPU: 0, + Architecture: "amd64", + }, + "c6a.12xlarge": { + InstanceType: "c6a.12xlarge", + VCPU: 48, + 
MemoryMb: 98304, + GPU: 0, + Architecture: "amd64", + }, + "c6a.16xlarge": { + InstanceType: "c6a.16xlarge", + VCPU: 64, + MemoryMb: 131072, + GPU: 0, + Architecture: "amd64", + }, + "c6a.24xlarge": { + InstanceType: "c6a.24xlarge", + VCPU: 96, + MemoryMb: 196608, + GPU: 0, + Architecture: "amd64", + }, + "c6a.2xlarge": { + InstanceType: "c6a.2xlarge", + VCPU: 8, + MemoryMb: 16384, + GPU: 0, + Architecture: "amd64", + }, + "c6a.32xlarge": { + InstanceType: "c6a.32xlarge", + VCPU: 128, + MemoryMb: 262144, + GPU: 0, + Architecture: "amd64", + }, + "c6a.48xlarge": { + InstanceType: "c6a.48xlarge", + VCPU: 192, + MemoryMb: 393216, + GPU: 0, + Architecture: "amd64", + }, + "c6a.4xlarge": { + InstanceType: "c6a.4xlarge", + VCPU: 16, + MemoryMb: 32768, + GPU: 0, + Architecture: "amd64", + }, + "c6a.8xlarge": { + InstanceType: "c6a.8xlarge", + VCPU: 32, + MemoryMb: 65536, + GPU: 0, + Architecture: "amd64", + }, + "c6a.large": { + InstanceType: "c6a.large", + VCPU: 2, + MemoryMb: 4096, + GPU: 0, + Architecture: "amd64", + }, + "c6a.metal": { + InstanceType: "c6a.metal", + VCPU: 192, + MemoryMb: 393216, + GPU: 0, + Architecture: "amd64", + }, + "c6a.xlarge": { + InstanceType: "c6a.xlarge", + VCPU: 4, + MemoryMb: 8192, + GPU: 0, + Architecture: "amd64", + }, + "c6g.12xlarge": { + InstanceType: "c6g.12xlarge", + VCPU: 48, + MemoryMb: 98304, + GPU: 0, + Architecture: "arm64", + }, + "c6g.16xlarge": { + InstanceType: "c6g.16xlarge", + VCPU: 64, + MemoryMb: 131072, + GPU: 0, + Architecture: "arm64", + }, + "c6g.2xlarge": { + InstanceType: "c6g.2xlarge", + VCPU: 8, + MemoryMb: 16384, + GPU: 0, + Architecture: "arm64", + }, + "c6g.4xlarge": { + InstanceType: "c6g.4xlarge", + VCPU: 16, + MemoryMb: 32768, + GPU: 0, + Architecture: "arm64", + }, + "c6g.8xlarge": { + InstanceType: "c6g.8xlarge", + VCPU: 32, + MemoryMb: 65536, + GPU: 0, + Architecture: "arm64", + }, + "c6g.large": { + InstanceType: "c6g.large", + VCPU: 2, + MemoryMb: 4096, + GPU: 0, + Architecture: "arm64", + }, + 
"c6g.medium": { + InstanceType: "c6g.medium", + VCPU: 1, + MemoryMb: 2048, + GPU: 0, + Architecture: "arm64", + }, + "c6g.metal": { + InstanceType: "c6g.metal", + VCPU: 64, + MemoryMb: 131072, + GPU: 0, + Architecture: "arm64", + }, + "c6g.xlarge": { + InstanceType: "c6g.xlarge", + VCPU: 4, + MemoryMb: 8192, + GPU: 0, + Architecture: "arm64", + }, + "c6gd.12xlarge": { + InstanceType: "c6gd.12xlarge", + VCPU: 48, + MemoryMb: 98304, + GPU: 0, + Architecture: "arm64", + }, + "c6gd.16xlarge": { + InstanceType: "c6gd.16xlarge", + VCPU: 64, + MemoryMb: 131072, + GPU: 0, + Architecture: "arm64", + }, + "c6gd.2xlarge": { + InstanceType: "c6gd.2xlarge", + VCPU: 8, + MemoryMb: 16384, + GPU: 0, + Architecture: "arm64", + }, + "c6gd.4xlarge": { + InstanceType: "c6gd.4xlarge", + VCPU: 16, + MemoryMb: 32768, + GPU: 0, + Architecture: "arm64", + }, + "c6gd.8xlarge": { + InstanceType: "c6gd.8xlarge", + VCPU: 32, + MemoryMb: 65536, + GPU: 0, + Architecture: "arm64", + }, + "c6gd.large": { + InstanceType: "c6gd.large", + VCPU: 2, + MemoryMb: 4096, + GPU: 0, + Architecture: "arm64", + }, + "c6gd.medium": { + InstanceType: "c6gd.medium", + VCPU: 1, + MemoryMb: 2048, + GPU: 0, + Architecture: "arm64", + }, + "c6gd.metal": { + InstanceType: "c6gd.metal", + VCPU: 64, + MemoryMb: 131072, + GPU: 0, + Architecture: "arm64", + }, + "c6gd.xlarge": { + InstanceType: "c6gd.xlarge", + VCPU: 4, + MemoryMb: 8192, + GPU: 0, + Architecture: "arm64", + }, + "c6gn.12xlarge": { + InstanceType: "c6gn.12xlarge", + VCPU: 48, + MemoryMb: 98304, + GPU: 0, + Architecture: "arm64", + }, + "c6gn.16xlarge": { + InstanceType: "c6gn.16xlarge", + VCPU: 64, + MemoryMb: 131072, + GPU: 0, + Architecture: "arm64", + }, + "c6gn.2xlarge": { + InstanceType: "c6gn.2xlarge", + VCPU: 8, + MemoryMb: 16384, + GPU: 0, + Architecture: "arm64", + }, + "c6gn.4xlarge": { + InstanceType: "c6gn.4xlarge", + VCPU: 16, + MemoryMb: 32768, + GPU: 0, + Architecture: "arm64", + }, + "c6gn.8xlarge": { + InstanceType: "c6gn.8xlarge", + VCPU: 
32, + MemoryMb: 65536, + GPU: 0, + Architecture: "arm64", + }, + "c6gn.large": { + InstanceType: "c6gn.large", + VCPU: 2, + MemoryMb: 4096, + GPU: 0, + Architecture: "arm64", + }, + "c6gn.medium": { + InstanceType: "c6gn.medium", + VCPU: 1, + MemoryMb: 2048, + GPU: 0, + Architecture: "arm64", + }, + "c6gn.xlarge": { + InstanceType: "c6gn.xlarge", + VCPU: 4, + MemoryMb: 8192, + GPU: 0, + Architecture: "arm64", + }, + "c6i.12xlarge": { + InstanceType: "c6i.12xlarge", + VCPU: 48, + MemoryMb: 98304, + GPU: 0, + Architecture: "amd64", + }, + "c6i.16xlarge": { + InstanceType: "c6i.16xlarge", + VCPU: 64, + MemoryMb: 131072, + GPU: 0, + Architecture: "amd64", + }, + "c6i.24xlarge": { + InstanceType: "c6i.24xlarge", + VCPU: 96, + MemoryMb: 196608, + GPU: 0, + Architecture: "amd64", + }, + "c6i.2xlarge": { + InstanceType: "c6i.2xlarge", + VCPU: 8, + MemoryMb: 16384, + GPU: 0, + Architecture: "amd64", + }, + "c6i.32xlarge": { + InstanceType: "c6i.32xlarge", + VCPU: 128, + MemoryMb: 262144, + GPU: 0, + Architecture: "amd64", + }, + "c6i.4xlarge": { + InstanceType: "c6i.4xlarge", + VCPU: 16, + MemoryMb: 32768, + GPU: 0, + Architecture: "amd64", + }, + "c6i.8xlarge": { + InstanceType: "c6i.8xlarge", + VCPU: 32, + MemoryMb: 65536, + GPU: 0, + Architecture: "amd64", + }, + "c6i.large": { + InstanceType: "c6i.large", + VCPU: 2, + MemoryMb: 4096, + GPU: 0, + Architecture: "amd64", + }, + "c6i.metal": { + InstanceType: "c6i.metal", + VCPU: 128, + MemoryMb: 262144, + GPU: 0, + Architecture: "amd64", + }, + "c6i.xlarge": { + InstanceType: "c6i.xlarge", + VCPU: 4, + MemoryMb: 8192, + GPU: 0, + Architecture: "amd64", + }, + "c6id.12xlarge": { + InstanceType: "c6id.12xlarge", + VCPU: 48, + MemoryMb: 98304, + GPU: 0, + Architecture: "amd64", + }, + "c6id.16xlarge": { + InstanceType: "c6id.16xlarge", + VCPU: 64, + MemoryMb: 131072, + GPU: 0, + Architecture: "amd64", + }, + "c6id.24xlarge": { + InstanceType: "c6id.24xlarge", + VCPU: 96, + MemoryMb: 196608, + GPU: 0, + Architecture: "amd64", 
+ }, + "c6id.2xlarge": { + InstanceType: "c6id.2xlarge", + VCPU: 8, + MemoryMb: 16384, + GPU: 0, + Architecture: "amd64", + }, + "c6id.32xlarge": { + InstanceType: "c6id.32xlarge", + VCPU: 128, + MemoryMb: 262144, + GPU: 0, + Architecture: "amd64", + }, + "c6id.4xlarge": { + InstanceType: "c6id.4xlarge", + VCPU: 16, + MemoryMb: 32768, + GPU: 0, + Architecture: "amd64", + }, + "c6id.8xlarge": { + InstanceType: "c6id.8xlarge", + VCPU: 32, + MemoryMb: 65536, + GPU: 0, + Architecture: "amd64", + }, + "c6id.large": { + InstanceType: "c6id.large", + VCPU: 2, + MemoryMb: 4096, + GPU: 0, + Architecture: "amd64", + }, + "c6id.metal": { + InstanceType: "c6id.metal", + VCPU: 128, + MemoryMb: 262144, + GPU: 0, + Architecture: "amd64", + }, + "c6id.xlarge": { + InstanceType: "c6id.xlarge", + VCPU: 4, + MemoryMb: 8192, + GPU: 0, + Architecture: "amd64", + }, + "c6in.12xlarge": { + InstanceType: "c6in.12xlarge", + VCPU: 48, + MemoryMb: 98304, + GPU: 0, + Architecture: "amd64", + }, + "c6in.16xlarge": { + InstanceType: "c6in.16xlarge", + VCPU: 64, + MemoryMb: 131072, + GPU: 0, + Architecture: "amd64", + }, + "c6in.24xlarge": { + InstanceType: "c6in.24xlarge", + VCPU: 96, + MemoryMb: 196608, + GPU: 0, + Architecture: "amd64", + }, + "c6in.2xlarge": { + InstanceType: "c6in.2xlarge", + VCPU: 8, + MemoryMb: 16384, + GPU: 0, + Architecture: "amd64", + }, + "c6in.32xlarge": { + InstanceType: "c6in.32xlarge", + VCPU: 128, + MemoryMb: 262144, + GPU: 0, + Architecture: "amd64", + }, + "c6in.4xlarge": { + InstanceType: "c6in.4xlarge", + VCPU: 16, + MemoryMb: 32768, + GPU: 0, + Architecture: "amd64", + }, + "c6in.8xlarge": { + InstanceType: "c6in.8xlarge", + VCPU: 32, + MemoryMb: 65536, + GPU: 0, + Architecture: "amd64", + }, + "c6in.large": { + InstanceType: "c6in.large", + VCPU: 2, + MemoryMb: 4096, + GPU: 0, + Architecture: "amd64", + }, + "c6in.xlarge": { + InstanceType: "c6in.xlarge", + VCPU: 4, + MemoryMb: 8192, + GPU: 0, + Architecture: "amd64", + }, + "c7g.12xlarge": { + 
InstanceType: "c7g.12xlarge", + VCPU: 48, + MemoryMb: 98304, + GPU: 0, + Architecture: "arm64", + }, + "c7g.16xlarge": { + InstanceType: "c7g.16xlarge", + VCPU: 64, + MemoryMb: 131072, + GPU: 0, + Architecture: "arm64", + }, + "c7g.2xlarge": { + InstanceType: "c7g.2xlarge", + VCPU: 8, + MemoryMb: 16384, + GPU: 0, + Architecture: "arm64", + }, + "c7g.4xlarge": { + InstanceType: "c7g.4xlarge", + VCPU: 16, + MemoryMb: 32768, + GPU: 0, + Architecture: "arm64", + }, + "c7g.8xlarge": { + InstanceType: "c7g.8xlarge", + VCPU: 32, + MemoryMb: 65536, + GPU: 0, + Architecture: "arm64", + }, + "c7g.large": { + InstanceType: "c7g.large", + VCPU: 2, + MemoryMb: 4096, + GPU: 0, + Architecture: "arm64", + }, + "c7g.medium": { + InstanceType: "c7g.medium", + VCPU: 1, + MemoryMb: 2048, + GPU: 0, + Architecture: "arm64", + }, + "c7g.metal": { + InstanceType: "c7g.metal", + VCPU: 64, + MemoryMb: 131072, + GPU: 0, + Architecture: "arm64", + }, + "c7g.xlarge": { + InstanceType: "c7g.xlarge", + VCPU: 4, + MemoryMb: 8192, + GPU: 0, + Architecture: "arm64", + }, + "cc2.8xlarge": { + InstanceType: "cc2.8xlarge", + VCPU: 32, + MemoryMb: 61952, + GPU: 0, + Architecture: "amd64", + }, + "d2.2xlarge": { + InstanceType: "d2.2xlarge", + VCPU: 8, + MemoryMb: 62464, + GPU: 0, + Architecture: "amd64", + }, + "d2.4xlarge": { + InstanceType: "d2.4xlarge", + VCPU: 16, + MemoryMb: 124928, + GPU: 0, + Architecture: "amd64", + }, + "d2.8xlarge": { + InstanceType: "d2.8xlarge", + VCPU: 36, + MemoryMb: 249856, + GPU: 0, + Architecture: "amd64", + }, + "d2.xlarge": { + InstanceType: "d2.xlarge", + VCPU: 4, + MemoryMb: 31232, + GPU: 0, + Architecture: "amd64", + }, + "d3.2xlarge": { + InstanceType: "d3.2xlarge", + VCPU: 8, + MemoryMb: 65536, + GPU: 0, + Architecture: "amd64", + }, + "d3.4xlarge": { + InstanceType: "d3.4xlarge", + VCPU: 16, + MemoryMb: 131072, + GPU: 0, + Architecture: "amd64", + }, + "d3.8xlarge": { + InstanceType: "d3.8xlarge", + VCPU: 32, + MemoryMb: 262144, + GPU: 0, + Architecture: 
"amd64", + }, + "d3.xlarge": { + InstanceType: "d3.xlarge", + VCPU: 4, + MemoryMb: 32768, + GPU: 0, + Architecture: "amd64", + }, + "d3en.12xlarge": { + InstanceType: "d3en.12xlarge", + VCPU: 48, + MemoryMb: 196608, + GPU: 0, + Architecture: "amd64", + }, + "d3en.2xlarge": { + InstanceType: "d3en.2xlarge", + VCPU: 8, + MemoryMb: 32768, + GPU: 0, + Architecture: "amd64", + }, + "d3en.4xlarge": { + InstanceType: "d3en.4xlarge", + VCPU: 16, + MemoryMb: 65536, + GPU: 0, + Architecture: "amd64", + }, + "d3en.6xlarge": { + InstanceType: "d3en.6xlarge", + VCPU: 24, + MemoryMb: 98304, + GPU: 0, + Architecture: "amd64", + }, + "d3en.8xlarge": { + InstanceType: "d3en.8xlarge", + VCPU: 32, + MemoryMb: 131072, + GPU: 0, + Architecture: "amd64", + }, + "d3en.xlarge": { + InstanceType: "d3en.xlarge", + VCPU: 4, + MemoryMb: 16384, + GPU: 0, + Architecture: "amd64", + }, + "dl1.24xlarge": { + InstanceType: "dl1.24xlarge", + VCPU: 96, + MemoryMb: 786432, + GPU: 8, + Architecture: "amd64", + GPUMemory: 15842934784, + MPSContext: 32, + }, + "f1.16xlarge": { + InstanceType: "f1.16xlarge", + VCPU: 64, + MemoryMb: 999424, + GPU: 0, + Architecture: "amd64", + }, + "f1.2xlarge": { + InstanceType: "f1.2xlarge", + VCPU: 8, + MemoryMb: 124928, + GPU: 0, + Architecture: "amd64", + }, + "f1.4xlarge": { + InstanceType: "f1.4xlarge", + VCPU: 16, + MemoryMb: 249856, + GPU: 0, + Architecture: "amd64", + }, + "g2.2xlarge": { + InstanceType: "g2.2xlarge", + VCPU: 8, + MemoryMb: 15360, + GPU: 1, + Architecture: "amd64", + GPUMemory: 15842934784, + MPSContext: 32, + }, + "g2.8xlarge": { + InstanceType: "g2.8xlarge", + VCPU: 32, + MemoryMb: 61440, + GPU: 4, + Architecture: "amd64", + GPUMemory: 15842934784, + MPSContext: 32, + }, + "g3.16xlarge": { + InstanceType: "g3.16xlarge", + VCPU: 64, + MemoryMb: 499712, + GPU: 4, + Architecture: "amd64", + GPUMemory: 15842934784, + MPSContext: 32, + }, + "g3.4xlarge": { + InstanceType: "g3.4xlarge", + VCPU: 16, + MemoryMb: 124928, + GPU: 1, + Architecture: 
"amd64", + GPUMemory: 15842934784, + MPSContext: 32, + }, + "g3.8xlarge": { + InstanceType: "g3.8xlarge", + VCPU: 32, + MemoryMb: 249856, + GPU: 2, + Architecture: "amd64", + GPUMemory: 15842934784, + MPSContext: 32, + }, + "g3s.xlarge": { + InstanceType: "g3s.xlarge", + VCPU: 4, + MemoryMb: 31232, + GPU: 1, + Architecture: "amd64", + GPUMemory: 15842934784, + MPSContext: 32, + }, + "g4ad.16xlarge": { + InstanceType: "g4ad.16xlarge", + VCPU: 64, + MemoryMb: 262144, + GPU: 4, + Architecture: "amd64", + GPUMemory: 15842934784, + MPSContext: 32, + }, + "g4ad.2xlarge": { + InstanceType: "g4ad.2xlarge", + VCPU: 8, + MemoryMb: 32768, + GPU: 1, + Architecture: "amd64", + GPUMemory: 15842934784, + MPSContext: 32, + }, + "g4ad.4xlarge": { + InstanceType: "g4ad.4xlarge", + VCPU: 16, + MemoryMb: 65536, + GPU: 1, + Architecture: "amd64", + GPUMemory: 15842934784, + MPSContext: 32, + }, + "g4ad.8xlarge": { + InstanceType: "g4ad.8xlarge", + VCPU: 32, + MemoryMb: 131072, + GPU: 2, + Architecture: "amd64", + GPUMemory: 15842934784, + MPSContext: 32, + }, + "g4ad.xlarge": { + InstanceType: "g4ad.xlarge", + VCPU: 4, + MemoryMb: 16384, + GPU: 1, + Architecture: "amd64", + GPUMemory: 15842934784, + MPSContext: 32, + }, + "g4dn.12xlarge": { + InstanceType: "g4dn.12xlarge", + VCPU: 48, + MemoryMb: 196608, + GPU: 4, + Architecture: "amd64", + GPUMemory: 15842934784, + MPSContext: 32, + }, + "g4dn.16xlarge": { + InstanceType: "g4dn.16xlarge", + VCPU: 64, + MemoryMb: 262144, + GPU: 1, + Architecture: "amd64", + GPUMemory: 15842934784, + MPSContext: 32, + }, + "g4dn.2xlarge": { + InstanceType: "g4dn.2xlarge", + VCPU: 8, + MemoryMb: 32768, + GPU: 1, + Architecture: "amd64", + GPUMemory: 15842934784, + MPSContext: 32, + }, + "g4dn.4xlarge": { + InstanceType: "g4dn.4xlarge", + VCPU: 16, + MemoryMb: 65536, + GPU: 1, + Architecture: "amd64", + GPUMemory: 15842934784, + MPSContext: 32, + }, + "g4dn.8xlarge": { + InstanceType: "g4dn.8xlarge", + VCPU: 32, + MemoryMb: 131072, + GPU: 1, + 
Architecture: "amd64", + GPUMemory: 15842934784, + MPSContext: 32, + }, + "g4dn.metal": { + InstanceType: "g4dn.metal", + VCPU: 96, + MemoryMb: 393216, + GPU: 8, + Architecture: "amd64", + GPUMemory: 15842934784, + MPSContext: 32, + }, + "g4dn.xlarge": { + InstanceType: "g4dn.xlarge", + VCPU: 4, + MemoryMb: 16384, + GPU: 1, + Architecture: "amd64", + GPUMemory: 15842934784, + MPSContext: 32, + }, + "g5.12xlarge": { + InstanceType: "g5.12xlarge", + VCPU: 48, + MemoryMb: 196608, + GPU: 4, + Architecture: "amd64", + GPUMemory: 15842934784, + MPSContext: 32, + }, + "g5.16xlarge": { + InstanceType: "g5.16xlarge", + VCPU: 64, + MemoryMb: 262144, + GPU: 1, + Architecture: "amd64", + GPUMemory: 15842934784, + MPSContext: 32, + }, + "g5.24xlarge": { + InstanceType: "g5.24xlarge", + VCPU: 96, + MemoryMb: 393216, + GPU: 4, + Architecture: "amd64", + GPUMemory: 15842934784, + MPSContext: 32, + }, + "g5.2xlarge": { + InstanceType: "g5.2xlarge", + VCPU: 8, + MemoryMb: 32768, + GPU: 1, + Architecture: "amd64", + GPUMemory: 15842934784, + MPSContext: 32, + }, + "g5.48xlarge": { + InstanceType: "g5.48xlarge", + VCPU: 192, + MemoryMb: 786432, + GPU: 8, + Architecture: "amd64", + GPUMemory: 15842934784, + MPSContext: 32, + }, + "g5.4xlarge": { + InstanceType: "g5.4xlarge", + VCPU: 16, + MemoryMb: 65536, + GPU: 1, + Architecture: "amd64", + GPUMemory: 15842934784, + MPSContext: 32, + }, + "g5.8xlarge": { + InstanceType: "g5.8xlarge", + VCPU: 32, + MemoryMb: 131072, + GPU: 1, + Architecture: "amd64", + GPUMemory: 15842934784, + MPSContext: 32, + }, + "g5.xlarge": { + InstanceType: "g5.xlarge", + VCPU: 4, + MemoryMb: 16384, + GPU: 1, + Architecture: "amd64", + GPUMemory: 15842934784, + MPSContext: 32, + }, + "g5g.16xlarge": { + InstanceType: "g5g.16xlarge", + VCPU: 64, + MemoryMb: 131072, + GPU: 2, + Architecture: "arm64", + GPUMemory: 15842934784, + MPSContext: 32, + }, + "g5g.2xlarge": { + InstanceType: "g5g.2xlarge", + VCPU: 8, + MemoryMb: 16384, + GPU: 1, + Architecture: "arm64", + 
GPUMemory: 15842934784, + MPSContext: 32, + }, + "g5g.4xlarge": { + InstanceType: "g5g.4xlarge", + VCPU: 16, + MemoryMb: 32768, + GPU: 1, + Architecture: "arm64", + GPUMemory: 15842934784, + MPSContext: 32, + }, + "g5g.8xlarge": { + InstanceType: "g5g.8xlarge", + VCPU: 32, + MemoryMb: 65536, + GPU: 1, + Architecture: "arm64", + GPUMemory: 15842934784, + MPSContext: 32, + }, + "g5g.metal": { + InstanceType: "g5g.metal", + VCPU: 64, + MemoryMb: 131072, + GPU: 2, + Architecture: "arm64", + GPUMemory: 15842934784, + MPSContext: 32, + }, + "g5g.xlarge": { + InstanceType: "g5g.xlarge", + VCPU: 4, + MemoryMb: 8192, + GPU: 1, + Architecture: "arm64", + GPUMemory: 15842934784, + MPSContext: 32, + }, + "h1.16xlarge": { + InstanceType: "h1.16xlarge", + VCPU: 64, + MemoryMb: 262144, + GPU: 0, + Architecture: "amd64", + }, + "h1.2xlarge": { + InstanceType: "h1.2xlarge", + VCPU: 8, + MemoryMb: 32768, + GPU: 0, + Architecture: "amd64", + }, + "h1.4xlarge": { + InstanceType: "h1.4xlarge", + VCPU: 16, + MemoryMb: 65536, + GPU: 0, + Architecture: "amd64", + }, + "h1.8xlarge": { + InstanceType: "h1.8xlarge", + VCPU: 32, + MemoryMb: 131072, + GPU: 0, + Architecture: "amd64", + }, + "i2.2xlarge": { + InstanceType: "i2.2xlarge", + VCPU: 8, + MemoryMb: 62464, + GPU: 0, + Architecture: "amd64", + }, + "i2.4xlarge": { + InstanceType: "i2.4xlarge", + VCPU: 16, + MemoryMb: 124928, + GPU: 0, + Architecture: "amd64", + }, + "i2.8xlarge": { + InstanceType: "i2.8xlarge", + VCPU: 32, + MemoryMb: 249856, + GPU: 0, + Architecture: "amd64", + }, + "i2.xlarge": { + InstanceType: "i2.xlarge", + VCPU: 4, + MemoryMb: 31232, + GPU: 0, + Architecture: "amd64", + }, + "i3.16xlarge": { + InstanceType: "i3.16xlarge", + VCPU: 64, + MemoryMb: 499712, + GPU: 0, + Architecture: "amd64", + }, + "i3.2xlarge": { + InstanceType: "i3.2xlarge", + VCPU: 8, + MemoryMb: 62464, + GPU: 0, + Architecture: "amd64", + }, + "i3.4xlarge": { + InstanceType: "i3.4xlarge", + VCPU: 16, + MemoryMb: 124928, + GPU: 0, + Architecture: 
"amd64", + }, + "i3.8xlarge": { + InstanceType: "i3.8xlarge", + VCPU: 32, + MemoryMb: 249856, + GPU: 0, + Architecture: "amd64", + }, + "i3.large": { + InstanceType: "i3.large", + VCPU: 2, + MemoryMb: 15616, + GPU: 0, + Architecture: "amd64", + }, + "i3.metal": { + InstanceType: "i3.metal", + VCPU: 72, + MemoryMb: 524288, + GPU: 0, + Architecture: "amd64", + }, + "i3.xlarge": { + InstanceType: "i3.xlarge", + VCPU: 4, + MemoryMb: 31232, + GPU: 0, + Architecture: "amd64", + }, + "i3en.12xlarge": { + InstanceType: "i3en.12xlarge", + VCPU: 48, + MemoryMb: 393216, + GPU: 0, + Architecture: "amd64", + }, + "i3en.24xlarge": { + InstanceType: "i3en.24xlarge", + VCPU: 96, + MemoryMb: 786432, + GPU: 0, + Architecture: "amd64", + }, + "i3en.2xlarge": { + InstanceType: "i3en.2xlarge", + VCPU: 8, + MemoryMb: 65536, + GPU: 0, + Architecture: "amd64", + }, + "i3en.3xlarge": { + InstanceType: "i3en.3xlarge", + VCPU: 12, + MemoryMb: 98304, + GPU: 0, + Architecture: "amd64", + }, + "i3en.6xlarge": { + InstanceType: "i3en.6xlarge", + VCPU: 24, + MemoryMb: 196608, + GPU: 0, + Architecture: "amd64", + }, + "i3en.large": { + InstanceType: "i3en.large", + VCPU: 2, + MemoryMb: 16384, + GPU: 0, + Architecture: "amd64", + }, + "i3en.metal": { + InstanceType: "i3en.metal", + VCPU: 96, + MemoryMb: 786432, + GPU: 0, + Architecture: "amd64", + }, + "i3en.xlarge": { + InstanceType: "i3en.xlarge", + VCPU: 4, + MemoryMb: 32768, + GPU: 0, + Architecture: "amd64", + }, + "i4i.16xlarge": { + InstanceType: "i4i.16xlarge", + VCPU: 64, + MemoryMb: 524288, + GPU: 0, + Architecture: "amd64", + }, + "i4i.2xlarge": { + InstanceType: "i4i.2xlarge", + VCPU: 8, + MemoryMb: 65536, + GPU: 0, + Architecture: "amd64", + }, + "i4i.32xlarge": { + InstanceType: "i4i.32xlarge", + VCPU: 128, + MemoryMb: 1048576, + GPU: 0, + Architecture: "amd64", + }, + "i4i.4xlarge": { + InstanceType: "i4i.4xlarge", + VCPU: 16, + MemoryMb: 131072, + GPU: 0, + Architecture: "amd64", + }, + "i4i.8xlarge": { + InstanceType: 
"i4i.8xlarge", + VCPU: 32, + MemoryMb: 262144, + GPU: 0, + Architecture: "amd64", + }, + "i4i.large": { + InstanceType: "i4i.large", + VCPU: 2, + MemoryMb: 16384, + GPU: 0, + Architecture: "amd64", + }, + "i4i.metal": { + InstanceType: "i4i.metal", + VCPU: 128, + MemoryMb: 1048576, + GPU: 0, + Architecture: "amd64", + }, + "i4i.xlarge": { + InstanceType: "i4i.xlarge", + VCPU: 4, + MemoryMb: 32768, + GPU: 0, + Architecture: "amd64", + }, + "im4gn.16xlarge": { + InstanceType: "im4gn.16xlarge", + VCPU: 64, + MemoryMb: 262144, + GPU: 0, + Architecture: "arm64", + }, + "im4gn.2xlarge": { + InstanceType: "im4gn.2xlarge", + VCPU: 8, + MemoryMb: 32768, + GPU: 0, + Architecture: "arm64", + }, + "im4gn.4xlarge": { + InstanceType: "im4gn.4xlarge", + VCPU: 16, + MemoryMb: 65536, + GPU: 0, + Architecture: "arm64", + }, + "im4gn.8xlarge": { + InstanceType: "im4gn.8xlarge", + VCPU: 32, + MemoryMb: 131072, + GPU: 0, + Architecture: "arm64", + }, + "im4gn.large": { + InstanceType: "im4gn.large", + VCPU: 2, + MemoryMb: 8192, + GPU: 0, + Architecture: "arm64", + }, + "im4gn.xlarge": { + InstanceType: "im4gn.xlarge", + VCPU: 4, + MemoryMb: 16384, + GPU: 0, + Architecture: "arm64", + }, + "inf1.24xlarge": { + InstanceType: "inf1.24xlarge", + VCPU: 96, + MemoryMb: 196608, + GPU: 0, + Architecture: "amd64", + }, + "inf1.2xlarge": { + InstanceType: "inf1.2xlarge", + VCPU: 8, + MemoryMb: 16384, + GPU: 0, + Architecture: "amd64", + }, + "inf1.6xlarge": { + InstanceType: "inf1.6xlarge", + VCPU: 24, + MemoryMb: 49152, + GPU: 0, + Architecture: "amd64", + }, + "inf1.xlarge": { + InstanceType: "inf1.xlarge", + VCPU: 4, + MemoryMb: 8192, + GPU: 0, + Architecture: "amd64", + }, + "inf2.48xlarge": { + InstanceType: "inf2.48xlarge", + VCPU: 192, + MemoryMb: 786432, + GPU: 0, + Architecture: "amd64", + }, + "inf2.24xlarge": { + InstanceType: "inf2.24xlarge", + VCPU: 96, + MemoryMb: 393216, + GPU: 0, + Architecture: "amd64", + }, + "inf2.8xlarge": { + InstanceType: "inf2.8xlarge", + VCPU: 32, + 
MemoryMb: 131072, + GPU: 0, + Architecture: "amd64", + }, + "inf2.xlarge": { + InstanceType: "inf2.xlarge", + VCPU: 4, + MemoryMb: 16384, + GPU: 0, + Architecture: "amd64", + }, + "is4gen.2xlarge": { + InstanceType: "is4gen.2xlarge", + VCPU: 8, + MemoryMb: 49152, + GPU: 0, + Architecture: "arm64", + }, + "is4gen.4xlarge": { + InstanceType: "is4gen.4xlarge", + VCPU: 16, + MemoryMb: 98304, + GPU: 0, + Architecture: "arm64", + }, + "is4gen.8xlarge": { + InstanceType: "is4gen.8xlarge", + VCPU: 32, + MemoryMb: 196608, + GPU: 0, + Architecture: "arm64", + }, + "is4gen.large": { + InstanceType: "is4gen.large", + VCPU: 2, + MemoryMb: 12288, + GPU: 0, + Architecture: "arm64", + }, + "is4gen.medium": { + InstanceType: "is4gen.medium", + VCPU: 1, + MemoryMb: 6144, + GPU: 0, + Architecture: "arm64", + }, + "is4gen.xlarge": { + InstanceType: "is4gen.xlarge", + VCPU: 4, + MemoryMb: 24576, + GPU: 0, + Architecture: "arm64", + }, + "m1.large": { + InstanceType: "m1.large", + VCPU: 2, + MemoryMb: 7680, + GPU: 0, + Architecture: "amd64", + }, + "m1.medium": { + InstanceType: "m1.medium", + VCPU: 1, + MemoryMb: 3788, + GPU: 0, + Architecture: "amd64", + }, + "m1.small": { + InstanceType: "m1.small", + VCPU: 1, + MemoryMb: 1740, + GPU: 0, + Architecture: "amd64", + }, + "m1.xlarge": { + InstanceType: "m1.xlarge", + VCPU: 4, + MemoryMb: 15360, + GPU: 0, + Architecture: "amd64", + }, + "m2.2xlarge": { + InstanceType: "m2.2xlarge", + VCPU: 4, + MemoryMb: 35020, + GPU: 0, + Architecture: "amd64", + }, + "m2.4xlarge": { + InstanceType: "m2.4xlarge", + VCPU: 8, + MemoryMb: 70041, + GPU: 0, + Architecture: "amd64", + }, + "m2.xlarge": { + InstanceType: "m2.xlarge", + VCPU: 2, + MemoryMb: 17510, + GPU: 0, + Architecture: "amd64", + }, + "m3.2xlarge": { + InstanceType: "m3.2xlarge", + VCPU: 8, + MemoryMb: 30720, + GPU: 0, + Architecture: "amd64", + }, + "m3.large": { + InstanceType: "m3.large", + VCPU: 2, + MemoryMb: 7680, + GPU: 0, + Architecture: "amd64", + }, + "m3.medium": { + 
InstanceType: "m3.medium", + VCPU: 1, + MemoryMb: 3840, + GPU: 0, + Architecture: "amd64", + }, + "m3.xlarge": { + InstanceType: "m3.xlarge", + VCPU: 4, + MemoryMb: 15360, + GPU: 0, + Architecture: "amd64", + }, + "m4.10xlarge": { + InstanceType: "m4.10xlarge", + VCPU: 40, + MemoryMb: 163840, + GPU: 0, + Architecture: "amd64", + }, + "m4.16xlarge": { + InstanceType: "m4.16xlarge", + VCPU: 64, + MemoryMb: 262144, + GPU: 0, + Architecture: "amd64", + }, + "m4.2xlarge": { + InstanceType: "m4.2xlarge", + VCPU: 8, + MemoryMb: 32768, + GPU: 0, + Architecture: "amd64", + }, + "m4.4xlarge": { + InstanceType: "m4.4xlarge", + VCPU: 16, + MemoryMb: 65536, + GPU: 0, + Architecture: "amd64", + }, + "m4.large": { + InstanceType: "m4.large", + VCPU: 2, + MemoryMb: 8192, + GPU: 0, + Architecture: "amd64", + }, + "m4.xlarge": { + InstanceType: "m4.xlarge", + VCPU: 4, + MemoryMb: 16384, + GPU: 0, + Architecture: "amd64", + }, + "m5.12xlarge": { + InstanceType: "m5.12xlarge", + VCPU: 48, + MemoryMb: 196608, + GPU: 0, + Architecture: "amd64", + }, + "m5.16xlarge": { + InstanceType: "m5.16xlarge", + VCPU: 64, + MemoryMb: 262144, + GPU: 0, + Architecture: "amd64", + }, + "m5.24xlarge": { + InstanceType: "m5.24xlarge", + VCPU: 96, + MemoryMb: 393216, + GPU: 0, + Architecture: "amd64", + }, + "m5.2xlarge": { + InstanceType: "m5.2xlarge", + VCPU: 8, + MemoryMb: 32768, + GPU: 0, + Architecture: "amd64", + }, + "m5.4xlarge": { + InstanceType: "m5.4xlarge", + VCPU: 16, + MemoryMb: 65536, + GPU: 0, + Architecture: "amd64", + }, + "m5.8xlarge": { + InstanceType: "m5.8xlarge", + VCPU: 32, + MemoryMb: 131072, + GPU: 0, + Architecture: "amd64", + }, + "m5.large": { + InstanceType: "m5.large", + VCPU: 2, + MemoryMb: 8192, + GPU: 0, + Architecture: "amd64", + }, + "m5.metal": { + InstanceType: "m5.metal", + VCPU: 96, + MemoryMb: 393216, + GPU: 0, + Architecture: "amd64", + }, + "m5.xlarge": { + InstanceType: "m5.xlarge", + VCPU: 4, + MemoryMb: 16384, + GPU: 0, + Architecture: "amd64", + }, + 
"m5a.12xlarge": { + InstanceType: "m5a.12xlarge", + VCPU: 48, + MemoryMb: 196608, + GPU: 0, + Architecture: "amd64", + }, + "m5a.16xlarge": { + InstanceType: "m5a.16xlarge", + VCPU: 64, + MemoryMb: 262144, + GPU: 0, + Architecture: "amd64", + }, + "m5a.24xlarge": { + InstanceType: "m5a.24xlarge", + VCPU: 96, + MemoryMb: 393216, + GPU: 0, + Architecture: "amd64", + }, + "m5a.2xlarge": { + InstanceType: "m5a.2xlarge", + VCPU: 8, + MemoryMb: 32768, + GPU: 0, + Architecture: "amd64", + }, + "m5a.4xlarge": { + InstanceType: "m5a.4xlarge", + VCPU: 16, + MemoryMb: 65536, + GPU: 0, + Architecture: "amd64", + }, + "m5a.8xlarge": { + InstanceType: "m5a.8xlarge", + VCPU: 32, + MemoryMb: 131072, + GPU: 0, + Architecture: "amd64", + }, + "m5a.large": { + InstanceType: "m5a.large", + VCPU: 2, + MemoryMb: 8192, + GPU: 0, + Architecture: "amd64", + }, + "m5a.xlarge": { + InstanceType: "m5a.xlarge", + VCPU: 4, + MemoryMb: 16384, + GPU: 0, + Architecture: "amd64", + }, + "m5ad.12xlarge": { + InstanceType: "m5ad.12xlarge", + VCPU: 48, + MemoryMb: 196608, + GPU: 0, + Architecture: "amd64", + }, + "m5ad.16xlarge": { + InstanceType: "m5ad.16xlarge", + VCPU: 64, + MemoryMb: 262144, + GPU: 0, + Architecture: "amd64", + }, + "m5ad.24xlarge": { + InstanceType: "m5ad.24xlarge", + VCPU: 96, + MemoryMb: 393216, + GPU: 0, + Architecture: "amd64", + }, + "m5ad.2xlarge": { + InstanceType: "m5ad.2xlarge", + VCPU: 8, + MemoryMb: 32768, + GPU: 0, + Architecture: "amd64", + }, + "m5ad.4xlarge": { + InstanceType: "m5ad.4xlarge", + VCPU: 16, + MemoryMb: 65536, + GPU: 0, + Architecture: "amd64", + }, + "m5ad.8xlarge": { + InstanceType: "m5ad.8xlarge", + VCPU: 32, + MemoryMb: 131072, + GPU: 0, + Architecture: "amd64", + }, + "m5ad.large": { + InstanceType: "m5ad.large", + VCPU: 2, + MemoryMb: 8192, + GPU: 0, + Architecture: "amd64", + }, + "m5ad.xlarge": { + InstanceType: "m5ad.xlarge", + VCPU: 4, + MemoryMb: 16384, + GPU: 0, + Architecture: "amd64", + }, + "m5d.12xlarge": { + InstanceType: 
"m5d.12xlarge", + VCPU: 48, + MemoryMb: 196608, + GPU: 0, + Architecture: "amd64", + }, + "m5d.16xlarge": { + InstanceType: "m5d.16xlarge", + VCPU: 64, + MemoryMb: 262144, + GPU: 0, + Architecture: "amd64", + }, + "m5d.24xlarge": { + InstanceType: "m5d.24xlarge", + VCPU: 96, + MemoryMb: 393216, + GPU: 0, + Architecture: "amd64", + }, + "m5d.2xlarge": { + InstanceType: "m5d.2xlarge", + VCPU: 8, + MemoryMb: 32768, + GPU: 0, + Architecture: "amd64", + }, + "m5d.4xlarge": { + InstanceType: "m5d.4xlarge", + VCPU: 16, + MemoryMb: 65536, + GPU: 0, + Architecture: "amd64", + }, + "m5d.8xlarge": { + InstanceType: "m5d.8xlarge", + VCPU: 32, + MemoryMb: 131072, + GPU: 0, + Architecture: "amd64", + }, + "m5d.large": { + InstanceType: "m5d.large", + VCPU: 2, + MemoryMb: 8192, + GPU: 0, + Architecture: "amd64", + }, + "m5d.metal": { + InstanceType: "m5d.metal", + VCPU: 96, + MemoryMb: 393216, + GPU: 0, + Architecture: "amd64", + }, + "m5d.xlarge": { + InstanceType: "m5d.xlarge", + VCPU: 4, + MemoryMb: 16384, + GPU: 0, + Architecture: "amd64", + }, + "m5dn.12xlarge": { + InstanceType: "m5dn.12xlarge", + VCPU: 48, + MemoryMb: 196608, + GPU: 0, + Architecture: "amd64", + }, + "m5dn.16xlarge": { + InstanceType: "m5dn.16xlarge", + VCPU: 64, + MemoryMb: 262144, + GPU: 0, + Architecture: "amd64", + }, + "m5dn.24xlarge": { + InstanceType: "m5dn.24xlarge", + VCPU: 96, + MemoryMb: 393216, + GPU: 0, + Architecture: "amd64", + }, + "m5dn.2xlarge": { + InstanceType: "m5dn.2xlarge", + VCPU: 8, + MemoryMb: 32768, + GPU: 0, + Architecture: "amd64", + }, + "m5dn.4xlarge": { + InstanceType: "m5dn.4xlarge", + VCPU: 16, + MemoryMb: 65536, + GPU: 0, + Architecture: "amd64", + }, + "m5dn.8xlarge": { + InstanceType: "m5dn.8xlarge", + VCPU: 32, + MemoryMb: 131072, + GPU: 0, + Architecture: "amd64", + }, + "m5dn.large": { + InstanceType: "m5dn.large", + VCPU: 2, + MemoryMb: 8192, + GPU: 0, + Architecture: "amd64", + }, + "m5dn.metal": { + InstanceType: "m5dn.metal", + VCPU: 96, + MemoryMb: 393216, + 
GPU: 0, + Architecture: "amd64", + }, + "m5dn.xlarge": { + InstanceType: "m5dn.xlarge", + VCPU: 4, + MemoryMb: 16384, + GPU: 0, + Architecture: "amd64", + }, + "m5n.12xlarge": { + InstanceType: "m5n.12xlarge", + VCPU: 48, + MemoryMb: 196608, + GPU: 0, + Architecture: "amd64", + }, + "m5n.16xlarge": { + InstanceType: "m5n.16xlarge", + VCPU: 64, + MemoryMb: 262144, + GPU: 0, + Architecture: "amd64", + }, + "m5n.24xlarge": { + InstanceType: "m5n.24xlarge", + VCPU: 96, + MemoryMb: 393216, + GPU: 0, + Architecture: "amd64", + }, + "m5n.2xlarge": { + InstanceType: "m5n.2xlarge", + VCPU: 8, + MemoryMb: 32768, + GPU: 0, + Architecture: "amd64", + }, + "m5n.4xlarge": { + InstanceType: "m5n.4xlarge", + VCPU: 16, + MemoryMb: 65536, + GPU: 0, + Architecture: "amd64", + }, + "m5n.8xlarge": { + InstanceType: "m5n.8xlarge", + VCPU: 32, + MemoryMb: 131072, + GPU: 0, + Architecture: "amd64", + }, + "m5n.large": { + InstanceType: "m5n.large", + VCPU: 2, + MemoryMb: 8192, + GPU: 0, + Architecture: "amd64", + }, + "m5n.metal": { + InstanceType: "m5n.metal", + VCPU: 96, + MemoryMb: 393216, + GPU: 0, + Architecture: "amd64", + }, + "m5n.xlarge": { + InstanceType: "m5n.xlarge", + VCPU: 4, + MemoryMb: 16384, + GPU: 0, + Architecture: "amd64", + }, + "m5zn.12xlarge": { + InstanceType: "m5zn.12xlarge", + VCPU: 48, + MemoryMb: 196608, + GPU: 0, + Architecture: "amd64", + }, + "m5zn.2xlarge": { + InstanceType: "m5zn.2xlarge", + VCPU: 8, + MemoryMb: 32768, + GPU: 0, + Architecture: "amd64", + }, + "m5zn.3xlarge": { + InstanceType: "m5zn.3xlarge", + VCPU: 12, + MemoryMb: 49152, + GPU: 0, + Architecture: "amd64", + }, + "m5zn.6xlarge": { + InstanceType: "m5zn.6xlarge", + VCPU: 24, + MemoryMb: 98304, + GPU: 0, + Architecture: "amd64", + }, + "m5zn.large": { + InstanceType: "m5zn.large", + VCPU: 2, + MemoryMb: 8192, + GPU: 0, + Architecture: "amd64", + }, + "m5zn.metal": { + InstanceType: "m5zn.metal", + VCPU: 48, + MemoryMb: 196608, + GPU: 0, + Architecture: "amd64", + }, + "m5zn.xlarge": { + 
InstanceType: "m5zn.xlarge", + VCPU: 4, + MemoryMb: 16384, + GPU: 0, + Architecture: "amd64", + }, + "m6a.12xlarge": { + InstanceType: "m6a.12xlarge", + VCPU: 48, + MemoryMb: 196608, + GPU: 0, + Architecture: "amd64", + }, + "m6a.16xlarge": { + InstanceType: "m6a.16xlarge", + VCPU: 64, + MemoryMb: 262144, + GPU: 0, + Architecture: "amd64", + }, + "m6a.24xlarge": { + InstanceType: "m6a.24xlarge", + VCPU: 96, + MemoryMb: 393216, + GPU: 0, + Architecture: "amd64", + }, + "m6a.2xlarge": { + InstanceType: "m6a.2xlarge", + VCPU: 8, + MemoryMb: 32768, + GPU: 0, + Architecture: "amd64", + }, + "m6a.32xlarge": { + InstanceType: "m6a.32xlarge", + VCPU: 128, + MemoryMb: 524288, + GPU: 0, + Architecture: "amd64", + }, + "m6a.48xlarge": { + InstanceType: "m6a.48xlarge", + VCPU: 192, + MemoryMb: 786432, + GPU: 0, + Architecture: "amd64", + }, + "m6a.4xlarge": { + InstanceType: "m6a.4xlarge", + VCPU: 16, + MemoryMb: 65536, + GPU: 0, + Architecture: "amd64", + }, + "m6a.8xlarge": { + InstanceType: "m6a.8xlarge", + VCPU: 32, + MemoryMb: 131072, + GPU: 0, + Architecture: "amd64", + }, + "m6a.large": { + InstanceType: "m6a.large", + VCPU: 2, + MemoryMb: 8192, + GPU: 0, + Architecture: "amd64", + }, + "m6a.metal": { + InstanceType: "m6a.metal", + VCPU: 192, + MemoryMb: 786432, + GPU: 0, + Architecture: "amd64", + }, + "m6a.xlarge": { + InstanceType: "m6a.xlarge", + VCPU: 4, + MemoryMb: 16384, + GPU: 0, + Architecture: "amd64", + }, + "m6g.12xlarge": { + InstanceType: "m6g.12xlarge", + VCPU: 48, + MemoryMb: 196608, + GPU: 0, + Architecture: "arm64", + }, + "m6g.16xlarge": { + InstanceType: "m6g.16xlarge", + VCPU: 64, + MemoryMb: 262144, + GPU: 0, + Architecture: "arm64", + }, + "m6g.2xlarge": { + InstanceType: "m6g.2xlarge", + VCPU: 8, + MemoryMb: 32768, + GPU: 0, + Architecture: "arm64", + }, + "m6g.4xlarge": { + InstanceType: "m6g.4xlarge", + VCPU: 16, + MemoryMb: 65536, + GPU: 0, + Architecture: "arm64", + }, + "m6g.8xlarge": { + InstanceType: "m6g.8xlarge", + VCPU: 32, + MemoryMb: 
131072, + GPU: 0, + Architecture: "arm64", + }, + "m6g.large": { + InstanceType: "m6g.large", + VCPU: 2, + MemoryMb: 8192, + GPU: 0, + Architecture: "arm64", + }, + "m6g.medium": { + InstanceType: "m6g.medium", + VCPU: 1, + MemoryMb: 4096, + GPU: 0, + Architecture: "arm64", + }, + "m6g.metal": { + InstanceType: "m6g.metal", + VCPU: 64, + MemoryMb: 262144, + GPU: 0, + Architecture: "arm64", + }, + "m6g.xlarge": { + InstanceType: "m6g.xlarge", + VCPU: 4, + MemoryMb: 16384, + GPU: 0, + Architecture: "arm64", + }, + "m6gd.12xlarge": { + InstanceType: "m6gd.12xlarge", + VCPU: 48, + MemoryMb: 196608, + GPU: 0, + Architecture: "arm64", + }, + "m6gd.16xlarge": { + InstanceType: "m6gd.16xlarge", + VCPU: 64, + MemoryMb: 262144, + GPU: 0, + Architecture: "arm64", + }, + "m6gd.2xlarge": { + InstanceType: "m6gd.2xlarge", + VCPU: 8, + MemoryMb: 32768, + GPU: 0, + Architecture: "arm64", + }, + "m6gd.4xlarge": { + InstanceType: "m6gd.4xlarge", + VCPU: 16, + MemoryMb: 65536, + GPU: 0, + Architecture: "arm64", + }, + "m6gd.8xlarge": { + InstanceType: "m6gd.8xlarge", + VCPU: 32, + MemoryMb: 131072, + GPU: 0, + Architecture: "arm64", + }, + "m6gd.large": { + InstanceType: "m6gd.large", + VCPU: 2, + MemoryMb: 8192, + GPU: 0, + Architecture: "arm64", + }, + "m6gd.medium": { + InstanceType: "m6gd.medium", + VCPU: 1, + MemoryMb: 4096, + GPU: 0, + Architecture: "arm64", + }, + "m6gd.metal": { + InstanceType: "m6gd.metal", + VCPU: 64, + MemoryMb: 262144, + GPU: 0, + Architecture: "arm64", + }, + "m6gd.xlarge": { + InstanceType: "m6gd.xlarge", + VCPU: 4, + MemoryMb: 16384, + GPU: 0, + Architecture: "arm64", + }, + "m6i.12xlarge": { + InstanceType: "m6i.12xlarge", + VCPU: 48, + MemoryMb: 196608, + GPU: 0, + Architecture: "amd64", + }, + "m6i.16xlarge": { + InstanceType: "m6i.16xlarge", + VCPU: 64, + MemoryMb: 262144, + GPU: 0, + Architecture: "amd64", + }, + "m6i.24xlarge": { + InstanceType: "m6i.24xlarge", + VCPU: 96, + MemoryMb: 393216, + GPU: 0, + Architecture: "amd64", + }, + 
"m6i.2xlarge": { + InstanceType: "m6i.2xlarge", + VCPU: 8, + MemoryMb: 32768, + GPU: 0, + Architecture: "amd64", + }, + "m6i.32xlarge": { + InstanceType: "m6i.32xlarge", + VCPU: 128, + MemoryMb: 524288, + GPU: 0, + Architecture: "amd64", + }, + "m6i.4xlarge": { + InstanceType: "m6i.4xlarge", + VCPU: 16, + MemoryMb: 65536, + GPU: 0, + Architecture: "amd64", + }, + "m6i.8xlarge": { + InstanceType: "m6i.8xlarge", + VCPU: 32, + MemoryMb: 131072, + GPU: 0, + Architecture: "amd64", + }, + "m6i.large": { + InstanceType: "m6i.large", + VCPU: 2, + MemoryMb: 8192, + GPU: 0, + Architecture: "amd64", + }, + "m6i.metal": { + InstanceType: "m6i.metal", + VCPU: 128, + MemoryMb: 524288, + GPU: 0, + Architecture: "amd64", + }, + "m6i.xlarge": { + InstanceType: "m6i.xlarge", + VCPU: 4, + MemoryMb: 16384, + GPU: 0, + Architecture: "amd64", + }, + "m6id.12xlarge": { + InstanceType: "m6id.12xlarge", + VCPU: 48, + MemoryMb: 196608, + GPU: 0, + Architecture: "amd64", + }, + "m6id.16xlarge": { + InstanceType: "m6id.16xlarge", + VCPU: 64, + MemoryMb: 262144, + GPU: 0, + Architecture: "amd64", + }, + "m6id.24xlarge": { + InstanceType: "m6id.24xlarge", + VCPU: 96, + MemoryMb: 393216, + GPU: 0, + Architecture: "amd64", + }, + "m6id.2xlarge": { + InstanceType: "m6id.2xlarge", + VCPU: 8, + MemoryMb: 32768, + GPU: 0, + Architecture: "amd64", + }, + "m6id.32xlarge": { + InstanceType: "m6id.32xlarge", + VCPU: 128, + MemoryMb: 524288, + GPU: 0, + Architecture: "amd64", + }, + "m6id.4xlarge": { + InstanceType: "m6id.4xlarge", + VCPU: 16, + MemoryMb: 65536, + GPU: 0, + Architecture: "amd64", + }, + "m6id.8xlarge": { + InstanceType: "m6id.8xlarge", + VCPU: 32, + MemoryMb: 131072, + GPU: 0, + Architecture: "amd64", + }, + "m6id.large": { + InstanceType: "m6id.large", + VCPU: 2, + MemoryMb: 8192, + GPU: 0, + Architecture: "amd64", + }, + "m6id.metal": { + InstanceType: "m6id.metal", + VCPU: 128, + MemoryMb: 524288, + GPU: 0, + Architecture: "amd64", + }, + "m6id.xlarge": { + InstanceType: "m6id.xlarge", 
+ VCPU: 4, + MemoryMb: 16384, + GPU: 0, + Architecture: "amd64", + }, + "m6idn.12xlarge": { + InstanceType: "m6idn.12xlarge", + VCPU: 48, + MemoryMb: 196608, + GPU: 0, + Architecture: "amd64", + }, + "m6idn.16xlarge": { + InstanceType: "m6idn.16xlarge", + VCPU: 64, + MemoryMb: 262144, + GPU: 0, + Architecture: "amd64", + }, + "m6idn.24xlarge": { + InstanceType: "m6idn.24xlarge", + VCPU: 96, + MemoryMb: 393216, + GPU: 0, + Architecture: "amd64", + }, + "m6idn.2xlarge": { + InstanceType: "m6idn.2xlarge", + VCPU: 8, + MemoryMb: 32768, + GPU: 0, + Architecture: "amd64", + }, + "m6idn.32xlarge": { + InstanceType: "m6idn.32xlarge", + VCPU: 128, + MemoryMb: 524288, + GPU: 0, + Architecture: "amd64", + }, + "m6idn.4xlarge": { + InstanceType: "m6idn.4xlarge", + VCPU: 16, + MemoryMb: 65536, + GPU: 0, + Architecture: "amd64", + }, + "m6idn.8xlarge": { + InstanceType: "m6idn.8xlarge", + VCPU: 32, + MemoryMb: 131072, + GPU: 0, + Architecture: "amd64", + }, + "m6idn.large": { + InstanceType: "m6idn.large", + VCPU: 2, + MemoryMb: 8192, + GPU: 0, + Architecture: "amd64", + }, + "m6idn.xlarge": { + InstanceType: "m6idn.xlarge", + VCPU: 4, + MemoryMb: 16384, + GPU: 0, + Architecture: "amd64", + }, + "m6in.12xlarge": { + InstanceType: "m6in.12xlarge", + VCPU: 48, + MemoryMb: 196608, + GPU: 0, + Architecture: "amd64", + }, + "m6in.16xlarge": { + InstanceType: "m6in.16xlarge", + VCPU: 64, + MemoryMb: 262144, + GPU: 0, + Architecture: "amd64", + }, + "m6in.24xlarge": { + InstanceType: "m6in.24xlarge", + VCPU: 96, + MemoryMb: 393216, + GPU: 0, + Architecture: "amd64", + }, + "m6in.2xlarge": { + InstanceType: "m6in.2xlarge", + VCPU: 8, + MemoryMb: 32768, + GPU: 0, + Architecture: "amd64", + }, + "m6in.32xlarge": { + InstanceType: "m6in.32xlarge", + VCPU: 128, + MemoryMb: 524288, + GPU: 0, + Architecture: "amd64", + }, + "m6in.4xlarge": { + InstanceType: "m6in.4xlarge", + VCPU: 16, + MemoryMb: 65536, + GPU: 0, + Architecture: "amd64", + }, + "m6in.8xlarge": { + InstanceType: 
"m6in.8xlarge", + VCPU: 32, + MemoryMb: 131072, + GPU: 0, + Architecture: "amd64", + }, + "m6in.large": { + InstanceType: "m6in.large", + VCPU: 2, + MemoryMb: 8192, + GPU: 0, + Architecture: "amd64", + }, + "m6in.xlarge": { + InstanceType: "m6in.xlarge", + VCPU: 4, + MemoryMb: 16384, + GPU: 0, + Architecture: "amd64", + }, + "m7g.12xlarge": { + InstanceType: "m7g.12xlarge", + VCPU: 48, + MemoryMb: 196608, + GPU: 0, + Architecture: "arm64", + }, + "m7g.16xlarge": { + InstanceType: "m7g.16xlarge", + VCPU: 64, + MemoryMb: 262144, + GPU: 0, + Architecture: "arm64", + }, + "m7g.2xlarge": { + InstanceType: "m7g.2xlarge", + VCPU: 8, + MemoryMb: 32768, + GPU: 0, + Architecture: "arm64", + }, + "m7g.4xlarge": { + InstanceType: "m7g.4xlarge", + VCPU: 16, + MemoryMb: 65536, + GPU: 0, + Architecture: "arm64", + }, + "m7g.8xlarge": { + InstanceType: "m7g.8xlarge", + VCPU: 32, + MemoryMb: 131072, + GPU: 0, + Architecture: "arm64", + }, + "m7g.large": { + InstanceType: "m7g.large", + VCPU: 2, + MemoryMb: 8192, + GPU: 0, + Architecture: "arm64", + }, + "m7g.medium": { + InstanceType: "m7g.medium", + VCPU: 1, + MemoryMb: 4096, + GPU: 0, + Architecture: "arm64", + }, + "m7g.metal": { + InstanceType: "m7g.metal", + VCPU: 64, + MemoryMb: 262144, + GPU: 0, + Architecture: "arm64", + }, + "m7g.xlarge": { + InstanceType: "m7g.xlarge", + VCPU: 4, + MemoryMb: 16384, + GPU: 0, + Architecture: "arm64", + }, + "mac1.metal": { + InstanceType: "mac1.metal", + VCPU: 12, + MemoryMb: 32768, + GPU: 0, + Architecture: "amd64", + }, + "mac2.metal": { + InstanceType: "mac2.metal", + VCPU: 8, + MemoryMb: 16384, + GPU: 0, + Architecture: "amd64", + }, + "p2.16xlarge": { + InstanceType: "p2.16xlarge", + VCPU: 64, + MemoryMb: 749568, + GPU: 16, + Architecture: "amd64", + GPUMemory: 15842934784, + MPSContext: 32, + }, + "p2.8xlarge": { + InstanceType: "p2.8xlarge", + VCPU: 32, + MemoryMb: 499712, + GPU: 8, + Architecture: "amd64", + GPUMemory: 15842934784, + MPSContext: 32, + }, + "p2.xlarge": { + 
InstanceType: "p2.xlarge", + VCPU: 4, + MemoryMb: 62464, + GPU: 1, + Architecture: "amd64", + GPUMemory: 15842934784, + MPSContext: 32, + }, + "p3.16xlarge": { + InstanceType: "p3.16xlarge", + VCPU: 64, + MemoryMb: 499712, + GPU: 8, + Architecture: "amd64", + GPUMemory: 15842934784, + MPSContext: 32, + }, + "p3.2xlarge": { + InstanceType: "p3.2xlarge", + VCPU: 8, + MemoryMb: 62464, + GPU: 1, + Architecture: "amd64", + GPUMemory: 15842934784, + MPSContext: 32, + }, + "p3.8xlarge": { + InstanceType: "p3.8xlarge", + VCPU: 32, + MemoryMb: 249856, + GPU: 4, + Architecture: "amd64", + GPUMemory: 15842934784, + MPSContext: 32, + }, + "p3dn.24xlarge": { + InstanceType: "p3dn.24xlarge", + VCPU: 96, + MemoryMb: 786432, + GPU: 8, + Architecture: "amd64", + GPUMemory: 15842934784, + MPSContext: 32, + }, + "p4d.24xlarge": { + InstanceType: "p4d.24xlarge", + VCPU: 96, + MemoryMb: 1179648, + GPU: 8, + Architecture: "amd64", + GPUMemory: 15842934784, + MPSContext: 32, + }, + "p4de.24xlarge": { + InstanceType: "p4de.24xlarge", + VCPU: 96, + MemoryMb: 1179648, + GPU: 8, + Architecture: "amd64", + GPUMemory: 15842934784, + MPSContext: 32, + }, + "p5.48xlarge": { + InstanceType: "p5.48xlarge", + VCPU: 192, + MemoryMb: 2048000, + GPU: 8, + Architecture: "amd64", + GPUMemory: 15842934784, + MPSContext: 32, + }, + "r3.2xlarge": { + InstanceType: "r3.2xlarge", + VCPU: 8, + MemoryMb: 62464, + GPU: 0, + Architecture: "amd64", + }, + "r3.4xlarge": { + InstanceType: "r3.4xlarge", + VCPU: 16, + MemoryMb: 124928, + GPU: 0, + Architecture: "amd64", + }, + "r3.8xlarge": { + InstanceType: "r3.8xlarge", + VCPU: 32, + MemoryMb: 249856, + GPU: 0, + Architecture: "amd64", + }, + "r3.large": { + InstanceType: "r3.large", + VCPU: 2, + MemoryMb: 15360, + GPU: 0, + Architecture: "amd64", + }, + "r3.xlarge": { + InstanceType: "r3.xlarge", + VCPU: 4, + MemoryMb: 31232, + GPU: 0, + Architecture: "amd64", + }, + "r4.16xlarge": { + InstanceType: "r4.16xlarge", + VCPU: 64, + MemoryMb: 499712, + GPU: 0, + 
Architecture: "amd64", + }, + "r4.2xlarge": { + InstanceType: "r4.2xlarge", + VCPU: 8, + MemoryMb: 62464, + GPU: 0, + Architecture: "amd64", + }, + "r4.4xlarge": { + InstanceType: "r4.4xlarge", + VCPU: 16, + MemoryMb: 124928, + GPU: 0, + Architecture: "amd64", + }, + "r4.8xlarge": { + InstanceType: "r4.8xlarge", + VCPU: 32, + MemoryMb: 249856, + GPU: 0, + Architecture: "amd64", + }, + "r4.large": { + InstanceType: "r4.large", + VCPU: 2, + MemoryMb: 15616, + GPU: 0, + Architecture: "amd64", + }, + "r4.xlarge": { + InstanceType: "r4.xlarge", + VCPU: 4, + MemoryMb: 31232, + GPU: 0, + Architecture: "amd64", + }, + "r5.12xlarge": { + InstanceType: "r5.12xlarge", + VCPU: 48, + MemoryMb: 393216, + GPU: 0, + Architecture: "amd64", + }, + "r5.16xlarge": { + InstanceType: "r5.16xlarge", + VCPU: 64, + MemoryMb: 524288, + GPU: 0, + Architecture: "amd64", + }, + "r5.24xlarge": { + InstanceType: "r5.24xlarge", + VCPU: 96, + MemoryMb: 786432, + GPU: 0, + Architecture: "amd64", + }, + "r5.2xlarge": { + InstanceType: "r5.2xlarge", + VCPU: 8, + MemoryMb: 65536, + GPU: 0, + Architecture: "amd64", + }, + "r5.4xlarge": { + InstanceType: "r5.4xlarge", + VCPU: 16, + MemoryMb: 131072, + GPU: 0, + Architecture: "amd64", + }, + "r5.8xlarge": { + InstanceType: "r5.8xlarge", + VCPU: 32, + MemoryMb: 262144, + GPU: 0, + Architecture: "amd64", + }, + "r5.large": { + InstanceType: "r5.large", + VCPU: 2, + MemoryMb: 16384, + GPU: 0, + Architecture: "amd64", + }, + "r5.metal": { + InstanceType: "r5.metal", + VCPU: 96, + MemoryMb: 786432, + GPU: 0, + Architecture: "amd64", + }, + "r5.xlarge": { + InstanceType: "r5.xlarge", + VCPU: 4, + MemoryMb: 32768, + GPU: 0, + Architecture: "amd64", + }, + "r5a.12xlarge": { + InstanceType: "r5a.12xlarge", + VCPU: 48, + MemoryMb: 393216, + GPU: 0, + Architecture: "amd64", + }, + "r5a.16xlarge": { + InstanceType: "r5a.16xlarge", + VCPU: 64, + MemoryMb: 524288, + GPU: 0, + Architecture: "amd64", + }, + "r5a.24xlarge": { + InstanceType: "r5a.24xlarge", + VCPU: 96, + 
MemoryMb: 786432, + GPU: 0, + Architecture: "amd64", + }, + "r5a.2xlarge": { + InstanceType: "r5a.2xlarge", + VCPU: 8, + MemoryMb: 65536, + GPU: 0, + Architecture: "amd64", + }, + "r5a.4xlarge": { + InstanceType: "r5a.4xlarge", + VCPU: 16, + MemoryMb: 131072, + GPU: 0, + Architecture: "amd64", + }, + "r5a.8xlarge": { + InstanceType: "r5a.8xlarge", + VCPU: 32, + MemoryMb: 262144, + GPU: 0, + Architecture: "amd64", + }, + "r5a.large": { + InstanceType: "r5a.large", + VCPU: 2, + MemoryMb: 16384, + GPU: 0, + Architecture: "amd64", + }, + "r5a.xlarge": { + InstanceType: "r5a.xlarge", + VCPU: 4, + MemoryMb: 32768, + GPU: 0, + Architecture: "amd64", + }, + "r5ad.12xlarge": { + InstanceType: "r5ad.12xlarge", + VCPU: 48, + MemoryMb: 393216, + GPU: 0, + Architecture: "amd64", + }, + "r5ad.16xlarge": { + InstanceType: "r5ad.16xlarge", + VCPU: 64, + MemoryMb: 524288, + GPU: 0, + Architecture: "amd64", + }, + "r5ad.24xlarge": { + InstanceType: "r5ad.24xlarge", + VCPU: 96, + MemoryMb: 786432, + GPU: 0, + Architecture: "amd64", + }, + "r5ad.2xlarge": { + InstanceType: "r5ad.2xlarge", + VCPU: 8, + MemoryMb: 65536, + GPU: 0, + Architecture: "amd64", + }, + "r5ad.4xlarge": { + InstanceType: "r5ad.4xlarge", + VCPU: 16, + MemoryMb: 131072, + GPU: 0, + Architecture: "amd64", + }, + "r5ad.8xlarge": { + InstanceType: "r5ad.8xlarge", + VCPU: 32, + MemoryMb: 262144, + GPU: 0, + Architecture: "amd64", + }, + "r5ad.large": { + InstanceType: "r5ad.large", + VCPU: 2, + MemoryMb: 16384, + GPU: 0, + Architecture: "amd64", + }, + "r5ad.xlarge": { + InstanceType: "r5ad.xlarge", + VCPU: 4, + MemoryMb: 32768, + GPU: 0, + Architecture: "amd64", + }, + "r5b.12xlarge": { + InstanceType: "r5b.12xlarge", + VCPU: 48, + MemoryMb: 393216, + GPU: 0, + Architecture: "amd64", + }, + "r5b.16xlarge": { + InstanceType: "r5b.16xlarge", + VCPU: 64, + MemoryMb: 524288, + GPU: 0, + Architecture: "amd64", + }, + "r5b.24xlarge": { + InstanceType: "r5b.24xlarge", + VCPU: 96, + MemoryMb: 786432, + GPU: 0, + Architecture: 
"amd64", + }, + "r5b.2xlarge": { + InstanceType: "r5b.2xlarge", + VCPU: 8, + MemoryMb: 65536, + GPU: 0, + Architecture: "amd64", + }, + "r5b.4xlarge": { + InstanceType: "r5b.4xlarge", + VCPU: 16, + MemoryMb: 131072, + GPU: 0, + Architecture: "amd64", + }, + "r5b.8xlarge": { + InstanceType: "r5b.8xlarge", + VCPU: 32, + MemoryMb: 262144, + GPU: 0, + Architecture: "amd64", + }, + "r5b.large": { + InstanceType: "r5b.large", + VCPU: 2, + MemoryMb: 16384, + GPU: 0, + Architecture: "amd64", + }, + "r5b.metal": { + InstanceType: "r5b.metal", + VCPU: 96, + MemoryMb: 786432, + GPU: 0, + Architecture: "amd64", + }, + "r5b.xlarge": { + InstanceType: "r5b.xlarge", + VCPU: 4, + MemoryMb: 32768, + GPU: 0, + Architecture: "amd64", + }, + "r5d.12xlarge": { + InstanceType: "r5d.12xlarge", + VCPU: 48, + MemoryMb: 393216, + GPU: 0, + Architecture: "amd64", + }, + "r5d.16xlarge": { + InstanceType: "r5d.16xlarge", + VCPU: 64, + MemoryMb: 524288, + GPU: 0, + Architecture: "amd64", + }, + "r5d.24xlarge": { + InstanceType: "r5d.24xlarge", + VCPU: 96, + MemoryMb: 786432, + GPU: 0, + Architecture: "amd64", + }, + "r5d.2xlarge": { + InstanceType: "r5d.2xlarge", + VCPU: 8, + MemoryMb: 65536, + GPU: 0, + Architecture: "amd64", + }, + "r5d.4xlarge": { + InstanceType: "r5d.4xlarge", + VCPU: 16, + MemoryMb: 131072, + GPU: 0, + Architecture: "amd64", + }, + "r5d.8xlarge": { + InstanceType: "r5d.8xlarge", + VCPU: 32, + MemoryMb: 262144, + GPU: 0, + Architecture: "amd64", + }, + "r5d.large": { + InstanceType: "r5d.large", + VCPU: 2, + MemoryMb: 16384, + GPU: 0, + Architecture: "amd64", + }, + "r5d.metal": { + InstanceType: "r5d.metal", + VCPU: 96, + MemoryMb: 786432, + GPU: 0, + Architecture: "amd64", + }, + "r5d.xlarge": { + InstanceType: "r5d.xlarge", + VCPU: 4, + MemoryMb: 32768, + GPU: 0, + Architecture: "amd64", + }, + "r5dn.12xlarge": { + InstanceType: "r5dn.12xlarge", + VCPU: 48, + MemoryMb: 393216, + GPU: 0, + Architecture: "amd64", + }, + "r5dn.16xlarge": { + InstanceType: "r5dn.16xlarge", + 
VCPU: 64, + MemoryMb: 524288, + GPU: 0, + Architecture: "amd64", + }, + "r5dn.24xlarge": { + InstanceType: "r5dn.24xlarge", + VCPU: 96, + MemoryMb: 786432, + GPU: 0, + Architecture: "amd64", + }, + "r5dn.2xlarge": { + InstanceType: "r5dn.2xlarge", + VCPU: 8, + MemoryMb: 65536, + GPU: 0, + Architecture: "amd64", + }, + "r5dn.4xlarge": { + InstanceType: "r5dn.4xlarge", + VCPU: 16, + MemoryMb: 131072, + GPU: 0, + Architecture: "amd64", + }, + "r5dn.8xlarge": { + InstanceType: "r5dn.8xlarge", + VCPU: 32, + MemoryMb: 262144, + GPU: 0, + Architecture: "amd64", + }, + "r5dn.large": { + InstanceType: "r5dn.large", + VCPU: 2, + MemoryMb: 16384, + GPU: 0, + Architecture: "amd64", + }, + "r5dn.metal": { + InstanceType: "r5dn.metal", + VCPU: 96, + MemoryMb: 786432, + GPU: 0, + Architecture: "amd64", + }, + "r5dn.xlarge": { + InstanceType: "r5dn.xlarge", + VCPU: 4, + MemoryMb: 32768, + GPU: 0, + Architecture: "amd64", + }, + "r5n.12xlarge": { + InstanceType: "r5n.12xlarge", + VCPU: 48, + MemoryMb: 393216, + GPU: 0, + Architecture: "amd64", + }, + "r5n.16xlarge": { + InstanceType: "r5n.16xlarge", + VCPU: 64, + MemoryMb: 524288, + GPU: 0, + Architecture: "amd64", + }, + "r5n.24xlarge": { + InstanceType: "r5n.24xlarge", + VCPU: 96, + MemoryMb: 786432, + GPU: 0, + Architecture: "amd64", + }, + "r5n.2xlarge": { + InstanceType: "r5n.2xlarge", + VCPU: 8, + MemoryMb: 65536, + GPU: 0, + Architecture: "amd64", + }, + "r5n.4xlarge": { + InstanceType: "r5n.4xlarge", + VCPU: 16, + MemoryMb: 131072, + GPU: 0, + Architecture: "amd64", + }, + "r5n.8xlarge": { + InstanceType: "r5n.8xlarge", + VCPU: 32, + MemoryMb: 262144, + GPU: 0, + Architecture: "amd64", + }, + "r5n.large": { + InstanceType: "r5n.large", + VCPU: 2, + MemoryMb: 16384, + GPU: 0, + Architecture: "amd64", + }, + "r5n.metal": { + InstanceType: "r5n.metal", + VCPU: 96, + MemoryMb: 786432, + GPU: 0, + Architecture: "amd64", + }, + "r5n.xlarge": { + InstanceType: "r5n.xlarge", + VCPU: 4, + MemoryMb: 32768, + GPU: 0, + Architecture: 
"amd64", + }, + "r6a.12xlarge": { + InstanceType: "r6a.12xlarge", + VCPU: 48, + MemoryMb: 393216, + GPU: 0, + Architecture: "amd64", + }, + "r6a.16xlarge": { + InstanceType: "r6a.16xlarge", + VCPU: 64, + MemoryMb: 524288, + GPU: 0, + Architecture: "amd64", + }, + "r6a.24xlarge": { + InstanceType: "r6a.24xlarge", + VCPU: 96, + MemoryMb: 786432, + GPU: 0, + Architecture: "amd64", + }, + "r6a.2xlarge": { + InstanceType: "r6a.2xlarge", + VCPU: 8, + MemoryMb: 65536, + GPU: 0, + Architecture: "amd64", + }, + "r6a.32xlarge": { + InstanceType: "r6a.32xlarge", + VCPU: 128, + MemoryMb: 1048576, + GPU: 0, + Architecture: "amd64", + }, + "r6a.48xlarge": { + InstanceType: "r6a.48xlarge", + VCPU: 192, + MemoryMb: 1572864, + GPU: 0, + Architecture: "amd64", + }, + "r6a.4xlarge": { + InstanceType: "r6a.4xlarge", + VCPU: 16, + MemoryMb: 131072, + GPU: 0, + Architecture: "amd64", + }, + "r6a.8xlarge": { + InstanceType: "r6a.8xlarge", + VCPU: 32, + MemoryMb: 262144, + GPU: 0, + Architecture: "amd64", + }, + "r6a.large": { + InstanceType: "r6a.large", + VCPU: 2, + MemoryMb: 16384, + GPU: 0, + Architecture: "amd64", + }, + "r6a.metal": { + InstanceType: "r6a.metal", + VCPU: 192, + MemoryMb: 1572864, + GPU: 0, + Architecture: "amd64", + }, + "r6a.xlarge": { + InstanceType: "r6a.xlarge", + VCPU: 4, + MemoryMb: 32768, + GPU: 0, + Architecture: "amd64", + }, + "r6g.12xlarge": { + InstanceType: "r6g.12xlarge", + VCPU: 48, + MemoryMb: 393216, + GPU: 0, + Architecture: "arm64", + }, + "r6g.16xlarge": { + InstanceType: "r6g.16xlarge", + VCPU: 64, + MemoryMb: 524288, + GPU: 0, + Architecture: "arm64", + }, + "r6g.2xlarge": { + InstanceType: "r6g.2xlarge", + VCPU: 8, + MemoryMb: 65536, + GPU: 0, + Architecture: "arm64", + }, + "r6g.4xlarge": { + InstanceType: "r6g.4xlarge", + VCPU: 16, + MemoryMb: 131072, + GPU: 0, + Architecture: "arm64", + }, + "r6g.8xlarge": { + InstanceType: "r6g.8xlarge", + VCPU: 32, + MemoryMb: 262144, + GPU: 0, + Architecture: "arm64", + }, + "r6g.large": { + 
InstanceType: "r6g.large", + VCPU: 2, + MemoryMb: 16384, + GPU: 0, + Architecture: "arm64", + }, + "r6g.medium": { + InstanceType: "r6g.medium", + VCPU: 1, + MemoryMb: 8192, + GPU: 0, + Architecture: "arm64", + }, + "r6g.metal": { + InstanceType: "r6g.metal", + VCPU: 64, + MemoryMb: 524288, + GPU: 0, + Architecture: "arm64", + }, + "r6g.xlarge": { + InstanceType: "r6g.xlarge", + VCPU: 4, + MemoryMb: 32768, + GPU: 0, + Architecture: "arm64", + }, + "r6gd.12xlarge": { + InstanceType: "r6gd.12xlarge", + VCPU: 48, + MemoryMb: 393216, + GPU: 0, + Architecture: "arm64", + }, + "r6gd.16xlarge": { + InstanceType: "r6gd.16xlarge", + VCPU: 64, + MemoryMb: 524288, + GPU: 0, + Architecture: "arm64", + }, + "r6gd.2xlarge": { + InstanceType: "r6gd.2xlarge", + VCPU: 8, + MemoryMb: 65536, + GPU: 0, + Architecture: "arm64", + }, + "r6gd.4xlarge": { + InstanceType: "r6gd.4xlarge", + VCPU: 16, + MemoryMb: 131072, + GPU: 0, + Architecture: "arm64", + }, + "r6gd.8xlarge": { + InstanceType: "r6gd.8xlarge", + VCPU: 32, + MemoryMb: 262144, + GPU: 0, + Architecture: "arm64", + }, + "r6gd.large": { + InstanceType: "r6gd.large", + VCPU: 2, + MemoryMb: 16384, + GPU: 0, + Architecture: "arm64", + }, + "r6gd.medium": { + InstanceType: "r6gd.medium", + VCPU: 1, + MemoryMb: 8192, + GPU: 0, + Architecture: "arm64", + }, + "r6gd.metal": { + InstanceType: "r6gd.metal", + VCPU: 64, + MemoryMb: 524288, + GPU: 0, + Architecture: "arm64", + }, + "r6gd.xlarge": { + InstanceType: "r6gd.xlarge", + VCPU: 4, + MemoryMb: 32768, + GPU: 0, + Architecture: "arm64", + }, + "r6i.12xlarge": { + InstanceType: "r6i.12xlarge", + VCPU: 48, + MemoryMb: 393216, + GPU: 0, + Architecture: "amd64", + }, + "r6i.16xlarge": { + InstanceType: "r6i.16xlarge", + VCPU: 64, + MemoryMb: 524288, + GPU: 0, + Architecture: "amd64", + }, + "r6i.24xlarge": { + InstanceType: "r6i.24xlarge", + VCPU: 96, + MemoryMb: 786432, + GPU: 0, + Architecture: "amd64", + }, + "r6i.2xlarge": { + InstanceType: "r6i.2xlarge", + VCPU: 8, + MemoryMb: 
65536, + GPU: 0, + Architecture: "amd64", + }, + "r6i.32xlarge": { + InstanceType: "r6i.32xlarge", + VCPU: 128, + MemoryMb: 1048576, + GPU: 0, + Architecture: "amd64", + }, + "r6i.4xlarge": { + InstanceType: "r6i.4xlarge", + VCPU: 16, + MemoryMb: 131072, + GPU: 0, + Architecture: "amd64", + }, + "r6i.8xlarge": { + InstanceType: "r6i.8xlarge", + VCPU: 32, + MemoryMb: 262144, + GPU: 0, + Architecture: "amd64", + }, + "r6i.large": { + InstanceType: "r6i.large", + VCPU: 2, + MemoryMb: 16384, + GPU: 0, + Architecture: "amd64", + }, + "r6i.metal": { + InstanceType: "r6i.metal", + VCPU: 128, + MemoryMb: 1048576, + GPU: 0, + Architecture: "amd64", + }, + "r6i.xlarge": { + InstanceType: "r6i.xlarge", + VCPU: 4, + MemoryMb: 32768, + GPU: 0, + Architecture: "amd64", + }, + "r6id.12xlarge": { + InstanceType: "r6id.12xlarge", + VCPU: 48, + MemoryMb: 393216, + GPU: 0, + Architecture: "amd64", + }, + "r6id.16xlarge": { + InstanceType: "r6id.16xlarge", + VCPU: 64, + MemoryMb: 524288, + GPU: 0, + Architecture: "amd64", + }, + "r6id.24xlarge": { + InstanceType: "r6id.24xlarge", + VCPU: 96, + MemoryMb: 786432, + GPU: 0, + Architecture: "amd64", + }, + "r6id.2xlarge": { + InstanceType: "r6id.2xlarge", + VCPU: 8, + MemoryMb: 65536, + GPU: 0, + Architecture: "amd64", + }, + "r6id.32xlarge": { + InstanceType: "r6id.32xlarge", + VCPU: 128, + MemoryMb: 1048576, + GPU: 0, + Architecture: "amd64", + }, + "r6id.4xlarge": { + InstanceType: "r6id.4xlarge", + VCPU: 16, + MemoryMb: 131072, + GPU: 0, + Architecture: "amd64", + }, + "r6id.8xlarge": { + InstanceType: "r6id.8xlarge", + VCPU: 32, + MemoryMb: 262144, + GPU: 0, + Architecture: "amd64", + }, + "r6id.large": { + InstanceType: "r6id.large", + VCPU: 2, + MemoryMb: 16384, + GPU: 0, + Architecture: "amd64", + }, + "r6id.metal": { + InstanceType: "r6id.metal", + VCPU: 128, + MemoryMb: 1048576, + GPU: 0, + Architecture: "amd64", + }, + "r6id.xlarge": { + InstanceType: "r6id.xlarge", + VCPU: 4, + MemoryMb: 32768, + GPU: 0, + Architecture: 
"amd64", + }, + "r6idn.12xlarge": { + InstanceType: "r6idn.12xlarge", + VCPU: 48, + MemoryMb: 393216, + GPU: 0, + Architecture: "amd64", + }, + "r6idn.16xlarge": { + InstanceType: "r6idn.16xlarge", + VCPU: 64, + MemoryMb: 524288, + GPU: 0, + Architecture: "amd64", + }, + "r6idn.24xlarge": { + InstanceType: "r6idn.24xlarge", + VCPU: 96, + MemoryMb: 786432, + GPU: 0, + Architecture: "amd64", + }, + "r6idn.2xlarge": { + InstanceType: "r6idn.2xlarge", + VCPU: 8, + MemoryMb: 65536, + GPU: 0, + Architecture: "amd64", + }, + "r6idn.32xlarge": { + InstanceType: "r6idn.32xlarge", + VCPU: 128, + MemoryMb: 1048576, + GPU: 0, + Architecture: "amd64", + }, + "r6idn.4xlarge": { + InstanceType: "r6idn.4xlarge", + VCPU: 16, + MemoryMb: 131072, + GPU: 0, + Architecture: "amd64", + }, + "r6idn.8xlarge": { + InstanceType: "r6idn.8xlarge", + VCPU: 32, + MemoryMb: 262144, + GPU: 0, + Architecture: "amd64", + }, + "r6idn.large": { + InstanceType: "r6idn.large", + VCPU: 2, + MemoryMb: 16384, + GPU: 0, + Architecture: "amd64", + }, + "r6idn.xlarge": { + InstanceType: "r6idn.xlarge", + VCPU: 4, + MemoryMb: 32768, + GPU: 0, + Architecture: "amd64", + }, + "r6in.12xlarge": { + InstanceType: "r6in.12xlarge", + VCPU: 48, + MemoryMb: 393216, + GPU: 0, + Architecture: "amd64", + }, + "r6in.16xlarge": { + InstanceType: "r6in.16xlarge", + VCPU: 64, + MemoryMb: 524288, + GPU: 0, + Architecture: "amd64", + }, + "r6in.24xlarge": { + InstanceType: "r6in.24xlarge", + VCPU: 96, + MemoryMb: 786432, + GPU: 0, + Architecture: "amd64", + }, + "r6in.2xlarge": { + InstanceType: "r6in.2xlarge", + VCPU: 8, + MemoryMb: 65536, + GPU: 0, + Architecture: "amd64", + }, + "r6in.32xlarge": { + InstanceType: "r6in.32xlarge", + VCPU: 128, + MemoryMb: 1048576, + GPU: 0, + Architecture: "amd64", + }, + "r6in.4xlarge": { + InstanceType: "r6in.4xlarge", + VCPU: 16, + MemoryMb: 131072, + GPU: 0, + Architecture: "amd64", + }, + "r6in.8xlarge": { + InstanceType: "r6in.8xlarge", + VCPU: 32, + MemoryMb: 262144, + GPU: 0, + 
Architecture: "amd64", + }, + "r6in.large": { + InstanceType: "r6in.large", + VCPU: 2, + MemoryMb: 16384, + GPU: 0, + Architecture: "amd64", + }, + "r6in.xlarge": { + InstanceType: "r6in.xlarge", + VCPU: 4, + MemoryMb: 32768, + GPU: 0, + Architecture: "amd64", + }, + "r7g.12xlarge": { + InstanceType: "r7g.12xlarge", + VCPU: 48, + MemoryMb: 393216, + GPU: 0, + Architecture: "arm64", + }, + "r7g.16xlarge": { + InstanceType: "r7g.16xlarge", + VCPU: 64, + MemoryMb: 524288, + GPU: 0, + Architecture: "arm64", + }, + "r7g.2xlarge": { + InstanceType: "r7g.2xlarge", + VCPU: 8, + MemoryMb: 65536, + GPU: 0, + Architecture: "arm64", + }, + "r7g.4xlarge": { + InstanceType: "r7g.4xlarge", + VCPU: 16, + MemoryMb: 131072, + GPU: 0, + Architecture: "arm64", + }, + "r7g.8xlarge": { + InstanceType: "r7g.8xlarge", + VCPU: 32, + MemoryMb: 262144, + GPU: 0, + Architecture: "arm64", + }, + "r7g.large": { + InstanceType: "r7g.large", + VCPU: 2, + MemoryMb: 16384, + GPU: 0, + Architecture: "arm64", + }, + "r7g.medium": { + InstanceType: "r7g.medium", + VCPU: 1, + MemoryMb: 8192, + GPU: 0, + Architecture: "arm64", + }, + "r7g.metal": { + InstanceType: "r7g.metal", + VCPU: 64, + MemoryMb: 524288, + GPU: 0, + Architecture: "arm64", + }, + "r7g.xlarge": { + InstanceType: "r7g.xlarge", + VCPU: 4, + MemoryMb: 32768, + GPU: 0, + Architecture: "arm64", + }, + "t1.micro": { + InstanceType: "t1.micro", + VCPU: 1, + MemoryMb: 627, + GPU: 0, + Architecture: "amd64", + }, + "t2.2xlarge": { + InstanceType: "t2.2xlarge", + VCPU: 8, + MemoryMb: 32768, + GPU: 0, + Architecture: "amd64", + }, + "t2.large": { + InstanceType: "t2.large", + VCPU: 2, + MemoryMb: 8192, + GPU: 0, + Architecture: "amd64", + }, + "t2.medium": { + InstanceType: "t2.medium", + VCPU: 2, + MemoryMb: 4096, + GPU: 0, + Architecture: "amd64", + }, + "t2.micro": { + InstanceType: "t2.micro", + VCPU: 1, + MemoryMb: 1024, + GPU: 0, + Architecture: "amd64", + }, + "t2.nano": { + InstanceType: "t2.nano", + VCPU: 1, + MemoryMb: 512, + GPU: 0, + 
Architecture: "amd64", + }, + "t2.small": { + InstanceType: "t2.small", + VCPU: 1, + MemoryMb: 2048, + GPU: 0, + Architecture: "amd64", + }, + "t2.xlarge": { + InstanceType: "t2.xlarge", + VCPU: 4, + MemoryMb: 16384, + GPU: 0, + Architecture: "amd64", + }, + "t3.2xlarge": { + InstanceType: "t3.2xlarge", + VCPU: 8, + MemoryMb: 32768, + GPU: 0, + Architecture: "amd64", + }, + "t3.large": { + InstanceType: "t3.large", + VCPU: 2, + MemoryMb: 8192, + GPU: 0, + Architecture: "amd64", + }, + "t3.medium": { + InstanceType: "t3.medium", + VCPU: 2, + MemoryMb: 4096, + GPU: 0, + Architecture: "amd64", + }, + "t3.micro": { + InstanceType: "t3.micro", + VCPU: 2, + MemoryMb: 1024, + GPU: 0, + Architecture: "amd64", + }, + "t3.nano": { + InstanceType: "t3.nano", + VCPU: 2, + MemoryMb: 512, + GPU: 0, + Architecture: "amd64", + }, + "t3.small": { + InstanceType: "t3.small", + VCPU: 2, + MemoryMb: 2048, + GPU: 0, + Architecture: "amd64", + }, + "t3.xlarge": { + InstanceType: "t3.xlarge", + VCPU: 4, + MemoryMb: 16384, + GPU: 0, + Architecture: "amd64", + }, + "t3a.2xlarge": { + InstanceType: "t3a.2xlarge", + VCPU: 8, + MemoryMb: 32768, + GPU: 0, + Architecture: "amd64", + }, + "t3a.large": { + InstanceType: "t3a.large", + VCPU: 2, + MemoryMb: 8192, + GPU: 0, + Architecture: "amd64", + }, + "t3a.medium": { + InstanceType: "t3a.medium", + VCPU: 2, + MemoryMb: 4096, + GPU: 0, + Architecture: "amd64", + }, + "t3a.micro": { + InstanceType: "t3a.micro", + VCPU: 2, + MemoryMb: 1024, + GPU: 0, + Architecture: "amd64", + }, + "t3a.nano": { + InstanceType: "t3a.nano", + VCPU: 2, + MemoryMb: 512, + GPU: 0, + Architecture: "amd64", + }, + "t3a.small": { + InstanceType: "t3a.small", + VCPU: 2, + MemoryMb: 2048, + GPU: 0, + Architecture: "amd64", + }, + "t3a.xlarge": { + InstanceType: "t3a.xlarge", + VCPU: 4, + MemoryMb: 16384, + GPU: 0, + Architecture: "amd64", + }, + "t4g.2xlarge": { + InstanceType: "t4g.2xlarge", + VCPU: 8, + MemoryMb: 32768, + GPU: 0, + Architecture: "arm64", + }, + 
"t4g.large": { + InstanceType: "t4g.large", + VCPU: 2, + MemoryMb: 8192, + GPU: 0, + Architecture: "arm64", + }, + "t4g.medium": { + InstanceType: "t4g.medium", + VCPU: 2, + MemoryMb: 4096, + GPU: 0, + Architecture: "arm64", + }, + "t4g.micro": { + InstanceType: "t4g.micro", + VCPU: 2, + MemoryMb: 1024, + GPU: 0, + Architecture: "arm64", + }, + "t4g.nano": { + InstanceType: "t4g.nano", + VCPU: 2, + MemoryMb: 512, + GPU: 0, + Architecture: "arm64", + }, + "t4g.small": { + InstanceType: "t4g.small", + VCPU: 2, + MemoryMb: 2048, + GPU: 0, + Architecture: "arm64", + }, + "t4g.xlarge": { + InstanceType: "t4g.xlarge", + VCPU: 4, + MemoryMb: 16384, + GPU: 0, + Architecture: "arm64", + }, + "trn1.2xlarge": { + InstanceType: "trn1.2xlarge", + VCPU: 8, + MemoryMb: 32768, + GPU: 0, + Architecture: "amd64", + }, + "trn1.32xlarge": { + InstanceType: "trn1.32xlarge", + VCPU: 128, + MemoryMb: 524288, + GPU: 0, + Architecture: "amd64", + }, + "u-12tb1.112xlarge": { + InstanceType: "u-12tb1.112xlarge", + VCPU: 448, + MemoryMb: 12582912, + GPU: 0, + Architecture: "amd64", + }, + "u-18tb1.112xlarge": { + InstanceType: "u-18tb1.112xlarge", + VCPU: 448, + MemoryMb: 18874368, + GPU: 0, + Architecture: "amd64", + }, + "u-24tb1.112xlarge": { + InstanceType: "u-24tb1.112xlarge", + VCPU: 448, + MemoryMb: 25165824, + GPU: 0, + Architecture: "amd64", + }, + "u-3tb1.56xlarge": { + InstanceType: "u-3tb1.56xlarge", + VCPU: 224, + MemoryMb: 3145728, + GPU: 0, + Architecture: "amd64", + }, + "u-6tb1.112xlarge": { + InstanceType: "u-6tb1.112xlarge", + VCPU: 448, + MemoryMb: 6291456, + GPU: 0, + Architecture: "amd64", + }, + "u-6tb1.56xlarge": { + InstanceType: "u-6tb1.56xlarge", + VCPU: 224, + MemoryMb: 6291456, + GPU: 0, + Architecture: "amd64", + }, + "u-9tb1.112xlarge": { + InstanceType: "u-9tb1.112xlarge", + VCPU: 448, + MemoryMb: 9437184, + GPU: 0, + Architecture: "amd64", + }, + "vt1.24xlarge": { + InstanceType: "vt1.24xlarge", + VCPU: 96, + MemoryMb: 196608, + GPU: 0, + Architecture: 
"amd64", + }, + "vt1.3xlarge": { + InstanceType: "vt1.3xlarge", + VCPU: 12, + MemoryMb: 24576, + GPU: 0, + Architecture: "amd64", + }, + "vt1.6xlarge": { + InstanceType: "vt1.6xlarge", + VCPU: 24, + MemoryMb: 49152, + GPU: 0, + Architecture: "amd64", + }, + "x1.16xlarge": { + InstanceType: "x1.16xlarge", + VCPU: 64, + MemoryMb: 999424, + GPU: 0, + Architecture: "amd64", + }, + "x1.32xlarge": { + InstanceType: "x1.32xlarge", + VCPU: 128, + MemoryMb: 1998848, + GPU: 0, + Architecture: "amd64", + }, + "x1e.16xlarge": { + InstanceType: "x1e.16xlarge", + VCPU: 64, + MemoryMb: 1998848, + GPU: 0, + Architecture: "amd64", + }, + "x1e.2xlarge": { + InstanceType: "x1e.2xlarge", + VCPU: 8, + MemoryMb: 249856, + GPU: 0, + Architecture: "amd64", + }, + "x1e.32xlarge": { + InstanceType: "x1e.32xlarge", + VCPU: 128, + MemoryMb: 3997696, + GPU: 0, + Architecture: "amd64", + }, + "x1e.4xlarge": { + InstanceType: "x1e.4xlarge", + VCPU: 16, + MemoryMb: 499712, + GPU: 0, + Architecture: "amd64", + }, + "x1e.8xlarge": { + InstanceType: "x1e.8xlarge", + VCPU: 32, + MemoryMb: 999424, + GPU: 0, + Architecture: "amd64", + }, + "x1e.xlarge": { + InstanceType: "x1e.xlarge", + VCPU: 4, + MemoryMb: 124928, + GPU: 0, + Architecture: "amd64", + }, + "x2gd.12xlarge": { + InstanceType: "x2gd.12xlarge", + VCPU: 48, + MemoryMb: 786432, + GPU: 0, + Architecture: "arm64", + }, + "x2gd.16xlarge": { + InstanceType: "x2gd.16xlarge", + VCPU: 64, + MemoryMb: 1048576, + GPU: 0, + Architecture: "arm64", + }, + "x2gd.2xlarge": { + InstanceType: "x2gd.2xlarge", + VCPU: 8, + MemoryMb: 131072, + GPU: 0, + Architecture: "arm64", + }, + "x2gd.4xlarge": { + InstanceType: "x2gd.4xlarge", + VCPU: 16, + MemoryMb: 262144, + GPU: 0, + Architecture: "arm64", + }, + "x2gd.8xlarge": { + InstanceType: "x2gd.8xlarge", + VCPU: 32, + MemoryMb: 524288, + GPU: 0, + Architecture: "arm64", + }, + "x2gd.large": { + InstanceType: "x2gd.large", + VCPU: 2, + MemoryMb: 32768, + GPU: 0, + Architecture: "arm64", + }, + "x2gd.medium": { + 
InstanceType: "x2gd.medium", + VCPU: 1, + MemoryMb: 16384, + GPU: 0, + Architecture: "arm64", + }, + "x2gd.metal": { + InstanceType: "x2gd.metal", + VCPU: 64, + MemoryMb: 1048576, + GPU: 0, + Architecture: "arm64", + }, + "x2gd.xlarge": { + InstanceType: "x2gd.xlarge", + VCPU: 4, + MemoryMb: 65536, + GPU: 0, + Architecture: "arm64", + }, + "x2idn.16xlarge": { + InstanceType: "x2idn.16xlarge", + VCPU: 64, + MemoryMb: 1048576, + GPU: 0, + Architecture: "amd64", + }, + "x2idn.24xlarge": { + InstanceType: "x2idn.24xlarge", + VCPU: 96, + MemoryMb: 1572864, + GPU: 0, + Architecture: "amd64", + }, + "x2idn.32xlarge": { + InstanceType: "x2idn.32xlarge", + VCPU: 128, + MemoryMb: 2097152, + GPU: 0, + Architecture: "amd64", + }, + "x2idn.metal": { + InstanceType: "x2idn.metal", + VCPU: 128, + MemoryMb: 2097152, + GPU: 0, + Architecture: "amd64", + }, + "x2iedn.16xlarge": { + InstanceType: "x2iedn.16xlarge", + VCPU: 64, + MemoryMb: 2097152, + GPU: 0, + Architecture: "amd64", + }, + "x2iedn.24xlarge": { + InstanceType: "x2iedn.24xlarge", + VCPU: 96, + MemoryMb: 3145728, + GPU: 0, + Architecture: "amd64", + }, + "x2iedn.2xlarge": { + InstanceType: "x2iedn.2xlarge", + VCPU: 8, + MemoryMb: 262144, + GPU: 0, + Architecture: "amd64", + }, + "x2iedn.32xlarge": { + InstanceType: "x2iedn.32xlarge", + VCPU: 128, + MemoryMb: 4194304, + GPU: 0, + Architecture: "amd64", + }, + "x2iedn.4xlarge": { + InstanceType: "x2iedn.4xlarge", + VCPU: 16, + MemoryMb: 524288, + GPU: 0, + Architecture: "amd64", + }, + "x2iedn.8xlarge": { + InstanceType: "x2iedn.8xlarge", + VCPU: 32, + MemoryMb: 1048576, + GPU: 0, + Architecture: "amd64", + }, + "x2iedn.metal": { + InstanceType: "x2iedn.metal", + VCPU: 128, + MemoryMb: 4194304, + GPU: 0, + Architecture: "amd64", + }, + "x2iedn.xlarge": { + InstanceType: "x2iedn.xlarge", + VCPU: 4, + MemoryMb: 131072, + GPU: 0, + Architecture: "amd64", + }, + "x2iezn.12xlarge": { + InstanceType: "x2iezn.12xlarge", + VCPU: 48, + MemoryMb: 1572864, + GPU: 0, + Architecture: 
"amd64", + }, + "x2iezn.2xlarge": { + InstanceType: "x2iezn.2xlarge", + VCPU: 8, + MemoryMb: 262144, + GPU: 0, + Architecture: "amd64", + }, + "x2iezn.4xlarge": { + InstanceType: "x2iezn.4xlarge", + VCPU: 16, + MemoryMb: 524288, + GPU: 0, + Architecture: "amd64", + }, + "x2iezn.6xlarge": { + InstanceType: "x2iezn.6xlarge", + VCPU: 24, + MemoryMb: 786432, + GPU: 0, + Architecture: "amd64", + }, + "x2iezn.8xlarge": { + InstanceType: "x2iezn.8xlarge", + VCPU: 32, + MemoryMb: 1048576, + GPU: 0, + Architecture: "amd64", + }, + "x2iezn.metal": { + InstanceType: "x2iezn.metal", + VCPU: 48, + MemoryMb: 1572864, + GPU: 0, + Architecture: "amd64", + }, + "z1d.12xlarge": { + InstanceType: "z1d.12xlarge", + VCPU: 48, + MemoryMb: 393216, + GPU: 0, + Architecture: "amd64", + }, + "z1d.2xlarge": { + InstanceType: "z1d.2xlarge", + VCPU: 8, + MemoryMb: 65536, + GPU: 0, + Architecture: "amd64", + }, + "z1d.3xlarge": { + InstanceType: "z1d.3xlarge", + VCPU: 12, + MemoryMb: 98304, + GPU: 0, + Architecture: "amd64", + }, + "z1d.6xlarge": { + InstanceType: "z1d.6xlarge", + VCPU: 24, + MemoryMb: 196608, + GPU: 0, + Architecture: "amd64", + }, + "z1d.large": { + InstanceType: "z1d.large", + VCPU: 2, + MemoryMb: 16384, + GPU: 0, + Architecture: "amd64", + }, + "z1d.metal": { + InstanceType: "z1d.metal", + VCPU: 48, + MemoryMb: 393216, + GPU: 0, + Architecture: "amd64", + }, + "z1d.xlarge": { + InstanceType: "z1d.xlarge", + VCPU: 4, + MemoryMb: 32768, + GPU: 0, + Architecture: "amd64", + }, +} diff --git a/cluster-autoscaler/cloudprovider/spotinst/spotinst_cloud_provider.go b/cluster-autoscaler/cloudprovider/spotinst/spotinst_cloud_provider.go new file mode 100644 index 000000000000..1b9441dcded0 --- /dev/null +++ b/cluster-autoscaler/cloudprovider/spotinst/spotinst_cloud_provider.go @@ -0,0 +1,159 @@ +/* +Copyright 2016 The Kubernetes Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spotinst + +import ( + "io" + apiv1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" + "k8s.io/autoscaler/cluster-autoscaler/config" + "k8s.io/autoscaler/cluster-autoscaler/utils/errors" + klog "k8s.io/klog/v2" + "os" +) + +const ( + // ProviderName is the provide name of spotinst + ProviderName = "spotinst" + + // GPULabel is the label added to nodes with GPU resource. + GPULabel = "k8s.amazonaws.com/accelerator" +) + +// CloudProvider implements CloudProvider interface. +type CloudProvider struct { + manager *CloudManager + resourceLimiter *cloudprovider.ResourceLimiter +} + +var ( + availableGPUTypes = map[string]struct{}{ + "nvidia-tesla-k80": {}, + "nvidia-tesla-p100": {}, + "nvidia-tesla-v100": {}, + } +) + +// NewCloudProvider returns CloudProvider implementation for Spotinst. +func NewCloudProvider(manager *CloudManager, resourceLimiter *cloudprovider.ResourceLimiter) (*CloudProvider, error) { + klog.Info("Building Spotinst cloud provider") + cloud := &CloudProvider{ + manager: manager, + resourceLimiter: resourceLimiter, + } + + return cloud, nil +} + +// Name returns name of the cloud c. +func (c *CloudProvider) Name() string { + return ProviderName +} + +// NodeGroups returns all node groups configured for this cloud c. 
+func (c *CloudProvider) NodeGroups() []cloudprovider.NodeGroup { + out := make([]cloudprovider.NodeGroup, len(c.manager.groups)) + for i, group := range c.manager.groups { + out[i] = group + } + return out +} + +// NodeGroupForNode returns the node group for the given node. +func (c *CloudProvider) NodeGroupForNode(node *apiv1.Node) (cloudprovider.NodeGroup, error) { + instanceID, err := extractInstanceId(node.Spec.ProviderID) + if err != nil { + return nil, err + } + return c.manager.GetGroupForInstance(instanceID) +} + +// Pricing returns pricing model for this cloud provider or error if not available. +func (c *CloudProvider) Pricing() (cloudprovider.PricingModel, errors.AutoscalerError) { + return nil, cloudprovider.ErrNotImplemented +} + +// GetAvailableMachineTypes get all machine types that can be requested from the cloud provider. +// Implementation optional. +func (c *CloudProvider) GetAvailableMachineTypes() ([]string, error) { + return []string{}, nil +} + +// NewNodeGroup builds a theoretical node group based on the node definition provided. +func (c *CloudProvider) NewNodeGroup(machineType string, labels map[string]string, systemLabels map[string]string, + taints []apiv1.Taint, extraResources map[string]resource.Quantity) (cloudprovider.NodeGroup, error) { + return nil, cloudprovider.ErrNotImplemented +} + +// GetResourceLimiter returns struct containing limits (max, min) for resources (cores, memory etc.). +func (c *CloudProvider) GetResourceLimiter() (*cloudprovider.ResourceLimiter, error) { + return c.resourceLimiter, nil +} + +// Cleanup cleans up open resources before the cloud provider is destroyed, i.e. go routines etc. +func (c *CloudProvider) Cleanup() error { + return c.manager.Cleanup() +} + +// Refresh is called before every main loop and can be used to dynamically update cloud provider state. +// In particular the list of node groups returned by NodeGroups can change as a result of CloudProvider.Refresh(). 
+func (c *CloudProvider) Refresh() error { + return c.manager.Refresh() +} + +// GetInstanceID gets the instance ID for the specified node. +func (c *CloudProvider) GetInstanceID(node *apiv1.Node) string { + return node.Spec.ProviderID +} + +// GPULabel returns the label added to nodes with GPU resource. +func (c *CloudProvider) GPULabel() string { + return GPULabel +} + +// GetAvailableGPUTypes return all available GPU types cloud provider supports +func (c *CloudProvider) GetAvailableGPUTypes() map[string]struct{} { + return availableGPUTypes +} + +// BuildSpotinst return the spotinst provider +func BuildSpotinst(opts config.AutoscalingOptions, do cloudprovider.NodeGroupDiscoveryOptions, rl *cloudprovider.ResourceLimiter) cloudprovider.CloudProvider { + var config io.ReadCloser + if opts.CloudConfig != "" { + var err error + config, err = os.Open(opts.CloudConfig) + if err != nil { + klog.Fatalf("Couldn't open cloud provider configuration %s: %#v", opts.CloudConfig, err) + } + defer config.Close() + } + + manager, err := NewCloudManager(config, do) + if err != nil { + klog.Fatalf("Failed to create Spotinst manager: %v", err) + } + + provider, err := NewCloudProvider(manager, rl) + if err != nil { + klog.Fatalf("Failed to create Spotinst cloud provider: %v", err) + } + + return provider +} + +// HasInstance returns whether a given node has a corresponding instance in this cloud provider +func (c *CloudProvider) HasInstance(node *apiv1.Node) (bool, error) { + return true, nil +} \ No newline at end of file diff --git a/cluster-autoscaler/cloudprovider/spotinst/spotinst_cloud_provider_test.go b/cluster-autoscaler/cloudprovider/spotinst/spotinst_cloud_provider_test.go new file mode 100644 index 000000000000..7b944bca9b0c --- /dev/null +++ b/cluster-autoscaler/cloudprovider/spotinst/spotinst_cloud_provider_test.go @@ -0,0 +1,411 @@ +/* +Copyright 2016 The Kubernetes Authors. 
+Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spotinst + +import ( + "context" + "testing" + "time" + + "github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/aws" + azurev3 "github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/azure/v3" + "github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/gcp" + "github.com/spotinst/spotinst-sdk-go/spotinst" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + apiv1 "k8s.io/api/core/v1" + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" +) + +type groupServiceMock struct { + mock.Mock + providerAWS *awsServiceMock +} + +func (s *groupServiceMock) CloudProviderAWS() aws.Service { + return s.providerAWS +} + +func (s *groupServiceMock) CloudProviderGCP() gcp.Service { + return nil // not implemented +} + +func (s *groupServiceMock) CloudProviderAzureV3() azurev3.Service { + return nil // not implemented +} + +type awsServiceMock struct { + mock.Mock +} + +func (s *awsServiceMock) List(ctx context.Context, input *aws.ListGroupsInput) (*aws.ListGroupsOutput, error) { + return nil, nil +} + +func (s *awsServiceMock) Create(ctx context.Context, input *aws.CreateGroupInput) (*aws.CreateGroupOutput, error) { + return nil, nil +} + +func (s *awsServiceMock) DeploymentStatusECS(context.Context, *aws.DeploymentStatusInput) (*aws.RollGroupOutput, error) { + return nil, nil +} + +func (s *awsServiceMock) DeploymentStatus(context.Context, *aws.DeploymentStatusInput) (*aws.RollGroupOutput, 
error) { + return nil, nil +} + +func (s *awsServiceMock) RollECS(context.Context, *aws.RollECSGroupInput) (*aws.RollGroupOutput, error) { + return nil, nil +} + +func (s *awsServiceMock) Scale(context.Context, *aws.ScaleGroupInput) (*aws.ScaleGroupOutput, error) { + return nil, nil +} + +func (s *awsServiceMock) FinishBeanstalkMaintenance(context.Context, *aws.BeanstalkMaintenanceInput) (*aws.BeanstalkMaintenanceOutput, error) { + return nil, nil +} + +func (s *awsServiceMock) GetBeanstalkMaintenanceStatus(context.Context, *aws.BeanstalkMaintenanceInput) (*string, error) { + return nil, nil +} + +func (s *awsServiceMock) GetGroupEvents(context.Context, *aws.GetGroupEventsInput) (*aws.GetGroupEventsOutput, error) { + return nil, nil +} + +func (s *awsServiceMock) StartBeanstalkMaintenance(context.Context, *aws.BeanstalkMaintenanceInput) (*aws.BeanstalkMaintenanceOutput, error) { + return nil, nil +} + +func (s *awsServiceMock) Read(ctx context.Context, input *aws.ReadGroupInput) (*aws.ReadGroupOutput, error) { + out := &aws.ReadGroupOutput{ + Group: &aws.Group{ + Capacity: &aws.Capacity{ + Target: spotinst.Int(2), + }, + }, + } + return out, nil +} + +func (s *awsServiceMock) GetInstanceHealthiness(context.Context, *aws.GetInstanceHealthinessInput) (*aws.GetInstanceHealthinessOutput, error) { + return nil, nil +} + +func (s *awsServiceMock) ImportBeanstalkEnv(context.Context, *aws.ImportBeanstalkInput) (*aws.ImportBeanstalkOutput, error) { + return nil, nil +} + +func (s *awsServiceMock) StopDeployment(context.Context, *aws.StopDeploymentInput) (*aws.StopDeploymentOutput, error) { + return nil, nil +} + +func (s *awsServiceMock) Update(ctx context.Context, input *aws.UpdateGroupInput) (*aws.UpdateGroupOutput, error) { + args := s.Called(ctx, input) + return args.Get(0).(*aws.UpdateGroupOutput), nil +} + +func (s *awsServiceMock) Delete(ctx context.Context, input *aws.DeleteGroupInput) (*aws.DeleteGroupOutput, error) { + return nil, nil +} + +func (s 
*awsServiceMock) Status(ctx context.Context, input *aws.StatusGroupInput) (*aws.StatusGroupOutput, error) { + out := &aws.StatusGroupOutput{ + Instances: []*aws.Instance{ + { + ID: spotinst.String("test-instance-id"), + }, + { + ID: spotinst.String("second-test-instance-id"), + }, + }, + } + return out, nil +} + +func (s *awsServiceMock) Detach(ctx context.Context, input *aws.DetachGroupInput) (*aws.DetachGroupOutput, error) { + args := s.Called(ctx, input) + return args.Get(0).(*aws.DetachGroupOutput), nil +} + +func (s *awsServiceMock) Roll(ctx context.Context, input *aws.RollGroupInput) (*aws.RollGroupOutput, error) { + return nil, nil +} + +func (s *awsServiceMock) CreateSuspensions(ctx context.Context, input *aws.CreateSuspensionsInput) (*aws.CreateSuspensionsOutput, error) { + return nil, nil +} + +func (s *awsServiceMock) ListSuspensions(context.Context, *aws.ListSuspensionsInput) (*aws.ListSuspensionsOutput, error) { + return nil, nil +} +func (s *awsServiceMock) DeleteSuspensions(context.Context, *aws.DeleteSuspensionsInput) (*aws.DeleteSuspensionsOutput, error) { + return nil, nil +} + +func (s *awsServiceMock) ListStatefulInstances(context.Context, *aws.ListStatefulInstancesInput) (*aws.ListStatefulInstancesOutput, error) { + return nil, nil +} +func (s *awsServiceMock) PauseStatefulInstance(context.Context, *aws.PauseStatefulInstanceInput) (*aws.PauseStatefulInstanceOutput, error) { + return nil, nil +} +func (s *awsServiceMock) ResumeStatefulInstance(context.Context, *aws.ResumeStatefulInstanceInput) (*aws.ResumeStatefulInstanceOutput, error) { + return nil, nil +} +func (s *awsServiceMock) RecycleStatefulInstance(context.Context, *aws.RecycleStatefulInstanceInput) (*aws.RecycleStatefulInstanceOutput, error) { + return nil, nil +} +func (s *awsServiceMock) DeallocateStatefulInstance(context.Context, *aws.DeallocateStatefulInstanceInput) (*aws.DeallocateStatefulInstanceOutput, error) { + return nil, nil +} + +func testCloudManager(t *testing.T) 
*CloudManager { + return &CloudManager{ + groupService: &groupServiceMock{ + providerAWS: new(awsServiceMock), + }, + groups: make([]*Group, 0), + cache: make(map[string]*Group), + interruptCh: make(chan struct{}), + refreshInterval: time.Minute, + } +} + +func testCloudProvider(t *testing.T, m *CloudManager) *CloudProvider { + resourceLimiter := cloudprovider.NewResourceLimiter( + map[string]int64{cloudprovider.ResourceNameCores: 1, cloudprovider.ResourceNameMemory: 10000000}, + map[string]int64{cloudprovider.ResourceNameCores: 10, cloudprovider.ResourceNameMemory: 100000000}) + + cloud, err := NewCloudProvider(m, resourceLimiter) + assert.NoError(t, err) + return cloud +} + +func TestNewCloudProvider(t *testing.T) { + testCloudProvider(t, testCloudManager(t)) +} + +func TestAddNodeGroup(t *testing.T) { + provider := testCloudProvider(t, testCloudManager(t)) + err := provider.manager.addNodeGroup("bad spec") + assert.Error(t, err) + assert.Equal(t, len(provider.manager.groups), 0) + + err = provider.manager.addNodeGroup("1:5:sig-test") + assert.NoError(t, err) + assert.Equal(t, len(provider.manager.groups), 1) +} + +func TestName(t *testing.T) { + provider := testCloudProvider(t, testCloudManager(t)) + assert.Equal(t, provider.Name(), "spotinst") +} + +func TestNodeGroups(t *testing.T) { + provider := testCloudProvider(t, testCloudManager(t)) + assert.Equal(t, len(provider.NodeGroups()), 0) + err := provider.manager.addNodeGroup("1:5:sig-test") + assert.NoError(t, err) + assert.Equal(t, len(provider.NodeGroups()), 1) +} + +func TestNodeGroupForNode(t *testing.T) { + node := &apiv1.Node{ + Spec: apiv1.NodeSpec{ + ProviderID: "aws:///us-east-1a/test-instance-id", + }, + } + provider := testCloudProvider(t, testCloudManager(t)) + err := provider.manager.addNodeGroup("1:5:sig-test") + assert.NoError(t, err) + + provider.Refresh() + + group, err := provider.NodeGroupForNode(node) + + assert.NoError(t, err) + assert.Equal(t, group.Id(), "sig-test") + assert.Equal(t, 
group.MinSize(), 1) + assert.Equal(t, group.MaxSize(), 5) + + // test node in cluster that is not in a group managed by cluster autoscaler + nodeNotInGroup := &apiv1.Node{ + Spec: apiv1.NodeSpec{ + ProviderID: "aws:///us-east-1a/test-instance-id-not-in-group", + }, + } + + group, err = provider.NodeGroupForNode(nodeNotInGroup) + assert.NoError(t, err) + assert.Nil(t, group) +} + +func TestExtractInstanceId(t *testing.T) { + _, err := extractInstanceId("bad spec") + assert.Error(t, err) + + instanceID, err := extractInstanceId("aws:///us-east-1a/i-260942b3") + assert.NoError(t, err) + assert.Equal(t, instanceID, "i-260942b3") +} + +func TestMaxSize(t *testing.T) { + provider := testCloudProvider(t, testCloudManager(t)) + err := provider.manager.addNodeGroup("1:5:sig-test") + assert.NoError(t, err) + assert.Equal(t, len(provider.manager.groups), 1) + assert.Equal(t, provider.manager.groups[0].MaxSize(), 5) +} + +func TestMinSize(t *testing.T) { + provider := testCloudProvider(t, testCloudManager(t)) + err := provider.manager.addNodeGroup("1:5:sig-test") + assert.NoError(t, err) + assert.Equal(t, len(provider.manager.groups), 1) + assert.Equal(t, provider.manager.groups[0].MinSize(), 1) +} + +func TestTargetSize(t *testing.T) { + provider := testCloudProvider(t, testCloudManager(t)) + err := provider.manager.addNodeGroup("1:5:sig-test") + assert.NoError(t, err) + targetSize, err := provider.manager.groups[0].TargetSize() + assert.Equal(t, targetSize, 2) + assert.NoError(t, err) +} + +func TestIncreaseSize(t *testing.T) { + provider := testCloudProvider(t, testCloudManager(t)) + err := provider.manager.addNodeGroup("1:5:sig-test") + assert.NoError(t, err) + assert.Equal(t, len(provider.manager.groups), 1) + + cloud := provider.manager.groupService.CloudProviderAWS().(*awsServiceMock) + cloud.On("Update", context.Background(), &aws.UpdateGroupInput{ + Group: &aws.Group{ + ID: spotinst.String(provider.manager.groups[0].Id()), + Capacity: &aws.Capacity{ + Target: 
spotinst.Int(3), + Minimum: spotinst.Int(provider.manager.groups[0].minSize), + Maximum: spotinst.Int(provider.manager.groups[0].maxSize), + }, + }, + }).Return(&aws.UpdateGroupOutput{}) + + err = provider.manager.groups[0].IncreaseSize(1) + assert.NoError(t, err) + cloud.AssertExpectations(t) +} + +func TestBelongs(t *testing.T) { + provider := testCloudProvider(t, testCloudManager(t)) + err := provider.manager.addNodeGroup("1:5:sig-test") + assert.NoError(t, err) + + provider.Refresh() + + invalidNode := &apiv1.Node{ + Spec: apiv1.NodeSpec{ + ProviderID: "aws:///us-east-1a/invalid-instance-id", + }, + } + _, err = provider.manager.groups[0].Belongs(invalidNode) + assert.Error(t, err) + + validNode := &apiv1.Node{ + Spec: apiv1.NodeSpec{ + ProviderID: "aws:///us-east-1a/test-instance-id", + }, + } + belongs, err := provider.manager.groups[0].Belongs(validNode) + assert.Equal(t, belongs, true) + assert.NoError(t, err) +} + +func TestDeleteNodes(t *testing.T) { + provider := testCloudProvider(t, testCloudManager(t)) + err := provider.manager.addNodeGroup("1:5:sig-test") + assert.NoError(t, err) + assert.Equal(t, len(provider.manager.groups), 1) + + provider.Refresh() + + cloud := provider.manager.groupService.CloudProviderAWS().(*awsServiceMock) + cloud.On("Detach", context.Background(), &aws.DetachGroupInput{ + GroupID: spotinst.String(provider.manager.groups[0].Id()), + InstanceIDs: []string{"test-instance-id"}, + ShouldDecrementTargetCapacity: spotinst.Bool(true), + ShouldTerminateInstances: spotinst.Bool(true), + }).Return(&aws.DetachGroupOutput{}) + + node := &apiv1.Node{ + Spec: apiv1.NodeSpec{ + ProviderID: "aws:///us-east-1a/test-instance-id", + }, + } + + err = provider.manager.groups[0].DeleteNodes([]*apiv1.Node{node}) + assert.NoError(t, err) + cloud.AssertExpectations(t) +} + +func TestId(t *testing.T) { + provider := testCloudProvider(t, testCloudManager(t)) + err := provider.manager.addNodeGroup("1:5:sig-test") + assert.NoError(t, err) + 
assert.Equal(t, len(provider.manager.groups), 1) + assert.Equal(t, provider.manager.groups[0].Id(), "sig-test") +} + +func TestDebug(t *testing.T) { + grp := Group{ + manager: testCloudManager(t), + minSize: 5, + maxSize: 55, + } + grp.groupID = "sig-test" + assert.Equal(t, grp.Debug(), "sig-test (5:55)") +} + +func TestBuildGroup(t *testing.T) { + provider := testCloudProvider(t, testCloudManager(t)) + + _, err := provider.manager.buildGroupFromSpec("a") + assert.Error(t, err) + _, err = provider.manager.buildGroupFromSpec("a:b:c") + assert.Error(t, err) + _, err = provider.manager.buildGroupFromSpec("1:") + assert.Error(t, err) + _, err = provider.manager.buildGroupFromSpec("1:2:") + assert.Error(t, err) + + grp, err := provider.manager.buildGroupFromSpec("111:222:sig-test") + assert.NoError(t, err) + assert.Equal(t, 111, grp.MinSize()) + assert.Equal(t, 222, grp.MaxSize()) + assert.Equal(t, "sig-test", grp.Id()) +} + +func TestGetResourceLimiter(t *testing.T) { + provider := testCloudProvider(t, testCloudManager(t)) + _, err := provider.GetResourceLimiter() + assert.NoError(t, err) +} + +func TestCleanup(t *testing.T) { + provider := testCloudProvider(t, testCloudManager(t)) + err := provider.Cleanup() + assert.NoError(t, err) +} \ No newline at end of file diff --git a/cluster-autoscaler/cloudprovider/spotinst/spotinst_manager.go b/cluster-autoscaler/cloudprovider/spotinst/spotinst_manager.go new file mode 100644 index 000000000000..c5505f360ef5 --- /dev/null +++ b/cluster-autoscaler/cloudprovider/spotinst/spotinst_manager.go @@ -0,0 +1,503 @@ +/* +Copyright 2016 The Kubernetes Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
You may obtain a copy of the License at
 http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package spotinst

import (
	"context"
	"errors"
	"fmt"
	"io"
	"math/rand"
	"strconv"
	"strings"
	"sync"
	"time"

	"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
	"k8s.io/klog/v2"

	"k8s.io/autoscaler/cluster-autoscaler/utils/mpscontext"

	"github.com/spotinst/spotinst-sdk-go/service/elastigroup"
	"github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/aws"
	"github.com/spotinst/spotinst-sdk-go/spotinst"
	"github.com/spotinst/spotinst-sdk-go/spotinst/log"
	"github.com/spotinst/spotinst-sdk-go/spotinst/session"
	"github.com/spotinst/spotinst-sdk-go/spotinst/util/stringutil"
	gcfg "gopkg.in/gcfg.v1"
	apiv1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
	"k8s.io/autoscaler/cluster-autoscaler/config/dynamic"
	"k8s.io/autoscaler/cluster-autoscaler/utils/gpumemory"
	kubeletapis "k8s.io/kubelet/pkg/apis"
)

// CloudManager holds the config and client.
type CloudManager struct {
	groupService    elastigroup.Service // Spotinst Elastigroup API client
	groups          []*Group            // node groups registered via --nodes specs
	refreshedAt     time.Time           // last time the instance cache was rebuilt
	refreshInterval time.Duration       // minimum interval between cache rebuilds
	interruptCh     chan struct{}       // closed by Cleanup to stop the background refresh goroutine
	cacheMu         sync.Mutex          // guards groups, cache and refreshedAt
	cache           map[string]*Group   // k: InstanceID, v: Group
}

// CloudConfig holds the configuration parsed from the --cloud-config flag.
// All fields are required unless otherwise specified.
type CloudConfig struct {
	// Global currently carries no settings; present so gcfg can parse the file.
	Global struct{}
}

// NewCloudManager constructs manager object.
+func NewCloudManager(config io.Reader, discoveryOpts cloudprovider.NodeGroupDiscoveryOptions) (*CloudManager, error) { + klog.Info("Building Spotinst cloud manager") + + cfg, err := readCloudConfig(config) + if err != nil { + return nil, err + } + + svc, err := newService(cfg) + if err != nil { + return nil, err + } + + manager := &CloudManager{ + groupService: svc, + refreshInterval: time.Minute, + interruptCh: make(chan struct{}), + groups: make([]*Group, 0), + cache: make(map[string]*Group), + } + + if err := manager.addNodeGroups(discoveryOpts.NodeGroupSpecs); err != nil { + return nil, err + } + + go wait.Until(func() { + manager.cacheMu.Lock() + defer manager.cacheMu.Unlock() + + if err := manager.forceRefresh(); err != nil { + klog.Errorf("Error while refreshing cache: %v", err) + } + }, time.Hour, manager.interruptCh) + + return manager, nil +} + +// newService returns a new instance of Spotinst Service. +func newService(cloudConfig *CloudConfig) (elastigroup.Service, error) { + // Create a new config. + config := spotinst.DefaultConfig() + config.WithLogger(newStdLogger()) + config.WithUserAgent("Kubernetes-ClusterAutoscaler") + + // Create a new session. + sess := session.New(config) + + // Create a new service. + svc := elastigroup.New(sess) + + return svc, nil +} + +func newStdLogger() log.Logger { + return log.LoggerFunc(func(format string, args ...interface{}) { + klog.V(4).Infof(format, args...) + }) +} + +// readCloudConfig reads an instance of Config from config reader. 
+func readCloudConfig(config io.Reader) (*CloudConfig, error) { + var cfg CloudConfig + + if config != nil { + if err := gcfg.ReadInto(&cfg, config); err != nil { + return nil, fmt.Errorf("couldn't read Spotinst config: %v", err) + } + } + + return &cfg, nil +} + +func (mgr *CloudManager) addNodeGroups(specs []string) error { + klog.Info("Attempting to add node groups") + + for _, spec := range specs { + if err := mgr.addNodeGroup(spec); err != nil { + return fmt.Errorf("could not register group with spec %s: %v", spec, err) + } + } + + return nil +} + +func (mgr *CloudManager) addNodeGroup(spec string) error { + klog.Infof("Attempting to add node group: %s", spec) + + group, err := mgr.buildGroupFromSpec(spec) + if err != nil { + return fmt.Errorf("could not parse spec for node group: %v", err) + } + err = mgr.RegisterGroup(group) + if err != nil { + return fmt.Errorf("could not register the group(%s): %v", group.Id(), err) + } + + klog.Infof("Node group added: %s", group.groupID) + return nil +} + +func (mgr *CloudManager) buildGroupFromSpec(value string) (*Group, error) { + spec, err := dynamic.SpecFromString(value, true) + if err != nil { + return nil, fmt.Errorf("failed to parse node group spec: %v", err) + } + group := &Group{ + manager: mgr, + groupID: spec.Name, + minSize: spec.MinSize, + maxSize: spec.MaxSize, + } + return group, nil +} + +// RegisterGroup registers a resource group in Spotinst Manager. +func (mgr *CloudManager) RegisterGroup(grp *Group) error { + mgr.cacheMu.Lock() + defer mgr.cacheMu.Unlock() + + group, err := mgr.getResourceForGroup(grp.Id()) + if err != nil { + return err + } + grp.group = group + + mgr.groups = append(mgr.groups, grp) + return nil +} + +// GetGroupSize gets the current size of the group. 
+func (mgr *CloudManager) GetGroupSize(grp *Group) (int64, error) { + group, err := mgr.getResourceForGroup(grp.Id()) + if err != nil { + return -1, err + } + size := spotinst.IntValue(group.Capacity.Target) + return int64(size), nil +} + +// SetGroupSize sets the instances count in a Group by updating a +// predefined Spotinst stack parameter (specified by the user). +func (mgr *CloudManager) SetGroupSize(grp *Group, size int64) error { + in := &aws.UpdateGroupInput{ + Group: &aws.Group{ + ID: spotinst.String(grp.Id()), + Capacity: &aws.Capacity{ + Target: spotinst.Int(int(size)), + Minimum: spotinst.Int(grp.minSize), + Maximum: spotinst.Int(grp.maxSize), + }, + }, + } + _, err := mgr.groupService.CloudProviderAWS().Update(context.Background(), in) + if err != nil { + return err + } + return nil +} + +// GetGroupForInstance retrieves the resource group that contains +// a given instance. +func (mgr *CloudManager) GetGroupForInstance(instanceID string) (*Group, error) { + mgr.cacheMu.Lock() + defer mgr.cacheMu.Unlock() + + if group, ok := mgr.cache[instanceID]; ok { + return group, nil + } + + klog.V(8).Infof("Instance `%s` does not belong to any managed group", instanceID) + return nil, nil +} + +// DeleteInstances deletes the specified instances from the +// OpenStack resource group +func (mgr *CloudManager) DeleteInstances(instanceIDs []string) error { + if len(instanceIDs) == 0 { + return nil + } + commonGroup, err := mgr.GetGroupForInstance(instanceIDs[0]) + if err != nil { + return err + } + for _, instanceID := range instanceIDs { + instanceGroup, err := mgr.GetGroupForInstance(instanceID) + if err != nil { + return err + } + if instanceGroup.groupID != commonGroup.groupID { + return errors.New("connot delete instances which don't belong to the same group") + } + } + in := &aws.DetachGroupInput{ + GroupID: spotinst.String(commonGroup.groupID), + InstanceIDs: instanceIDs, + ShouldDecrementTargetCapacity: spotinst.Bool(true), + ShouldTerminateInstances: 
spotinst.Bool(true), + } + if _, err := mgr.groupService.CloudProviderAWS().Detach(context.Background(), in); err != nil { + return fmt.Errorf("failed to detach instances from group %s: %v", commonGroup.groupID, err) + } + return nil +} + +func (mgr *CloudManager) getResourceForGroup(groupID string) (*aws.Group, error) { + in := &aws.ReadGroupInput{ + GroupID: spotinst.String(groupID), + } + out, err := mgr.groupService.CloudProviderAWS().Read(context.Background(), in) + if err != nil { + return nil, err + } + if out.Group == nil { + return nil, fmt.Errorf("failed to get group %s", groupID) + } + return out.Group, nil +} + +// Cleanup cleans up open resources before the cloud provider is destroyed, i.e. go routines etc. +func (mgr *CloudManager) Cleanup() error { + close(mgr.interruptCh) + return nil +} + +// Refresh is called before every main loop and can be used to dynamically update cloud provider state. +// In particular the list of node groups returned by NodeGroups can change as a result of CloudProvider.Refresh(). 
+func (mgr *CloudManager) Refresh() error { + mgr.cacheMu.Lock() + defer mgr.cacheMu.Unlock() + + if mgr.refreshedAt.Add(mgr.refreshInterval).After(time.Now()) { + return nil + } + return mgr.forceRefresh() +} + +func (mgr *CloudManager) forceRefresh() error { + mgr.regenerateCache() + mgr.refreshedAt = time.Now() + klog.V(2).Infof("Refreshed, next refresh after %v", mgr.refreshedAt.Add(mgr.refreshInterval)) + return nil +} + +func (mgr *CloudManager) regenerateCache() { + mgr.cache = make(map[string]*Group) + for _, group := range mgr.groups { + klog.V(4).Infof("Regenerating resource group information for %s", group.groupID) + if err := mgr.refreshGroupNodes(group); err != nil { + klog.Warningf("Could not retrieve nodes for group %s: %v", group.groupID, err) + } + } +} + +func (mgr *CloudManager) refreshGroupNodes(grp *Group) error { + in := &aws.StatusGroupInput{ + GroupID: spotinst.String(grp.Id()), + } + status, err := mgr.groupService.CloudProviderAWS().Status(context.Background(), in) + if err != nil { + return err + } + for _, instance := range status.Instances { + if instance.ID != nil { + instanceID := spotinst.StringValue(instance.ID) + klog.V(8).Infof("Managing AWS instance with ID %s in group %s", instanceID, grp.Id()) + mgr.cache[instanceID] = grp + } + } + return nil +} + +type groupTemplate struct { + InstanceType *instanceType + Region string + Zone string + Tags []*aws.Tag +} + +func (mgr *CloudManager) inferInstanceType(instanceTypeName string) *instanceType { + ret := &instanceType{ + InstanceType: instanceTypeName, + VCPU: 1, + MemoryMb: 1024, // 1GB + GPU: 0, + } + size := 1 + if strings.HasSuffix(instanceTypeName, ".medium") || strings.HasSuffix(instanceTypeName, ".large") { + size = 1 + } else if strings.HasSuffix(instanceTypeName, ".xlarge") { + size = 2 + } else { + elems := strings.Split(instanceTypeName, ".") + if len(elems) > 1 { + nums := strings.Split(elems[1], "xlarge") + if len(nums) > 0 { + if num, err := strconv.Atoi(nums[0]); err 
== nil { + size = num * 2 + } + } + } + } + ret.VCPU = 2 * int64(size) + ret.MemoryMb = 1024 * 2 * ret.VCPU + if strings.HasPrefix(instanceTypeName, "g") || strings.HasPrefix(instanceTypeName, "p") { + ret.GPU = int64(size / 4) + if ret.GPU <= 0 { + ret.GPU = 1 + } + } + return ret +} + +func (mgr *CloudManager) buildGroupTemplate(groupID string) (*groupTemplate, error) { + klog.Infof("Building template for group %s", groupID) + + group, err := mgr.getResourceForGroup(groupID) + if err != nil { + return nil, err + } + + if len(group.Compute.AvailabilityZones) < 1 { + return nil, fmt.Errorf("unable to get first AvailabilityZone for %s", groupID) + } + + zone := spotinst.StringValue(group.Compute.AvailabilityZones[0].Name) + region := zone[0 : len(zone)-1] + + if len(group.Compute.AvailabilityZones) > 1 { + klog.Warningf("Found multiple availability zones, using %s", zone) + } + + instanceTypeName := spotinst.StringValue(group.Compute.InstanceTypes.OnDemand) + foundInstanceType := InstanceTypes[instanceTypeName] + if foundInstanceType == nil { + klog.Warningf("Unable to get node template info for instance type %s", instanceTypeName) + foundInstanceType = mgr.inferInstanceType(instanceTypeName) + } + + tmpl := &groupTemplate{ + InstanceType: foundInstanceType, + Region: region, + Zone: zone, + Tags: group.Compute.LaunchSpecification.Tags, + } + + return tmpl, nil +} + +func (mgr *CloudManager) buildNodeFromTemplate(group *Group, template *groupTemplate) (*apiv1.Node, error) { + klog.Infof("Building node from template of group %s", group.Id()) + + node := apiv1.Node{} + nodeName := fmt.Sprintf("%s-group-%d", group.groupID, rand.Int63()) + + node.ObjectMeta = metav1.ObjectMeta{ + Name: nodeName, + SelfLink: fmt.Sprintf("/api/v1/nodes/%s", nodeName), + Labels: map[string]string{}, + } + + node.Status = apiv1.NodeStatus{ + Capacity: apiv1.ResourceList{}, + } + + node.Status.Capacity[apiv1.ResourcePods] = *resource.NewQuantity(110, resource.DecimalSI) + 
node.Status.Capacity[apiv1.ResourceCPU] = *resource.NewQuantity(template.InstanceType.VCPU, resource.DecimalSI) + node.Status.Capacity[apiv1.ResourceMemory] = *resource.NewQuantity(template.InstanceType.MemoryMb*1024*1024, resource.DecimalSI) + node.Status.Capacity[gpu.ResourceNvidiaGPU] = *resource.NewQuantity(template.InstanceType.GPU, resource.DecimalSI) + node.Status.Capacity[gpumemory.ResourceVisenzeGPUMemory] = *resource.NewQuantity(template.InstanceType.GPUMemory, resource.DecimalSI) + node.Status.Capacity[mpscontext.ResourceVisenzeMPSContext] = *resource.NewQuantity(template.InstanceType.MPSContext, resource.DecimalSI) + node.Status.Allocatable = node.Status.Capacity + + // NodeLabels + node.Labels = cloudprovider.JoinStringMaps(node.Labels, extractLabelsFromGroup(template.Tags)) + + // GenericLabels + node.Labels = cloudprovider.JoinStringMaps(node.Labels, buildGenericLabels(template, nodeName)) + + node.Spec.Taints = extractTaintsFromGroup(template.Tags) + node.Status.Conditions = cloudprovider.BuildReadyConditions() + + klog.V(4).Infof("Node `%s` labels: %s", nodeName, stringutil.Stringify(node.Labels)) + klog.V(4).Infof("Node `%s` taints: %s", nodeName, stringutil.Stringify(node.Spec.Taints)) + + return &node, nil +} + +func buildGenericLabels(template *groupTemplate, nodeName string) map[string]string { + result := make(map[string]string) + + result[kubeletapis.LabelArch] = cloudprovider.DefaultArch + result[kubeletapis.LabelOS] = cloudprovider.DefaultOS + result[apiv1.LabelInstanceType] = template.InstanceType.InstanceType + result[apiv1.LabelZoneRegion] = template.Region + result[apiv1.LabelZoneFailureDomain] = template.Zone + result[apiv1.LabelHostname] = nodeName + + return result +} + +func extractLabelsFromGroup(tags []*aws.Tag) map[string]string { + result := make(map[string]string) + + for _, tag := range tags { + k := *tag.Key + v := *tag.Value + splits := strings.Split(k, "k8s.io/cluster-autoscaler/node-template/label/") + if len(splits) > 1 
{ + label := splits[1] + if label != "" { + result[label] = v + } + } + } + + return result +} + +func extractTaintsFromGroup(tags []*aws.Tag) []apiv1.Taint { + taints := make([]apiv1.Taint, 0) + + for _, tag := range tags { + k := *tag.Key + v := *tag.Value + splits := strings.Split(k, "k8s.io/cluster-autoscaler/node-template/taint/") + if len(splits) > 1 { + values := strings.SplitN(v, ":", 2) + taints = append(taints, apiv1.Taint{ + Key: splits[1], + Value: values[0], + Effect: apiv1.TaintEffect(values[1]), + }) + } + } + + return taints +} \ No newline at end of file diff --git a/cluster-autoscaler/cloudprovider/spotinst/spotinst_node_group.go b/cluster-autoscaler/cloudprovider/spotinst/spotinst_node_group.go new file mode 100644 index 000000000000..8eb1efd0f464 --- /dev/null +++ b/cluster-autoscaler/cloudprovider/spotinst/spotinst_node_group.go @@ -0,0 +1,232 @@ +/* +Copyright 2016 The Kubernetes Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/

package spotinst

import (
	"context"
	"errors"
	"fmt"
	"regexp"
	"strings"

	"k8s.io/klog/v2"

	"github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/aws"
	"github.com/spotinst/spotinst-sdk-go/spotinst"
	apiv1 "k8s.io/api/core/v1"
	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
	"k8s.io/autoscaler/cluster-autoscaler/config"
	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
)

// Group declaration of spotinst nodegroup
type Group struct {
	manager *CloudManager // owning manager; used for all Elastigroup API calls
	group   *aws.Group    // cached Elastigroup resource, set at registration time
	groupID string        // Elastigroup ID; also serves as the node group ID
	minSize int           // minimum size parsed from the node group spec
	maxSize int           // maximum size parsed from the node group spec
}

// MaxSize returns maximum size of the node group.
func (grp *Group) MaxSize() int {
	return grp.maxSize
}

// MinSize returns minimum size of the node group.
func (grp *Group) MinSize() int {
	return grp.minSize
}

// TargetSize returns the current target size of the node group.
// On error the returned size is -1 (propagated from GetGroupSize).
func (grp *Group) TargetSize() (int, error) {
	size, err := grp.manager.GetGroupSize(grp)
	return int(size), err
}

// IncreaseSize increases the size of the node group.
// The delta must be positive and must not push the target above MaxSize.
func (grp *Group) IncreaseSize(delta int) error {
	if delta <= 0 {
		return errors.New("size increase must be positive")
	}
	size, err := grp.manager.GetGroupSize(grp)
	if err != nil {
		return err
	}
	if int(size)+delta > grp.MaxSize() {
		return fmt.Errorf("size increase too large - desired:%d max:%d", int(size)+delta, grp.MaxSize())
	}
	return grp.manager.SetGroupSize(grp, size+int64(delta))
}

// DeleteNodes deletes nodes from this node group.
+func (grp *Group) DeleteNodes(nodes []*apiv1.Node) error { + size, err := grp.manager.GetGroupSize(grp) + if err != nil { + return fmt.Errorf("error when deleting nodes, retrieving size of group %s failed: %v", grp.Id(), err) + } + if int(size) <= grp.MinSize() { + return errors.New("min size reached, nodes will not be deleted") + } + toBeDeleted := make([]string, 0) + for _, node := range nodes { + belongs, err := grp.Belongs(node) + if err != nil { + return fmt.Errorf("failed to check membership of node %s in group %s: %v", node.Name, grp.Id(), err) + } + if !belongs { + return fmt.Errorf("%s belongs to a different group than %s", node.Name, grp.Id()) + } + instanceID, err := extractInstanceId(node.Spec.ProviderID) + if err != nil { + return fmt.Errorf("node %s's cloud provider ID is malformed: %v", node.Name, err) + } + toBeDeleted = append(toBeDeleted, instanceID) + } + return grp.manager.DeleteInstances(toBeDeleted) +} + +// DecreaseTargetSize decreases the target size of the node group. +func (grp *Group) DecreaseTargetSize(delta int) error { + if delta >= 0 { + return errors.New("size decrease size must be negative") + } + size, err := grp.manager.GetGroupSize(grp) + if err != nil { + return err + } + nodes, err := grp.Nodes() + if err != nil { + return err + } + if int(size)+delta < len(nodes) { + return fmt.Errorf("size decrease too large - desired:%d existing:%d", int(size)+delta, len(nodes)) + } + return grp.manager.SetGroupSize(grp, size+int64(delta)) +} + +// Id returns an unique identifier of the node group. +func (grp *Group) Id() string { + return grp.groupID +} + +// Debug returns a string containing all information regarding this node group. +func (grp *Group) Debug() string { + return fmt.Sprintf("%s (%d:%d)", grp.Id(), grp.MinSize(), grp.MaxSize()) +} + +// Nodes returns a list of all nodes that belong to this node group. 
+func (grp *Group) Nodes() ([]cloudprovider.Instance, error) { + in := &aws.StatusGroupInput{ + GroupID: spotinst.String(grp.Id()), + } + status, err := grp.manager.groupService.CloudProviderAWS().Status(context.Background(), in) + if err != nil { + return []cloudprovider.Instance{}, err + } + out := make([]cloudprovider.Instance, 0) + for _, instance := range status.Instances { + if instance.ID != nil && instance.AvailabilityZone != nil { + out = append(out, cloudprovider.Instance{Id: fmt.Sprintf("aws:///%s/%s", *instance.AvailabilityZone, *instance.ID)}) + } + } + return out, nil +} + +// TemplateNodeInfo returns a node template for this node group. +func (grp *Group) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) { + klog.Infof("No working nodes in node group %s, trying to generate from template", grp.Id()) + + template, err := grp.manager.buildGroupTemplate(grp.Id()) + if err != nil { + return nil, err + } + + node, err := grp.manager.buildNodeFromTemplate(grp, template) + if err != nil { + return nil, err + } + + nodeInfo := schedulerframework.NewNodeInfo(cloudprovider.BuildKubeProxy(grp.Id())) + nodeInfo.SetNode(node) + return nodeInfo, nil +} + +// Belongs returns true if the given node belongs to the NodeGroup. +func (grp *Group) Belongs(node *apiv1.Node) (bool, error) { + instanceID, err := extractInstanceId(node.Spec.ProviderID) + if err != nil { + return false, err + } + group, err := grp.manager.GetGroupForInstance(instanceID) + if err != nil { + return false, err + } + if group == nil { + return false, fmt.Errorf("%s does not belong to a known group", node.Name) + } + return true, nil +} + +// Exist checks if the node group really exists on the cloud provider side. +func (grp *Group) Exist() bool { + return true +} + +// Create creates the node group on the cloud provider side. 
+func (grp *Group) Create() (cloudprovider.NodeGroup, error) { + return nil, cloudprovider.ErrAlreadyExist +} + +// Delete deletes the node group on the cloud provider side. +func (grp *Group) Delete() error { + return cloudprovider.ErrNotImplemented +} + +// Autoprovisioned returns true if the node group is autoprovisioned. +func (grp *Group) Autoprovisioned() bool { + return false +} + +func (grp *Group) GetOptions(defaults config.NodeGroupAutoscalingOptions) (*config.NodeGroupAutoscalingOptions, error) { + return nil, cloudprovider.ErrNotImplemented +} + +var ( + spotinstProviderRE = regexp.MustCompile(`^spotinst\:\/\/\/[-0-9a-z]*\/[-0-9a-z]*$`) + awsProviderRE = regexp.MustCompile(`^aws\:\/\/\/[-0-9a-z]*\/[-0-9a-z]*$`) +) + +func extractInstanceId(providerID string) (string, error) { + var prefix string + + if spotinstProviderRE.FindStringSubmatch(providerID) != nil { + prefix = "spotinst:///" + } + + if awsProviderRE.FindStringSubmatch(providerID) != nil { + prefix = "aws:///" + } + + if prefix == "" { + return "", fmt.Errorf("expected node provider ID to be one of the "+ + "forms `spotinst:////` or `aws:////`, got `%s`", providerID) + } + + parts := strings.Split(providerID[len(prefix):], "/") + instanceID := parts[1] + + klog.V(8).Infof("Instance ID `%s` extracted from provider `%s`", instanceID, providerID) + return instanceID, nil +} \ No newline at end of file diff --git a/cluster-autoscaler/go.mod b/cluster-autoscaler/go.mod index 479302a59c2f..6243c2e09c69 100644 --- a/cluster-autoscaler/go.mod +++ b/cluster-autoscaler/go.mod @@ -26,7 +26,8 @@ require ( github.com/prometheus/client_golang v1.14.0 github.com/satori/go.uuid v1.2.0 github.com/spf13/pflag v1.0.5 - github.com/stretchr/testify v1.8.4 + github.com/spotinst/spotinst-sdk-go v1.357.0 + github.com/stretchr/testify v1.9.0 golang.org/x/crypto v0.21.0 golang.org/x/net v0.23.0 golang.org/x/oauth2 v0.7.0 @@ -141,7 +142,7 @@ require ( github.com/sirupsen/logrus v1.9.0 // indirect github.com/spf13/cobra 
v1.7.0 // indirect github.com/stoewer/go-strcase v1.3.0 // indirect - github.com/stretchr/objx v0.5.0 // indirect + github.com/stretchr/objx v0.5.2 // indirect github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect github.com/vishvananda/netlink v1.1.0 // indirect github.com/vishvananda/netns v0.0.2 // indirect @@ -174,6 +175,7 @@ require ( google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/cluster-autoscaler/go.sum b/cluster-autoscaler/go.sum index e4bfc8112581..69f6b0ec35fd 100644 --- a/cluster-autoscaler/go.sum +++ b/cluster-autoscaler/go.sum @@ -573,14 +573,17 @@ github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= +github.com/spotinst/spotinst-sdk-go v1.357.0 h1:mp1TyACYqw3aG74KT8jwA2FvlWDr67fMqO+6vDwT1pQ= +github.com/spotinst/spotinst-sdk-go v1.357.0/go.mod h1:Tn4/eb0SFY6IXmxz71CClujvbD/PuT+EO6Ta8v6AML4= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs= github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= 
-github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -591,8 +594,8 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= @@ -1146,6 +1149,8 @@ gopkg.in/gcfg.v1 v1.2.3 h1:m8OOJ4ccYHnx2f4gQwpno8nAX5OGOh7RLaaz0pj3Ogs= gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod 
h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= diff --git a/cluster-autoscaler/scripts/gpu-deploy-tmpl.yaml b/cluster-autoscaler/scripts/gpu-deploy-tmpl.yaml new file mode 100644 index 000000000000..1055cc07d06c --- /dev/null +++ b/cluster-autoscaler/scripts/gpu-deploy-tmpl.yaml @@ -0,0 +1,26 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gpu-pod + labels: + app: gpu-pod +spec: + replicas: 0 + selector: + matchLabels: + app: gpu-pod + template: + metadata: + labels: + app: gpu-pod + spec: + nodeSelector: + visenze.component: search + containers: + - name: digits-container + image: nvcr.io/nvidia/digits:20.12-tensorflow-py3 + command: + - sleep + - infinity + resources: + limits: \ No newline at end of file diff --git a/cluster-autoscaler/scripts/test-ca.sh b/cluster-autoscaler/scripts/test-ca.sh new file mode 100644 index 000000000000..6eb3df670c39 --- /dev/null +++ b/cluster-autoscaler/scripts/test-ca.sh @@ -0,0 +1,74 @@ +DEPLOY_NAME="gpu-pod" + +function check() { + local target=$1 + local is_match="false" + + for i in $(seq 500) + do + AVAILABLE_REPLICA=$(kubectl get deploy ${DEPLOY_NAME} | awk '{print $4}' | tail -n 1) + NODES=$(kubectl get nodes -l visenze.component=search,visenze.gpu=true -o json | jq '.items | length') + if [ ${AVAILABLE_REPLICA} -eq ${target} ] && [ ${NODES} -eq ${target} ] + then + is_match="true" + break + fi + sleep 5 + done + echo ${is_match} +} + +function scale() { + kubectl scale --replicas=$1 deployment/${DEPLOY_NAME} +} + +function scale_and_check() { + local target=$1 + echo "Scaling to ${target}, 
checking..." + scale ${target} + if [ $(check ${target}) = "false" ] + then + echo "Scaling to ${target} doesn't work" + exit 1 + else + echo "Scaling to ${target} succeed" + fi +} + +function update_resource_limit() { + cp gpu-deploy-tmpl.yaml gpu-deploy-tmp.yaml + local append_txt="" + case $1 in + gpu_num) + append_txt=" nvidia.com/gpu: 1" + ;; + + gpu_memory) + append_txt=" visenze.com/nvidia-gpu-memory: 8988051968" + ;; + + mps_context) + append_txt=" visenze.com/nvidia-mps-context: 18" + ;; + + # *) + # STATEMENTS + # ;; + esac + echo "$append_txt" >> gpu-deploy-tmp.yaml + kubectl apply -f gpu-deploy-tmp.yaml +} + +function test_with_resource() { + echo "check the resource $1" + update_resource_limit $1 + scale_and_check 1 + scale_and_check 2 + scale_and_check 1 + scale_and_check 0 +} + + +test_with_resource "gpu_num" +test_with_resource "gpu_memory" +test_with_resource "mps_context" \ No newline at end of file diff --git a/cluster-autoscaler/utils/gpumemory/gpumemory.go b/cluster-autoscaler/utils/gpumemory/gpumemory.go new file mode 100644 index 000000000000..7d357c10c579 --- /dev/null +++ b/cluster-autoscaler/utils/gpumemory/gpumemory.go @@ -0,0 +1,41 @@ +package gpumemory + +import ( + apiv1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" +) + +const ( + // ResourceVisenzeGPUMemory is the name of the GPU Memory resource + ResourceVisenzeGPUMemory = "visenze.com/nvidia-gpu-memory" + // GPULabel is the label added to nodes with GPU resource by Visenze. + // If you're not scaling - this is probably the problem! 
+ GPULabel = "accelerator" +) + +// NodeHasGpuMemory returns true if a given node has GPU hardware +func NodeHasGpuMemory(node *apiv1.Node) bool { + _, hasGpuLabel := node.Labels[GPULabel] + gpuAllocatable, hasGpuAllocatable := node.Status.Allocatable[ResourceVisenzeGPUMemory] + return hasGpuLabel || (hasGpuAllocatable && !gpuAllocatable.IsZero()) +} + +// PodRequestsGpuMemory returns true if a given pod has GPU Memory request +func PodRequestsGpuMemory(pod *apiv1.Pod) bool { + for _, container := range pod.Spec.Containers { + if container.Resources.Requests != nil { + _, gpuMemoryFound := container.Resources.Requests[ResourceVisenzeGPUMemory] + if gpuMemoryFound { + return true + } + } + } + return false +} + +// RequestInfo gives some information about how much GPU memory is needed +type RequestInfo struct { + MaximumMemory resource.Quantity + TotalMemory resource.Quantity + Pods []*apiv1.Pod +} \ No newline at end of file diff --git a/cluster-autoscaler/utils/gpumemory/gpumemory_test.go b/cluster-autoscaler/utils/gpumemory/gpumemory_test.go new file mode 100644 index 000000000000..14507cf51cbb --- /dev/null +++ b/cluster-autoscaler/utils/gpumemory/gpumemory_test.go @@ -0,0 +1,83 @@ +package gpumemory + +import ( + "testing" + + "github.com/stretchr/testify/assert" + apiv1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestNodeHasGpuMemory(t *testing.T) { + gpuLabels := map[string]string{ + GPULabel: "nvidia-tesla-k80", + } + nodeGpuReady := &apiv1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "nodeGpuReady", + Labels: gpuLabels, + }, + Status: apiv1.NodeStatus{ + Capacity: apiv1.ResourceList{}, + Allocatable: apiv1.ResourceList{}, + }, + } + nodeGpuReady.Status.Allocatable[ResourceVisenzeGPUMemory] = *resource.NewQuantity(8e9, resource.DecimalSI) + nodeGpuReady.Status.Capacity[ResourceVisenzeGPUMemory] = *resource.NewQuantity(8e9, resource.DecimalSI) + assert.True(t, 
NodeHasGpuMemory(nodeGpuReady)) + + nodeGpuUnready := &apiv1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "nodeGpuUnready", + Labels: gpuLabels, + }, + Status: apiv1.NodeStatus{ + Capacity: apiv1.ResourceList{}, + Allocatable: apiv1.ResourceList{}, + }, + } + assert.True(t, NodeHasGpuMemory(nodeGpuUnready)) + + nodeNoGpu := &apiv1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "nodeNoGpu", + Labels: map[string]string{}, + }, + Status: apiv1.NodeStatus{ + Capacity: apiv1.ResourceList{}, + Allocatable: apiv1.ResourceList{}, + }, + } + assert.False(t, NodeHasGpuMemory(nodeNoGpu)) +} + +func TestPodRequestsGpuMemory(t *testing.T) { + podNoGpu := &apiv1.Pod{ + Spec: apiv1.PodSpec{ + Containers: []apiv1.Container{ + apiv1.Container{ + Resources: apiv1.ResourceRequirements{ + Requests: apiv1.ResourceList{ + apiv1.ResourceCPU: *resource.NewQuantity(1, resource.DecimalSI), + }, + }, + }, + }, + }, + } + podWithGpu := &apiv1.Pod{Spec: apiv1.PodSpec{Containers: []apiv1.Container{ + apiv1.Container{ + Resources: apiv1.ResourceRequirements{ + Requests: apiv1.ResourceList{ + apiv1.ResourceCPU: *resource.NewQuantity(1, resource.DecimalSI), + ResourceVisenzeGPUMemory: *resource.NewQuantity(1, resource.DecimalSI), + }, + }, + }, + }}} + podWithGpu.Spec.Containers[0].Resources.Requests[ResourceVisenzeGPUMemory] = *resource.NewQuantity(1, resource.DecimalSI) + + assert.False(t, PodRequestsGpuMemory(podNoGpu)) + assert.True(t, PodRequestsGpuMemory(podWithGpu)) +} \ No newline at end of file diff --git a/cluster-autoscaler/utils/mpscontext/mpscontext.go b/cluster-autoscaler/utils/mpscontext/mpscontext.go new file mode 100644 index 000000000000..22d9a66687dc --- /dev/null +++ b/cluster-autoscaler/utils/mpscontext/mpscontext.go @@ -0,0 +1,6 @@ +package mpscontext + +// Custom resource for NVIDIA MPS context +const ( + ResourceVisenzeMPSContext = "visenze.com/nvidia-mps-context" +) \ No newline at end of file diff --git 
a/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/LICENSE b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/LICENSE new file mode 100644 index 000000000000..261eeb9e9f8b --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/NOTICE.md b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/NOTICE.md new file mode 100644 index 000000000000..918a63fbf6b2 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/NOTICE.md @@ -0,0 +1,14 @@ + + + + + + + + + + + +
SoftwareLicense
go-ini/iniApache 2.0 +
stretchr/testifyMIT +
diff --git a/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/elastigroup.go b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/elastigroup.go new file mode 100644 index 000000000000..d591dcfcaa78 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/elastigroup.go @@ -0,0 +1,53 @@ +package elastigroup + +import ( + "github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/aws" + azurev3 "github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/azure/v3" + "github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/gcp" + "github.com/spotinst/spotinst-sdk-go/spotinst" + "github.com/spotinst/spotinst-sdk-go/spotinst/client" + "github.com/spotinst/spotinst-sdk-go/spotinst/session" +) + +// Service provides the API operation methods for making requests to endpoints +// of the Spotinst API. See this package's package overview docs for details on +// the service. +type Service interface { + CloudProviderAWS() aws.Service + CloudProviderAzureV3() azurev3.Service + CloudProviderGCP() gcp.Service +} + +type ServiceOp struct { + Client *client.Client +} + +var _ Service = &ServiceOp{} + +func New(sess *session.Session, cfgs ...*spotinst.Config) *ServiceOp { + cfg := &spotinst.Config{} + cfg.Merge(sess.Config) + cfg.Merge(cfgs...) 
+ + return &ServiceOp{ + Client: client.New(cfg), + } +} + +func (s *ServiceOp) CloudProviderAWS() aws.Service { + return &aws.ServiceOp{ + Client: s.Client, + } +} + +func (s *ServiceOp) CloudProviderAzureV3() azurev3.Service { + return &azurev3.ServiceOp{ + Client: s.Client, + } +} + +func (s *ServiceOp) CloudProviderGCP() gcp.Service { + return &gcp.ServiceOp{ + Client: s.Client, + } +} diff --git a/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/aws/aws.go b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/aws/aws.go new file mode 100644 index 000000000000..a008da0117ba --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/aws/aws.go @@ -0,0 +1,5601 @@ +package aws + +import ( + "context" + "encoding/json" + "io/ioutil" + "net/http" + "strconv" + "time" + + "github.com/spotinst/spotinst-sdk-go/spotinst" + "github.com/spotinst/spotinst-sdk-go/spotinst/client" + "github.com/spotinst/spotinst-sdk-go/spotinst/util/jsonutil" + "github.com/spotinst/spotinst-sdk-go/spotinst/util/uritemplates" +) + +// A Product represents the type of an operating system. +type Product int + +const ( + // ProductWindows represents the Windows product. + ProductWindows Product = iota + + // ProductWindowsVPC represents the Windows (Amazon VPC) product. + ProductWindowsVPC + + // ProductLinuxUnix represents the Linux/Unix product. + ProductLinuxUnix + + // ProductLinuxUnixVPC represents the Linux/Unix (Amazon VPC) product. + ProductLinuxUnixVPC + + // ProductSUSELinux represents the SUSE Linux product. + ProductSUSELinux + + // ProductSUSELinuxVPC represents the SUSE Linux (Amazon VPC) product. 
+ ProductSUSELinuxVPC +) + +var ProductName = map[Product]string{ + ProductWindows: "Windows", + ProductWindowsVPC: "Windows (Amazon VPC)", + ProductLinuxUnix: "Linux/UNIX", + ProductLinuxUnixVPC: "Linux/UNIX (Amazon VPC)", + ProductSUSELinux: "SUSE Linux", + ProductSUSELinuxVPC: "SUSE Linux (Amazon VPC)", +} + +var ProductValue = map[string]Product{ + "Windows": ProductWindows, + "Windows (Amazon VPC)": ProductWindowsVPC, + "Linux/UNIX": ProductLinuxUnix, + "Linux/UNIX (Amazon VPC)": ProductLinuxUnixVPC, + "SUSE Linux": ProductSUSELinux, + "SUSE Linux (Amazon VPC)": ProductSUSELinuxVPC, +} + +func (p Product) String() string { + return ProductName[p] +} + +type Group struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Description *string `json:"description,omitempty"` + Region *string `json:"region,omitempty"` + Capacity *Capacity `json:"capacity,omitempty"` + Compute *Compute `json:"compute,omitempty"` + Strategy *Strategy `json:"strategy,omitempty"` + Scaling *Scaling `json:"scaling,omitempty"` + Scheduling *Scheduling `json:"scheduling,omitempty"` + Integration *Integration `json:"thirdPartiesIntegration,omitempty"` + Logging *Logging `json:"logging,omitempty"` + + // Read-only fields. + CreatedAt *time.Time `json:"createdAt,omitempty"` + UpdatedAt *time.Time `json:"updatedAt,omitempty"` + + // forceSendFields is a list of field names (e.g. "Keys") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + forceSendFields []string + + // nullFields is a list of field names (e.g. "Keys") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + nullFields []string +} + +type Integration struct { + EC2ContainerService *EC2ContainerServiceIntegration `json:"ecs,omitempty"` + ElasticBeanstalk *ElasticBeanstalkIntegration `json:"elasticBeanstalk,omitempty"` + CodeDeploy *CodeDeployIntegration `json:"codeDeploy,omitempty"` + OpsWorks *OpsWorksIntegration `json:"opsWorks,omitempty"` + Rancher *RancherIntegration `json:"rancher,omitempty"` + Kubernetes *KubernetesIntegration `json:"kubernetes,omitempty"` + Mesosphere *MesosphereIntegration `json:"mesosphere,omitempty"` + Nomad *NomadIntegration `json:"nomad,omitempty"` + Chef *ChefIntegration `json:"chef,omitempty"` + Gitlab *GitlabIntegration `json:"gitlab,omitempty"` + Route53 *Route53Integration `json:"route53,omitempty"` + DockerSwarm *DockerSwarmIntegration `json:"dockerSwarm,omitempty"` + + forceSendFields []string + nullFields []string +} + +type InstanceHealth struct { + InstanceID *string `json:"instanceId,omitempty"` + SpotRequestID *string `json:"spotRequestId,omitempty"` + GroupID *string `json:"groupId,omitempty"` + AvailabilityZone *string `json:"availabilityZone,omitempty"` + LifeCycle *string `json:"lifeCycle,omitempty"` + HealthStatus *string `json:"healthStatus,omitempty"` +} + +type AutoScale struct { + IsEnabled *bool `json:"isEnabled,omitempty"` + IsAutoConfig *bool `json:"isAutoConfig,omitempty"` + Cooldown *int `json:"cooldown,omitempty"` + Headroom *AutoScaleHeadroom `json:"headroom,omitempty"` + Down *AutoScaleDown `json:"down,omitempty"` + + forceSendFields []string + nullFields []string +} + +type AutoScaleECS struct { + AutoScale + Attributes []*AutoScaleAttributes `json:"attributes,omitempty"` + ShouldScaleDownNonServiceTasks *bool `json:"shouldScaleDownNonServiceTasks,omitempty"` + + forceSendFields 
[]string + nullFields []string +} + +type AutoScaleKubernetes struct { + AutoScale + Labels []*AutoScaleLabel `json:"labels,omitempty"` + + forceSendFields []string + nullFields []string +} + +type AutoScaleNomad struct { + AutoScale + Constraints []*AutoScaleConstraint `json:"constraints,omitempty"` + + forceSendFields []string + nullFields []string +} + +type AutoScaleDockerSwarm struct { + AutoScale + + forceSendFields []string + nullFields []string +} + +type AutoScaleHeadroom struct { + CPUPerUnit *int `json:"cpuPerUnit,omitempty"` + GPUPerUnit *int `json:"gpuPerUnit,omitempty"` + MemoryPerUnit *int `json:"memoryPerUnit,omitempty"` + NumOfUnits *int `json:"numOfUnits,omitempty"` + + forceSendFields []string + nullFields []string +} + +type AutoScaleDown struct { + EvaluationPeriods *int `json:"evaluationPeriods,omitempty"` + MaxScaleDownPercentage *float64 `json:"maxScaleDownPercentage,omitempty"` + + forceSendFields []string + nullFields []string +} + +type AutoScaleConstraint struct { + Key *string `json:"key,omitempty"` + Value *string `json:"value,omitempty"` + + forceSendFields []string + nullFields []string +} + +type AutoScaleLabel struct { + Key *string `json:"key,omitempty"` + Value *string `json:"value,omitempty"` + + forceSendFields []string + nullFields []string +} + +type AutoScaleAttributes struct { + Key *string `json:"key,omitempty"` + Value *string `json:"value,omitempty"` + + forceSendFields []string + nullFields []string +} + +type ElasticBeanstalkIntegration struct { + EnvironmentID *string `json:"environmentId,omitempty"` + ManagedActions *BeanstalkManagedActions `json:"managedActions,omitempty"` + DeploymentPreferences *BeanstalkDeploymentPreferences `json:"deploymentPreferences,omitempty"` + + forceSendFields []string + nullFields []string +} + +type BeanstalkManagedActions struct { + PlatformUpdate *BeanstalkPlatformUpdate `json:"platformUpdate,omitempty"` + + forceSendFields []string + nullFields []string +} + +type 
BeanstalkPlatformUpdate struct { + PerformAt *string `json:"performAt,omitempty"` + TimeWindow *string `json:"timeWindow,omitempty"` + UpdateLevel *string `json:"updateLevel,omitempty"` + + forceSendFields []string + nullFields []string +} + +type BeanstalkDeploymentPreferences struct { + AutomaticRoll *bool `json:"automaticRoll,omitempty"` + BatchSizePercentage *int `json:"batchSizePercentage,omitempty"` + GracePeriod *int `json:"gracePeriod,omitempty"` + Strategy *BeanstalkDeploymentStrategy `json:"strategy,omitempty"` + + forceSendFields []string + nullFields []string +} + +type BeanstalkDeploymentStrategy struct { + Action *string `json:"action,omitempty"` + ShouldDrainInstances *bool `json:"shouldDrainInstances,omitempty"` + + forceSendFields []string + nullFields []string +} + +type CodeDeployIntegration struct { + DeploymentGroups []*DeploymentGroup `json:"deploymentGroups,omitempty"` + CleanUpOnFailure *bool `json:"cleanUpOnFailure,omitempty"` + TerminateInstanceOnFailure *bool `json:"terminateInstanceOnFailure,omitempty"` + + forceSendFields []string + nullFields []string +} + +type DeploymentGroup struct { + ApplicationName *string `json:"applicationName,omitempty"` + DeploymentGroupName *string `json:"deploymentGroupName,omitempty"` + + forceSendFields []string + nullFields []string +} + +type OpsWorksIntegration struct { + LayerID *string `json:"layerId,omitempty"` + StackType *string `json:"stackType,omitempty"` + + forceSendFields []string + nullFields []string +} + +type RancherIntegration struct { + MasterHost *string `json:"masterHost,omitempty"` + AccessKey *string `json:"accessKey,omitempty"` + SecretKey *string `json:"secretKey,omitempty"` + Version *string `json:"version,omitempty"` + + forceSendFields []string + nullFields []string +} + +type EC2ContainerServiceIntegration struct { + ClusterName *string `json:"clusterName,omitempty"` + AutoScale *AutoScaleECS `json:"autoScale,omitempty"` + Batch *Batch `json:"batch,omitempty"` + + 
forceSendFields []string + nullFields []string +} + +type Batch struct { + JobQueueNames []string `json:"jobQueueNames,omitempty"` + + forceSendFields []string + nullFields []string +} + +type KubernetesIntegration struct { + IntegrationMode *string `json:"integrationMode,omitempty"` + ClusterIdentifier *string `json:"clusterIdentifier,omitempty"` + Server *string `json:"apiServer,omitempty"` + Token *string `json:"token,omitempty"` + AutoScale *AutoScaleKubernetes `json:"autoScale,omitempty"` + + forceSendFields []string + nullFields []string +} + +type MesosphereIntegration struct { + Server *string `json:"apiServer,omitempty"` + + forceSendFields []string + nullFields []string +} + +type NomadIntegration struct { + MasterHost *string `json:"masterHost,omitempty"` + MasterPort *int `json:"masterPort,omitempty"` + ACLToken *string `json:"aclToken,omitempty"` + AutoScale *AutoScaleNomad `json:"autoScale,omitempty"` + + forceSendFields []string + nullFields []string +} + +type ChefIntegration struct { + Server *string `json:"chefServer,omitempty"` + Organization *string `json:"organization,omitempty"` + User *string `json:"user,omitempty"` + PEMKey *string `json:"pemKey,omitempty"` + Version *string `json:"chefVersion,omitempty"` + + forceSendFields []string + nullFields []string +} + +type DockerSwarmIntegration struct { + MasterHost *string `json:"masterHost,omitempty"` + MasterPort *int `json:"masterPort,omitempty"` + AutoScale *AutoScaleDockerSwarm `json:"autoScale,omitempty"` + + forceSendFields []string + nullFields []string +} + +type Route53Integration struct { + Domains []*Domain `json:"domains,omitempty"` + + forceSendFields []string + nullFields []string +} + +type Domain struct { + HostedZoneID *string `json:"hostedZoneId,omitempty"` + SpotinstAccountID *string `json:"spotinstAccountId,omitempty"` + RecordSetType *string `json:"recordSetType,omitempty"` + RecordSets []*RecordSet `json:"recordSets,omitempty"` + + forceSendFields []string + nullFields 
[]string +} + +type RecordSet struct { + Name *string `json:"name,omitempty"` + UsePublicIP *bool `json:"usePublicIp,omitempty"` + UsePublicDNS *bool `json:"usePublicDns,omitempty"` + + forceSendFields []string + nullFields []string +} + +type GitlabIntegration struct { + Runner *GitlabRunner `json:"runner,omitempty"` + + forceSendFields []string + nullFields []string +} + +type GitlabRunner struct { + IsEnabled *bool `json:"isEnabled,omitempty"` + + forceSendFields []string + nullFields []string +} + +type Scheduling struct { + Tasks []*Task `json:"tasks,omitempty"` + + forceSendFields []string + nullFields []string +} + +type Task struct { + IsEnabled *bool `json:"isEnabled,omitempty"` + Type *string `json:"taskType,omitempty"` + Frequency *string `json:"frequency,omitempty"` + CronExpression *string `json:"cronExpression,omitempty"` + StartTime *string `json:"startTime,omitempty"` + ScaleTargetCapacity *int `json:"scaleTargetCapacity,omitempty"` + ScaleMinCapacity *int `json:"scaleMinCapacity,omitempty"` + ScaleMaxCapacity *int `json:"scaleMaxCapacity,omitempty"` + BatchSizePercentage *int `json:"batchSizePercentage,omitempty"` + GracePeriod *int `json:"gracePeriod,omitempty"` + TargetCapacity *int `json:"targetCapacity,omitempty"` + MinCapacity *int `json:"minCapacity,omitempty"` + MaxCapacity *int `json:"maxCapacity,omitempty"` + Adjustment *int `json:"adjustment,omitempty"` + AdjustmentPercentage *int `json:"adjustmentPercentage,omitempty"` + + forceSendFields []string + nullFields []string +} + +type Scaling struct { + Up []*ScalingPolicy `json:"up,omitempty"` + Down []*ScalingPolicy `json:"down,omitempty"` + Target []*ScalingPolicy `json:"target,omitempty"` + MultipleMetrics *MultipleMetrics `json:"multipleMetrics,omitempty"` + + forceSendFields []string + nullFields []string +} + +type MultipleMetrics struct { + Metrics []*Metrics `json:"metrics,omitempty"` + Expressions []*Expressions `json:"expressions,omitempty"` + + forceSendFields []string + 
nullFields []string +} + +type ScalingPolicy struct { + PolicyName *string `json:"policyName,omitempty"` + MetricName *string `json:"metricName,omitempty"` + Namespace *string `json:"namespace,omitempty"` + Source *string `json:"source,omitempty"` + Statistic *string `json:"statistic,omitempty"` + Unit *string `json:"unit,omitempty"` + Threshold *float64 `json:"threshold,omitempty"` + Adjustment *int `json:"adjustment,omitempty"` + MinTargetCapacity *int `json:"minTargetCapacity,omitempty"` + MaxTargetCapacity *int `json:"maxTargetCapacity,omitempty"` + EvaluationPeriods *int `json:"evaluationPeriods,omitempty"` + Period *int `json:"period,omitempty"` + Cooldown *int `json:"cooldown,omitempty"` + Operator *string `json:"operator,omitempty"` + Dimensions []*Dimension `json:"dimensions,omitempty"` + Action *Action `json:"action,omitempty"` + Target *float64 `json:"target,omitempty"` + IsEnabled *bool `json:"isEnabled,omitempty"` + MaxCapacityPerScale *string `json:"maxCapacityPerScale,omitempty"` + Predictive *Predictive `json:"predictive,omitempty"` + StepAdjustments []*StepAdjustment `json:"stepAdjustments,omitempty"` + + forceSendFields []string + nullFields []string +} + +type Metrics struct { + Name *string `json:"name,omitempty"` + MetricName *string `json:"metricName,omitempty"` + Namespace *string `json:"namespace,omitempty"` + Dimensions []*Dimension `json:"dimensions,omitempty"` + ExtendedStatistic *string `json:"extendedStatistic,omitempty"` + Statistic *string `json:"statistic,omitempty"` + Unit *string `json:"unit,omitempty"` + + forceSendFields []string + nullFields []string +} + +type Expressions struct { + Expression *string `json:"expression,omitempty"` + Name *string `json:"name,omitempty"` + + forceSendFields []string + nullFields []string +} + +type Action struct { + Type *string `json:"type,omitempty"` + Adjustment *string `json:"adjustment,omitempty"` + MinTargetCapacity *string `json:"minTargetCapacity,omitempty"` + MaxTargetCapacity *string 
`json:"maxTargetCapacity,omitempty"` + Maximum *string `json:"maximum,omitempty"` + Minimum *string `json:"minimum,omitempty"` + Target *string `json:"target,omitempty"` + + forceSendFields []string + nullFields []string +} + +type Dimension struct { + Name *string `json:"name,omitempty"` + Value *string `json:"value,omitempty"` + + forceSendFields []string + nullFields []string +} + +type Predictive struct { + Mode *string `json:"mode,omitempty"` + + forceSendFields []string + nullFields []string +} + +type StepAdjustment struct { + Action *Action `json:"action,omitempty"` + Threshold *int `json:"threshold,omitempty"` + + forceSendFields []string + nullFields []string +} + +type Strategy struct { + Risk *float64 `json:"risk,omitempty"` + OnDemandCount *int `json:"onDemandCount,omitempty"` + DrainingTimeout *int `json:"drainingTimeout,omitempty"` + AvailabilityVsCost *string `json:"availabilityVsCost,omitempty"` + LifetimePeriod *string `json:"lifetimePeriod,omitempty"` + UtilizeReservedInstances *bool `json:"utilizeReservedInstances,omitempty"` + FallbackToOnDemand *bool `json:"fallbackToOd,omitempty"` + SpinUpTime *int `json:"spinUpTime,omitempty"` + Signals []*Signal `json:"signals,omitempty"` + Persistence *Persistence `json:"persistence,omitempty"` + RevertToSpot *RevertToSpot `json:"revertToSpot,omitempty"` + ScalingStrategy *ScalingStrategy `json:"scalingStrategy,omitempty"` + UtilizeCommitments *bool `json:"utilizeCommitments,omitempty"` + MinimumInstanceLifetime *int `json:"minimumInstanceLifetime,omitempty"` + ConsiderODPricing *bool `json:"considerODPricing,omitempty"` + ImmediateODRecoverThreshold *int `json:"immediateODRecoverThreshold,omitempty"` + + forceSendFields []string + nullFields []string +} + +type Persistence struct { + ShouldPersistPrivateIP *bool `json:"shouldPersistPrivateIp,omitempty"` + ShouldPersistBlockDevices *bool `json:"shouldPersistBlockDevices,omitempty"` + ShouldPersistRootDevice *bool `json:"shouldPersistRootDevice,omitempty"` 
+ BlockDevicesMode *string `json:"blockDevicesMode,omitempty"` + + forceSendFields []string + nullFields []string +} + +type RevertToSpot struct { + PerformAt *string `json:"performAt,omitempty"` + TimeWindows []string `json:"timeWindows,omitempty"` + + forceSendFields []string + nullFields []string +} + +type ScalingStrategy struct { + TerminateAtEndOfBillingHour *bool `json:"terminateAtEndOfBillingHour,omitempty"` + TerminationPolicy *string `json:"terminationPolicy,omitempty"` + + forceSendFields []string + nullFields []string +} + +type Signal struct { + Name *string `json:"name,omitempty"` + Timeout *int `json:"timeout,omitempty"` + + forceSendFields []string + nullFields []string +} + +type Capacity struct { + Minimum *int `json:"minimum,omitempty"` + Maximum *int `json:"maximum,omitempty"` + Target *int `json:"target,omitempty"` + Unit *string `json:"unit,omitempty"` + + forceSendFields []string + nullFields []string +} + +type Compute struct { + Product *string `json:"product,omitempty"` + InstanceTypes *InstanceTypes `json:"instanceTypes,omitempty"` + LaunchSpecification *LaunchSpecification `json:"launchSpecification,omitempty"` + AvailabilityZones []*AvailabilityZone `json:"availabilityZones,omitempty"` + PreferredAvailabilityZones []string `json:"preferredAvailabilityZones,omitempty"` + ElasticIPs []string `json:"elasticIps,omitempty"` + EBSVolumePool []*EBSVolume `json:"ebsVolumePool,omitempty"` + PrivateIPs []string `json:"privateIps,omitempty"` + SubnetIDs []string `json:"subnetIds,omitempty"` + + forceSendFields []string + nullFields []string +} + +type EBSVolume struct { + DeviceName *string `json:"deviceName,omitempty"` + VolumeIDs []string `json:"volumeIds,omitempty"` + + forceSendFields []string + nullFields []string +} + +type InstanceTypes struct { + OnDemand *string `json:"ondemand,omitempty"` + Spot []string `json:"spot,omitempty"` + PreferredSpot []string `json:"preferredSpot,omitempty"` + Weights []*InstanceTypeWeight 
`json:"weights,omitempty"` + OnDemandTypes []string `json:"onDemandTypes,omitempty"` + ResourceRequirements *ResourceRequirements `json:"resourceRequirements,omitempty"` + + forceSendFields []string + nullFields []string +} + +type InstanceTypeWeight struct { + InstanceType *string `json:"instanceType,omitempty"` + Weight *int `json:"weightedCapacity,omitempty"` + + forceSendFields []string + nullFields []string +} + +type ResourceRequirements struct { + ExcludedInstanceFamilies []string `json:"excludedInstanceFamilies,omitempty"` + ExcludedInstanceGenerations []string `json:"excludedInstanceGenerations,omitempty"` + ExcludedInstanceTypes []string `json:"excludedInstanceTypes,omitempty"` + RequiredGpu *RequiredGpu `json:"requiredGpu,omitempty"` + RequiredMemory *RequiredMemory `json:"requiredMemory,omitempty"` + RequiredVCpu *RequiredVCpu `json:"requiredVCpu,omitempty"` + + forceSendFields []string + nullFields []string +} + +type RequiredGpu struct { + Maximum *int `json:"maximum,omitempty"` + Minimum *int `json:"minimum,omitempty"` + + forceSendFields []string + nullFields []string +} + +type RequiredMemory struct { + Maximum *int `json:"maximum,omitempty"` + Minimum *int `json:"minimum,omitempty"` + + forceSendFields []string + nullFields []string +} + +type RequiredVCpu struct { + Maximum *int `json:"maximum,omitempty"` + Minimum *int `json:"minimum,omitempty"` + + forceSendFields []string + nullFields []string +} + +type AvailabilityZone struct { + Name *string `json:"name,omitempty"` + SubnetID *string `json:"subnetId,omitempty"` + PlacementGroupName *string `json:"placementGroupName,omitempty"` + + forceSendFields []string + nullFields []string +} + +type LaunchSpecification struct { + LoadBalancerNames []string `json:"loadBalancerNames,omitempty"` + LoadBalancersConfig *LoadBalancersConfig `json:"loadBalancersConfig,omitempty"` + SecurityGroupIDs []string `json:"securityGroupIds,omitempty"` + HealthCheckType *string `json:"healthCheckType,omitempty"` + 
HealthCheckGracePeriod *int `json:"healthCheckGracePeriod,omitempty"` + HealthCheckUnhealthyDurationBeforeReplacement *int `json:"healthCheckUnhealthyDurationBeforeReplacement,omitempty"` + Images []*Image `json:"images,omitempty"` + ImageID *string `json:"imageId,omitempty"` + KeyPair *string `json:"keyPair,omitempty"` + UserData *string `json:"userData,omitempty"` + ShutdownScript *string `json:"shutdownScript,omitempty"` + Tenancy *string `json:"tenancy,omitempty"` + Monitoring *bool `json:"monitoring,omitempty"` + EBSOptimized *bool `json:"ebsOptimized,omitempty"` + IAMInstanceProfile *IAMInstanceProfile `json:"iamRole,omitempty"` + CreditSpecification *CreditSpecification `json:"creditSpecification,omitempty"` + BlockDeviceMappings []*BlockDeviceMapping `json:"blockDeviceMappings,omitempty"` + NetworkInterfaces []*NetworkInterface `json:"networkInterfaces,omitempty"` + Tags []*Tag `json:"tags,omitempty"` + MetadataOptions *MetadataOptions `json:"metadataOptions,omitempty"` + CPUOptions *CPUOptions `json:"cpuOptions,omitempty"` + ResourceTagSpecification *ResourceTagSpecification `json:"resourceTagSpecification,omitempty"` + ITF *ITF `json:"itf,omitempty"` + + forceSendFields []string + nullFields []string +} + +type ITF struct { + LoadBalancers []*ITFLoadBalancer `json:"loadBalancers,omitempty"` + MigrationHealthinessThreshold *int `json:"migrationHealthinessThreshold,omitempty"` + FixedTargetGroups *bool `json:"fixedTargetGroups,omitempty"` + WeightStrategy *string `json:"weightStrategy,omitempty"` + TargetGroupConfig *TargetGroupConfig `json:"targetGroupConfig,omitempty"` + DefaultStaticTargetGroups []*StaticTargetGroup `json:"defaultStaticTargetGroups,omitempty"` + + forceSendFields []string + nullFields []string +} + +type ITFLoadBalancer struct { + ListenerRules []*ListenerRule `json:"listenerRules,omitempty"` + LoadBalancerARN *string `json:"loadBalancerArn,omitempty"` + + forceSendFields []string + nullFields []string +} + +type ListenerRule struct { + 
RuleARN *string `json:"ruleArn,omitempty"` + StaticTargetGroups []*StaticTargetGroup `json:"staticTargetGroups,omitempty"` + + forceSendFields []string + nullFields []string +} + +type StaticTargetGroup struct { + StaticTargetGroupARN *string `json:"arn,omitempty"` + Percentage *float64 `json:"percentage,omitempty"` + + forceSendFields []string + nullFields []string +} + +type TargetGroupConfig struct { + VPCID *string `json:"vpcId,omitempty"` + HealthCheckIntervalSeconds *int `json:"healthCheckIntervalSeconds,omitempty"` + HealthCheckPath *string `json:"healthCheckPath,omitempty"` + HealthCheckPort *string `json:"healthCheckPort,omitempty"` + HealthCheckProtocol *string `json:"healthCheckProtocol,omitempty"` + HealthCheckTimeoutSeconds *int `json:"healthCheckTimeoutSeconds,omitempty"` + HealthyThresholdCount *int `json:"healthyThresholdCount,omitempty"` + UnhealthyThresholdCount *int `json:"unhealthyThresholdCount,omitempty"` + Port *int `json:"port,omitempty"` + Protocol *string `json:"protocol,omitempty"` + ProtocolVersion *string `json:"protocolVersion,omitempty"` + Matcher *Matcher `json:"matcher,omitempty"` + Tags []*Tag `json:"tags,omitempty"` + + forceSendFields []string + nullFields []string +} + +type Matcher struct { + HTTPCode *string `json:"httpCode,omitempty"` + GRPCCode *string `json:"grpcCode,omitempty"` + + forceSendFields []string + nullFields []string +} + +type MetadataOptions struct { + HTTPTokens *string `json:"httpTokens,omitempty"` + HTTPPutResponseHopLimit *int `json:"httpPutResponseHopLimit,omitempty"` + InstanceMetadataTags *string `json:"instanceMetadataTags,omitempty"` + + forceSendFields []string + nullFields []string +} + +type CPUOptions struct { + ThreadsPerCore *int `json:"threadsPerCore,omitempty"` + + forceSendFields []string + nullFields []string +} + +type ResourceTagSpecification struct { + Volumes *Volumes `json:"volumes,omitempty"` + Snapshots *Snapshots `json:"snapshots,omitempty"` + ENIs *ENIs `json:"enis,omitempty"` + 
AMIs *AMIs `json:"amis,omitempty"` + + forceSendFields []string + nullFields []string +} + +type Volumes struct { + ShouldTag *bool `json:"shouldTag,omitempty"` + + forceSendFields []string + nullFields []string +} + +type Snapshots struct { + ShouldTag *bool `json:"shouldTag,omitempty"` + + forceSendFields []string + nullFields []string +} + +type ENIs struct { + ShouldTag *bool `json:"shouldTag,omitempty"` + + forceSendFields []string + nullFields []string +} + +type AMIs struct { + ShouldTag *bool `json:"shouldTag,omitempty"` + + forceSendFields []string + nullFields []string +} + +type Image struct { + Id *string `json:"id,omitempty"` + + forceSendFields []string + nullFields []string +} + +type LoadBalancersConfig struct { + LoadBalancers []*LoadBalancer `json:"loadBalancers,omitempty"` + + forceSendFields []string + nullFields []string +} + +type LoadBalancer struct { + Name *string `json:"name,omitempty"` + Arn *string `json:"arn,omitempty"` + Type *string `json:"type,omitempty"` + + forceSendFields []string + nullFields []string +} + +type NetworkInterface struct { + ID *string `json:"networkInterfaceId,omitempty"` + Description *string `json:"description,omitempty"` + DeviceIndex *int `json:"deviceIndex,omitempty"` + SecondaryPrivateIPAddressCount *int `json:"secondaryPrivateIpAddressCount,omitempty"` + AssociatePublicIPAddress *bool `json:"associatePublicIpAddress,omitempty"` + AssociateIPV6Address *bool `json:"associateIpv6Address,omitempty"` + DeleteOnTermination *bool `json:"deleteOnTermination,omitempty"` + SecurityGroupsIDs []string `json:"groups,omitempty"` + PrivateIPAddress *string `json:"privateIpAddress,omitempty"` + SubnetID *string `json:"subnetId,omitempty"` + + forceSendFields []string + nullFields []string +} + +type BlockDeviceMapping struct { + DeviceName *string `json:"deviceName,omitempty"` + VirtualName *string `json:"virtualName,omitempty"` + EBS *EBS `json:"ebs,omitempty"` + + forceSendFields []string + nullFields []string +} + +type 
EBS struct { + DeleteOnTermination *bool `json:"deleteOnTermination,omitempty"` + Encrypted *bool `json:"encrypted,omitempty"` + KmsKeyId *string `json:"kmsKeyId,omitempty"` + SnapshotID *string `json:"snapshotId,omitempty"` + VolumeType *string `json:"volumeType,omitempty"` + VolumeSize *int `json:"volumeSize,omitempty"` + IOPS *int `json:"iops,omitempty"` + Throughput *int `json:"throughput,omitempty"` + + forceSendFields []string + nullFields []string +} + +type IAMInstanceProfile struct { + Name *string `json:"name,omitempty"` + Arn *string `json:"arn,omitempty"` + + forceSendFields []string + nullFields []string +} + +type CreditSpecification struct { + CPUCredits *string `json:"cpuCredits,omitempty"` + + forceSendFields []string + nullFields []string +} + +type Instance struct { + ID *string `json:"instanceId,omitempty"` + SpotRequestID *string `json:"spotInstanceRequestId,omitempty"` + InstanceType *string `json:"instanceType,omitempty"` + Status *string `json:"status,omitempty"` + Product *string `json:"product,omitempty"` + AvailabilityZone *string `json:"availabilityZone,omitempty"` + PrivateIP *string `json:"privateIp,omitempty"` + PublicIP *string `json:"publicIp,omitempty"` + CreatedAt *time.Time `json:"createdAt,omitempty"` + IPv6Address *string `json:"ipv6Address,omitempty"` +} + +type RollStrategy struct { + Action *string `json:"action,omitempty"` + ShouldDrainInstances *bool `json:"shouldDrainInstances,omitempty"` + BatchMinHealthyPercentage *int `json:"batchMinHealthyPercentage,omitempty"` + OnFailure *OnFailure `json:"onFailure,omitempty"` + + forceSendFields []string + nullFields []string +} + +type OnFailure struct { + ActionType *string `json:"actionType,omitempty"` + ShouldHandleAllBatches *bool `json:"shouldHandleAllBatches,omitempty"` + BatchNum *int `json:"batchNum,omitempty"` + DrainingTimeout *int `json:"drainingTimeout,omitempty"` + ShouldDecrementTargetCapacity *bool `json:"shouldDecrementTargetCapacity,omitempty"` + + forceSendFields 
[]string + nullFields []string +} + +type StatefulDeallocation struct { + ShouldDeleteImages *bool `json:"shouldDeleteImages,omitempty"` + ShouldDeleteNetworkInterfaces *bool `json:"shouldDeleteNetworkInterfaces,omitempty"` + ShouldDeleteVolumes *bool `json:"shouldDeleteVolumes,omitempty"` + ShouldDeleteSnapshots *bool `json:"shouldDeleteSnapshots,omitempty"` +} + +type GetInstanceHealthinessInput struct { + GroupID *string `json:"groupId,omitempty"` +} + +type GetInstanceHealthinessOutput struct { + Instances []*InstanceHealth `json:"instances,omitempty"` +} + +type GetGroupEventsInput struct { + GroupID *string `json:"groupId,omitempty"` + FromDate *string `json:"fromDate,omitempty"` +} + +type GetGroupEventsOutput struct { + GroupEvents []*GroupEvent `json:"groupEvents,omitempty"` +} + +type GroupEvent struct { + GroupID *string `json:"groupId,omitempty"` + EventType *string `json:"eventType,omitempty"` + CreatedAt *string `json:"createdAt,omitempty"` + SubEvents []*SubEvent `json:"subEvents,omitempty"` +} + +type SubEvent struct { + // common fields + Type *string `json:"type,omitempty"` + + // type scaleUp + NewSpots []*Spot `json:"newSpots,omitempty"` + NewInstances []*NewInstance `json:"newInstances,omitempty"` + + // type scaleDown + TerminatedSpots []*Spot `json:"terminatedSpots,omitempty"` + TerminatedInstances []*TerminatedInstance `json:"terminatedInstances,omitempty"` + + // type scaleReason + ScalingPolicyName *string `json:"scalingPolicyName,omitempty"` + Value *int `json:"value,omitempty"` + Unit *string `json:"unit,omitempty"` + Threshold *int `json:"threshold,omitempty"` + + // type detachedInstance + InstanceID *string `json:"instanceId,omitempty"` + + // type unhealthyInstances + InstanceIDs []*string `json:"instanceIds,omitempty"` + + // type rollInfo + ID *string `json:"id,omitempty"` + GroupID *string `json:"groupId,omitempty"` + CurrentBatch *int `json:"currentBatch,omitempty"` + Status *string `json:"status,omitempty"` + CreatedAt *string 
`json:"createdAt,omitempty"` + NumberOfBatches *int `json:"numOfBatches,omitempty"` + GracePeriod *int `json:"gracePeriod,omitempty"` + + // type recoverInstances + OldSpotRequestIDs []*string `json:"oldSpotRequestIDs,omitempty"` + NewSpotRequestIDs []*string `json:"newSpotRequestIDs,omitempty"` + OldInstanceIDs []*string `json:"oldInstanceIDs,omitempty"` + NewInstanceIDs []*string `json:"newInstanceIDs,omitempty"` +} + +type Spot struct { + SpotInstanceRequestID *string `json:"spotInstanceRequestId,omitempty"` +} + +type NewInstance struct { +} + +type TerminatedInstance struct { +} + +type StatefulInstance struct { + StatefulInstanceID *string `json:"id,omitempty"` + InstanceID *string `json:"instanceId,omitempty"` + State *string `json:"state,omitempty"` + PrivateIP *string `json:"privateIp,omitempty"` + ImageID *string `json:"imageId,omitempty"` + Devices []*Device `json:"devices,omitempty"` + CreatedAt *string `json:"createdAt,omitempty"` + LaunchedAt *string `json:"launchedAt,omitempty"` + + forceSendFields []string + nullFields []string +} + +type Device struct { + DeviceName *string `json:"deviceName,omitempty"` + VolumeID *string `json:"volumeId,omitempty"` + SnapshotID *string `json:"snapshotId,omitempty"` + + forceSendFields []string + nullFields []string +} + +type ListGroupsInput struct{} + +type ListGroupsOutput struct { + Groups []*Group `json:"groups,omitempty"` +} + +type CreateGroupInput struct { + Group *Group `json:"group,omitempty"` +} + +type CreateGroupOutput struct { + Group *Group `json:"group,omitempty"` +} + +type ReadGroupInput struct { + GroupID *string `json:"groupId,omitempty"` +} + +type ReadGroupOutput struct { + Group *Group `json:"group,omitempty"` +} + +type UpdateGroupInput struct { + Group *Group `json:"group,omitempty"` + ShouldResumeStateful *bool `json:"-"` + AutoApplyTags *bool `json:"-"` +} + +type UpdateGroupOutput struct { + Group *Group `json:"group,omitempty"` +} + +type DeleteGroupInput struct { + GroupID *string 
`json:"groupId,omitempty"` + StatefulDeallocation *StatefulDeallocation `json:"statefulDeallocation,omitempty"` +} + +type DeleteGroupOutput struct{} + +type StatusGroupInput struct { + GroupID *string `json:"groupId,omitempty"` +} + +type StatusGroupOutput struct { + Instances []*Instance `json:"instances,omitempty"` +} + +type DetachGroupInput struct { + GroupID *string `json:"groupId,omitempty"` + InstanceIDs []string `json:"instancesToDetach,omitempty"` + ShouldDecrementTargetCapacity *bool `json:"shouldDecrementTargetCapacity,omitempty"` + ShouldTerminateInstances *bool `json:"shouldTerminateInstances,omitempty"` + DrainingTimeout *int `json:"drainingTimeout,omitempty"` +} + +type DetachGroupOutput struct{} + +type DeploymentStatusInput struct { + GroupID *string `json:"groupId,omitempty"` + RollID *string `json:"id,omitempty"` +} + +type RollStatusInput struct { + GroupID *string `json:"groupId,omitempty"` + RollID *string `json:"id,omitempty"` +} + +type BGInstance struct { + InstanceID *string `json:"instanceId,omitempty"` + Lifecycle *string `json:"lifeCycle,omitempty"` + BatchNum *int `json:"batchNum,omitempty"` + Status *string `json:"status,omitempty"` +} + +type BGInstances struct { + Blue []*BGInstance `json:"blue,omitempty"` + Green []*BGInstance `json:"green,omitempty"` +} + +type RollStatusOutput struct { + Progress *Progress `json:"progress,omitempty"` + NumberOfBatches *int `json:"numberOfBatches,omitempty"` + CurrentBatch *int `json:"currentBatch,omitempty"` + GracePeriod *int `json:"gracePeriod,omitempty"` + StrategyAction *string `json:"strategyAction,omitempty"` + HealthCheck *string `json:"healthCheck,omitempty"` + Instances []*BGInstances `json:"instances,omitempty"` +} + +type Roll struct { + Status *string `json:"status,omitempty"` +} + +type RollGroupInput struct { + GroupID *string `json:"groupId,omitempty"` + BatchSizePercentage *int `json:"batchSizePercentage,omitempty"` + GracePeriod *int `json:"gracePeriod,omitempty"` + 
HealthCheckType *string `json:"healthCheckType,omitempty"` + Strategy *RollStrategy `json:"strategy,omitempty"` +} + +type RollECSGroupInput struct { + GroupID *string `json:"groupId,omitempty"` + Roll *RollECSWrapper `json:"roll,omitempty"` +} + +type RollECSWrapper struct { + BatchSizePercentage *int `json:"batchSizePercentage,omitempty"` + Comment *string `json:"comment,omitempty"` +} + +type RollGroupOutput struct { + RollGroupStatus []*RollGroupStatus `json:"groupDeploymentStatus,omitempty"` +} + +type RollGroupStatus struct { + RollID *string `json:"id,omitempty"` + RollStatus *string `json:"status,omitempty"` + Progress *Progress `json:"progress,omitempty"` + CreatedAt *string `json:"createdAt,omitempty"` + UpdatedAt *string `json:"updatedAt,omitempty"` +} + +type Progress struct { + Unit *string `json:"unit,omitempty"` + Value *float64 `json:"value,omitempty"` +} + +type StopDeploymentInput struct { + GroupID *string `json:"groupId,omitempty"` + RollID *string `json:"id,omitempty"` + Roll *Roll `json:"roll,omitempty"` +} + +type StopDeploymentOutput struct{} + +type ListStatefulInstancesInput struct { + GroupID *string `json:"groupId,omitempty"` +} + +type ListStatefulInstancesOutput struct { + StatefulInstances []*StatefulInstance `json:"statefulInstances,omitempty"` +} + +type PauseStatefulInstanceInput struct { + GroupID *string `json:"groupId,omitempty"` + StatefulInstanceID *string `json:"statefulInstanceId,omitempty"` +} + +type PauseStatefulInstanceOutput struct{} + +type ResumeStatefulInstanceInput struct { + GroupID *string `json:"groupId,omitempty"` + StatefulInstanceID *string `json:"statefulInstanceId,omitempty"` +} + +type ResumeStatefulInstanceOutput struct{} + +type RecycleStatefulInstanceInput struct { + GroupID *string `json:"groupId,omitempty"` + StatefulInstanceID *string `json:"statefulInstanceId,omitempty"` +} + +type RecycleStatefulInstanceOutput struct{} + +type DeallocateStatefulInstanceInput struct { + GroupID *string 
`json:"groupId,omitempty"`
	StatefulInstanceID *string `json:"statefulInstanceId,omitempty"`
}

type DeallocateStatefulInstanceOutput struct{}

// Logging configures export of the group's logs.
type Logging struct {
	Export *Export `json:"export,omitempty"`

	forceSendFields []string
	nullFields      []string
}

// Export describes a log export destination.
type Export struct {
	S3 *S3 `json:"s3,omitempty"`

	forceSendFields []string
	nullFields      []string
}

// S3 identifies an S3 data-integration target by ID.
type S3 struct {
	Id *string `json:"id,omitempty"`

	forceSendFields []string
	nullFields      []string
}

// deploymentStatusFromJSON decodes a single roll-status record.
func deploymentStatusFromJSON(in []byte) (*RollGroupStatus, error) {
	out := new(RollGroupStatus)
	if err := json.Unmarshal(in, out); err != nil {
		return nil, err
	}
	return out, nil
}

// deploymentStatusesFromJSON decodes the API envelope into a slice of
// roll-status records.
func deploymentStatusesFromJSON(in []byte) ([]*RollGroupStatus, error) {
	var envelope client.Response
	if err := json.Unmarshal(in, &envelope); err != nil {
		return nil, err
	}
	statuses := make([]*RollGroupStatus, len(envelope.Response.Items))
	for i, raw := range envelope.Response.Items {
		status, err := deploymentStatusFromJSON(raw)
		if err != nil {
			return nil, err
		}
		statuses[i] = status
	}
	return statuses, nil
}

func deploymentStatusFromHttpResponse(resp *http.Response) ([]*RollGroupStatus, error) {
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	return deploymentStatusesFromJSON(body)
}

// rollStatusOutputFromJSON decodes a single roll-status output record.
func rollStatusOutputFromJSON(in []byte) (*RollStatusOutput, error) {
	out := new(RollStatusOutput)
	if err := json.Unmarshal(in, out); err != nil {
		return nil, err
	}
	return out, nil
}

// rollStatusFromJSON decodes the first roll-status item from the envelope.
// It returns a nil output (and nil error) when the envelope has no items.
func rollStatusFromJSON(in []byte) (*RollStatusOutput, error) {
	var envelope client.Response
	if err := json.Unmarshal(in, &envelope); err != nil {
		return nil, err
	}
	if len(envelope.Response.Items) == 0 {
		return nil, nil
	}
	// Only 1 roll allowed at a time.
	return rollStatusOutputFromJSON(envelope.Response.Items[0])
}

func rollStatusFromHttpResponse(resp *http.Response) (*RollStatusOutput, error) {
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	return rollStatusFromJSON(body)
}

// groupFromJSON decodes a single group record.
func groupFromJSON(in []byte) (*Group, error) {
	out := new(Group)
	if err := json.Unmarshal(in, out); err != nil {
		return nil, err
	}
	return out, nil
}

// groupsFromJSON decodes the API envelope into a slice of groups.
func groupsFromJSON(in []byte) ([]*Group, error) {
	var envelope client.Response
	if err := json.Unmarshal(in, &envelope); err != nil {
		return nil, err
	}
	groups := make([]*Group, len(envelope.Response.Items))
	for i, raw := range envelope.Response.Items {
		g, err := groupFromJSON(raw)
		if err != nil {
			return nil, err
		}
		groups[i] = g
	}
	return groups, nil
}

func groupsFromHttpResponse(resp *http.Response) ([]*Group, error) {
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	return groupsFromJSON(body)
}

// instanceFromJSON decodes a single instance record.
func instanceFromJSON(in []byte) (*Instance, error) {
	out := new(Instance)
	if err := json.Unmarshal(in, out); err != nil {
		return nil, err
	}
	return out, nil
}

// instancesFromJSON decodes the API envelope into a slice of instances.
func instancesFromJSON(in []byte) ([]*Instance, error) {
	var envelope client.Response
	if err := json.Unmarshal(in, &envelope); err != nil {
		return nil, err
	}
	instances := make([]*Instance, len(envelope.Response.Items))
	for i, raw := range envelope.Response.Items {
		inst, err := instanceFromJSON(raw)
		if err != nil {
			return nil, err
		}
		instances[i] = inst
	}
	return instances, nil
}

func instancesFromHttpResponse(resp *http.Response) ([]*Instance, error) {
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	return instancesFromJSON(body)
}

// instanceHealthFromJSON decodes a single instance-health record.
func instanceHealthFromJSON(in []byte) (*InstanceHealth, error) {
	out := new(InstanceHealth)
	if err := json.Unmarshal(in, out); err != nil {
		return nil, err
	}
	return out, nil
}

// listOfInstanceHealthFromJSON decodes the API envelope into a slice of
// instance-health records.
func listOfInstanceHealthFromJSON(in []byte) ([]*InstanceHealth, error) {
	var envelope client.Response
	if err := json.Unmarshal(in, &envelope); err != nil {
		return nil, err
	}
	health := make([]*InstanceHealth, len(envelope.Response.Items))
	for i, raw := range envelope.Response.Items {
		h, err := instanceHealthFromJSON(raw)
		if err != nil {
			return nil, err
		}
		health[i] = h
	}
	return health, nil
}

func listOfInstanceHealthFromHttp(resp *http.Response) ([]*InstanceHealth, error) {
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	return listOfInstanceHealthFromJSON(body)
}

// groupEventFromJSON decodes a single group-event record.
func groupEventFromJSON(in []byte) (*GroupEvent, error) {
	out := new(GroupEvent)
	if err := json.Unmarshal(in, out); err != nil {
		return nil, err
	}
	return out, nil
}

// groupEventsFromJSON decodes the API envelope into a slice of group events.
func groupEventsFromJSON(in []byte) ([]*GroupEvent, error) {
	var envelope client.Response
	if err := json.Unmarshal(in, &envelope); err != nil {
		return nil, err
	}
	events := make([]*GroupEvent, len(envelope.Response.Items))
	for i, raw := range envelope.Response.Items {
		ev, err := groupEventFromJSON(raw)
		if err != nil {
			return nil, err
		}
		events[i] = ev
	}
	return events, nil
}

func groupEventsFromHttpResponse(resp *http.Response) ([]*GroupEvent, error) {
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	return groupEventsFromJSON(body)
}

// StatefulInstanceFromJSON decodes a single stateful-instance record.
func StatefulInstanceFromJSON(in []byte) (*StatefulInstance, error) {
	out := new(StatefulInstance)
	if err := json.Unmarshal(in, out); err != nil {
		return nil, err
	}
	return out, nil
}

// statefulInstancesFromJSON decodes the API envelope into a slice of
// stateful instances.
func statefulInstancesFromJSON(in []byte) ([]*StatefulInstance, error) {
	var envelope client.Response
	if err := json.Unmarshal(in, &envelope); err != nil {
		return nil, err
	}
	instances := make([]*StatefulInstance, len(envelope.Response.Items))
	for i, raw := range envelope.Response.Items {
		si, err := StatefulInstanceFromJSON(raw)
		if err != nil {
			return nil, err
		}
		instances[i] = si
	}
	return instances, nil
}

func statefulInstancesFromHttpResponse(resp *http.Response) ([]*StatefulInstance, error) {
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	return statefulInstancesFromJSON(body)
}

// List returns all groups in the account. The input is currently unused by
// this endpoint.
func (s *ServiceOp) List(ctx context.Context, input *ListGroupsInput) (*ListGroupsOutput, error) {
	req := client.NewRequest(http.MethodGet, "/aws/ec2/group")
	resp, err := client.RequireOK(s.Client.Do(ctx, req))
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	groups, err := groupsFromHttpResponse(resp)
	if err != nil {
		return nil, err
	}
	return &ListGroupsOutput{Groups: groups}, nil
}

// Create provisions a new group from the given configuration and returns the
// created group as echoed back by the API.
func (s *ServiceOp) Create(ctx context.Context, input *CreateGroupInput) (*CreateGroupOutput, error) {
	req := client.NewRequest(http.MethodPost, "/aws/ec2/group")
	req.Obj = input

	resp, err := client.RequireOK(s.Client.Do(ctx, req))
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	groups, err := groupsFromHttpResponse(resp)
	if err != nil {
		return nil, err
	}

	output := new(CreateGroupOutput)
	if len(groups) > 0 {
		output.Group = groups[0]
	}
	return output, nil
}

// Read fetches a single group by its ID.
func (s *ServiceOp) Read(ctx context.Context, input *ReadGroupInput) (*ReadGroupOutput, error) {
	path, err := uritemplates.Expand("/aws/ec2/group/{groupId}", uritemplates.Values{
		"groupId": spotinst.StringValue(input.GroupID),
	})
	if err != nil {
		return nil, err
	}

	req := client.NewRequest(http.MethodGet, path)
	resp, err := client.RequireOK(s.Client.Do(ctx, req))
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	groups, err := groupsFromHttpResponse(resp)
	if err != nil {
		return nil, err
	}

	output := new(ReadGroupOutput)
	if len(groups) > 0 {
		output.Group = groups[0]
	}
	return output, nil
}

// Update modifies an existing group. The group ID travels in the URL path,
// not in the request body.
func (s *ServiceOp) Update(ctx context.Context, input *UpdateGroupInput) (*UpdateGroupOutput, error) {
	path, err := uritemplates.Expand("/aws/ec2/group/{groupId}", uritemplates.Values{
		"groupId": spotinst.StringValue(input.Group.ID),
	})
	if err != nil {
		return nil, err
	}

	// We do NOT need the ID anymore, so let's drop it.
+ input.Group.ID = nil + + r := client.NewRequest(http.MethodPut, path) + r.Obj = input + + if input.ShouldResumeStateful != nil { + r.Params.Set("shouldResumeStateful", + strconv.FormatBool(spotinst.BoolValue(input.ShouldResumeStateful))) + } + + if input.AutoApplyTags != nil { + r.Params.Set("autoApplyTags", + strconv.FormatBool(spotinst.BoolValue(input.AutoApplyTags))) + } + + resp, err := client.RequireOK(s.Client.Do(ctx, r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + gs, err := groupsFromHttpResponse(resp) + if err != nil { + return nil, err + } + + output := new(UpdateGroupOutput) + if len(gs) > 0 { + output.Group = gs[0] + } + + return output, nil +} + +func (s *ServiceOp) Delete(ctx context.Context, input *DeleteGroupInput) (*DeleteGroupOutput, error) { + path, err := uritemplates.Expand("/aws/ec2/group/{groupId}", uritemplates.Values{ + "groupId": spotinst.StringValue(input.GroupID), + }) + if err != nil { + return nil, err + } + + r := client.NewRequest(http.MethodDelete, path) + + if input.StatefulDeallocation != nil { + r.Obj = &DeleteGroupInput{ + StatefulDeallocation: input.StatefulDeallocation, + } + } + + resp, err := client.RequireOK(s.Client.Do(ctx, r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return &DeleteGroupOutput{}, nil +} + +func (s *ServiceOp) Status(ctx context.Context, input *StatusGroupInput) (*StatusGroupOutput, error) { + path, err := uritemplates.Expand("/aws/ec2/group/{groupId}/status", uritemplates.Values{ + "groupId": spotinst.StringValue(input.GroupID), + }) + if err != nil { + return nil, err + } + + r := client.NewRequest(http.MethodGet, path) + resp, err := client.RequireOK(s.Client.Do(ctx, r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + is, err := instancesFromHttpResponse(resp) + if err != nil { + return nil, err + } + + return &StatusGroupOutput{Instances: is}, nil +} + +func (s *ServiceOp) DeploymentStatus(ctx context.Context, input 
*DeploymentStatusInput) (*RollGroupOutput, error) { + path, err := uritemplates.Expand("/aws/ec2/group/{groupId}/roll/{rollId}", uritemplates.Values{ + "groupId": spotinst.StringValue(input.GroupID), + "rollId": spotinst.StringValue(input.RollID), + }) + if err != nil { + return nil, err + } + + r := client.NewRequest(http.MethodGet, path) + + resp, err := client.RequireOK(s.Client.Do(ctx, r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + deployments, err := deploymentStatusFromHttpResponse(resp) + if err != nil { + return nil, err + } + + return &RollGroupOutput{deployments}, nil +} + +func (s *ServiceOp) DeploymentStatusECS(ctx context.Context, input *DeploymentStatusInput) (*RollGroupOutput, error) { + path, err := uritemplates.Expand("/aws/ec2/group/{groupId}/clusterRoll/{rollId}", uritemplates.Values{ + "groupId": spotinst.StringValue(input.GroupID), + "rollId": spotinst.StringValue(input.RollID), + }) + if err != nil { + return nil, err + } + + r := client.NewRequest(http.MethodGet, path) + + resp, err := client.RequireOK(s.Client.Do(ctx, r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + deployments, err := deploymentStatusFromHttpResponse(resp) + if err != nil { + return nil, err + } + + return &RollGroupOutput{deployments}, nil +} + +func (s *ServiceOp) StopDeployment(ctx context.Context, input *StopDeploymentInput) (*StopDeploymentOutput, error) { + path, err := uritemplates.Expand("/aws/ec2/group/{groupId}/roll/{rollId}", uritemplates.Values{ + "groupId": spotinst.StringValue(input.GroupID), + "rollId": spotinst.StringValue(input.RollID), + }) + + if err != nil { + return nil, err + } + + input.GroupID = nil + input.RollID = nil + + r := client.NewRequest(http.MethodPut, path) + input.Roll = &Roll{ + Status: spotinst.String("STOPPED"), + } + r.Obj = input + + resp, err := client.RequireOK(s.Client.Do(ctx, r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return 
&StopDeploymentOutput{}, nil +} + +func (s *ServiceOp) Detach(ctx context.Context, input *DetachGroupInput) (*DetachGroupOutput, error) { + path, err := uritemplates.Expand("/aws/ec2/group/{groupId}/detachInstances", uritemplates.Values{ + "groupId": spotinst.StringValue(input.GroupID), + }) + if err != nil { + return nil, err + } + + // We do not need the ID anymore so let's drop it. + input.GroupID = nil + + r := client.NewRequest(http.MethodPut, path) + r.Obj = input + + resp, err := client.RequireOK(s.Client.Do(ctx, r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return &DetachGroupOutput{}, nil +} + +func (s *ServiceOp) Roll(ctx context.Context, input *RollGroupInput) (*RollGroupOutput, error) { + path, err := uritemplates.Expand("/aws/ec2/group/{groupId}/roll", uritemplates.Values{ + "groupId": spotinst.StringValue(input.GroupID), + }) + if err != nil { + return nil, err + } + + // We do not need the ID anymore so let's drop it. + input.GroupID = nil + + r := client.NewRequest(http.MethodPut, path) + r.Obj = input + + resp, err := client.RequireOK(s.Client.Do(ctx, r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + deployments, err := deploymentStatusFromHttpResponse(resp) + if err != nil { + return nil, err + } + + return &RollGroupOutput{deployments}, nil +} + +func (s *ServiceOp) RollStatus(ctx context.Context, input *RollStatusInput) (*RollStatusOutput, error) { + path, err := uritemplates.Expand("/aws/ec2/group/{groupId}/roll/{rollId}/status", uritemplates.Values{ + "groupId": spotinst.StringValue(input.GroupID), + "rollId": spotinst.StringValue(input.RollID), + }) + if err != nil { + return nil, err + } + + r := client.NewRequest(http.MethodGet, path) + + resp, err := client.RequireOK(s.Client.Do(ctx, r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + status, err := rollStatusFromHttpResponse(resp) + if err != nil { + return nil, err + } + + return status, nil +} + +func (s 
*ServiceOp) RollECS(ctx context.Context, input *RollECSGroupInput) (*RollGroupOutput, error) { + path, err := uritemplates.Expand("/aws/ec2/group/{groupId}/clusterRoll", uritemplates.Values{ + "groupId": spotinst.StringValue(input.GroupID), + }) + if err != nil { + return nil, err + } + + // We do not need the ID anymore so let's drop it. + input.GroupID = nil + + r := client.NewRequest(http.MethodPost, path) + r.Obj = input + + resp, err := client.RequireOK(s.Client.Do(ctx, r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + deployments, err := deploymentStatusFromHttpResponse(resp) + if err != nil { + return nil, err + } + + return &RollGroupOutput{deployments}, nil +} + +func (s *ServiceOp) GetInstanceHealthiness(ctx context.Context, input *GetInstanceHealthinessInput) (*GetInstanceHealthinessOutput, error) { + path, err := uritemplates.Expand("/aws/ec2/group/{groupId}/instanceHealthiness", uritemplates.Values{ + "groupId": spotinst.StringValue(input.GroupID), + }) + + if err != nil { + return nil, err + } + + r := client.NewRequest(http.MethodGet, path) + resp, err := client.RequireOK(s.Client.Do(ctx, r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + instances, err := listOfInstanceHealthFromHttp(resp) + if err != nil { + return nil, err + } + + return &GetInstanceHealthinessOutput{Instances: instances}, nil +} + +func (s *ServiceOp) GetGroupEvents(ctx context.Context, input *GetGroupEventsInput) (*GetGroupEventsOutput, error) { + path, err := uritemplates.Expand("/aws/ec2/group/{groupId}/events", uritemplates.Values{ + "groupId": spotinst.StringValue(input.GroupID), + }) + if err != nil { + return nil, err + } + + r := client.NewRequest(http.MethodGet, path) + if input.FromDate != nil { + r.Params.Set("fromDate", *input.FromDate) + } + r.Obj = input + + resp, err := client.RequireOK(s.Client.Do(ctx, r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + events, err := 
groupEventsFromHttpResponse(resp) + if err != nil { + return nil, err + } + return &GetGroupEventsOutput{GroupEvents: events}, nil +} + +func (s *ServiceOp) ListStatefulInstances(ctx context.Context, input *ListStatefulInstancesInput) (*ListStatefulInstancesOutput, error) { + path, err := uritemplates.Expand("/aws/ec2/group/{groupId}/statefulInstance", uritemplates.Values{ + "groupId": spotinst.StringValue(input.GroupID), + }) + if err != nil { + return nil, err + } + + // We do not need the group ID anymore so let's drop it. + input.GroupID = nil + + r := client.NewRequest(http.MethodGet, path) + resp, err := client.RequireOK(s.Client.Do(ctx, r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + statefulInstances, err := statefulInstancesFromHttpResponse(resp) + if err != nil { + return nil, err + } + + return &ListStatefulInstancesOutput{StatefulInstances: statefulInstances}, nil +} + +func (s *ServiceOp) PauseStatefulInstance(ctx context.Context, input *PauseStatefulInstanceInput) (*PauseStatefulInstanceOutput, error) { + path, err := uritemplates.Expand("/aws/ec2/group/{groupId}/statefulInstance/{statefulInstanceId}/pause", uritemplates.Values{ + "groupId": spotinst.StringValue(input.GroupID), + "statefulInstanceId": spotinst.StringValue(input.StatefulInstanceID), + }) + if err != nil { + return nil, err + } + + r := client.NewRequest(http.MethodPut, path) + resp, err := client.RequireOK(s.Client.Do(ctx, r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return &PauseStatefulInstanceOutput{}, nil +} + +func (s *ServiceOp) ResumeStatefulInstance(ctx context.Context, input *ResumeStatefulInstanceInput) (*ResumeStatefulInstanceOutput, error) { + path, err := uritemplates.Expand("/aws/ec2/group/{groupId}/statefulInstance/{statefulInstanceId}/resume", uritemplates.Values{ + "groupId": spotinst.StringValue(input.GroupID), + "statefulInstanceId": spotinst.StringValue(input.StatefulInstanceID), + }) + if err != nil { + return 
nil, err + } + + r := client.NewRequest(http.MethodPut, path) + resp, err := client.RequireOK(s.Client.Do(ctx, r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return &ResumeStatefulInstanceOutput{}, nil +} + +func (s *ServiceOp) RecycleStatefulInstance(ctx context.Context, input *RecycleStatefulInstanceInput) (*RecycleStatefulInstanceOutput, error) { + path, err := uritemplates.Expand("/aws/ec2/group/{groupId}/statefulInstance/{statefulInstanceId}/recycle", uritemplates.Values{ + "groupId": spotinst.StringValue(input.GroupID), + "statefulInstanceId": spotinst.StringValue(input.StatefulInstanceID), + }) + if err != nil { + return nil, err + } + + r := client.NewRequest(http.MethodPut, path) + resp, err := client.RequireOK(s.Client.Do(ctx, r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return &RecycleStatefulInstanceOutput{}, nil +} + +func (s *ServiceOp) DeallocateStatefulInstance(ctx context.Context, input *DeallocateStatefulInstanceInput) (*DeallocateStatefulInstanceOutput, error) { + path, err := uritemplates.Expand("/aws/ec2/group/{groupId}/statefulInstance/{statefulInstanceId}/deallocate", uritemplates.Values{ + "groupId": spotinst.StringValue(input.GroupID), + "statefulInstanceId": spotinst.StringValue(input.StatefulInstanceID), + }) + if err != nil { + return nil, err + } + + r := client.NewRequest(http.MethodPut, path) + resp, err := client.RequireOK(s.Client.Do(ctx, r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return &DeallocateStatefulInstanceOutput{}, nil +} + +// region Elastic Beanstalk + +type ImportBeanstalkInput struct { + EnvironmentId *string `json:"environmentId,omitempty"` + EnvironmentName *string `json:"environmentName,omitempty"` + Region *string `json:"region,omitempty"` +} + +type ImportBeanstalkOutput struct { + Group *Group `json:"group,omitempty"` +} + +type BeanstalkMaintenanceInput struct { + GroupID *string `json:"groupId,omitempty"` +} + +type 
BeanstalkMaintenanceItem struct { + Status *string `json:"status,omitempty"` +} + +type BeanstalkMaintenanceOutput struct { + Items []*BeanstalkMaintenanceItem `json:"items,omitempty"` + Status *string `json:"status,omitempty"` +} + +func beanstalkMaintResponseFromJSON(in []byte) (*BeanstalkMaintenanceOutput, error) { + var rw client.Response + if err := json.Unmarshal(in, &rw); err != nil { + return nil, err + } + + var retVal BeanstalkMaintenanceOutput + retVal.Items = make([]*BeanstalkMaintenanceItem, len(rw.Response.Items)) + for i, rb := range rw.Response.Items { + b, err := beanstalkMaintItemFromJSON(rb) + if err != nil { + return nil, err + } + retVal.Items[i] = b + retVal.Status = b.Status + } + return &retVal, nil +} + +func beanstalkMaintItemFromJSON(in []byte) (*BeanstalkMaintenanceItem, error) { + var rw *BeanstalkMaintenanceItem + if err := json.Unmarshal(in, &rw); err != nil { + return nil, err + } + return rw, nil +} + +func beanstalkMaintFromHttpResponse(resp *http.Response) (*BeanstalkMaintenanceOutput, error) { + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + return beanstalkMaintResponseFromJSON(body) +} + +func (s *ServiceOp) ImportBeanstalkEnv(ctx context.Context, input *ImportBeanstalkInput) (*ImportBeanstalkOutput, error) { + path := "/aws/ec2/group/beanstalk/import" + r := client.NewRequest(http.MethodGet, path) + + if input.EnvironmentId != nil { + r.Params["environmentId"] = []string{spotinst.StringValue(input.EnvironmentId)} + } else if input.EnvironmentName != nil { + r.Params["environmentName"] = []string{spotinst.StringValue(input.EnvironmentName)} + } + + r.Params["region"] = []string{spotinst.StringValue(input.Region)} + + resp, err := client.RequireOK(s.Client.Do(ctx, r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + gs, err := groupsFromHttpResponse(resp) + if err != nil { + return nil, err + } + + output := new(ImportBeanstalkOutput) + if len(gs) > 0 { + output.Group = 
gs[0] + } + + return output, nil +} + +func (s *ServiceOp) StartBeanstalkMaintenance(ctx context.Context, input *BeanstalkMaintenanceInput) (*BeanstalkMaintenanceOutput, error) { + path, err := uritemplates.Expand("/aws/ec2/group/{groupID}/beanstalk/maintenance/start", uritemplates.Values{ + "groupID": spotinst.StringValue(input.GroupID), + }) + + if err != nil { + return nil, err + } + + r := client.NewRequest(http.MethodPut, path) + resp, err := client.RequireOK(s.Client.Do(ctx, r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return &BeanstalkMaintenanceOutput{}, nil +} + +func (s *ServiceOp) GetBeanstalkMaintenanceStatus(ctx context.Context, input *BeanstalkMaintenanceInput) (*string, error) { + path, err := uritemplates.Expand("/aws/ec2/group/{groupID}/beanstalk/maintenance/status", uritemplates.Values{ + "groupID": spotinst.StringValue(input.GroupID), + }) + + if err != nil { + return nil, err + } + + r := client.NewRequest(http.MethodGet, path) + resp, err := client.RequireOK(s.Client.Do(ctx, r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + output, err := beanstalkMaintFromHttpResponse(resp) + if err != nil { + return nil, err + } + + return output.Status, nil +} + +func (s *ServiceOp) FinishBeanstalkMaintenance(ctx context.Context, input *BeanstalkMaintenanceInput) (*BeanstalkMaintenanceOutput, error) { + path, err := uritemplates.Expand("/aws/ec2/group/{groupID}/beanstalk/maintenance/finish", uritemplates.Values{ + "groupID": spotinst.StringValue(input.GroupID), + }) + + if err != nil { + return nil, err + } + + r := client.NewRequest(http.MethodPut, path) + resp, err := client.RequireOK(s.Client.Do(ctx, r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return &BeanstalkMaintenanceOutput{}, nil +} + +// endregion + +// region Group + +func (o Group) MarshalJSON() ([]byte, error) { + type noMethod Group + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, 
o.nullFields)
}

// SetId sets the group ID; a nil value marks the field for null-serialization.
func (o *Group) SetId(v *string) *Group {
	o.ID = v
	if v == nil {
		o.nullFields = append(o.nullFields, "ID")
	}
	return o
}

func (o *Group) SetName(v *string) *Group {
	o.Name = v
	if v == nil {
		o.nullFields = append(o.nullFields, "Name")
	}
	return o
}

func (o *Group) SetDescription(v *string) *Group {
	o.Description = v
	if v == nil {
		o.nullFields = append(o.nullFields, "Description")
	}
	return o
}

func (o *Group) SetCapacity(v *Capacity) *Group {
	o.Capacity = v
	if v == nil {
		o.nullFields = append(o.nullFields, "Capacity")
	}
	return o
}

func (o *Group) SetCompute(v *Compute) *Group {
	o.Compute = v
	if v == nil {
		o.nullFields = append(o.nullFields, "Compute")
	}
	return o
}

func (o *Group) SetStrategy(v *Strategy) *Group {
	o.Strategy = v
	if v == nil {
		o.nullFields = append(o.nullFields, "Strategy")
	}
	return o
}

func (o *Group) SetScaling(v *Scaling) *Group {
	o.Scaling = v
	if v == nil {
		o.nullFields = append(o.nullFields, "Scaling")
	}
	return o
}

func (o *Group) SetScheduling(v *Scheduling) *Group {
	o.Scheduling = v
	if v == nil {
		o.nullFields = append(o.nullFields, "Scheduling")
	}
	return o
}

func (o *Group) SetIntegration(v *Integration) *Group {
	o.Integration = v
	if v == nil {
		o.nullFields = append(o.nullFields, "Integration")
	}
	return o
}

func (o *Group) SetRegion(v *string) *Group {
	o.Region = v
	if v == nil {
		o.nullFields = append(o.nullFields, "Region")
	}
	return o
}

func (o *Group) SetLogging(v *Logging) *Group {
	o.Logging = v
	if v == nil {
		o.nullFields = append(o.nullFields, "Logging")
	}
	return o
}

// endregion

// region Integration

func (o Integration) MarshalJSON() ([]byte, error) {
	type noMethod Integration
	return jsonutil.MarshalJSON(noMethod(o), o.forceSendFields, o.nullFields)
}

func (o *Integration) SetRoute53(v *Route53Integration) *Integration {
	o.Route53 = v
	if v == nil {
		o.nullFields = append(o.nullFields, "Route53")
	}
	return o
}

func (o *Integration) SetDockerSwarm(v *DockerSwarmIntegration) *Integration {
	o.DockerSwarm = v
	if v == nil {
		o.nullFields = append(o.nullFields, "DockerSwarm")
	}
	return o
}

func (o *Integration) SetEC2ContainerService(v *EC2ContainerServiceIntegration) *Integration {
	o.EC2ContainerService = v
	if v == nil {
		o.nullFields = append(o.nullFields, "EC2ContainerService")
	}
	return o
}

func (o *Integration) SetElasticBeanstalk(v *ElasticBeanstalkIntegration) *Integration {
	o.ElasticBeanstalk = v
	if v == nil {
		o.nullFields = append(o.nullFields, "ElasticBeanstalk")
	}
	return o
}

func (o *Integration) SetCodeDeploy(v *CodeDeployIntegration) *Integration {
	o.CodeDeploy = v
	if v == nil {
		o.nullFields = append(o.nullFields, "CodeDeploy")
	}
	return o
}

func (o *Integration) SetOpsWorks(v *OpsWorksIntegration) *Integration {
	o.OpsWorks = v
	if v == nil {
		o.nullFields = append(o.nullFields, "OpsWorks")
	}
	return o
}

func (o *Integration) SetRancher(v *RancherIntegration) *Integration {
	o.Rancher = v
	if v == nil {
		o.nullFields = append(o.nullFields, "Rancher")
	}
	return o
}

func (o *Integration) SetKubernetes(v *KubernetesIntegration) *Integration {
	o.Kubernetes = v
	if v == nil {
		o.nullFields = append(o.nullFields, "Kubernetes")
	}
	return o
}

func (o *Integration) SetMesosphere(v *MesosphereIntegration) *Integration {
	o.Mesosphere = v
	if v == nil {
		o.nullFields = append(o.nullFields, "Mesosphere")
	}
	return o
}

func (o *Integration) SetNomad(v *NomadIntegration) *Integration {
	o.Nomad = v
	if v == nil {
		o.nullFields = append(o.nullFields, "Nomad")
	}
	return o
}

func (o *Integration) SetChef(v *ChefIntegration) *Integration {
	o.Chef = v
	if v == nil {
		o.nullFields = append(o.nullFields, "Chef")
	}
	return o
}

func (o *Integration) SetGitlab(v *GitlabIntegration) *Integration {
	o.Gitlab = v
	if v == nil {
		o.nullFields = append(o.nullFields, "Gitlab")
	}
	return o
}

// endregion

// region RancherIntegration

func (o RancherIntegration) MarshalJSON() ([]byte, error) {
	type noMethod RancherIntegration
	return jsonutil.MarshalJSON(noMethod(o), o.forceSendFields, o.nullFields)
}

func (o *RancherIntegration) SetMasterHost(v *string) *RancherIntegration {
	o.MasterHost = v
	if v == nil {
		o.nullFields = append(o.nullFields, "MasterHost")
	}
	return o
}

func (o *RancherIntegration) SetAccessKey(v *string) *RancherIntegration {
	o.AccessKey = v
	if v == nil {
		o.nullFields = append(o.nullFields, "AccessKey")
	}
	return o
}

func (o *RancherIntegration) SetSecretKey(v *string) *RancherIntegration {
	o.SecretKey = v
	if v == nil {
		o.nullFields = append(o.nullFields, "SecretKey")
	}
	return o
}

func (o *RancherIntegration) SetVersion(v *string) *RancherIntegration {
	o.Version = v
	if v == nil {
		o.nullFields = append(o.nullFields, "Version")
	}
	return o
}

// endregion

// region ElasticBeanstalkIntegration

func (o ElasticBeanstalkIntegration) MarshalJSON() ([]byte, error) {
	type noMethod ElasticBeanstalkIntegration
	return jsonutil.MarshalJSON(noMethod(o), o.forceSendFields, o.nullFields)
}

func (o *ElasticBeanstalkIntegration) SetEnvironmentID(v *string) *ElasticBeanstalkIntegration {
	o.EnvironmentID = v
	if v == nil {
		o.nullFields = append(o.nullFields, "EnvironmentID")
	}
	return o
}

func (o *ElasticBeanstalkIntegration) SetManagedActions(v *BeanstalkManagedActions) *ElasticBeanstalkIntegration {
	o.ManagedActions = v
	if v == nil {
		o.nullFields = append(o.nullFields, "ManagedActions")
	}
	return o
}

func (o *ElasticBeanstalkIntegration) SetDeploymentPreferences(v *BeanstalkDeploymentPreferences) *ElasticBeanstalkIntegration {
	o.DeploymentPreferences = v
	if v == nil {
		o.nullFields = append(o.nullFields, "DeploymentPreferences")
	}
	return o
}

// endregion

// region BeanstalkManagedActions

func (o BeanstalkManagedActions) MarshalJSON() ([]byte, error) {
	type noMethod BeanstalkManagedActions
	return jsonutil.MarshalJSON(noMethod(o), o.forceSendFields, o.nullFields)
}

func (o *BeanstalkManagedActions) SetPlatformUpdate(v *BeanstalkPlatformUpdate) *BeanstalkManagedActions {
	o.PlatformUpdate = v
	if v == nil {
		o.nullFields = append(o.nullFields, "PlatformUpdate")
	}
	return o
}

// endregion

// region BeanstalkPlatformUpdate

func (o BeanstalkPlatformUpdate) MarshalJSON() ([]byte, error) {
	type noMethod BeanstalkPlatformUpdate
	return jsonutil.MarshalJSON(noMethod(o), o.forceSendFields, o.nullFields)
}

func (o *BeanstalkPlatformUpdate) SetPerformAt(v *string) *BeanstalkPlatformUpdate {
	o.PerformAt = v
	if v == nil {
		o.nullFields = append(o.nullFields, "PerformAt")
	}
	return o
}

func (o *BeanstalkPlatformUpdate) SetTimeWindow(v *string) *BeanstalkPlatformUpdate {
	o.TimeWindow = v
	if v == nil {
		o.nullFields = append(o.nullFields, "TimeWindow")
	}
	return o
}

func (o *BeanstalkPlatformUpdate) SetUpdateLevel(v *string) *BeanstalkPlatformUpdate {
	o.UpdateLevel = v
	if v == nil {
		o.nullFields = append(o.nullFields, "UpdateLevel")
	}
	return o
}

// endregion

// region BeanstalkDeploymentPreferences

func (o BeanstalkDeploymentPreferences) MarshalJSON() ([]byte, error) {
	type noMethod BeanstalkDeploymentPreferences
	return jsonutil.MarshalJSON(noMethod(o), o.forceSendFields, o.nullFields)
}

func (o *BeanstalkDeploymentPreferences) SetAutomaticRoll(v *bool) *BeanstalkDeploymentPreferences {
	o.AutomaticRoll = v
	if v == nil {
		o.nullFields = append(o.nullFields, "AutomaticRoll")
	}
	return o
}

func (o *BeanstalkDeploymentPreferences) SetBatchSizePercentage(v *int) *BeanstalkDeploymentPreferences {
	o.BatchSizePercentage = v
	if v == nil {
		o.nullFields = append(o.nullFields, "BatchSizePercentage")
	}
	return o
}

func (o *BeanstalkDeploymentPreferences) SetGracePeriod(v *int) *BeanstalkDeploymentPreferences {
	o.GracePeriod = v
	if v == nil {
		o.nullFields = append(o.nullFields, "GracePeriod")
	}
	return o
}

func (o *BeanstalkDeploymentPreferences) SetStrategy(v *BeanstalkDeploymentStrategy) *BeanstalkDeploymentPreferences {
	o.Strategy = v
	if v == nil {
		o.nullFields = append(o.nullFields, "Strategy")
	}
	return o
}

// endregion

// region BeanstalkDeploymentStrategy

func (o BeanstalkDeploymentStrategy) MarshalJSON() ([]byte, error) {
	type noMethod BeanstalkDeploymentStrategy
	return jsonutil.MarshalJSON(noMethod(o), o.forceSendFields, o.nullFields)
}

func (o *BeanstalkDeploymentStrategy) SetAction(v *string) *BeanstalkDeploymentStrategy {
	o.Action = v
	if v == nil {
		o.nullFields = append(o.nullFields, "Action")
	}
	return o
}

func (o *BeanstalkDeploymentStrategy) SetShouldDrainInstances(v *bool) *BeanstalkDeploymentStrategy {
	o.ShouldDrainInstances = v
	if v == nil {
		o.nullFields = append(o.nullFields, "ShouldDrainInstances")
	}
	return o
}

// endregion

// region EC2ContainerServiceIntegration

func (o EC2ContainerServiceIntegration) MarshalJSON() ([]byte, error) {
	type noMethod EC2ContainerServiceIntegration
	return jsonutil.MarshalJSON(noMethod(o), o.forceSendFields, o.nullFields)
}

func (o *EC2ContainerServiceIntegration) SetClusterName(v *string) *EC2ContainerServiceIntegration {
	o.ClusterName = v
	if v == nil {
		o.nullFields = append(o.nullFields, "ClusterName")
	}
	return o
}

func (o *EC2ContainerServiceIntegration) SetAutoScale(v *AutoScaleECS) *EC2ContainerServiceIntegration {
	o.AutoScale = v
	if v == nil {
		o.nullFields = append(o.nullFields, "AutoScale")
	}
	return o
}

func (o *EC2ContainerServiceIntegration) SetBatch(v *Batch) *EC2ContainerServiceIntegration {
	o.Batch = v
	if v == nil {
		o.nullFields = append(o.nullFields, "Batch")
	}
	return o
}

// endregion

// region AutoScaleECS

func (o AutoScaleECS) MarshalJSON() ([]byte, error) {
	type noMethod AutoScaleECS
	return jsonutil.MarshalJSON(noMethod(o), o.forceSendFields, o.nullFields)
}

func (o *AutoScaleECS) SetAttributes(v []*AutoScaleAttributes) *AutoScaleECS {
	o.Attributes = v
	if v == nil {
		o.nullFields = append(o.nullFields, "Attributes")
	}
	return o
}

func (o *AutoScaleECS) SetShouldScaleDownNonServiceTasks(v *bool) *AutoScaleECS {
	o.ShouldScaleDownNonServiceTasks = v
	if v == nil {
		o.nullFields = append(o.nullFields, "ShouldScaleDownNonServiceTasks")
	}
	return o
}

// endregion

// region Batch

func (o Batch) MarshalJSON() ([]byte, error) {
	type noMethod Batch
	return jsonutil.MarshalJSON(noMethod(o), o.forceSendFields, o.nullFields)
}

func (o *Batch) SetJobQueueNames(v []string) *Batch {
	o.JobQueueNames = v
	if v == nil {
		o.nullFields = append(o.nullFields, "JobQueueNames")
	}
	return o
}

// endregion

// region DockerSwarmIntegration

func (o DockerSwarmIntegration) MarshalJSON() ([]byte, error) {
	type noMethod DockerSwarmIntegration
	return jsonutil.MarshalJSON(noMethod(o), o.forceSendFields, o.nullFields)
}

func (o *DockerSwarmIntegration) SetMasterHost(v *string) *DockerSwarmIntegration {
	o.MasterHost = v
	if v == nil {
		o.nullFields = append(o.nullFields, "MasterHost")
	}
	return o
}

func (o *DockerSwarmIntegration) SetMasterPort(v *int) *DockerSwarmIntegration {
	o.MasterPort = v
	if v == nil {
		o.nullFields = append(o.nullFields, "MasterPort")
	}
	return o
}

func (o *DockerSwarmIntegration) SetAutoScale(v *AutoScaleDockerSwarm) *DockerSwarmIntegration {
	o.AutoScale = v
	if v == nil {
		o.nullFields = append(o.nullFields, "AutoScale")
	}
	return o
}

func (o AutoScaleDockerSwarm) MarshalJSON() ([]byte, error) {
	type noMethod AutoScaleDockerSwarm
	return jsonutil.MarshalJSON(noMethod(o), o.forceSendFields, o.nullFields)
}

// endregion

// region Route53

func (o Route53Integration) MarshalJSON() ([]byte, error) {
	type noMethod Route53Integration
	return jsonutil.MarshalJSON(noMethod(o), o.forceSendFields, o.nullFields)
}

func (o *Route53Integration) SetDomains(v []*Domain) *Route53Integration {
	o.Domains = v
	if v == nil {
		o.nullFields = append(o.nullFields, "Domains")
	}
	return o
}

// endregion

// region Domain

func (o Domain) MarshalJSON() ([]byte, error) {
	type noMethod Domain
	return jsonutil.MarshalJSON(noMethod(o), o.forceSendFields, o.nullFields)
}

func (o *Domain) SetHostedZoneID(v *string) *Domain {
	o.HostedZoneID = v
	if v == nil {
		o.nullFields = append(o.nullFields, "HostedZoneID")
	}
	return o
}

func (o *Domain) SetSpotinstAccountID(v *string) *Domain {
	o.SpotinstAccountID = v
	if v == nil {
		o.nullFields = append(o.nullFields, "SpotinstAccountID")
	}
	return o
}

func (o *Domain) SetRecordSetType(v *string) *Domain {
	o.RecordSetType = v
	if v == nil {
		o.nullFields = append(o.nullFields, "RecordSetType")
	}
	return o
}

func (o *Domain) SetRecordSets(v []*RecordSet) *Domain {
	o.RecordSets = v
	if v == nil {
		o.nullFields = append(o.nullFields, "RecordSets")
	}
	return o
}

// endregion
+ +// region RecordSets + +func (o RecordSet) MarshalJSON() ([]byte, error) { + type noMethod RecordSet + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *RecordSet) SetName(v *string) *RecordSet { + if o.Name = v; o.Name == nil { + o.nullFields = append(o.nullFields, "Name") + } + return o +} + +func (o *RecordSet) SetUsePublicIP(v *bool) *RecordSet { + if o.UsePublicIP = v; o.UsePublicIP == nil { + o.nullFields = append(o.nullFields, "UsePublicIP") + } + return o +} + +func (o *RecordSet) SetUsePublicDNS(v *bool) *RecordSet { + if o.UsePublicDNS = v; o.UsePublicDNS == nil { + o.nullFields = append(o.nullFields, "UsePublicDNS") + } + return o +} + +// endregion + +// region AutoScale + +func (o AutoScale) MarshalJSON() ([]byte, error) { + type noMethod AutoScale + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *AutoScale) SetIsEnabled(v *bool) *AutoScale { + if o.IsEnabled = v; o.IsEnabled == nil { + o.nullFields = append(o.nullFields, "IsEnabled") + } + return o +} + +func (o *AutoScale) SetIsAutoConfig(v *bool) *AutoScale { + if o.IsAutoConfig = v; o.IsAutoConfig == nil { + o.nullFields = append(o.nullFields, "IsAutoConfig") + } + return o +} + +func (o *AutoScale) SetCooldown(v *int) *AutoScale { + if o.Cooldown = v; o.Cooldown == nil { + o.nullFields = append(o.nullFields, "Cooldown") + } + return o +} + +func (o *AutoScale) SetHeadroom(v *AutoScaleHeadroom) *AutoScale { + if o.Headroom = v; o.Headroom == nil { + o.nullFields = append(o.nullFields, "Headroom") + } + return o +} + +func (o *AutoScale) SetDown(v *AutoScaleDown) *AutoScale { + if o.Down = v; o.Down == nil { + o.nullFields = append(o.nullFields, "Down") + } + return o +} + +// endregion + +// region AutoScaleHeadroom + +func (o AutoScaleHeadroom) MarshalJSON() ([]byte, error) { + type noMethod AutoScaleHeadroom + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, 
o.nullFields) +} + +func (o *AutoScaleHeadroom) SetCPUPerUnit(v *int) *AutoScaleHeadroom { + if o.CPUPerUnit = v; o.CPUPerUnit == nil { + o.nullFields = append(o.nullFields, "CPUPerUnit") + } + return o +} + +func (o *AutoScaleHeadroom) SetGPUPerUnit(v *int) *AutoScaleHeadroom { + if o.GPUPerUnit = v; o.GPUPerUnit == nil { + o.nullFields = append(o.nullFields, "GPUPerUnit") + } + return o +} + +func (o *AutoScaleHeadroom) SetMemoryPerUnit(v *int) *AutoScaleHeadroom { + if o.MemoryPerUnit = v; o.MemoryPerUnit == nil { + o.nullFields = append(o.nullFields, "MemoryPerUnit") + } + return o +} + +func (o *AutoScaleHeadroom) SetNumOfUnits(v *int) *AutoScaleHeadroom { + if o.NumOfUnits = v; o.NumOfUnits == nil { + o.nullFields = append(o.nullFields, "NumOfUnits") + } + return o +} + +// endregion + +// region AutoScaleDown + +func (o AutoScaleDown) MarshalJSON() ([]byte, error) { + type noMethod AutoScaleDown + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *AutoScaleDown) SetEvaluationPeriods(v *int) *AutoScaleDown { + if o.EvaluationPeriods = v; o.EvaluationPeriods == nil { + o.nullFields = append(o.nullFields, "EvaluationPeriods") + } + return o +} + +func (o *AutoScaleDown) SetMaxScaleDownPercentage(v *float64) *AutoScaleDown { + if o.MaxScaleDownPercentage = v; o.MaxScaleDownPercentage == nil { + o.nullFields = append(o.nullFields, "MaxScaleDownPercentage") + } + return o +} + +// endregion + +// region AutoScaleConstraint + +func (o AutoScaleConstraint) MarshalJSON() ([]byte, error) { + type noMethod AutoScaleConstraint + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *AutoScaleConstraint) SetKey(v *string) *AutoScaleConstraint { + if o.Key = v; o.Key == nil { + o.nullFields = append(o.nullFields, "Key") + } + return o +} + +func (o *AutoScaleConstraint) SetValue(v *string) *AutoScaleConstraint { + if o.Value = v; o.Value == nil { + o.nullFields = 
append(o.nullFields, "Value") + } + return o +} + +// endregion + +// region AutoScaleLabel + +func (o AutoScaleLabel) MarshalJSON() ([]byte, error) { + type noMethod AutoScaleLabel + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *AutoScaleLabel) SetKey(v *string) *AutoScaleLabel { + if o.Key = v; o.Key == nil { + o.nullFields = append(o.nullFields, "Key") + } + return o +} + +func (o *AutoScaleLabel) SetValue(v *string) *AutoScaleLabel { + if o.Value = v; o.Value == nil { + o.nullFields = append(o.nullFields, "Value") + } + return o +} + +// endregion + +// region KubernetesIntegration + +func (o KubernetesIntegration) MarshalJSON() ([]byte, error) { + type noMethod KubernetesIntegration + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *KubernetesIntegration) SetIntegrationMode(v *string) *KubernetesIntegration { + if o.IntegrationMode = v; o.IntegrationMode == nil { + o.nullFields = append(o.nullFields, "IntegrationMode") + } + return o +} + +func (o *KubernetesIntegration) SetClusterIdentifier(v *string) *KubernetesIntegration { + if o.ClusterIdentifier = v; o.ClusterIdentifier == nil { + o.nullFields = append(o.nullFields, "ClusterIdentifier") + } + return o +} + +func (o *KubernetesIntegration) SetServer(v *string) *KubernetesIntegration { + if o.Server = v; o.Server == nil { + o.nullFields = append(o.nullFields, "Server") + } + return o +} + +func (o *KubernetesIntegration) SetToken(v *string) *KubernetesIntegration { + if o.Token = v; o.Token == nil { + o.nullFields = append(o.nullFields, "Token") + } + return o +} + +func (o *KubernetesIntegration) SetAutoScale(v *AutoScaleKubernetes) *KubernetesIntegration { + if o.AutoScale = v; o.AutoScale == nil { + o.nullFields = append(o.nullFields, "AutoScale") + } + return o +} + +func (o AutoScaleKubernetes) MarshalJSON() ([]byte, error) { + type noMethod AutoScaleKubernetes + raw := noMethod(o) + return 
jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *AutoScaleKubernetes) SetLabels(v []*AutoScaleLabel) *AutoScaleKubernetes { + if o.Labels = v; o.Labels == nil { + o.nullFields = append(o.nullFields, "Labels") + } + return o +} + +// endregion + +// region MesosphereIntegration + +func (o MesosphereIntegration) MarshalJSON() ([]byte, error) { + type noMethod MesosphereIntegration + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *MesosphereIntegration) SetServer(v *string) *MesosphereIntegration { + if o.Server = v; o.Server == nil { + o.nullFields = append(o.nullFields, "Server") + } + return o +} + +// endregion + +// region NomadIntegration + +func (o NomadIntegration) MarshalJSON() ([]byte, error) { + type noMethod NomadIntegration + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *NomadIntegration) SetMasterHost(v *string) *NomadIntegration { + if o.MasterHost = v; o.MasterHost == nil { + o.nullFields = append(o.nullFields, "MasterHost") + } + return o +} + +func (o *NomadIntegration) SetMasterPort(v *int) *NomadIntegration { + if o.MasterPort = v; o.MasterPort == nil { + o.nullFields = append(o.nullFields, "MasterPort") + } + return o +} + +func (o *NomadIntegration) SetAclToken(v *string) *NomadIntegration { + if o.ACLToken = v; o.ACLToken == nil { + o.nullFields = append(o.nullFields, "ACLToken") + } + return o +} + +func (o *NomadIntegration) SetAutoScale(v *AutoScaleNomad) *NomadIntegration { + if o.AutoScale = v; o.AutoScale == nil { + o.nullFields = append(o.nullFields, "AutoScale") + } + return o +} + +func (o AutoScaleNomad) MarshalJSON() ([]byte, error) { + type noMethod AutoScaleNomad + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *AutoScaleNomad) SetConstraints(v []*AutoScaleConstraint) *AutoScaleNomad { + if o.Constraints = v; o.Constraints == nil { + o.nullFields 
= append(o.nullFields, "Constraints") + } + return o +} + +// endregion + +// region ChefIntegration + +func (o ChefIntegration) MarshalJSON() ([]byte, error) { + type noMethod ChefIntegration + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *ChefIntegration) SetServer(v *string) *ChefIntegration { + if o.Server = v; o.Server == nil { + o.nullFields = append(o.nullFields, "Server") + } + return o +} + +func (o *ChefIntegration) SetOrganization(v *string) *ChefIntegration { + if o.Organization = v; o.Organization == nil { + o.nullFields = append(o.nullFields, "Organization") + } + return o +} + +func (o *ChefIntegration) SetUser(v *string) *ChefIntegration { + if o.User = v; o.User == nil { + o.nullFields = append(o.nullFields, "User") + } + return o +} + +func (o *ChefIntegration) SetPEMKey(v *string) *ChefIntegration { + if o.PEMKey = v; o.PEMKey == nil { + o.nullFields = append(o.nullFields, "PEMKey") + } + return o +} + +func (o *ChefIntegration) SetVersion(v *string) *ChefIntegration { + if o.Version = v; o.Version == nil { + o.nullFields = append(o.nullFields, "Version") + } + return o +} + +// endregion + +// region Gitlab + +func (o GitlabIntegration) MarshalJSON() ([]byte, error) { + type noMethod GitlabIntegration + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *GitlabIntegration) SetRunner(v *GitlabRunner) *GitlabIntegration { + if o.Runner = v; o.Runner == nil { + o.nullFields = append(o.nullFields, "Runner") + } + return o +} + +func (o GitlabRunner) MarshalJSON() ([]byte, error) { + type noMethod GitlabRunner + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *GitlabRunner) SetIsEnabled(v *bool) *GitlabRunner { + if o.IsEnabled = v; o.IsEnabled == nil { + o.nullFields = append(o.nullFields, "IsEnabled") + } + return o +} + +// endregion + +// region Scheduling + +func (o Scheduling) 
MarshalJSON() ([]byte, error) { + type noMethod Scheduling + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *Scheduling) SetTasks(v []*Task) *Scheduling { + if o.Tasks = v; o.Tasks == nil { + o.nullFields = append(o.nullFields, "Tasks") + } + return o +} + +// endregion + +// region Task + +func (o Task) MarshalJSON() ([]byte, error) { + type noMethod Task + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *Task) SetIsEnabled(v *bool) *Task { + if o.IsEnabled = v; o.IsEnabled == nil { + o.nullFields = append(o.nullFields, "IsEnabled") + } + return o +} + +func (o *Task) SetType(v *string) *Task { + if o.Type = v; o.Type == nil { + o.nullFields = append(o.nullFields, "Type") + } + return o +} + +func (o *Task) SetFrequency(v *string) *Task { + if o.Frequency = v; o.Frequency == nil { + o.nullFields = append(o.nullFields, "Frequency") + } + return o +} + +func (o *Task) SetCronExpression(v *string) *Task { + if o.CronExpression = v; o.CronExpression == nil { + o.nullFields = append(o.nullFields, "CronExpression") + } + return o +} + +func (o *Task) SetStartTime(v *string) *Task { + if o.StartTime = v; o.StartTime == nil { + o.nullFields = append(o.nullFields, "StartTime") + } + return o +} + +func (o *Task) SetScaleTargetCapacity(v *int) *Task { + if o.ScaleTargetCapacity = v; o.ScaleTargetCapacity == nil { + o.nullFields = append(o.nullFields, "ScaleTargetCapacity") + } + return o +} + +func (o *Task) SetScaleMinCapacity(v *int) *Task { + if o.ScaleMinCapacity = v; o.ScaleMinCapacity == nil { + o.nullFields = append(o.nullFields, "ScaleMinCapacity") + } + return o +} + +func (o *Task) SetScaleMaxCapacity(v *int) *Task { + if o.ScaleMaxCapacity = v; o.ScaleMaxCapacity == nil { + o.nullFields = append(o.nullFields, "ScaleMaxCapacity") + } + return o +} + +func (o *Task) SetBatchSizePercentage(v *int) *Task { + if o.BatchSizePercentage = v; o.BatchSizePercentage == 
nil { + o.nullFields = append(o.nullFields, "BatchSizePercentage") + } + return o +} + +func (o *Task) SetGracePeriod(v *int) *Task { + if o.GracePeriod = v; o.GracePeriod == nil { + o.nullFields = append(o.nullFields, "GracePeriod") + } + return o +} + +func (o *Task) SetTargetCapacity(v *int) *Task { + if o.TargetCapacity = v; o.TargetCapacity == nil { + o.nullFields = append(o.nullFields, "TargetCapacity") + } + return o +} + +func (o *Task) SetMinCapacity(v *int) *Task { + if o.MinCapacity = v; o.MinCapacity == nil { + o.nullFields = append(o.nullFields, "MinCapacity") + } + return o +} + +func (o *Task) SetMaxCapacity(v *int) *Task { + if o.MaxCapacity = v; o.MaxCapacity == nil { + o.nullFields = append(o.nullFields, "MaxCapacity") + } + return o +} + +func (o *Task) SetAdjustment(v *int) *Task { + if o.Adjustment = v; o.Adjustment == nil { + o.nullFields = append(o.nullFields, "Adjustment") + } + return o +} + +func (o *Task) SetAdjustmentPercentage(v *int) *Task { + if o.AdjustmentPercentage = v; o.AdjustmentPercentage == nil { + o.nullFields = append(o.nullFields, "AdjustmentPercentage") + } + return o +} + +// endregion + +// region Scaling + +func (o Scaling) MarshalJSON() ([]byte, error) { + type noMethod Scaling + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *Scaling) SetUp(v []*ScalingPolicy) *Scaling { + if o.Up = v; o.Up == nil { + o.nullFields = append(o.nullFields, "Up") + } + return o +} + +func (o *Scaling) SetDown(v []*ScalingPolicy) *Scaling { + if o.Down = v; o.Down == nil { + o.nullFields = append(o.nullFields, "Down") + } + return o +} + +func (o *Scaling) SetTarget(v []*ScalingPolicy) *Scaling { + if o.Target = v; o.Target == nil { + o.nullFields = append(o.nullFields, "Target") + } + return o +} + +func (o *Scaling) SetMultipleMetrics(v *MultipleMetrics) *Scaling { + if o.MultipleMetrics = v; o.MultipleMetrics == nil { + o.nullFields = append(o.nullFields, "MultipleMetrics") + } + 
return o +} + +// endregion + +// region ScalingPolicy + +func (o ScalingPolicy) MarshalJSON() ([]byte, error) { + type noMethod ScalingPolicy + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *ScalingPolicy) SetPolicyName(v *string) *ScalingPolicy { + if o.PolicyName = v; o.PolicyName == nil { + o.nullFields = append(o.nullFields, "PolicyName") + } + return o +} + +func (o *ScalingPolicy) SetMetricName(v *string) *ScalingPolicy { + if o.MetricName = v; o.MetricName == nil { + o.nullFields = append(o.nullFields, "MetricName") + } + return o +} + +func (o *ScalingPolicy) SetNamespace(v *string) *ScalingPolicy { + if o.Namespace = v; o.Namespace == nil { + o.nullFields = append(o.nullFields, "Namespace") + } + return o +} + +func (o *ScalingPolicy) SetSource(v *string) *ScalingPolicy { + if o.Source = v; o.Source == nil { + o.nullFields = append(o.nullFields, "Source") + } + return o +} + +func (o *ScalingPolicy) SetStatistic(v *string) *ScalingPolicy { + if o.Statistic = v; o.Statistic == nil { + o.nullFields = append(o.nullFields, "Statistic") + } + return o +} + +func (o *ScalingPolicy) SetUnit(v *string) *ScalingPolicy { + if o.Unit = v; o.Unit == nil { + o.nullFields = append(o.nullFields, "Unit") + } + return o +} + +func (o *ScalingPolicy) SetThreshold(v *float64) *ScalingPolicy { + if o.Threshold = v; o.Threshold == nil { + o.nullFields = append(o.nullFields, "Threshold") + } + return o +} + +func (o *ScalingPolicy) SetAdjustment(v *int) *ScalingPolicy { + if o.Adjustment = v; o.Adjustment == nil { + o.nullFields = append(o.nullFields, "Adjustment") + } + return o +} + +func (o *ScalingPolicy) SetMinTargetCapacity(v *int) *ScalingPolicy { + if o.MinTargetCapacity = v; o.MinTargetCapacity == nil { + o.nullFields = append(o.nullFields, "MinTargetCapacity") + } + return o +} + +func (o *ScalingPolicy) SetMaxTargetCapacity(v *int) *ScalingPolicy { + if o.MaxTargetCapacity = v; o.MaxTargetCapacity == nil { + 
o.nullFields = append(o.nullFields, "MaxTargetCapacity") + } + return o +} + +func (o *ScalingPolicy) SetEvaluationPeriods(v *int) *ScalingPolicy { + if o.EvaluationPeriods = v; o.EvaluationPeriods == nil { + o.nullFields = append(o.nullFields, "EvaluationPeriods") + } + return o +} + +func (o *ScalingPolicy) SetPeriod(v *int) *ScalingPolicy { + if o.Period = v; o.Period == nil { + o.nullFields = append(o.nullFields, "Period") + } + return o +} + +func (o *ScalingPolicy) SetCooldown(v *int) *ScalingPolicy { + if o.Cooldown = v; o.Cooldown == nil { + o.nullFields = append(o.nullFields, "Cooldown") + } + return o +} + +func (o *ScalingPolicy) SetOperator(v *string) *ScalingPolicy { + if o.Operator = v; o.Operator == nil { + o.nullFields = append(o.nullFields, "Operator") + } + return o +} + +func (o *ScalingPolicy) SetDimensions(v []*Dimension) *ScalingPolicy { + if o.Dimensions = v; o.Dimensions == nil { + o.nullFields = append(o.nullFields, "Dimensions") + } + return o +} + +func (o *ScalingPolicy) SetPredictive(v *Predictive) *ScalingPolicy { + if o.Predictive = v; o.Predictive == nil { + o.nullFields = append(o.nullFields, "Predictive") + } + return o +} + +func (o *ScalingPolicy) SetAction(v *Action) *ScalingPolicy { + if o.Action = v; o.Action == nil { + o.nullFields = append(o.nullFields, "Action") + } + return o +} + +func (o *ScalingPolicy) SetTarget(v *float64) *ScalingPolicy { + if o.Target = v; o.Target == nil { + o.nullFields = append(o.nullFields, "Target") + } + return o +} + +func (o *ScalingPolicy) SetIsEnabled(v *bool) *ScalingPolicy { + if o.IsEnabled = v; o.IsEnabled == nil { + o.nullFields = append(o.nullFields, "IsEnabled") + } + return o +} + +func (o *ScalingPolicy) SetMaxCapacityPerScale(v *string) *ScalingPolicy { + if o.MaxCapacityPerScale = v; o.MaxCapacityPerScale == nil { + o.nullFields = append(o.nullFields, "MaxCapacityPerScale") + } + return o +} + +func (o *ScalingPolicy) SetStepAdjustments(v []*StepAdjustment) *ScalingPolicy { + if 
o.StepAdjustments = v; o.StepAdjustments == nil { + o.nullFields = append(o.nullFields, "StepAdjustments") + } + return o +} + +// endregion + +// region MultipleMetrics + +func (o MultipleMetrics) MarshalJSON() ([]byte, error) { + type noMethod MultipleMetrics + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *MultipleMetrics) SetExpressions(v []*Expressions) *MultipleMetrics { + if o.Expressions = v; o.Expressions == nil { + o.nullFields = append(o.nullFields, "Expressions") + } + return o +} + +func (o *MultipleMetrics) SetMetrics(v []*Metrics) *MultipleMetrics { + if o.Metrics = v; o.Metrics == nil { + o.nullFields = append(o.nullFields, "Metrics") + } + return o +} + +// endregion + +// region Metrics + +func (o Metrics) MarshalJSON() ([]byte, error) { + type noMethod Metrics + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *Metrics) SetMetricName(v *string) *Metrics { + if o.MetricName = v; o.MetricName == nil { + o.nullFields = append(o.nullFields, "MetricName") + } + return o +} + +func (o *Metrics) SetNamespace(v *string) *Metrics { + if o.Namespace = v; o.Namespace == nil { + o.nullFields = append(o.nullFields, "Namespace") + } + return o +} + +func (o *Metrics) SetDimensions(v []*Dimension) *Metrics { + if o.Dimensions = v; o.Dimensions == nil { + o.nullFields = append(o.nullFields, "Dimensions") + } + return o +} + +func (o *Metrics) SetName(v *string) *Metrics { + if o.Name = v; o.Name == nil { + o.nullFields = append(o.nullFields, "Name") + } + return o +} + +func (o *Metrics) SetExtendedStatistic(v *string) *Metrics { + if o.ExtendedStatistic = v; o.ExtendedStatistic == nil { + o.nullFields = append(o.nullFields, "ExtendedStatistic") + } + return o +} + +func (o *Metrics) SetStatistic(v *string) *Metrics { + if o.Statistic = v; o.Statistic == nil { + o.nullFields = append(o.nullFields, "Statistic") + } + return o +} + +func (o *Metrics) 
SetUnit(v *string) *Metrics { + if o.Unit = v; o.Unit == nil { + o.nullFields = append(o.nullFields, "Unit") + } + return o +} + +// endregion + +// region Expression + +func (o Expressions) MarshalJSON() ([]byte, error) { + type noMethod Expressions + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *Expressions) SetExpression(v *string) *Expressions { + if o.Expression = v; o.Expression == nil { + o.nullFields = append(o.nullFields, "Expression") + } + return o +} + +func (o *Expressions) SetName(v *string) *Expressions { + if o.Name = v; o.Name == nil { + o.nullFields = append(o.nullFields, "Name") + } + return o +} + +// endregion + +// region Action + +func (o Action) MarshalJSON() ([]byte, error) { + type noMethod Action + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *Action) SetType(v *string) *Action { + if o.Type = v; o.Type == nil { + o.nullFields = append(o.nullFields, "Type") + } + return o +} + +func (o *Action) SetAdjustment(v *string) *Action { + if o.Adjustment = v; o.Adjustment == nil { + o.nullFields = append(o.nullFields, "Adjustment") + } + return o +} + +func (o *Action) SetMinTargetCapacity(v *string) *Action { + if o.MinTargetCapacity = v; o.MinTargetCapacity == nil { + o.nullFields = append(o.nullFields, "MinTargetCapacity") + } + return o +} + +func (o *Action) SetMaxTargetCapacity(v *string) *Action { + if o.MaxTargetCapacity = v; o.MaxTargetCapacity == nil { + o.nullFields = append(o.nullFields, "MaxTargetCapacity") + } + return o +} + +func (o *Action) SetMaximum(v *string) *Action { + if o.Maximum = v; o.Maximum == nil { + o.nullFields = append(o.nullFields, "Maximum") + } + return o +} + +func (o *Action) SetMinimum(v *string) *Action { + if o.Minimum = v; o.Minimum == nil { + o.nullFields = append(o.nullFields, "Minimum") + } + return o +} + +func (o *Action) SetTarget(v *string) *Action { + if o.Target = v; o.Target == nil { + 
o.nullFields = append(o.nullFields, "Target") + } + return o +} + +// endregion + +// region Dimension + +func (o Dimension) MarshalJSON() ([]byte, error) { + type noMethod Dimension + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *Dimension) SetName(v *string) *Dimension { + if o.Name = v; o.Name == nil { + o.nullFields = append(o.nullFields, "Name") + } + return o +} + +func (o *Dimension) SetValue(v *string) *Dimension { + if o.Value = v; o.Value == nil { + o.nullFields = append(o.nullFields, "Value") + } + return o +} + +// endregion + +// region Predictive + +func (o *Predictive) MarshalJSON() ([]byte, error) { + type noMethod Predictive + raw := noMethod(*o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *Predictive) SetMode(v *string) *Predictive { + if o.Mode = v; o.Mode == nil { + o.nullFields = append(o.nullFields, "Mode") + } + return o +} + +// endregion + +// region StepAdjustments + +func (o StepAdjustment) MarshalJSON() ([]byte, error) { + type noMethod StepAdjustment + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *StepAdjustment) SetAction(v *Action) *StepAdjustment { + if o.Action = v; o.Action == nil { + o.nullFields = append(o.nullFields, "Action") + } + return o +} + +func (o *StepAdjustment) SetThreshold(v *int) *StepAdjustment { + if o.Threshold = v; o.Threshold == nil { + o.nullFields = append(o.nullFields, "Threshold") + } + return o +} + +// endregion + +// region Strategy + +func (o Strategy) MarshalJSON() ([]byte, error) { + type noMethod Strategy + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *Strategy) SetRisk(v *float64) *Strategy { + if o.Risk = v; o.Risk == nil { + o.nullFields = append(o.nullFields, "Risk") + } + return o +} + +func (o *Strategy) SetOnDemandCount(v *int) *Strategy { + if o.OnDemandCount = v; o.OnDemandCount == nil { + 
o.nullFields = append(o.nullFields, "OnDemandCount") + } + return o +} + +func (o *Strategy) SetImmediateODRecoverThreshold(v *int) *Strategy { + if o.ImmediateODRecoverThreshold = v; o.ImmediateODRecoverThreshold == nil { + o.nullFields = append(o.nullFields, "ImmediateODRecoverThreshold") + } + return o +} + +func (o *Strategy) SetDrainingTimeout(v *int) *Strategy { + if o.DrainingTimeout = v; o.DrainingTimeout == nil { + o.nullFields = append(o.nullFields, "DrainingTimeout") + } + return o +} + +func (o *Strategy) SetAvailabilityVsCost(v *string) *Strategy { + if o.AvailabilityVsCost = v; o.AvailabilityVsCost == nil { + o.nullFields = append(o.nullFields, "AvailabilityVsCost") + } + return o +} + +func (o *Strategy) SetLifetimePeriod(v *string) *Strategy { + if o.LifetimePeriod = v; o.LifetimePeriod == nil { + o.nullFields = append(o.nullFields, "LifetimePeriod") + } + return o +} + +func (o *Strategy) SetUtilizeReservedInstances(v *bool) *Strategy { + if o.UtilizeReservedInstances = v; o.UtilizeReservedInstances == nil { + o.nullFields = append(o.nullFields, "UtilizeReservedInstances") + } + return o +} + +func (o *Strategy) SetFallbackToOnDemand(v *bool) *Strategy { + if o.FallbackToOnDemand = v; o.FallbackToOnDemand == nil { + o.nullFields = append(o.nullFields, "FallbackToOnDemand") + } + return o +} + +func (o *Strategy) SetSpinUpTime(v *int) *Strategy { + if o.SpinUpTime = v; o.SpinUpTime == nil { + o.nullFields = append(o.nullFields, "SpinUpTime") + } + return o +} + +func (o *Strategy) SetSignals(v []*Signal) *Strategy { + if o.Signals = v; o.Signals == nil { + o.nullFields = append(o.nullFields, "Signals") + } + return o +} + +func (o *Strategy) SetPersistence(v *Persistence) *Strategy { + if o.Persistence = v; o.Persistence == nil { + o.nullFields = append(o.nullFields, "Persistence") + } + return o +} + +func (o *Strategy) SetRevertToSpot(v *RevertToSpot) *Strategy { + if o.RevertToSpot = v; o.RevertToSpot == nil { + o.nullFields = 
append(o.nullFields, "RevertToSpot") + } + return o +} + +func (o *Strategy) SetScalingStrategy(v *ScalingStrategy) *Strategy { + if o.ScalingStrategy = v; o.ScalingStrategy == nil { + o.nullFields = append(o.nullFields, "ScalingStrategy") + } + return o +} + +func (o *Strategy) SetUtilizeCommitments(v *bool) *Strategy { + if o.UtilizeCommitments = v; o.UtilizeCommitments == nil { + o.nullFields = append(o.nullFields, "UtilizeCommitments") + } + return o +} + +func (o *Strategy) SetMinimumInstanceLifetime(v *int) *Strategy { + if o.MinimumInstanceLifetime = v; o.MinimumInstanceLifetime == nil { + o.nullFields = append(o.nullFields, "MinimumInstanceLifetime") + } + return o +} +func (o *Strategy) SetConsiderODPricing(v *bool) *Strategy { + if o.ConsiderODPricing = v; o.ConsiderODPricing == nil { + o.nullFields = append(o.nullFields, "ConsiderODPricing") + } + return o +} + +// endregion + +// region ScalingStrategy + +func (o ScalingStrategy) MarshalJSON() ([]byte, error) { + type noMethod ScalingStrategy + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *ScalingStrategy) SetTerminationPolicy(v *string) *ScalingStrategy { + if o.TerminationPolicy = v; o.TerminationPolicy == nil { + o.nullFields = append(o.nullFields, "TerminationPolicy") + } + return o +} + +func (o *ScalingStrategy) SetTerminateAtEndOfBillingHour(v *bool) *ScalingStrategy { + if o.TerminateAtEndOfBillingHour = v; o.TerminateAtEndOfBillingHour == nil { + o.nullFields = append(o.nullFields, "TerminateAtEndOfBillingHour") + } + return o +} + +// endregion + +// region Persistence + +func (o Persistence) MarshalJSON() ([]byte, error) { + type noMethod Persistence + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *Persistence) SetShouldPersistPrivateIP(v *bool) *Persistence { + if o.ShouldPersistPrivateIP = v; o.ShouldPersistPrivateIP == nil { + o.nullFields = append(o.nullFields, 
"ShouldPersistPrivateIP") + } + return o +} + +func (o *Persistence) SetShouldPersistBlockDevices(v *bool) *Persistence { + if o.ShouldPersistBlockDevices = v; o.ShouldPersistBlockDevices == nil { + o.nullFields = append(o.nullFields, "ShouldPersistBlockDevices") + } + return o +} + +func (o *Persistence) SetShouldPersistRootDevice(v *bool) *Persistence { + if o.ShouldPersistRootDevice = v; o.ShouldPersistRootDevice == nil { + o.nullFields = append(o.nullFields, "ShouldPersistRootDevice") + } + return o +} + +func (o *Persistence) SetBlockDevicesMode(v *string) *Persistence { + if o.BlockDevicesMode = v; o.BlockDevicesMode == nil { + o.nullFields = append(o.nullFields, "BlockDevicesMode") + } + return o +} + +// endregion + +// region RevertToSpot + +func (o RevertToSpot) MarshalJSON() ([]byte, error) { + type noMethod RevertToSpot + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *RevertToSpot) SetPerformAt(v *string) *RevertToSpot { + if o.PerformAt = v; o.PerformAt == nil { + o.nullFields = append(o.nullFields, "PerformAt") + } + return o +} + +func (o *RevertToSpot) SetTimeWindows(v []string) *RevertToSpot { + if o.TimeWindows = v; o.TimeWindows == nil { + o.nullFields = append(o.nullFields, "TimeWindows") + } + return o +} + +// endregion + +// region Signal + +func (o Signal) MarshalJSON() ([]byte, error) { + type noMethod Signal + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *Signal) SetName(v *string) *Signal { + if o.Name = v; o.Name == nil { + o.nullFields = append(o.nullFields, "Name") + } + return o +} + +func (o *Signal) SetTimeout(v *int) *Signal { + if o.Timeout = v; o.Timeout == nil { + o.nullFields = append(o.nullFields, "Timeout") + } + return o +} + +// endregion + +// region Capacity + +func (o Capacity) MarshalJSON() ([]byte, error) { + type noMethod Capacity + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, 
o.nullFields) +} + +func (o *Capacity) SetMinimum(v *int) *Capacity { + if o.Minimum = v; o.Minimum == nil { + o.nullFields = append(o.nullFields, "Minimum") + } + return o +} + +func (o *Capacity) SetMaximum(v *int) *Capacity { + if o.Maximum = v; o.Maximum == nil { + o.nullFields = append(o.nullFields, "Maximum") + } + return o +} + +func (o *Capacity) SetTarget(v *int) *Capacity { + if o.Target = v; o.Target == nil { + o.nullFields = append(o.nullFields, "Target") + } + return o +} + +func (o *Capacity) SetUnit(v *string) *Capacity { + if o.Unit = v; o.Unit == nil { + o.nullFields = append(o.nullFields, "Unit") + } + return o +} + +// endregion + +// region Compute + +func (o Compute) MarshalJSON() ([]byte, error) { + type noMethod Compute + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *Compute) SetProduct(v *string) *Compute { + if o.Product = v; o.Product == nil { + o.nullFields = append(o.nullFields, "Product") + } + + return o +} + +func (o *Compute) SetPrivateIPs(v []string) *Compute { + if o.PrivateIPs = v; o.PrivateIPs == nil { + o.nullFields = append(o.nullFields, "PrivateIPs") + } + + return o +} + +func (o *Compute) SetInstanceTypes(v *InstanceTypes) *Compute { + if o.InstanceTypes = v; o.InstanceTypes == nil { + o.nullFields = append(o.nullFields, "InstanceTypes") + } + return o +} + +func (o *Compute) SetLaunchSpecification(v *LaunchSpecification) *Compute { + if o.LaunchSpecification = v; o.LaunchSpecification == nil { + o.nullFields = append(o.nullFields, "LaunchSpecification") + } + return o +} + +func (o *Compute) SetAvailabilityZones(v []*AvailabilityZone) *Compute { + if o.AvailabilityZones = v; o.AvailabilityZones == nil { + o.nullFields = append(o.nullFields, "AvailabilityZones") + } + return o +} + +func (o *Compute) SetPreferredAvailabilityZones(v []string) *Compute { + if o.PreferredAvailabilityZones = v; o.PreferredAvailabilityZones == nil { + o.nullFields = append(o.nullFields, 
"PreferredAvailabilityZones") + } + return o +} + +func (o *Compute) SetElasticIPs(v []string) *Compute { + if o.ElasticIPs = v; o.ElasticIPs == nil { + o.nullFields = append(o.nullFields, "ElasticIPs") + } + return o +} + +func (o *Compute) SetEBSVolumePool(v []*EBSVolume) *Compute { + if o.EBSVolumePool = v; o.EBSVolumePool == nil { + o.nullFields = append(o.nullFields, "EBSVolumePool") + } + return o +} + +func (o *Compute) SetSubnetIDs(v []string) *Compute { + if o.SubnetIDs = v; o.SubnetIDs == nil { + o.nullFields = append(o.nullFields, "SubnetIDs") + } + return o +} + +// endregion + +// region EBSVolume + +func (o EBSVolume) MarshalJSON() ([]byte, error) { + type noMethod EBSVolume + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *EBSVolume) SetDeviceName(v *string) *EBSVolume { + if o.DeviceName = v; o.DeviceName == nil { + o.nullFields = append(o.nullFields, "DeviceName") + } + return o +} + +func (o *EBSVolume) SetVolumeIDs(v []string) *EBSVolume { + if o.VolumeIDs = v; o.VolumeIDs == nil { + o.nullFields = append(o.nullFields, "VolumeIDs") + } + return o +} + +// endregion + +// region InstanceTypes + +func (o InstanceTypes) MarshalJSON() ([]byte, error) { + type noMethod InstanceTypes + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *InstanceTypes) SetOnDemand(v *string) *InstanceTypes { + if o.OnDemand = v; o.OnDemand == nil { + o.nullFields = append(o.nullFields, "OnDemand") + } + return o +} + +func (o *InstanceTypes) SetSpot(v []string) *InstanceTypes { + if o.Spot = v; o.Spot == nil { + o.nullFields = append(o.nullFields, "Spot") + } + return o +} + +func (o *InstanceTypes) SetPreferredSpot(v []string) *InstanceTypes { + if o.PreferredSpot = v; o.PreferredSpot == nil { + o.nullFields = append(o.nullFields, "PreferredSpot") + } + return o +} + +func (o *InstanceTypes) SetWeights(v []*InstanceTypeWeight) *InstanceTypes { + if o.Weights = v; 
o.Weights == nil { + o.nullFields = append(o.nullFields, "Weights") + } + return o +} + +func (o *InstanceTypes) SetOnDemandTypes(v []string) *InstanceTypes { + if o.OnDemandTypes = v; o.OnDemandTypes == nil { + o.nullFields = append(o.nullFields, "OnDemandTypes") + } + return o +} + +func (o *InstanceTypes) SetResourceRequirements(v *ResourceRequirements) *InstanceTypes { + if o.ResourceRequirements = v; o.ResourceRequirements == nil { + o.nullFields = append(o.nullFields, "ResourceRequirements") + } + return o +} + +// endregion + +// region InstanceTypeWeight + +func (o InstanceTypeWeight) MarshalJSON() ([]byte, error) { + type noMethod InstanceTypeWeight + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *InstanceTypeWeight) SetInstanceType(v *string) *InstanceTypeWeight { + if o.InstanceType = v; o.InstanceType == nil { + o.nullFields = append(o.nullFields, "InstanceType") + } + return o +} + +func (o *InstanceTypeWeight) SetWeight(v *int) *InstanceTypeWeight { + if o.Weight = v; o.Weight == nil { + o.nullFields = append(o.nullFields, "Weight") + } + return o +} + +// endregion + +func (o ResourceRequirements) MarshalJSON() ([]byte, error) { + type noMethod ResourceRequirements + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *ResourceRequirements) SetExcludedInstanceFamilies(v []string) *ResourceRequirements { + if o.ExcludedInstanceFamilies = v; o.ExcludedInstanceFamilies == nil { + o.nullFields = append(o.nullFields, "ExcludedInstanceFamilies") + } + return o +} + +func (o *ResourceRequirements) SetExcludedInstanceGenerations(v []string) *ResourceRequirements { + if o.ExcludedInstanceGenerations = v; o.ExcludedInstanceGenerations == nil { + o.nullFields = append(o.nullFields, "ExcludedInstanceGenerations") + } + return o +} + +func (o *ResourceRequirements) SetExcludedInstanceTypes(v []string) *ResourceRequirements { + if o.ExcludedInstanceTypes = v; 
o.ExcludedInstanceTypes == nil { + o.nullFields = append(o.nullFields, "ExcludedInstanceTypes") + } + return o +} + +func (o *ResourceRequirements) SetRequiredGpu(v *RequiredGpu) *ResourceRequirements { + if o.RequiredGpu = v; o.RequiredGpu == nil { + o.nullFields = append(o.nullFields, "RequiredGpu") + } + return o +} + +func (o *ResourceRequirements) SetRequiredVCpu(v *RequiredVCpu) *ResourceRequirements { + if o.RequiredVCpu = v; o.RequiredVCpu == nil { + o.nullFields = append(o.nullFields, "RequiredVCpu") + } + return o +} + +func (o *ResourceRequirements) SetRequiredMemory(v *RequiredMemory) *ResourceRequirements { + if o.RequiredMemory = v; o.RequiredMemory == nil { + o.nullFields = append(o.nullFields, "RequiredMemory") + } + return o +} + +func (o RequiredGpu) MarshalJSON() ([]byte, error) { + type noMethod RequiredGpu + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *RequiredGpu) SetMaximum(v *int) *RequiredGpu { + if o.Maximum = v; o.Maximum == nil { + o.nullFields = append(o.nullFields, "Maximum") + } + return o +} + +func (o *RequiredGpu) SetMinimum(v *int) *RequiredGpu { + if o.Minimum = v; o.Minimum == nil { + o.nullFields = append(o.nullFields, "Minimum") + } + return o +} + +func (o RequiredMemory) MarshalJSON() ([]byte, error) { + type noMethod RequiredMemory + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *RequiredMemory) SetMaximum(v *int) *RequiredMemory { + if o.Maximum = v; o.Maximum == nil { + o.nullFields = append(o.nullFields, "Maximum") + } + return o +} + +func (o *RequiredMemory) SetMinimum(v *int) *RequiredMemory { + if o.Minimum = v; o.Minimum == nil { + o.nullFields = append(o.nullFields, "Minimum") + } + return o +} + +func (o RequiredVCpu) MarshalJSON() ([]byte, error) { + type noMethod RequiredVCpu + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *RequiredVCpu) 
SetMaximum(v *int) *RequiredVCpu { + if o.Maximum = v; o.Maximum == nil { + o.nullFields = append(o.nullFields, "Maximum") + } + return o +} + +func (o *RequiredVCpu) SetMinimum(v *int) *RequiredVCpu { + if o.Minimum = v; o.Minimum == nil { + o.nullFields = append(o.nullFields, "Minimum") + } + return o +} + +// region AvailabilityZone + +func (o AvailabilityZone) MarshalJSON() ([]byte, error) { + type noMethod AvailabilityZone + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *AvailabilityZone) SetName(v *string) *AvailabilityZone { + if o.Name = v; o.Name == nil { + o.nullFields = append(o.nullFields, "Name") + } + return o +} + +func (o *AvailabilityZone) SetSubnetId(v *string) *AvailabilityZone { + if o.SubnetID = v; o.SubnetID == nil { + o.nullFields = append(o.nullFields, "SubnetID") + } + return o +} + +func (o *AvailabilityZone) SetPlacementGroupName(v *string) *AvailabilityZone { + if o.PlacementGroupName = v; o.PlacementGroupName == nil { + o.nullFields = append(o.nullFields, "PlacementGroupName") + } + return o +} + +// endregion + +// region LaunchSpecification + +func (o LaunchSpecification) MarshalJSON() ([]byte, error) { + type noMethod LaunchSpecification + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *LaunchSpecification) SetLoadBalancerNames(v []string) *LaunchSpecification { + if o.LoadBalancerNames = v; o.LoadBalancerNames == nil { + o.nullFields = append(o.nullFields, "LoadBalancerNames") + } + return o +} + +func (o *LaunchSpecification) SetLoadBalancersConfig(v *LoadBalancersConfig) *LaunchSpecification { + if o.LoadBalancersConfig = v; o.LoadBalancersConfig == nil { + o.nullFields = append(o.nullFields, "LoadBalancersConfig") + } + return o +} + +func (o *LaunchSpecification) SetSecurityGroupIDs(v []string) *LaunchSpecification { + if o.SecurityGroupIDs = v; o.SecurityGroupIDs == nil { + o.nullFields = append(o.nullFields, 
"SecurityGroupIDs") + } + return o +} + +func (o *LaunchSpecification) SetHealthCheckType(v *string) *LaunchSpecification { + if o.HealthCheckType = v; o.HealthCheckType == nil { + o.nullFields = append(o.nullFields, "HealthCheckType") + } + return o +} + +func (o *LaunchSpecification) SetHealthCheckGracePeriod(v *int) *LaunchSpecification { + if o.HealthCheckGracePeriod = v; o.HealthCheckGracePeriod == nil { + o.nullFields = append(o.nullFields, "HealthCheckGracePeriod") + } + return o +} + +func (o *LaunchSpecification) SetHealthCheckUnhealthyDurationBeforeReplacement(v *int) *LaunchSpecification { + if o.HealthCheckUnhealthyDurationBeforeReplacement = v; o.HealthCheckUnhealthyDurationBeforeReplacement == nil { + o.nullFields = append(o.nullFields, "HealthCheckUnhealthyDurationBeforeReplacement") + } + return o +} + +func (o *LaunchSpecification) SetImages(v []*Image) *LaunchSpecification { + if o.Images = v; o.Images == nil { + o.nullFields = append(o.nullFields, "Images") + } + return o +} + +func (o *LaunchSpecification) SetImageId(v *string) *LaunchSpecification { + if o.ImageID = v; o.ImageID == nil { + o.nullFields = append(o.nullFields, "ImageID") + + } + return o +} + +func (o *LaunchSpecification) SetKeyPair(v *string) *LaunchSpecification { + if o.KeyPair = v; o.KeyPair == nil { + o.nullFields = append(o.nullFields, "KeyPair") + } + return o +} + +func (o *LaunchSpecification) SetUserData(v *string) *LaunchSpecification { + if o.UserData = v; o.UserData == nil { + o.nullFields = append(o.nullFields, "UserData") + } + return o +} + +func (o *LaunchSpecification) SetShutdownScript(v *string) *LaunchSpecification { + if o.ShutdownScript = v; o.ShutdownScript == nil { + o.nullFields = append(o.nullFields, "ShutdownScript") + } + return o +} + +func (o *LaunchSpecification) SetTenancy(v *string) *LaunchSpecification { + if o.Tenancy = v; o.Tenancy == nil { + o.nullFields = append(o.nullFields, "Tenancy") + } + return o +} + +func (o *LaunchSpecification) 
SetMonitoring(v *bool) *LaunchSpecification { + if o.Monitoring = v; o.Monitoring == nil { + o.nullFields = append(o.nullFields, "Monitoring") + } + return o +} + +func (o *LaunchSpecification) SetEBSOptimized(v *bool) *LaunchSpecification { + if o.EBSOptimized = v; o.EBSOptimized == nil { + o.nullFields = append(o.nullFields, "EBSOptimized") + } + return o +} + +func (o *LaunchSpecification) SetIAMInstanceProfile(v *IAMInstanceProfile) *LaunchSpecification { + if o.IAMInstanceProfile = v; o.IAMInstanceProfile == nil { + o.nullFields = append(o.nullFields, "IAMInstanceProfile") + } + return o +} + +func (o *LaunchSpecification) SetCreditSpecification(v *CreditSpecification) *LaunchSpecification { + if o.CreditSpecification = v; o.CreditSpecification == nil { + o.nullFields = append(o.nullFields, "CreditSpecification") + } + return o +} + +func (o *LaunchSpecification) SetBlockDeviceMappings(v []*BlockDeviceMapping) *LaunchSpecification { + if o.BlockDeviceMappings = v; o.BlockDeviceMappings == nil { + o.nullFields = append(o.nullFields, "BlockDeviceMappings") + } + return o +} + +func (o *LaunchSpecification) SetNetworkInterfaces(v []*NetworkInterface) *LaunchSpecification { + if o.NetworkInterfaces = v; o.NetworkInterfaces == nil { + o.nullFields = append(o.nullFields, "NetworkInterfaces") + } + return o +} + +func (o *LaunchSpecification) SetTags(v []*Tag) *LaunchSpecification { + if o.Tags = v; o.Tags == nil { + o.nullFields = append(o.nullFields, "Tags") + } + return o +} + +func (o *LaunchSpecification) SetMetadataOptions(v *MetadataOptions) *LaunchSpecification { + if o.MetadataOptions = v; o.MetadataOptions == nil { + o.nullFields = append(o.nullFields, "MetadataOptions") + } + return o +} + +func (o *LaunchSpecification) SetCPUOptions(v *CPUOptions) *LaunchSpecification { + if o.CPUOptions = v; o.CPUOptions == nil { + o.nullFields = append(o.nullFields, "CPUOptions") + } + return o +} + +func (o *LaunchSpecification) SetResourceTagSpecification(v 
*ResourceTagSpecification) *LaunchSpecification { + if o.ResourceTagSpecification = v; o.ResourceTagSpecification == nil { + o.nullFields = append(o.nullFields, "ResourceTagSpecification") + } + return o +} + +func (o *LaunchSpecification) SetITF(v *ITF) *LaunchSpecification { + if o.ITF = v; o.ITF == nil { + o.nullFields = append(o.nullFields, "ITF") + } + return o +} + +// endregion + +// region Matcher + +func (o Matcher) MarshalJSON() ([]byte, error) { + type noMethod Matcher + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *Matcher) SetHTTPCode(v *string) *Matcher { + if o.HTTPCode = v; o.HTTPCode == nil { + o.nullFields = append(o.nullFields, "HTTPCode") + } + return o +} + +func (o *Matcher) SetGRPCCode(v *string) *Matcher { + if o.GRPCCode = v; o.GRPCCode == nil { + o.nullFields = append(o.nullFields, "GRPCCode") + } + return o +} + +// endregion + +// region ITF + +func (o ITF) MarshalJSON() ([]byte, error) { + type noMethod ITF + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *ITF) SetLoadBalancers(v []*ITFLoadBalancer) *ITF { + if o.LoadBalancers = v; o.LoadBalancers == nil { + o.nullFields = append(o.nullFields, "LoadBalancers") + } + return o +} + +func (o *ITF) SetMigrationHealthinessThreshold(v *int) *ITF { + if o.MigrationHealthinessThreshold = v; o.MigrationHealthinessThreshold == nil { + o.nullFields = append(o.nullFields, "MigrationHealthinessThreshold") + } + return o +} + +func (o *ITF) SetFixedTargetGroups(v *bool) *ITF { + if o.FixedTargetGroups = v; o.FixedTargetGroups == nil { + o.nullFields = append(o.nullFields, "FixedTargetGroups") + } + return o +} + +func (o *ITF) SetWeightStrategy(v *string) *ITF { + if o.WeightStrategy = v; o.WeightStrategy == nil { + o.nullFields = append(o.nullFields, "WeightStrategy") + } + return o +} + +func (o *ITF) SetTargetGroupConfig(v *TargetGroupConfig) *ITF { + if o.TargetGroupConfig = v; 
o.TargetGroupConfig == nil { + o.nullFields = append(o.nullFields, "TargetGroupConfig") + } + return o +} + +func (o *ITF) SetDefaultStaticTargetGroups(v []*StaticTargetGroup) *ITF { + if o.DefaultStaticTargetGroups = v; o.DefaultStaticTargetGroups == nil { + o.nullFields = append(o.nullFields, "DefaultStaticTargetGroups") + } + return o +} + +// endregion + +// region ITFLoadBalancer + +func (o ITFLoadBalancer) MarshalJSON() ([]byte, error) { + type noMethod ITFLoadBalancer + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *ITFLoadBalancer) SetListenerRules(v []*ListenerRule) *ITFLoadBalancer { + if o.ListenerRules = v; o.ListenerRules == nil { + o.nullFields = append(o.nullFields, "ListenerRules") + } + return o +} + +func (o *ITFLoadBalancer) SetLoadBalancerARN(v *string) *ITFLoadBalancer { + if o.LoadBalancerARN = v; o.LoadBalancerARN == nil { + o.nullFields = append(o.nullFields, "LoadBalancerARN") + } + return o +} + +// endregion + +// region ListenerRule + +func (o ListenerRule) MarshalJSON() ([]byte, error) { + type noMethod ListenerRule + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *ListenerRule) SetRuleARN(v *string) *ListenerRule { + if o.RuleARN = v; o.RuleARN == nil { + o.nullFields = append(o.nullFields, "RuleARN") + } + return o +} + +func (o *ListenerRule) SetStaticTargetGroups(v []*StaticTargetGroup) *ListenerRule { + if o.StaticTargetGroups = v; o.StaticTargetGroups == nil { + o.nullFields = append(o.nullFields, "StaticTargetGroups") + } + return o +} + +// endregion + +// region StaticTargetGroup + +func (o StaticTargetGroup) MarshalJSON() ([]byte, error) { + type noMethod StaticTargetGroup + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *StaticTargetGroup) SetStaticTargetGroupARN(v *string) *StaticTargetGroup { + if o.StaticTargetGroupARN = v; o.StaticTargetGroupARN == nil { + 
o.nullFields = append(o.nullFields, "StaticTargetGroupARN") + } + return o +} + +func (o *StaticTargetGroup) SetPercentage(v *float64) *StaticTargetGroup { + if o.Percentage = v; o.Percentage == nil { + o.nullFields = append(o.nullFields, "Percentage") + } + return o +} + +// region TargetGroupConfig + +func (o TargetGroupConfig) MarshalJSON() ([]byte, error) { + type noMethod TargetGroupConfig + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *TargetGroupConfig) SetVPCId(v *string) *TargetGroupConfig { + if o.VPCID = v; o.VPCID == nil { + o.nullFields = append(o.nullFields, "VPCID") + } + return o +} + +func (o *TargetGroupConfig) SetHealthCheckIntervalSeconds(v *int) *TargetGroupConfig { + if o.HealthCheckIntervalSeconds = v; o.HealthCheckIntervalSeconds == nil { + o.nullFields = append(o.nullFields, "HealthCheckIntervalSeconds") + } + return o +} + +func (o *TargetGroupConfig) SetHealthCheckPath(v *string) *TargetGroupConfig { + if o.HealthCheckPath = v; o.HealthCheckPath == nil { + o.nullFields = append(o.nullFields, "HealthCheckPath") + } + return o +} + +func (o *TargetGroupConfig) SetHealthCheckPort(v *string) *TargetGroupConfig { + if o.HealthCheckPort = v; o.HealthCheckPort == nil { + o.nullFields = append(o.nullFields, "HealthCheckPort") + } + return o +} + +func (o *TargetGroupConfig) SetHealthCheckProtocol(v *string) *TargetGroupConfig { + if o.HealthCheckProtocol = v; o.HealthCheckProtocol == nil { + o.nullFields = append(o.nullFields, "HealthCheckProtocol") + } + return o +} + +func (o *TargetGroupConfig) SetHealthyThresholdCount(v *int) *TargetGroupConfig { + if o.HealthyThresholdCount = v; o.HealthyThresholdCount == nil { + o.nullFields = append(o.nullFields, "HealthyThresholdCount") + } + return o +} + +func (o *TargetGroupConfig) SetUnhealthyThresholdCount(v *int) *TargetGroupConfig { + if o.UnhealthyThresholdCount = v; o.UnhealthyThresholdCount == nil { + o.nullFields = append(o.nullFields, 
"UnhealthyThresholdCount") + } + return o +} + +func (o *TargetGroupConfig) SetHealthCheckTimeoutSeconds(v *int) *TargetGroupConfig { + if o.HealthCheckTimeoutSeconds = v; o.HealthCheckTimeoutSeconds == nil { + o.nullFields = append(o.nullFields, "HealthCheckTimeoutSeconds") + } + return o +} + +func (o *TargetGroupConfig) SetPort(v *int) *TargetGroupConfig { + if o.Port = v; o.Port == nil { + o.nullFields = append(o.nullFields, "Port") + } + return o +} + +func (o *TargetGroupConfig) SetProtocol(v *string) *TargetGroupConfig { + if o.Protocol = v; o.Protocol == nil { + o.nullFields = append(o.nullFields, "Protocol") + } + return o +} + +func (o *TargetGroupConfig) SetProtocolVersion(v *string) *TargetGroupConfig { + if o.ProtocolVersion = v; o.ProtocolVersion == nil { + o.nullFields = append(o.nullFields, "ProtocolVersion") + } + return o +} + +func (o *TargetGroupConfig) SetMatcher(v *Matcher) *TargetGroupConfig { + if o.Matcher = v; o.Matcher == nil { + o.nullFields = append(o.nullFields, "Matcher") + } + return o +} + +func (o *TargetGroupConfig) SetTags(v []*Tag) *TargetGroupConfig { + if o.Tags = v; o.Tags == nil { + o.nullFields = append(o.nullFields, "Tags") + } + return o +} + +// endregion + +// region Image + +func (o Image) MarshalJSON() ([]byte, error) { + type noMethod Image + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *Image) SetId(v *string) *Image { + if o.Id = v; o.Id == nil { + o.nullFields = append(o.nullFields, "Id") + } + return o +} + +// endregion + +// region LoadBalancersConfig + +func (o LoadBalancersConfig) MarshalJSON() ([]byte, error) { + type noMethod LoadBalancersConfig + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *LoadBalancersConfig) SetLoadBalancers(v []*LoadBalancer) *LoadBalancersConfig { + if o.LoadBalancers = v; o.LoadBalancers == nil { + o.nullFields = append(o.nullFields, "LoadBalancers") + } + return o +} + 
+// endregion + +// region LoadBalancer + +func (o LoadBalancer) MarshalJSON() ([]byte, error) { + type noMethod LoadBalancer + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *LoadBalancer) SetName(v *string) *LoadBalancer { + if o.Name = v; o.Name == nil { + o.nullFields = append(o.nullFields, "Name") + } + return o +} + +func (o *LoadBalancer) SetArn(v *string) *LoadBalancer { + if o.Arn = v; o.Arn == nil { + o.nullFields = append(o.nullFields, "Arn") + } + return o +} + +func (o *LoadBalancer) SetType(v *string) *LoadBalancer { + if o.Type = v; o.Type == nil { + o.nullFields = append(o.nullFields, "Type") + } + return o +} + +// endregion + +// region NetworkInterface + +func (o NetworkInterface) MarshalJSON() ([]byte, error) { + type noMethod NetworkInterface + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *NetworkInterface) SetId(v *string) *NetworkInterface { + if o.ID = v; o.ID == nil { + o.nullFields = append(o.nullFields, "ID") + } + return o +} + +func (o *NetworkInterface) SetDescription(v *string) *NetworkInterface { + if o.Description = v; o.Description == nil { + o.nullFields = append(o.nullFields, "Description") + } + return o +} + +func (o *NetworkInterface) SetDeviceIndex(v *int) *NetworkInterface { + if o.DeviceIndex = v; o.DeviceIndex == nil { + o.nullFields = append(o.nullFields, "DeviceIndex") + } + return o +} + +func (o *NetworkInterface) SetSecondaryPrivateIPAddressCount(v *int) *NetworkInterface { + if o.SecondaryPrivateIPAddressCount = v; o.SecondaryPrivateIPAddressCount == nil { + o.nullFields = append(o.nullFields, "SecondaryPrivateIPAddressCount") + } + return o +} + +func (o *NetworkInterface) SetAssociatePublicIPAddress(v *bool) *NetworkInterface { + if o.AssociatePublicIPAddress = v; o.AssociatePublicIPAddress == nil { + o.nullFields = append(o.nullFields, "AssociatePublicIPAddress") + } + return o +} + +func (o 
*NetworkInterface) SetAssociateIPV6Address(v *bool) *NetworkInterface { + if o.AssociateIPV6Address = v; o.AssociateIPV6Address == nil { + o.nullFields = append(o.nullFields, "AssociateIPV6Address") + } + return o +} + +func (o *NetworkInterface) SetDeleteOnTermination(v *bool) *NetworkInterface { + if o.DeleteOnTermination = v; o.DeleteOnTermination == nil { + o.nullFields = append(o.nullFields, "DeleteOnTermination") + } + return o +} + +func (o *NetworkInterface) SetSecurityGroupsIDs(v []string) *NetworkInterface { + if o.SecurityGroupsIDs = v; o.SecurityGroupsIDs == nil { + o.nullFields = append(o.nullFields, "SecurityGroupsIDs") + } + return o +} + +func (o *NetworkInterface) SetPrivateIPAddress(v *string) *NetworkInterface { + if o.PrivateIPAddress = v; o.PrivateIPAddress == nil { + o.nullFields = append(o.nullFields, "PrivateIPAddress") + } + return o +} + +func (o *NetworkInterface) SetSubnetId(v *string) *NetworkInterface { + if o.SubnetID = v; o.SubnetID == nil { + o.nullFields = append(o.nullFields, "SubnetID") + } + return o +} + +// endregion + +// region BlockDeviceMapping + +func (o BlockDeviceMapping) MarshalJSON() ([]byte, error) { + type noMethod BlockDeviceMapping + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *BlockDeviceMapping) SetDeviceName(v *string) *BlockDeviceMapping { + if o.DeviceName = v; o.DeviceName == nil { + o.nullFields = append(o.nullFields, "DeviceName") + } + return o +} + +func (o *BlockDeviceMapping) SetVirtualName(v *string) *BlockDeviceMapping { + if o.VirtualName = v; o.VirtualName == nil { + o.nullFields = append(o.nullFields, "VirtualName") + } + return o +} + +func (o *BlockDeviceMapping) SetEBS(v *EBS) *BlockDeviceMapping { + if o.EBS = v; o.EBS == nil { + o.nullFields = append(o.nullFields, "EBS") + } + return o +} + +// endregion + +// region EBS + +func (o EBS) MarshalJSON() ([]byte, error) { + type noMethod EBS + raw := noMethod(o) + return 
jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *EBS) SetDeleteOnTermination(v *bool) *EBS { + if o.DeleteOnTermination = v; o.DeleteOnTermination == nil { + o.nullFields = append(o.nullFields, "DeleteOnTermination") + } + return o +} + +func (o *EBS) SetEncrypted(v *bool) *EBS { + if o.Encrypted = v; o.Encrypted == nil { + o.nullFields = append(o.nullFields, "Encrypted") + } + return o +} + +func (o *EBS) SetKmsKeyId(v *string) *EBS { + if o.KmsKeyId = v; o.KmsKeyId == nil { + o.nullFields = append(o.nullFields, "KmsKeyId") + } + return o +} + +func (o *EBS) SetSnapshotId(v *string) *EBS { + if o.SnapshotID = v; o.SnapshotID == nil { + o.nullFields = append(o.nullFields, "SnapshotID") + } + return o +} + +func (o *EBS) SetVolumeType(v *string) *EBS { + if o.VolumeType = v; o.VolumeType == nil { + o.nullFields = append(o.nullFields, "VolumeType") + } + return o +} + +func (o *EBS) SetVolumeSize(v *int) *EBS { + if o.VolumeSize = v; o.VolumeSize == nil { + o.nullFields = append(o.nullFields, "VolumeSize") + } + return o +} + +func (o *EBS) SetIOPS(v *int) *EBS { + if o.IOPS = v; o.IOPS == nil { + o.nullFields = append(o.nullFields, "IOPS") + } + return o +} + +func (o *EBS) SetThroughput(v *int) *EBS { + if o.Throughput = v; o.Throughput == nil { + o.nullFields = append(o.nullFields, "Throughput") + } + return o +} + +// endregion + +// region IAMInstanceProfile + +func (o IAMInstanceProfile) MarshalJSON() ([]byte, error) { + type noMethod IAMInstanceProfile + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *IAMInstanceProfile) SetName(v *string) *IAMInstanceProfile { + if o.Name = v; o.Name == nil { + o.nullFields = append(o.nullFields, "Name") + } + return o +} + +func (o *IAMInstanceProfile) SetArn(v *string) *IAMInstanceProfile { + if o.Arn = v; o.Arn == nil { + o.nullFields = append(o.nullFields, "Arn") + } + return o +} + +// endregion + +// region CreditSpecification + +func (o 
CreditSpecification) MarshalJSON() ([]byte, error) { + type noMethod CreditSpecification + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *CreditSpecification) SetCPUCredits(v *string) *CreditSpecification { + if o.CPUCredits = v; o.CPUCredits == nil { + o.nullFields = append(o.nullFields, "CPUCredits") + } + return o +} + +// endregion + +// region RollStrategy + +func (o RollStrategy) MarshalJSON() ([]byte, error) { + type noMethod RollStrategy + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *RollStrategy) SetAction(v *string) *RollStrategy { + if o.Action = v; o.Action == nil { + o.nullFields = append(o.nullFields, "Action") + } + return o +} + +func (o *RollStrategy) SetShouldDrainInstances(v *bool) *RollStrategy { + if o.ShouldDrainInstances = v; o.ShouldDrainInstances == nil { + o.nullFields = append(o.nullFields, "ShouldDrainInstances") + } + return o +} + +func (o *RollStrategy) SetOnFailure(v *OnFailure) *RollStrategy { + if o.OnFailure = v; o.OnFailure == nil { + o.nullFields = append(o.nullFields, "OnFailure") + } + return o +} + +// endregion + +// region OnFailure + +func (o OnFailure) MarshalJSON() ([]byte, error) { + type noMethod OnFailure + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *OnFailure) SetActionType(v *string) *OnFailure { + if o.ActionType = v; o.ActionType == nil { + o.nullFields = append(o.nullFields, "ActionType") + } + return o +} + +func (o *OnFailure) SetShouldHandleAllBatches(v *bool) *OnFailure { + if o.ShouldHandleAllBatches = v; o.ShouldHandleAllBatches == nil { + o.nullFields = append(o.nullFields, "ShouldHandleAllBatches") + } + return o +} + +func (o *OnFailure) SetBatchNum(v *int) *OnFailure { + if o.BatchNum = v; o.BatchNum == nil { + o.nullFields = append(o.nullFields, "BatchNum") + } + return o +} + +func (o *OnFailure) SetDrainingTimeout(v *int) 
*OnFailure { + if o.DrainingTimeout = v; o.DrainingTimeout == nil { + o.nullFields = append(o.nullFields, "DrainingTimeout") + } + return o +} + +func (o *OnFailure) SetShouldDecrementTargetCapacity(v *bool) *OnFailure { + if o.ShouldDecrementTargetCapacity = v; o.ShouldDecrementTargetCapacity == nil { + o.nullFields = append(o.nullFields, "ShouldDecrementTargetCapacity") + } + return o +} + +// endregion + +// region CodeDeployIntegration + +func (o CodeDeployIntegration) MarshalJSON() ([]byte, error) { + type noMethod CodeDeployIntegration + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *CodeDeployIntegration) SetDeploymentGroups(v []*DeploymentGroup) *CodeDeployIntegration { + if o.DeploymentGroups = v; o.DeploymentGroups == nil { + o.nullFields = append(o.nullFields, "DeploymentGroups") + } + return o +} + +func (o *CodeDeployIntegration) SetCleanUpOnFailure(v *bool) *CodeDeployIntegration { + if o.CleanUpOnFailure = v; o.CleanUpOnFailure == nil { + o.nullFields = append(o.nullFields, "CleanUpOnFailure") + } + return o +} + +func (o *CodeDeployIntegration) SetTerminateInstanceOnFailure(v *bool) *CodeDeployIntegration { + if o.TerminateInstanceOnFailure = v; o.TerminateInstanceOnFailure == nil { + o.nullFields = append(o.nullFields, "TerminateInstanceOnFailure") + } + return o +} + +// endregion + +// region DeploymentGroup + +func (o DeploymentGroup) MarshalJSON() ([]byte, error) { + type noMethod DeploymentGroup + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *DeploymentGroup) SetApplicationName(v *string) *DeploymentGroup { + if o.ApplicationName = v; o.ApplicationName == nil { + o.nullFields = append(o.nullFields, "ApplicationName") + } + return o +} + +func (o *DeploymentGroup) SetDeploymentGroupName(v *string) *DeploymentGroup { + if o.DeploymentGroupName = v; o.DeploymentGroupName == nil { + o.nullFields = append(o.nullFields, 
"DeploymentGroupName") + } + return o +} + +// endregion + +// region OpsWorksIntegration + +func (o OpsWorksIntegration) MarshalJSON() ([]byte, error) { + type noMethod OpsWorksIntegration + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *OpsWorksIntegration) SetLayerId(v *string) *OpsWorksIntegration { + if o.LayerID = v; o.LayerID == nil { + o.nullFields = append(o.nullFields, "LayerID") + } + return o +} + +func (o *OpsWorksIntegration) SetStackType(v *string) *OpsWorksIntegration { + if o.StackType = v; o.StackType == nil { + o.nullFields = append(o.nullFields, "StackType") + } + return o +} + +// endregion + +// region Scale Request + +type ScaleUpSpotItem struct { + SpotInstanceRequestID *string `json:"spotInstanceRequestId,omitempty"` + AvailabilityZone *string `json:"availabilityZone,omitempty"` + InstanceType *string `json:"instanceType,omitempty"` +} + +type ScaleUpOnDemandItem struct { + InstanceID *string `json:"instanceId,omitempty"` + AvailabilityZone *string `json:"availabilityZone,omitempty"` + InstanceType *string `json:"instanceType,omitempty"` +} + +type ScaleDownSpotItem struct { + SpotInstanceRequestID *string `json:"spotInstanceRequestId,omitempty"` +} + +type ScaleDownOnDemandItem struct { + InstanceID *string `json:"instanceId,omitempty"` +} + +type ScaleItem struct { + NewSpotRequests []*ScaleUpSpotItem `json:"newSpotRequests,omitempty"` + NewInstances []*ScaleUpOnDemandItem `json:"newInstances,omitempty"` + VictimSpotRequests []*ScaleDownSpotItem `json:"victimSpotRequests,omitempty"` + VictimInstances []*ScaleDownOnDemandItem `json:"victimInstances,omitempty"` +} + +type ScaleGroupInput struct { + GroupID *string `json:"groupId,omitempty"` + ScaleType *string `json:"type,omitempty"` + Adjustment *int `json:"adjustment,omitempty"` +} + +type ScaleGroupOutput struct { + Items []*ScaleItem `json:"items"` +} + +func scaleUpResponseFromJSON(in []byte) (*ScaleGroupOutput, error) { + var rw 
client.Response + if err := json.Unmarshal(in, &rw); err != nil { + return nil, err + } + + var retVal ScaleGroupOutput + retVal.Items = make([]*ScaleItem, len(rw.Response.Items)) + for i, rb := range rw.Response.Items { + b, err := scaleUpItemFromJSON(rb) + if err != nil { + return nil, err + } + retVal.Items[i] = b + } + + return &retVal, nil +} + +func scaleUpItemFromJSON(in []byte) (*ScaleItem, error) { + var rw *ScaleItem + if err := json.Unmarshal(in, &rw); err != nil { + return nil, err + } + return rw, nil +} + +func scaleFromHttpResponse(resp *http.Response) (*ScaleGroupOutput, error) { + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + return scaleUpResponseFromJSON(body) +} + +func (s *ServiceOp) Scale(ctx context.Context, input *ScaleGroupInput) (*ScaleGroupOutput, error) { + path, err := uritemplates.Expand("/aws/ec2/group/{groupId}/scale/{type}", uritemplates.Values{ + "groupId": spotinst.StringValue(input.GroupID), + "type": spotinst.StringValue(input.ScaleType), + }) + if err != nil { + return nil, err + } + + // We do not need the ID anymore so let's drop it. + input.GroupID = nil + + r := client.NewRequest(http.MethodPut, path) + + if input.Adjustment != nil { + r.Params.Set("adjustment", strconv.Itoa(*input.Adjustment)) + } + r.Obj = input + + resp, err := client.RequireOK(s.Client.Do(ctx, r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + output, err := scaleFromHttpResponse(resp) + if err != nil { + return nil, err + } + + return output, err +} + +// endregion + +// region SuspendProcesses + +type SuspendProcesses struct { + Suspensions []*Suspension `json:"suspensions,omitempty"` + Processes []string `json:"processes,omitempty"` +} + +type Suspension struct { + Name *string `json:"name,omitempty"` + TTLInMinutes *int `json:"ttlInMinutes,omitempty"` + + // Read-only fields. 
+ ExpiresAt *time.Time `json:"expiresAt,omitempty"` + + forceSendFields []string + nullFields []string +} + +type CreateSuspensionsInput struct { + GroupID *string `json:"groupId,omitempty"` + Suspensions []*Suspension `json:"suspensions,omitempty"` +} + +type CreateSuspensionsOutput struct { + SuspendProcesses *SuspendProcesses `json:"suspendProcesses,omitempty"` +} + +type ListSuspensionsInput struct { + GroupID *string `json:"groupId,omitempty"` +} + +type ListSuspensionsOutput struct { + SuspendProcesses *SuspendProcesses `json:"suspendProcesses,omitempty"` +} + +type DeleteSuspensionsInput struct { + GroupID *string `json:"groupId,omitempty"` + Processes []string `json:"processes,omitempty"` +} + +type DeleteSuspensionsOutput struct{} + +func suspendProcessesFromHttpResponse(resp *http.Response) ([]*SuspendProcesses, error) { + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + return suspendProcessesFromJSON(body) +} + +func suspendProcessesObjFromJSON(in []byte) (*SuspendProcesses, error) { + v := new(SuspendProcesses) + if err := json.Unmarshal(in, v); err != nil { + return nil, err + } + return v, nil +} + +func suspendProcessesFromJSON(in []byte) ([]*SuspendProcesses, error) { + var rw client.Response + if err := json.Unmarshal(in, &rw); err != nil { + return nil, err + } + out := make([]*SuspendProcesses, len(rw.Response.Items)) + if len(out) == 0 { + return out, nil + } + for i, rb := range rw.Response.Items { + v, err := suspendProcessesObjFromJSON(rb) + if err != nil { + return nil, err + } + out[i] = v + } + return out, nil +} + +func (s *ServiceOp) CreateSuspensions(ctx context.Context, input *CreateSuspensionsInput) (*CreateSuspensionsOutput, error) { + path, err := uritemplates.Expand("/aws/ec2/group/{groupId}/suspension", uritemplates.Values{ + "groupId": spotinst.StringValue(input.GroupID), + }) + if err != nil { + return nil, err + } + + // We do not need the ID anymore so let's drop it. 
+ input.GroupID = nil + + r := client.NewRequest(http.MethodPost, path) + r.Obj = input + + resp, err := client.RequireOK(s.Client.Do(ctx, r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + suspendProcesses, err := suspendProcessesFromHttpResponse(resp) + if err != nil { + return nil, err + } + + output := new(CreateSuspensionsOutput) + if len(suspendProcesses) > 0 { + output.SuspendProcesses = suspendProcesses[0] + } + + return output, nil +} + +func (s *ServiceOp) ListSuspensions(ctx context.Context, input *ListSuspensionsInput) (*ListSuspensionsOutput, error) { + path, err := uritemplates.Expand("/aws/ec2/group/{groupId}/suspension", uritemplates.Values{ + "groupId": spotinst.StringValue(input.GroupID), + }) + if err != nil { + return nil, err + } + + r := client.NewRequest(http.MethodGet, path) + resp, err := client.RequireOK(s.Client.Do(ctx, r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + suspendProcesses, err := suspendProcessesFromHttpResponse(resp) + if err != nil { + return nil, err + } + + output := new(ListSuspensionsOutput) + if len(suspendProcesses) > 0 { + output.SuspendProcesses = suspendProcesses[0] + } + + return output, nil +} + +func (s *ServiceOp) DeleteSuspensions(ctx context.Context, input *DeleteSuspensionsInput) (*DeleteSuspensionsOutput, error) { + path, err := uritemplates.Expand("/aws/ec2/group/{groupId}/suspension", uritemplates.Values{ + "groupId": spotinst.StringValue(input.GroupID), + }) + if err != nil { + return nil, err + } + + // We do not need the ID anymore so let's drop it. 
+ input.GroupID = nil + + r := client.NewRequest(http.MethodDelete, path) + r.Obj = input + + resp, err := client.RequireOK(s.Client.Do(ctx, r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return &DeleteSuspensionsOutput{}, nil +} + +func (o Suspension) MarshalJSON() ([]byte, error) { + type noMethod Suspension + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *Suspension) SetName(v *string) *Suspension { + if o.Name = v; o.Name == nil { + o.nullFields = append(o.nullFields, "Name") + } + return o +} + +func (o *Suspension) SetTTLInMinutes(v *int) *Suspension { + if o.TTLInMinutes = v; o.TTLInMinutes == nil { + o.nullFields = append(o.nullFields, "TTLInMinutes") + } + return o +} + +// endregion + +// region MetadataOptions + +func (o MetadataOptions) MarshalJSON() ([]byte, error) { + type noMethod MetadataOptions + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *MetadataOptions) SetHTTPTokens(v *string) *MetadataOptions { + if o.HTTPTokens = v; o.HTTPTokens == nil { + o.nullFields = append(o.nullFields, "HTTPTokens") + } + return o +} + +func (o *MetadataOptions) SetHTTPPutResponseHopLimit(v *int) *MetadataOptions { + if o.HTTPPutResponseHopLimit = v; o.HTTPPutResponseHopLimit == nil { + o.nullFields = append(o.nullFields, "HTTPPutResponseHopLimit") + } + return o +} + +func (o *MetadataOptions) SetInstanceMetadataTags(v *string) *MetadataOptions { + if o.InstanceMetadataTags = v; o.InstanceMetadataTags == nil { + o.nullFields = append(o.nullFields, "InstanceMetadataTags") + } + return o +} + +// endregion + +// region CPUOptions + +func (o CPUOptions) MarshalJSON() ([]byte, error) { + type noMethod CPUOptions + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} +func (o *CPUOptions) SetThreadsPerCore(v *int) *CPUOptions { + if o.ThreadsPerCore = v; o.ThreadsPerCore == nil { + o.nullFields = 
append(o.nullFields, "ThreadsPerCore") + } + return o +} + +// endregion + +// region StatefulInstance + +func (o StatefulInstance) MarshalJSON() ([]byte, error) { + type noMethod StatefulInstance + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *StatefulInstance) SetStatefulInstanceID(v *string) *StatefulInstance { + if o.StatefulInstanceID = v; o.StatefulInstanceID == nil { + o.nullFields = append(o.nullFields, "StatefulInstanceID") + } + return o +} + +func (o *StatefulInstance) SetInstanceID(v *string) *StatefulInstance { + if o.InstanceID = v; o.InstanceID == nil { + o.nullFields = append(o.nullFields, "InstanceID") + } + return o +} + +func (o *StatefulInstance) SetState(v *string) *StatefulInstance { + if o.State = v; o.State == nil { + o.nullFields = append(o.nullFields, "State") + } + return o +} + +func (o *StatefulInstance) SetPrivateIP(v *string) *StatefulInstance { + if o.PrivateIP = v; o.PrivateIP == nil { + o.nullFields = append(o.nullFields, "PrivateIP") + } + return o +} + +func (o *StatefulInstance) SetImageID(v *string) *StatefulInstance { + if o.ImageID = v; o.ImageID == nil { + o.nullFields = append(o.nullFields, "ImageID") + } + return o +} + +func (o *StatefulInstance) SetDevices(v []*Device) *StatefulInstance { + if o.Devices = v; o.Devices == nil { + o.nullFields = append(o.nullFields, "Devices") + } + return o +} + +// endregion + +// region Device + +func (o Device) MarshalJSON() ([]byte, error) { + type noMethod Device + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *Device) SetDeviceName(v *string) *Device { + if o.DeviceName = v; o.DeviceName == nil { + o.nullFields = append(o.nullFields, "DeviceName") + } + return o +} + +func (o *Device) SetVolumeID(v *string) *Device { + if o.VolumeID = v; o.VolumeID == nil { + o.nullFields = append(o.nullFields, "VolumeID") + } + return o +} + +func (o *Device) SetSnapshotID(v *string) 
*Device { + if o.SnapshotID = v; o.SnapshotID == nil { + o.nullFields = append(o.nullFields, "SnapshotID") + } + return o +} + +// endregion + +// region ResourceTagSpecification + +func (o ResourceTagSpecification) MarshalJSON() ([]byte, error) { + type noMethod ResourceTagSpecification + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *ResourceTagSpecification) SetVolumes(v *Volumes) *ResourceTagSpecification { + if o.Volumes = v; o.Volumes == nil { + o.nullFields = append(o.nullFields, "Volumes") + } + return o +} + +func (o *ResourceTagSpecification) SetSnapshots(v *Snapshots) *ResourceTagSpecification { + if o.Snapshots = v; o.Snapshots == nil { + o.nullFields = append(o.nullFields, "Snapshots") + } + return o +} + +func (o *ResourceTagSpecification) SetENIs(v *ENIs) *ResourceTagSpecification { + if o.ENIs = v; o.ENIs == nil { + o.nullFields = append(o.nullFields, "ENIs") + } + return o +} + +func (o *ResourceTagSpecification) SetAMIs(v *AMIs) *ResourceTagSpecification { + if o.AMIs = v; o.AMIs == nil { + o.nullFields = append(o.nullFields, "AMIs") + } + return o +} + +// endregion + +// region Volumes + +func (o Volumes) MarshalJSON() ([]byte, error) { + type noMethod Volumes + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *Volumes) SetShouldTag(v *bool) *Volumes { + if o.ShouldTag = v; o.ShouldTag == nil { + o.nullFields = append(o.nullFields, "ShouldTag") + } + return o +} + +// endregion + +// region Snapshots + +func (o Snapshots) MarshalJSON() ([]byte, error) { + type noMethod Snapshots + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *Snapshots) SetShouldTag(v *bool) *Snapshots { + if o.ShouldTag = v; o.ShouldTag == nil { + o.nullFields = append(o.nullFields, "ShouldTag") + } + return o +} + +// endregion + +// region ENIs + +func (o ENIs) MarshalJSON() ([]byte, error) { + type noMethod ENIs + 
raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *ENIs) SetShouldTag(v *bool) *ENIs { + if o.ShouldTag = v; o.ShouldTag == nil { + o.nullFields = append(o.nullFields, "ShouldTag") + } + return o +} + +// endregion + +// region AMIs + +func (o AMIs) MarshalJSON() ([]byte, error) { + type noMethod AMIs + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *AMIs) SetShouldTag(v *bool) *AMIs { + if o.ShouldTag = v; o.ShouldTag == nil { + o.nullFields = append(o.nullFields, "ShouldTag") + } + return o +} + +// endregion + +// region Logging + +func (o Logging) MarshalJSON() ([]byte, error) { + type noMethod Logging + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *Logging) SetExport(v *Export) *Logging { + if o.Export = v; o.Export == nil { + o.nullFields = append(o.nullFields, "Export") + } + return o +} + +// endregion + +// region Export + +func (o Export) MarshalJSON() ([]byte, error) { + type noMethod Export + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *Export) SetS3(v *S3) *Export { + if o.S3 = v; o.S3 == nil { + o.nullFields = append(o.nullFields, "S3") + } + return o +} + +// endregion + +// region S3 + +func (o S3) MarshalJSON() ([]byte, error) { + type noMethod S3 + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *S3) SetId(v *string) *S3 { + if o.Id = v; o.Id == nil { + o.nullFields = append(o.nullFields, "Id") + } + return o +} + +// endregion diff --git a/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/aws/service.go b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/aws/service.go new file mode 100644 index 000000000000..248b773821a1 --- /dev/null +++ 
b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/aws/service.go @@ -0,0 +1,65 @@ +package aws + +import ( + "context" + + "github.com/spotinst/spotinst-sdk-go/spotinst" + "github.com/spotinst/spotinst-sdk-go/spotinst/client" + "github.com/spotinst/spotinst-sdk-go/spotinst/session" +) + +// Service provides the API operation methods for making requests to endpoints +// of the Spotinst API. See this package's package overview docs for details on +// the service. +type Service interface { + List(context.Context, *ListGroupsInput) (*ListGroupsOutput, error) + Create(context.Context, *CreateGroupInput) (*CreateGroupOutput, error) + Read(context.Context, *ReadGroupInput) (*ReadGroupOutput, error) + Update(context.Context, *UpdateGroupInput) (*UpdateGroupOutput, error) + Delete(context.Context, *DeleteGroupInput) (*DeleteGroupOutput, error) + Status(context.Context, *StatusGroupInput) (*StatusGroupOutput, error) + Scale(context.Context, *ScaleGroupInput) (*ScaleGroupOutput, error) + Detach(context.Context, *DetachGroupInput) (*DetachGroupOutput, error) + + DeploymentStatus(context.Context, *DeploymentStatusInput) (*RollGroupOutput, error) + DeploymentStatusECS(context.Context, *DeploymentStatusInput) (*RollGroupOutput, error) + StopDeployment(context.Context, *StopDeploymentInput) (*StopDeploymentOutput, error) + + Roll(context.Context, *RollGroupInput) (*RollGroupOutput, error) + RollStatus(context.Context, *RollStatusInput) (*RollStatusOutput, error) + RollECS(context.Context, *RollECSGroupInput) (*RollGroupOutput, error) + + GetInstanceHealthiness(context.Context, *GetInstanceHealthinessInput) (*GetInstanceHealthinessOutput, error) + GetGroupEvents(context.Context, *GetGroupEventsInput) (*GetGroupEventsOutput, error) + + ImportBeanstalkEnv(context.Context, *ImportBeanstalkInput) (*ImportBeanstalkOutput, error) + StartBeanstalkMaintenance(context.Context, *BeanstalkMaintenanceInput) (*BeanstalkMaintenanceOutput, error) + 
FinishBeanstalkMaintenance(context.Context, *BeanstalkMaintenanceInput) (*BeanstalkMaintenanceOutput, error) + GetBeanstalkMaintenanceStatus(context.Context, *BeanstalkMaintenanceInput) (*string, error) + + CreateSuspensions(context.Context, *CreateSuspensionsInput) (*CreateSuspensionsOutput, error) + ListSuspensions(context.Context, *ListSuspensionsInput) (*ListSuspensionsOutput, error) + DeleteSuspensions(context.Context, *DeleteSuspensionsInput) (*DeleteSuspensionsOutput, error) + + ListStatefulInstances(context.Context, *ListStatefulInstancesInput) (*ListStatefulInstancesOutput, error) + PauseStatefulInstance(context.Context, *PauseStatefulInstanceInput) (*PauseStatefulInstanceOutput, error) + ResumeStatefulInstance(context.Context, *ResumeStatefulInstanceInput) (*ResumeStatefulInstanceOutput, error) + RecycleStatefulInstance(context.Context, *RecycleStatefulInstanceInput) (*RecycleStatefulInstanceOutput, error) + DeallocateStatefulInstance(context.Context, *DeallocateStatefulInstanceInput) (*DeallocateStatefulInstanceOutput, error) +} + +type ServiceOp struct { + Client *client.Client +} + +var _ Service = &ServiceOp{} + +func New(sess *session.Session, cfgs ...*spotinst.Config) *ServiceOp { + cfg := &spotinst.Config{} + cfg.Merge(sess.Config) + cfg.Merge(cfgs...) 
+ + // Pass the merged config (session config + caller overrides) to the client; + // passing sess.Config here would silently drop the variadic cfgs overrides. + return &ServiceOp{ + Client: client.New(cfg), + } +} diff --git a/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/aws/tag.go b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/aws/tag.go new file mode 100644 index 000000000000..c552efcb993f --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/aws/tag.go @@ -0,0 +1,31 @@ +package aws + +import "github.com/spotinst/spotinst-sdk-go/spotinst/util/jsonutil" + +type Tag struct { + Key *string `json:"tagKey,omitempty"` + Value *string `json:"tagValue,omitempty"` + + forceSendFields []string + nullFields []string +} + +func (o Tag) MarshalJSON() ([]byte, error) { + type noMethod Tag + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *Tag) SetKey(v *string) *Tag { + if o.Key = v; o.Key == nil { + o.nullFields = append(o.nullFields, "Key") + } + return o +} + +func (o *Tag) SetValue(v *string) *Tag { + if o.Value = v; o.Value == nil { + o.nullFields = append(o.nullFields, "Value") + } + return o +} diff --git a/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/azure/v3/azure.go b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/azure/v3/azure.go new file mode 100644 index 000000000000..8f53835ca377 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/azure/v3/azure.go @@ -0,0 +1,997 @@ +package v3 + +import ( + "context" + "encoding/json" + "io/ioutil" + "net/http" + "time" + + "github.com/spotinst/spotinst-sdk-go/spotinst" + "github.com/spotinst/spotinst-sdk-go/spotinst/client" + "github.com/spotinst/spotinst-sdk-go/spotinst/util/jsonutil" + "github.com/spotinst/spotinst-sdk-go/spotinst/util/uritemplates" +) + +type Group struct { + ID *string `json:"id,omitempty"` + Name
*string `json:"name,omitempty"` + ResourceGroupName *string `json:"resourceGroupName,omitempty"` + Region *string `json:"region,omitempty"` + Capacity *Capacity `json:"capacity,omitempty"` + Compute *Compute `json:"compute,omitempty"` + Strategy *Strategy `json:"strategy,omitempty"` + + // Read-only fields. + CreatedAt *time.Time `json:"createdAt,omitempty"` + UpdatedAt *time.Time `json:"updatedAt,omitempty"` + + // forceSendFields is a list of field names (e.g. "Keys") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + forceSendFields []string + + // nullFields is a list of field names (e.g. "Keys") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ nullFields []string +} + +type Strategy struct { + OnDemandCount *int `json:"onDemandCount,omitempty"` + DrainingTimeout *int `json:"drainingTimeout,omitempty"` + SpotPercentage *int `json:"spotPercentage,omitempty"` + FallbackToOnDemand *bool `json:"fallbackToOd,omitempty"` + + forceSendFields []string + nullFields []string +} + +type Capacity struct { + Minimum *int `json:"minimum,omitempty"` + Maximum *int `json:"maximum,omitempty"` + Target *int `json:"target,omitempty"` + + forceSendFields []string + nullFields []string +} + +type Compute struct { + VMSizes *VMSizes `json:"vmSizes,omitempty"` + OS *string `json:"os,omitempty"` + LaunchSpecification *LaunchSpecification `json:"launchSpecification,omitempty"` + + forceSendFields []string + nullFields []string +} + +type VMSizes struct { + OnDemandSizes []string `json:"odSizes,omitempty"` + SpotSizes []string `json:"spotSizes,omitempty"` + + forceSendFields []string + nullFields []string +} + +type LaunchSpecification struct { + Image *Image `json:"image,omitempty"` + Network *Network `json:"network,omitempty"` + Login *Login `json:"login,omitempty"` + CustomData *string `json:"customData,omitempty"` + ManagedServiceIdentities []*ManagedServiceIdentity `json:"managedServiceIdentities,omitempty"` + Tags []*Tags `json:"tags,omitempty"` + LoadBalancersConfig *LoadBalancersConfig `json:"loadBalancersConfig,omitempty"` + ShutdownScript *string `json:"shutdownScript,omitempty"` + + forceSendFields []string + nullFields []string +} + +type LoadBalancersConfig struct { + LoadBalancers []*LoadBalancer `json:"loadBalancers,omitempty"` + + forceSendFields []string + nullFields []string +} + +type LoadBalancer struct { + Type *string `json:"type,omitempty"` + ResourceGroupName *string `json:"resourceGroupName,omitempty"` + Name *string `json:"name,omitempty"` + SKU *string `json:"sku,omitempty"` + BackendPoolNames []string `json:"backendPoolNames,omitempty"` + + forceSendFields []string + nullFields []string +} + +type 
Image struct { + MarketPlace *MarketPlaceImage `json:"marketplace,omitempty"` + Custom *CustomImage `json:"custom,omitempty"` + + forceSendFields []string + nullFields []string +} + +type MarketPlaceImage struct { + Publisher *string `json:"publisher,omitempty"` + Offer *string `json:"offer,omitempty"` + SKU *string `json:"sku,omitempty"` + Version *string `json:"version,omitempty"` + + forceSendFields []string + nullFields []string +} + +type CustomImage struct { + ResourceGroupName *string `json:"resourceGroupName,omitempty"` + Name *string `json:"name,omitempty"` + + forceSendFields []string + nullFields []string +} + +type Network struct { + VirtualNetworkName *string `json:"virtualNetworkName,omitempty"` + ResourceGroupName *string `json:"resourceGroupName,omitempty"` + NetworkInterfaces []*NetworkInterface `json:"networkInterfaces,omitempty"` + + forceSendFields []string + nullFields []string +} + +type NetworkInterface struct { + SubnetName *string `json:"subnetName,omitempty"` + AssignPublicIP *bool `json:"assignPublicIp,omitempty"` + IsPrimary *bool `json:"isPrimary,omitempty"` + AdditionalIPConfigs []*AdditionalIPConfig `json:"additionalIpConfigurations,omitempty"` + ApplicationSecurityGroups []*ApplicationSecurityGroup `json:"applicationSecurityGroups,omitempty"` + + forceSendFields []string + nullFields []string +} + +type AdditionalIPConfig struct { + Name *string `json:"name,omitempty"` + PrivateIPAddressVersion *string `json:"privateIpAddressVersion,omitempty"` + + forceSendFields []string + nullFields []string +} + +type Login struct { + UserName *string `json:"userName,omitempty"` + SSHPublicKey *string `json:"sshPublicKey,omitempty"` + Password *string `json:"password,omitempty"` + + forceSendFields []string + nullFields []string +} + +type ApplicationSecurityGroup struct { + Name *string `json:"name,omitempty"` + ResourceGroupName *string `json:"resourceGroupName,omitempty"` + + forceSendFields []string + nullFields []string +} + +type 
ManagedServiceIdentity struct { + ResourceGroupName *string `json:"resourceGroupName,omitempty"` + Name *string `json:"name,omitempty"` + + forceSendFields []string + nullFields []string +} + +type Tags struct { + TagKey *string `json:"tagKey,omitempty"` + TagValue *string `json:"tagValue,omitempty"` + + forceSendFields []string + nullFields []string +} + +type CreateGroupInput struct { + Group *Group `json:"group,omitempty"` +} + +type CreateGroupOutput struct { + Group *Group `json:"group,omitempty"` +} + +type ReadGroupInput struct { + GroupID *string `json:"groupId,omitempty"` +} + +type ReadGroupOutput struct { + Group *Group `json:"group,omitempty"` +} + +type UpdateGroupInput struct { + Group *Group `json:"group,omitempty"` +} + +type UpdateGroupOutput struct { + Group *Group `json:"group,omitempty"` +} + +type DeleteGroupInput struct { + GroupID *string `json:"groupId,omitempty"` +} + +type DeleteGroupOutput struct{} + +type ListGroupsInput struct{} + +type ListGroupsOutput struct { + Groups []*Group `json:"groups,omitempty"` +} + +// region Unmarshallers + +func groupFromJSON(in []byte) (*Group, error) { + b := new(Group) + if err := json.Unmarshal(in, b); err != nil { + return nil, err + } + return b, nil +} + +func groupsFromJSON(in []byte) ([]*Group, error) { + var rw client.Response + if err := json.Unmarshal(in, &rw); err != nil { + return nil, err + } + out := make([]*Group, len(rw.Response.Items)) + if len(out) == 0 { + return out, nil + } + for i, rb := range rw.Response.Items { + b, err := groupFromJSON(rb) + if err != nil { + return nil, err + } + out[i] = b + } + return out, nil +} + +func groupsFromHttpResponse(resp *http.Response) ([]*Group, error) { + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + return groupsFromJSON(body) +} + +// endregion + +// region API requests + +func (s *ServiceOp) List(ctx context.Context, input *ListGroupsInput) (*ListGroupsOutput, error) { + r := client.NewRequest(http.MethodGet, 
"/azure/compute/group") + resp, err := client.RequireOK(s.Client.Do(ctx, r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + gs, err := groupsFromHttpResponse(resp) + if err != nil { + return nil, err + } + + return &ListGroupsOutput{Groups: gs}, nil +} + +func (s *ServiceOp) Create(ctx context.Context, input *CreateGroupInput) (*CreateGroupOutput, error) { + r := client.NewRequest(http.MethodPost, "/azure/compute/group") + r.Obj = input + + resp, err := client.RequireOK(s.Client.Do(ctx, r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + gs, err := groupsFromHttpResponse(resp) + if err != nil { + return nil, err + } + + output := new(CreateGroupOutput) + if len(gs) > 0 { + output.Group = gs[0] + } + + return output, nil +} + +func (s *ServiceOp) Read(ctx context.Context, input *ReadGroupInput) (*ReadGroupOutput, error) { + path, err := uritemplates.Expand("/azure/compute/group/{groupId}", uritemplates.Values{ + "groupId": spotinst.StringValue(input.GroupID), + }) + if err != nil { + return nil, err + } + + r := client.NewRequest(http.MethodGet, path) + resp, err := client.RequireOK(s.Client.Do(ctx, r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + gs, err := groupsFromHttpResponse(resp) + if err != nil { + return nil, err + } + + output := new(ReadGroupOutput) + if len(gs) > 0 { + output.Group = gs[0] + } + + return output, nil +} + +func (s *ServiceOp) Update(ctx context.Context, input *UpdateGroupInput) (*UpdateGroupOutput, error) { + path, err := uritemplates.Expand("/azure/compute/group/{groupId}", uritemplates.Values{ + "groupId": spotinst.StringValue(input.Group.ID), + }) + if err != nil { + return nil, err + } + + // We do NOT need the ID anymore, so let's drop it. 
+ input.Group.ID = nil + + r := client.NewRequest(http.MethodPut, path) + r.Obj = input + + resp, err := client.RequireOK(s.Client.Do(ctx, r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + gs, err := groupsFromHttpResponse(resp) + if err != nil { + return nil, err + } + + output := new(UpdateGroupOutput) + if len(gs) > 0 { + output.Group = gs[0] + } + + return output, nil +} + +func (s *ServiceOp) Delete(ctx context.Context, input *DeleteGroupInput) (*DeleteGroupOutput, error) { + path, err := uritemplates.Expand("/azure/compute/group/{groupId}", uritemplates.Values{ + "groupId": spotinst.StringValue(input.GroupID), + }) + if err != nil { + return nil, err + } + + r := client.NewRequest(http.MethodDelete, path) + resp, err := client.RequireOK(s.Client.Do(ctx, r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return &DeleteGroupOutput{}, nil +} + +// endregion + +// region Group + +func (o Group) MarshalJSON() ([]byte, error) { + type noMethod Group + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *Group) SetId(v *string) *Group { + if o.ID = v; o.ID == nil { + o.nullFields = append(o.nullFields, "ID") + } + return o +} + +func (o *Group) SetName(v *string) *Group { + if o.Name = v; o.Name == nil { + o.nullFields = append(o.nullFields, "Name") + } + return o +} + +func (o *Group) SetResourceGroupName(v *string) *Group { + if o.ResourceGroupName = v; o.ResourceGroupName == nil { + o.nullFields = append(o.nullFields, "ResourceGroupName") + } + return o +} + +func (o *Group) SetCapacity(v *Capacity) *Group { + if o.Capacity = v; o.Capacity == nil { + o.nullFields = append(o.nullFields, "Capacity") + } + return o +} + +func (o *Group) SetCompute(v *Compute) *Group { + if o.Compute = v; o.Compute == nil { + o.nullFields = append(o.nullFields, "Compute") + } + return o +} + +func (o *Group) SetStrategy(v *Strategy) *Group { + if o.Strategy = v; o.Strategy == nil { + 
o.nullFields = append(o.nullFields, "Strategy") + } + return o +} + +func (o *Group) SetRegion(v *string) *Group { + if o.Region = v; o.Region == nil { + o.nullFields = append(o.nullFields, "Region") + } + return o +} + +// endregion + +// region Strategy + +func (o Strategy) MarshalJSON() ([]byte, error) { + type noMethod Strategy + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *Strategy) SetOnDemandCount(v *int) *Strategy { + if o.OnDemandCount = v; o.OnDemandCount == nil { + o.nullFields = append(o.nullFields, "OnDemandCount") + } + return o +} + +func (o *Strategy) SetDrainingTimeout(v *int) *Strategy { + if o.DrainingTimeout = v; o.DrainingTimeout == nil { + o.nullFields = append(o.nullFields, "DrainingTimeout") + } + return o +} + +func (o *Strategy) SetSpotPercentage(v *int) *Strategy { + if o.SpotPercentage = v; o.SpotPercentage == nil { + o.nullFields = append(o.nullFields, "SpotPercentage") + } + return o +} + +func (o *Strategy) SetFallbackToOnDemand(v *bool) *Strategy { + if o.FallbackToOnDemand = v; o.FallbackToOnDemand == nil { + o.nullFields = append(o.nullFields, "FallbackToOnDemand") + } + return o +} + +// endregion + +// region Capacity + +func (o Capacity) MarshalJSON() ([]byte, error) { + type noMethod Capacity + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *Capacity) SetMinimum(v *int) *Capacity { + if o.Minimum = v; o.Minimum == nil { + o.nullFields = append(o.nullFields, "Minimum") + } + return o +} + +func (o *Capacity) SetMaximum(v *int) *Capacity { + if o.Maximum = v; o.Maximum == nil { + o.nullFields = append(o.nullFields, "Maximum") + } + return o +} + +func (o *Capacity) SetTarget(v *int) *Capacity { + if o.Target = v; o.Target == nil { + o.nullFields = append(o.nullFields, "Target") + } + return o +} + +// endregion + +// region Compute + +func (o Compute) MarshalJSON() ([]byte, error) { + type noMethod Compute + raw := 
noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *Compute) SetVMSizes(v *VMSizes) *Compute { + if o.VMSizes = v; o.VMSizes == nil { + o.nullFields = append(o.nullFields, "VMSizes") + } + return o +} + +func (o *Compute) SetOS(v *string) *Compute { + if o.OS = v; o.OS == nil { + o.nullFields = append(o.nullFields, "OS") + } + return o +} + +func (o *Compute) SetLaunchSpecification(v *LaunchSpecification) *Compute { + if o.LaunchSpecification = v; o.LaunchSpecification == nil { + o.nullFields = append(o.nullFields, "LaunchSpecification") + } + return o +} + +// endregion + +// region VMSize + +func (o VMSizes) MarshalJSON() ([]byte, error) { + type noMethod VMSizes + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *VMSizes) SetOnDemandSizes(v []string) *VMSizes { + if o.OnDemandSizes = v; o.OnDemandSizes == nil { + o.nullFields = append(o.nullFields, "OnDemandSizes") + } + return o +} + +func (o *VMSizes) SetSpotSizes(v []string) *VMSizes { + if o.SpotSizes = v; o.SpotSizes == nil { + o.nullFields = append(o.nullFields, "SpotSizes") + } + return o +} + +// endregion + +// region LaunchSpecification + +func (o LaunchSpecification) MarshalJSON() ([]byte, error) { + type noMethod LaunchSpecification + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *LaunchSpecification) SetImage(v *Image) *LaunchSpecification { + if o.Image = v; o.Image == nil { + o.nullFields = append(o.nullFields, "Image") + } + return o +} + +func (o *LaunchSpecification) SetNetwork(v *Network) *LaunchSpecification { + if o.Network = v; o.Network == nil { + o.nullFields = append(o.nullFields, "Network") + } + return o +} + +func (o *LaunchSpecification) SetLogin(v *Login) *LaunchSpecification { + if o.Login = v; o.Login == nil { + o.nullFields = append(o.nullFields, "Login") + } + return o +} + +func (o *LaunchSpecification) SetCustomData(v *string) 
*LaunchSpecification { + if o.CustomData = v; o.CustomData == nil { + o.nullFields = append(o.nullFields, "CustomData") + } + return o +} + +func (o *LaunchSpecification) SetManagedServiceIdentities(v []*ManagedServiceIdentity) *LaunchSpecification { + if o.ManagedServiceIdentities = v; o.ManagedServiceIdentities == nil { + o.nullFields = append(o.nullFields, "ManagedServiceIdentities") + } + return o +} + +func (o *LaunchSpecification) SetLoadBalancersConfig(v *LoadBalancersConfig) *LaunchSpecification { + if o.LoadBalancersConfig = v; o.LoadBalancersConfig == nil { + o.nullFields = append(o.nullFields, "LoadBalancersConfig") + } + return o +} + +// SetShutdownScript sets the shutdown script used when draining instances +func (o *LaunchSpecification) SetShutdownScript(v *string) *LaunchSpecification { + if o.ShutdownScript = v; o.ShutdownScript == nil { + o.nullFields = append(o.nullFields, "ShutdownScript") + } + return o +} + +func (o *LaunchSpecification) SetTags(v []*Tags) *LaunchSpecification { + if o.Tags = v; o.Tags == nil { + o.nullFields = append(o.nullFields, "Tags") + } + return o +} + +// endregion + +// region Image + +func (o Image) MarshalJSON() ([]byte, error) { + type noMethod Image + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *Image) SetMarketPlaceImage(v *MarketPlaceImage) *Image { + if o.MarketPlace = v; o.MarketPlace == nil { + o.nullFields = append(o.nullFields, "MarketPlace") + } + return o +} + +func (o *Image) SetCustom(v *CustomImage) *Image { + if o.Custom = v; o.Custom == nil { + o.nullFields = append(o.nullFields, "Custom") + } + return o +} + +// endregion + +// region MarketPlaceImage + +func (o MarketPlaceImage) MarshalJSON() ([]byte, error) { + type noMethod MarketPlaceImage + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *MarketPlaceImage) SetPublisher(v *string) *MarketPlaceImage { + if o.Publisher = v; o.Publisher == 
nil { + o.nullFields = append(o.nullFields, "Publisher") + } + return o +} + +func (o *MarketPlaceImage) SetOffer(v *string) *MarketPlaceImage { + if o.Offer = v; o.Offer == nil { + o.nullFields = append(o.nullFields, "Offer") + } + return o +} + +func (o *MarketPlaceImage) SetSKU(v *string) *MarketPlaceImage { + if o.SKU = v; o.SKU == nil { + o.nullFields = append(o.nullFields, "SKU") + } + return o +} + +func (o *MarketPlaceImage) SetVersion(v *string) *MarketPlaceImage { + if o.Version = v; o.Version == nil { + o.nullFields = append(o.nullFields, "Version") + } + return o +} + +// endregion + +// region Tags + +func (o Tags) MarshalJSON() ([]byte, error) { + type noMethod Tags + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *Tags) SetTagKey(v *string) *Tags { + if o.TagKey = v; o.TagKey == nil { + o.nullFields = append(o.nullFields, "TagKey") + } + return o +} + +func (o *Tags) SetTagValue(v *string) *Tags { + if o.TagValue = v; o.TagValue == nil { + o.nullFields = append(o.nullFields, "TagValue") + } + return o +} + +// endregion + +// region CustomImage + +func (o CustomImage) MarshalJSON() ([]byte, error) { + type noMethod CustomImage + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *CustomImage) SetResourceGroupName(v *string) *CustomImage { + if o.ResourceGroupName = v; o.ResourceGroupName == nil { + o.nullFields = append(o.nullFields, "ResourceGroupName") + } + return o +} + +func (o *CustomImage) SetName(v *string) *CustomImage { + if o.Name = v; o.Name == nil { + o.nullFields = append(o.nullFields, "Name") + } + return o +} + +// endregion + +// region Network + +func (o Network) MarshalJSON() ([]byte, error) { + type noMethod Network + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *Network) SetVirtualNetworkName(v *string) *Network { + if o.VirtualNetworkName = v; o.VirtualNetworkName == nil 
{ + o.nullFields = append(o.nullFields, "VirtualNetworkName") + } + return o +} + +func (o *Network) SetResourceGroupName(v *string) *Network { + if o.ResourceGroupName = v; o.ResourceGroupName == nil { + o.nullFields = append(o.nullFields, "ResourceGroupName") + } + return o +} + +func (o *Network) SetNetworkInterfaces(v []*NetworkInterface) *Network { + if o.NetworkInterfaces = v; o.NetworkInterfaces == nil { + o.nullFields = append(o.nullFields, "NetworkInterfaces") + } + return o +} + +// endregion + +// region NetworkInterface + +func (o NetworkInterface) MarshalJSON() ([]byte, error) { + type noMethod NetworkInterface + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *NetworkInterface) SetSubnetName(v *string) *NetworkInterface { + if o.SubnetName = v; o.SubnetName == nil { + o.nullFields = append(o.nullFields, "SubnetName") + } + return o +} + +func (o *NetworkInterface) SetAdditionalIPConfigs(v []*AdditionalIPConfig) *NetworkInterface { + if o.AdditionalIPConfigs = v; o.AdditionalIPConfigs == nil { + o.nullFields = append(o.nullFields, "AdditionalIPConfigs") + } + return o +} + +func (o *NetworkInterface) SetAssignPublicIP(v *bool) *NetworkInterface { + if o.AssignPublicIP = v; o.AssignPublicIP == nil { + o.nullFields = append(o.nullFields, "AssignPublicIP") + } + return o +} + +func (o *NetworkInterface) SetIsPrimary(v *bool) *NetworkInterface { + if o.IsPrimary = v; o.IsPrimary == nil { + o.nullFields = append(o.nullFields, "IsPrimary") + } + return o +} + +func (o *NetworkInterface) SetApplicationSecurityGroups(v []*ApplicationSecurityGroup) *NetworkInterface { + if o.ApplicationSecurityGroups = v; o.ApplicationSecurityGroups == nil { + o.nullFields = append(o.nullFields, "ApplicationSecurityGroups") + } + return o +} + +// endregion + +// region AdditionalIPConfig + +func (o AdditionalIPConfig) MarshalJSON() ([]byte, error) { + type noMethod AdditionalIPConfig + raw := noMethod(o) + return 
jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *AdditionalIPConfig) SetName(v *string) *AdditionalIPConfig { + if o.Name = v; o.Name == nil { + o.nullFields = append(o.nullFields, "Name") + } + return o +} + +func (o *AdditionalIPConfig) SetPrivateIPAddressVersion(v *string) *AdditionalIPConfig { + if o.PrivateIPAddressVersion = v; o.PrivateIPAddressVersion == nil { + o.nullFields = append(o.nullFields, "PrivateIPAddressVersion") + } + return o +} + +// endregion + +// region Login + +func (o Login) MarshalJSON() ([]byte, error) { + type noMethod Login + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *Login) SetUserName(v *string) *Login { + if o.UserName = v; o.UserName == nil { + o.nullFields = append(o.nullFields, "UserName") + } + return o +} + +func (o *Login) SetSSHPublicKey(v *string) *Login { + if o.SSHPublicKey = v; o.SSHPublicKey == nil { + o.nullFields = append(o.nullFields, "SSHPublicKey") + } + return o +} + +func (o *Login) SetPassword(v *string) *Login { + if o.Password = v; o.Password == nil { + o.nullFields = append(o.nullFields, "Password") + } + return o +} + +// endregion + +// region ApplicationSecurityGroup + +func (o ApplicationSecurityGroup) MarshalJSON() ([]byte, error) { + type noMethod ApplicationSecurityGroup + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *ApplicationSecurityGroup) SetName(v *string) *ApplicationSecurityGroup { + if o.Name = v; o.Name == nil { + o.nullFields = append(o.nullFields, "Name") + } + return o +} + +func (o *ApplicationSecurityGroup) SetResourceGroupName(v *string) *ApplicationSecurityGroup { + if o.ResourceGroupName = v; o.ResourceGroupName == nil { + o.nullFields = append(o.nullFields, "ResourceGroupName") + } + return o +} + +// endregion + +// region ManagedServiceIdentity + +func (o ManagedServiceIdentity) MarshalJSON() ([]byte, error) { + type noMethod 
ManagedServiceIdentity + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *ManagedServiceIdentity) SetResourceGroupName(v *string) *ManagedServiceIdentity { + if o.ResourceGroupName = v; o.ResourceGroupName == nil { + o.nullFields = append(o.nullFields, "ResourceGroupName") + } + return o +} + +func (o *ManagedServiceIdentity) SetName(v *string) *ManagedServiceIdentity { + if o.Name = v; o.Name == nil { + o.nullFields = append(o.nullFields, "Name") + } + return o +} + +// endregion + +// region LoadBalancersConfig + +func (o LoadBalancersConfig) MarshalJSON() ([]byte, error) { + type noMethod LoadBalancersConfig + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *LoadBalancersConfig) SetLoadBalancers(v []*LoadBalancer) *LoadBalancersConfig { + if o.LoadBalancers = v; o.LoadBalancers == nil { + o.nullFields = append(o.nullFields, "LoadBalancers") + } + return o +} + +// endregion + +// region LoadBalancer + +func (o LoadBalancer) MarshalJSON() ([]byte, error) { + type noMethod LoadBalancer + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *LoadBalancer) SetType(v *string) *LoadBalancer { + if o.Type = v; o.Type == nil { + o.nullFields = append(o.nullFields, "Type") + } + return o +} + +func (o *LoadBalancer) SetResourceGroupName(v *string) *LoadBalancer { + if o.ResourceGroupName = v; o.ResourceGroupName == nil { + o.nullFields = append(o.nullFields, "ResourceGroupName") + } + return o +} + +func (o *LoadBalancer) SetName(v *string) *LoadBalancer { + if o.Name = v; o.Name == nil { + o.nullFields = append(o.nullFields, "Name") + } + return o +} + +func (o *LoadBalancer) SetSKU(v *string) *LoadBalancer { + if o.SKU = v; o.SKU == nil { + o.nullFields = append(o.nullFields, "SKU") + } + return o +} + +func (o *LoadBalancer) SeBackendPoolNames(v []string) *LoadBalancer { + if o.BackendPoolNames = v; 
o.BackendPoolNames == nil { + o.nullFields = append(o.nullFields, "BackendPoolNames") + } + return o +} + +// endregion diff --git a/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/azure/v3/service.go b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/azure/v3/service.go new file mode 100644 index 000000000000..8af9d2699699 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/azure/v3/service.go @@ -0,0 +1,36 @@ +package v3 + +import ( + "context" + + "github.com/spotinst/spotinst-sdk-go/spotinst" + "github.com/spotinst/spotinst-sdk-go/spotinst/client" + "github.com/spotinst/spotinst-sdk-go/spotinst/session" +) + +// Service provides the API operation methods for making requests to endpoints +// of the Spotinst API. See this package's package overview docs for details on +// the service. +type Service interface { + Create(context.Context, *CreateGroupInput) (*CreateGroupOutput, error) + Read(context.Context, *ReadGroupInput) (*ReadGroupOutput, error) + Update(context.Context, *UpdateGroupInput) (*UpdateGroupOutput, error) + Delete(context.Context, *DeleteGroupInput) (*DeleteGroupOutput, error) + List(context.Context, *ListGroupsInput) (*ListGroupsOutput, error) +} + +type ServiceOp struct { + Client *client.Client +} + +var _ Service = &ServiceOp{} + +func New(sess *session.Session, cfgs ...*spotinst.Config) *ServiceOp { + cfg := &spotinst.Config{} + cfg.Merge(sess.Config) + cfg.Merge(cfgs...) 
+ + return &ServiceOp{ + Client: client.New(sess.Config), + } +} diff --git a/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/gcp/gcp.go b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/gcp/gcp.go new file mode 100644 index 000000000000..19384b6a9873 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/gcp/gcp.go @@ -0,0 +1,2184 @@ +package gcp + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strconv" + "time" + + "github.com/spotinst/spotinst-sdk-go/spotinst" + "github.com/spotinst/spotinst-sdk-go/spotinst/client" + "github.com/spotinst/spotinst-sdk-go/spotinst/util/jsonutil" + "github.com/spotinst/spotinst-sdk-go/spotinst/util/uritemplates" +) + +// Group defines a GCP Elastigroup. +type Group struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Description *string `json:"description,omitempty"` + NodeImage *string `json:"nodeImage,omitempty"` + Capacity *Capacity `json:"capacity,omitempty"` + Compute *Compute `json:"compute,omitempty"` + Scaling *Scaling `json:"scaling,omitempty"` + Scheduling *Scheduling `json:"scheduling,omitempty"` + Strategy *Strategy `json:"strategy,omitempty"` + Integration *Integration `json:"thirdPartiesIntegration,omitempty"` + + // Read-only fields. + CreatedAt *time.Time `json:"createdAt,omitempty"` + UpdatedAt *time.Time `json:"updatedAt,omitempty"` + + // forceSendFields is a list of field names (e.g. "Keys") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + forceSendFields []string + + // nullFields is a list of field names (e.g. 
"Keys") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + nullFields []string +} + +// region AutoScale structs + +type AutoScale struct { + IsEnabled *bool `json:"isEnabled,omitempty"` + IsAutoConfig *bool `json:"isAutoConfig,omitempty"` + Cooldown *int `json:"cooldown,omitempty"` + Headroom *AutoScaleHeadroom `json:"headroom,omitempty"` + Down *AutoScaleDown `json:"down,omitempty"` + + forceSendFields []string + nullFields []string +} + +type AutoScaleDown struct { + EvaluationPeriods *int `json:"evaluationPeriods,omitempty"` + + forceSendFields []string + nullFields []string +} + +type AutoScaleHeadroom struct { + CPUPerUnit *int `json:"cpuPerUnit,omitempty"` + MemoryPerUnit *int `json:"memoryPerUnit,omitempty"` + NumOfUnits *int `json:"numOfUnits,omitempty"` + + forceSendFields []string + nullFields []string +} + +type AutoScaleLabel struct { + Key *string `json:"key,omitempty"` + Value *string `json:"value,omitempty"` + + forceSendFields []string + nullFields []string +} + +// endregion + +// region Capacity structs + +// Capacity defines the capacity attributes of a Group instance +type Capacity struct { + Maximum *int `json:"maximum,omitempty"` + Minimum *int `json:"minimum,omitempty"` + Target *int `json:"target,omitempty"` + + forceSendFields []string + nullFields []string +} + +// endregion + +// region Compute structs + +// AccessConfig defines the access configuration for a network. AccessConfig is an element of NetworkInterface. 
+type AccessConfig struct { + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + + forceSendFields []string + nullFields []string +} + +// AliasIPRange defines the alias ip range for a network. AliasIPRange is an element of NetworkInterface. +type AliasIPRange struct { + IPCIDRRange *string `json:"ipCidrRange,omitempty"` + SubnetworkRangeName *string `json:"subnetworkRangeName,omitempty"` + + forceSendFields []string + nullFields []string +} + +// BackendServiceConfig constains a list of backend service configurations. +type BackendServiceConfig struct { + BackendServices []*BackendService `json:"backendServices,omitempty"` + forceSendFields []string + nullFields []string +} + +// BackendService defines the configuration for a single backend service. +type BackendService struct { + BackendServiceName *string `json:"backendServiceName,omitempty"` + LocationType *string `json:"locationType,omitempty"` + Scheme *string `json:"scheme,omitempty"` + NamedPorts *NamedPorts `json:"namedPorts,omitempty"` + + forceSendFields []string + nullFields []string +} + +// Compute defines the compute attributes of a Group. +type Compute struct { + AvailabilityZones []string `json:"availabilityZones,omitempty"` + GPU *GPU `json:"gpu,omitempty"` + Health *Health `json:"health,omitempty"` + InstanceTypes *InstanceTypes `json:"instanceTypes,omitempty"` + LaunchSpecification *LaunchSpecification `json:"launchSpecification,omitempty"` + Subnets []*Subnet `json:"subnets,omitempty"` + + forceSendFields []string + nullFields []string +} + +// CustomInstance defines the memory and vCPU constraints of an instance +type CustomInstance struct { + VCPU *int `json:"vCPU,omitempty"` + MemoryGiB *int `json:"memoryGiB,omitempty"` + + forceSendFields []string + nullFields []string +} + +// Disk defines the a block of memory resources for the group. Stored in an array of Disks, as defined in LaunchSpecification. 
+type Disk struct { + AutoDelete *bool `json:"autoDelete,omitempty"` + Boot *bool `json:"boot,omitempty"` + DeviceName *string `json:"deviceName,omitempty"` + InitializeParams *InitializeParams `json:"initializeParams,omitempty"` + Interface *string `json:"interface,omitempty"` + Mode *string `json:"mode,omitempty"` + Source *string `json:"source,omitempty"` + Type *string `json:"type,omitempty"` + + forceSendFields []string + nullFields []string +} + +// GPU defines the kind and number of GPUs to use with the group. GPU is an element of Compute. +type GPU struct { + Type *string `json:"type,omitempty"` + Count *int `json:"count,omitempty"` + + forceSendFields []string + nullFields []string +} + +// Health defines the healthcheck attributes for the group. Health is an element of Compute. +type Health struct { + AutoHealing *bool `json:"autoHealing,omitempty"` + GracePeriod *int `json:"gracePeriod,omitempty"` + HealthCheckType *string `json:"healthCheckType,omitempty"` + UnhealthyDuration *int `json:"unhealthyDuration,omitempty"` + + forceSendFields []string + nullFields []string +} + +// InitializeParams defines the initialization parameters for a Disk object. +type InitializeParams struct { + DiskSizeGB *int `json:"diskSizeGb,omitempty"` + DiskType *string `json:"diskType,omitempty"` + SourceImage *string `json:"sourceImage,omitempty"` + + forceSendFields []string + nullFields []string +} + +// InstanceTypes defines the type of instances to use with the group. InstanceTypes is an element of Compute. +type InstanceTypes struct { + OnDemand *string `json:"ondemand,omitempty"` + Preemptible []string `json:"preemptible,omitempty"` + Custom []*CustomInstance `json:"custom,omitempty"` + + forceSendFields []string + nullFields []string +} + +// Label defines an object holding a key:value pair. Label is an element of LaunchSpecification. 
+type Label struct { + Key *string `json:"key,omitempty"` + Value *string `json:"value,omitempty"` + + forceSendFields []string + nullFields []string +} + +// LaunchSpecification defines launch attributes for the Group. LaunchSpecification is an element of Compute. +type LaunchSpecification struct { + BackendServiceConfig *BackendServiceConfig `json:"backendServiceConfig,omitempty"` + Disks []*Disk `json:"disks,omitempty"` + Labels []*Label `json:"labels,omitempty"` + IPForwarding *bool `json:"ipForwarding,omitempty"` + NetworkInterfaces []*NetworkInterface `json:"networkInterfaces,omitempty"` + Metadata []*Metadata `json:"metadata,omitempty"` + ServiceAccount *string `json:"serviceAccount,omitempty"` + StartupScript *string `json:"startupScript,omitempty"` + ShutdownScript *string `json:"shutdownScript,omitempty"` + Tags []string `json:"tags,omitempty"` + InstanceNamePrefix *string `json:"instanceNamePrefix,omitempty"` + + forceSendFields []string + nullFields []string +} + +// Metadata defines an object holding a key:value pair. Metadata is an element of LaunchSpecification. +type Metadata struct { + Key *string `json:"key,omitempty"` + Value *string `json:"value,omitempty"` + + forceSendFields []string + nullFields []string +} + +// NamedPorts describes the name and list of ports to use with the backend service +type NamedPorts struct { + Name *string `json:"name,omitempty"` + Ports []int `json:"ports,omitempty"` + + forceSendFields []string + nullFields []string +} + +// NetworkInterface defines the network configuration for a Group. NetworkInterface is an element of LaunchSpecification. +type NetworkInterface struct { + AccessConfigs []*AccessConfig `json:"accessConfigs,omitempty"` + AliasIPRanges []*AliasIPRange `json:"aliasIpRanges,omitempty"` + Network *string `json:"network,omitempty"` + ProjectID *string `json:"projectId,omitempty"` + + forceSendFields []string + nullFields []string +} + +// Subnet defines the attributes of a single Subnet. 
The Subnets list is an element of Compute. +type Subnet struct { + Region *string `json:"region,omitempty"` + SubnetNames []string `json:"subnetNames,omitempty"` + + forceSendFields []string + nullFields []string +} + +// endregion + +// region GKE structs + +// ImportGKEGroup contains a modified group struct used for overriding cluster parameters on import +type ImportGKEGroup struct { + AvailabilityZones []string `json:"availabilityZones,omitempty"` + Capacity *CapacityGKE `json:"capacity,omitempty"` + Name *string `json:"name,omitempty"` + InstanceTypes *InstanceTypesGKE `json:"instanceTypes,omitempty"` + PreemptiblePercentage *int `json:"preemptiblePercentage,omitempty"` + NodeImage *string `json:"nodeImage,omitempty"` + + forceSendFields []string + nullFields []string +} + +type CapacityGKE struct { + Capacity //embedding + + forceSendFields []string + nullFields []string +} + +type InstanceTypesGKE struct { + OnDemand *string `json:"ondemand,omitempty"` + Preemptible []string `json:"preemptible,omitempty"` + + forceSendFields []string + nullFields []string +} + +// endregion + +// region Scaling structs + +// Action defines the action attributes of a ScalingPolicy. +type Action struct { + Adjustment *int `json:"adjustment,omitempty"` + Type *string `json:"type,omitempty"` + + forceSendFields []string + nullFields []string +} + +// Dimension defines the attributes for the dimensions of a ScalingPolicy. +type Dimension struct { + Name *string `json:"name,omitempty"` + Value *string `json:"value,omitempty"` + + forceSendFields []string + nullFields []string +} + +// Scaling defines the scaling attributes of a Group +type Scaling struct { + Up []*ScalingPolicy `json:"up,omitempty"` + Down []*ScalingPolicy `json:"down,omitempty"` + + forceSendFields []string + nullFields []string +} + +// ScalingPolicy defines the scaling attributes for both up and down policies. ScalingPolicy is an element of Scaling. 
+type ScalingPolicy struct { + Action *Action `json:"action,omitempty"` + Cooldown *int `json:"cooldown,omitempty"` + Dimensions []*Dimension `json:"dimensions,omitempty"` + EvaluationPeriods *int `json:"evaluationPeriods,omitempty"` + MetricName *string `json:"metricName,omitempty"` + Namespace *string `json:"namespace,omitempty"` + Operator *string `json:"operator,omitempty"` + Period *int `json:"period,omitempty"` + PolicyName *string `json:"policyName,omitempty"` + Source *string `json:"source,omitempty"` + Statistic *string `json:"statistic,omitempty"` + Threshold *float64 `json:"threshold,omitempty"` + Unit *string `json:"unit,omitempty"` + + forceSendFields []string + nullFields []string +} + +// endregion + +// region Strategy structs + +// Strategy defines the strategy attributes of a Group. +type Strategy struct { + DrainingTimeout *int `json:"drainingTimeout,omitempty"` + FallbackToOnDemand *bool `json:"fallbackToOd,omitempty"` + PreemptiblePercentage *int `json:"preemptiblePercentage,omitempty"` + OnDemandCount *int `json:"onDemandCount,omitempty"` + ProvisioningModel *string `json:"provisioningModel,omitempty"` + + forceSendFields []string + nullFields []string +} + +// endregion + +// region Scheduling + +type Scheduling struct { + Tasks []*Task `json:"tasks,omitempty"` + + forceSendFields []string + nullFields []string +} + +type Task struct { + IsEnabled *bool `json:"isEnabled,omitempty"` + Type *string `json:"taskType,omitempty"` + CronExpression *string `json:"cronExpression,omitempty"` + TargetCapacity *int `json:"targetCapacity,omitempty"` + MinCapacity *int `json:"minCapacity,omitempty"` + MaxCapacity *int `json:"maxCapacity,omitempty"` + + forceSendFields []string + nullFields []string +} + +// endregion + +// region Integration structs + +type Integration struct { + GKE *GKEIntegration `json:"gke,omitempty"` + DockerSwarm *DockerSwarmIntegration `json:"dockerSwarm,omitempty"` + + forceSendFields []string + nullFields []string +} + +// region 
GKEIntegration structs + +type GKEIntegration struct { + ClusterID *string `json:"clusterIdentifier,omitempty"` + ClusterZoneName *string `json:"clusterZoneName,omitempty"` + AutoUpdate *bool `json:"autoUpdate,omitempty"` + AutoScale *AutoScaleGKE `json:"autoScale,omitempty"` + Location *string `json:"location,omitempty"` + + forceSendFields []string + nullFields []string +} + +type AutoScaleGKE struct { + AutoScale // embedding + Labels []*AutoScaleLabel `json:"labels,omitempty"` + + forceSendFields []string + nullFields []string +} + +// endregion + +// region DockerSwarmIntegration structs + +type DockerSwarmIntegration struct { + MasterHost *string `json:"masterHost,omitempty"` + MasterPort *int `json:"masterPort,omitempty"` + + forceSendFields []string + nullFields []string +} + +// endregion + +// endregion + +// region API Operation structs + +// CreateGroupInput contains the Elastigroup description required when making a request to create an Elastigroup. +type CreateGroupInput struct { + Group *Group `json:"group,omitempty"` +} + +// CreateGroupOutput contains a definition of the created Elastigroup, including the generated Group ID. +type CreateGroupOutput struct { + Group *Group `json:"group,omitempty"` +} + +// DeleteGroupInput contains the required input to delete an existing Elastigroup. +type DeleteGroupInput struct { + GroupID *string `json:"groupId,omitempty"` +} + +// DeleteGroupOutput describes the response a deleted group. Empty at this time. +type DeleteGroupOutput struct{} + +// ImportGKEClusterInput describes the input required when importing an existing GKE cluster into Elastigroup, if it exists. +type ImportGKEClusterInput struct { + ClusterID *string `json:"clusterID,omitempty"` + ClusterZoneName *string `json:"clusterZoneName,omitempty"` + DryRun *bool `json:"dryRun,omitempty"` + Group *ImportGKEGroup `json:"group,omitempty"` +} + +// ImportGKEClusterOutput contains a description of the Elastigroup and the imported GKE cluster. 
+type ImportGKEClusterOutput struct { + Group *Group `json:"group,omitempty"` +} + +// Instance describes an individual instance's status and is returned by a Status request +type Instance struct { + CreatedAt *time.Time `json:"createdAt,omitempty"` + InstanceName *string `json:"instanceName,omitempty"` + LifeCycle *string `json:"lifeCycle,omitempty"` + MachineType *string `json:"machineType,omitempty"` + PrivateIP *string `json:"privateIpAddress,omitempty"` + PublicIP *string `json:"publicIpAddress,omitempty"` + StatusName *string `json:"statusName,omitempty"` + UpdatedAt *time.Time `json:"updatedAt,omitempty"` + Zone *string `json:"zone,omitempty"` +} + +// ListGroupsInput describes the input required when making a request to list all groups in an account. +type ListGroupsInput struct{} + +// ListGroupsOutput contains an array of groups. +type ListGroupsOutput struct { + Groups []*Group `json:"groups,omitempty"` +} + +// ReadGroupInput describes the input required when making a request to list a single Elastigroup. +type ReadGroupInput struct { + GroupID *string `json:"groupId,omitempty"` +} + +// ReadGroupOutput contains a description of the requested Elastigroup, if it exists. +type ReadGroupOutput struct { + Group *Group `json:"group,omitempty"` +} + +// StatusGroupInput describes the required input when making a request to see an Elastigroup's status. +type StatusGroupInput struct { + GroupID *string `json:"groupId,omitempty"` +} + +// StatusGroupOutput describes the status of the instances in the Elastigroup. +type StatusGroupOutput struct { + Instances []*Instance `json:"instances,omitempty"` +} + +// UpdateGroupInput contains a description of one or more valid attributes that will be applied to an existing Elastigroup. +type UpdateGroupInput struct { + Group *Group `json:"group,omitempty"` +} + +// UpdateGroupOutPut contains a description of the updated Elastigroup, if successful. 
+type UpdateGroupOutput struct { + Group *Group `json:"group,omitempty"` +} + +// endregion + +// region API Operations + +// Create creates a new Elastigroup using GCE resources. +func (s *ServiceOp) Create(ctx context.Context, input *CreateGroupInput) (*CreateGroupOutput, error) { + r := client.NewRequest(http.MethodPost, "/gcp/gce/group") + r.Obj = input + + resp, err := client.RequireOK(s.Client.Do(ctx, r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + gs, err := groupsFromHttpResponse(resp) + if err != nil { + return nil, err + } + + output := new(CreateGroupOutput) + if len(gs) > 0 { + output.Group = gs[0] + } + + return output, nil +} + +// Read returns the configuration of a single existing Elastigroup. +func (s *ServiceOp) Read(ctx context.Context, input *ReadGroupInput) (*ReadGroupOutput, error) { + path, err := uritemplates.Expand("/gcp/gce/group/{groupId}", uritemplates.Values{ + "groupId": spotinst.StringValue(input.GroupID), + }) + if err != nil { + return nil, err + } + + r := client.NewRequest(http.MethodGet, path) + resp, err := client.RequireOK(s.Client.Do(ctx, r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + gs, err := groupsFromHttpResponse(resp) + if err != nil { + return nil, err + } + + output := new(ReadGroupOutput) + if len(gs) > 0 { + output.Group = gs[0] + } + + return output, nil +} + +// Update modifies the configuration of a single existing Elastigroup. +func (s *ServiceOp) Update(ctx context.Context, input *UpdateGroupInput) (*UpdateGroupOutput, error) { + path, err := uritemplates.Expand("/gcp/gce/group/{groupId}", uritemplates.Values{ + "groupId": spotinst.StringValue(input.Group.ID), + }) + if err != nil { + return nil, err + } + + // We do NOT need the ID anymore, so let's drop it. 
+ input.Group.ID = nil + + r := client.NewRequest(http.MethodPut, path) + r.Obj = input + + resp, err := client.RequireOK(s.Client.Do(ctx, r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + gs, err := groupsFromHttpResponse(resp) + if err != nil { + return nil, err + } + + output := new(UpdateGroupOutput) + if len(gs) > 0 { + output.Group = gs[0] + } + + return output, nil +} + +// Delete removes a single existing Elastigroup and destroys all associated GCE resources. +func (s *ServiceOp) Delete(ctx context.Context, input *DeleteGroupInput) (*DeleteGroupOutput, error) { + path, err := uritemplates.Expand("/gcp/gce/group/{groupId}", uritemplates.Values{ + "groupId": spotinst.StringValue(input.GroupID), + }) + if err != nil { + return nil, err + } + + r := client.NewRequest(http.MethodDelete, path) + + resp, err := client.RequireOK(s.Client.Do(ctx, r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return &DeleteGroupOutput{}, nil +} + +// List returns the configuration of all existing Elastigroups in a given Spotinst GCE account. +func (s *ServiceOp) List(ctx context.Context, input *ListGroupsInput) (*ListGroupsOutput, error) { + r := client.NewRequest(http.MethodGet, "/gcp/gce/group") + resp, err := client.RequireOK(s.Client.Do(ctx, r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + gs, err := groupsFromHttpResponse(resp) + if err != nil { + return nil, err + } + + return &ListGroupsOutput{Groups: gs}, nil +} + +// ImportGKECluster imports an existing GKE cluster into Elastigroup. 
+func (s *ServiceOp) ImportGKECluster(ctx context.Context, input *ImportGKEClusterInput) (*ImportGKEClusterOutput, error) { + r := client.NewRequest(http.MethodPost, "/gcp/gce/group/gke/import") + + r.Params["clusterId"] = []string{spotinst.StringValue(input.ClusterID)} + r.Params["zone"] = []string{spotinst.StringValue(input.ClusterZoneName)} + r.Params["dryRun"] = []string{strconv.FormatBool(spotinst.BoolValue(input.DryRun))} + + body := &ImportGKEClusterInput{Group: input.Group} + r.Obj = body + + resp, err := client.RequireOK(s.Client.Do(ctx, r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + gs, err := groupsFromHttpResponse(resp) + if err != nil { + return nil, err + } + + output := new(ImportGKEClusterOutput) + if len(gs) > 0 { + output.Group = gs[0] + } + + return output, nil +} + +// Status describes the current status of the instances in a specific Elastigroup +func (s *ServiceOp) Status(ctx context.Context, input *StatusGroupInput) (*StatusGroupOutput, error) { + path, err := uritemplates.Expand("/gcp/gce/group/{groupId}/status", uritemplates.Values{ + "groupId": spotinst.StringValue(input.GroupID), + }) + if err != nil { + return nil, err + } + + r := client.NewRequest(http.MethodGet, path) + resp, err := client.RequireOK(s.Client.Do(ctx, r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + is, err := instancesFromHttpResponse(resp) + if err != nil { + return nil, err + } + + return &StatusGroupOutput{Instances: is}, nil +} + +// endregion + +// region Unmarshallers + +// groupFromJSON unmarshalls a single group +func groupFromJSON(in []byte) (*Group, error) { + b := new(Group) + if err := json.Unmarshal(in, b); err != nil { + return nil, err + } + return b, nil +} + +// groupsFromJSON unmarshalls an array of groups +func groupsFromJSON(in []byte) ([]*Group, error) { + var rw client.Response + if err := json.Unmarshal(in, &rw); err != nil { + return nil, err + } + out := make([]*Group, len(rw.Response.Items)) 
+	if len(out) == 0 {
+		return out, nil
+	}
+	for i, rb := range rw.Response.Items {
+		b, err := groupFromJSON(rb)
+		if err != nil {
+			return nil, err
+		}
+		out[i] = b
+	}
+	return out, nil
+}
+
+// groupsFromHttpResponse reads a list of one or more groups from an http response
+func groupsFromHttpResponse(resp *http.Response) ([]*Group, error) {
+	body, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return nil, err
+	}
+	return groupsFromJSON(body)
+}
+
+// instanceFromJSON unmarshalls a single instance
+func instanceFromJSON(in []byte) (*Instance, error) {
+	b := new(Instance)
+	if err := json.Unmarshal(in, b); err != nil {
+		return nil, err
+	}
+	return b, nil
+}
+
+// instancesFromJSON unmarshalls an array of instances
+func instancesFromJSON(in []byte) ([]*Instance, error) {
+	var rw client.Response
+	if err := json.Unmarshal(in, &rw); err != nil {
+		return nil, err
+	}
+	out := make([]*Instance, len(rw.Response.Items))
+	if len(out) == 0 {
+		return out, nil
+	}
+	for i, rb := range rw.Response.Items {
+		b, err := instanceFromJSON(rb)
+		if err != nil {
+			return nil, err
+		}
+		out[i] = b
+	}
+	return out, nil
+}
+
+// instancesFromHttpResponse reads a list of one or more instances from an http response
+func instancesFromHttpResponse(resp *http.Response) ([]*Instance, error) {
+	body, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return nil, err
+	}
+	return instancesFromJSON(body)
+}
+
+// endregion
+
+// region Group setters
+
+func (o Group) MarshalJSON() ([]byte, error) {
+	type noMethod Group
+	raw := noMethod(o)
+	return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+// SetID sets the group ID attribute
+func (o *Group) SetID(v *string) *Group {
+	if o.ID = v; o.ID == nil {
+		o.nullFields = append(o.nullFields, "ID")
+	}
+	return o
+}
+
+// SetName sets the group name
+func (o *Group) SetName(v *string) *Group {
+	if o.Name = v; o.Name == nil {
+		o.nullFields = append(o.nullFields, "Name")
+	}
+	return o
+}
+
+// SetDescription sets the
description for the group +func (o *Group) SetDescription(v *string) *Group { + if o.Description = v; o.Description == nil { + o.nullFields = append(o.nullFields, "Description") + } + return o +} + +// SetNodeImage sets image that will be used for the node VMs +func (o *Group) SetNodeImage(v *string) *Group { + if o.NodeImage = v; o.NodeImage == nil { + o.nullFields = append(o.nullFields, "NodeImage") + } + return o +} + +// SetCapacity sets the Capacity object +func (o *Group) SetCapacity(v *Capacity) *Group { + if o.Capacity = v; o.Capacity == nil { + o.nullFields = append(o.nullFields, "Capacity") + } + return o +} + +// SetCompute sets the Compute object +func (o *Group) SetCompute(v *Compute) *Group { + if o.Compute = v; o.Compute == nil { + o.nullFields = append(o.nullFields, "Compute") + } + return o +} + +// SetScaling sets the Scaling object +func (o *Group) SetScaling(v *Scaling) *Group { + if o.Scaling = v; o.Scaling == nil { + o.nullFields = append(o.nullFields, "Scaling") + } + return o +} + +func (o *Group) SetScheduling(v *Scheduling) *Group { + if o.Scheduling = v; o.Scheduling == nil { + o.nullFields = append(o.nullFields, "Scheduling") + } + return o +} + +// SetStrategy sets the Strategy object +func (o *Group) SetStrategy(v *Strategy) *Group { + if o.Strategy = v; o.Strategy == nil { + o.nullFields = append(o.nullFields, "Strategy") + } + return o +} + +// SetIntegration sets the integrations for the group +func (o *Group) SetIntegration(v *Integration) *Group { + if o.Integration = v; o.Integration == nil { + o.nullFields = append(o.nullFields, "Integration") + } + return o +} + +// endregion + +// region AutoScale setters + +func (o AutoScale) MarshalJSON() ([]byte, error) { + type noMethod AutoScale + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *AutoScale) SetIsEnabled(v *bool) *AutoScale { + if o.IsEnabled = v; o.IsEnabled == nil { + o.nullFields = append(o.nullFields, "IsEnabled") + } 
+ return o +} + +func (o *AutoScale) SetIsAutoConfig(v *bool) *AutoScale { + if o.IsAutoConfig = v; o.IsAutoConfig == nil { + o.nullFields = append(o.nullFields, "IsAutoConfig") + } + return o +} + +func (o *AutoScale) SetCooldown(v *int) *AutoScale { + if o.Cooldown = v; o.Cooldown == nil { + o.nullFields = append(o.nullFields, "Cooldown") + } + return o +} + +func (o *AutoScale) SetHeadroom(v *AutoScaleHeadroom) *AutoScale { + if o.Headroom = v; o.Headroom == nil { + o.nullFields = append(o.nullFields, "Headroom") + } + return o +} + +func (o *AutoScale) SetDown(v *AutoScaleDown) *AutoScale { + if o.Down = v; o.Down == nil { + o.nullFields = append(o.nullFields, "Down") + } + return o +} + +// region AutoScaleDown + +func (o AutoScaleDown) MarshalJSON() ([]byte, error) { + type noMethod AutoScaleDown + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *AutoScaleDown) SetEvaluationPeriods(v *int) *AutoScaleDown { + if o.EvaluationPeriods = v; o.EvaluationPeriods == nil { + o.nullFields = append(o.nullFields, "EvaluationPeriods") + } + return o +} + +// endregion + +// region AutoScaleHeadroom + +func (o AutoScaleHeadroom) MarshalJSON() ([]byte, error) { + type noMethod AutoScaleHeadroom + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *AutoScaleHeadroom) SetCPUPerUnit(v *int) *AutoScaleHeadroom { + if o.CPUPerUnit = v; o.CPUPerUnit == nil { + o.nullFields = append(o.nullFields, "CPUPerUnit") + } + return o +} + +func (o *AutoScaleHeadroom) SetMemoryPerUnit(v *int) *AutoScaleHeadroom { + if o.MemoryPerUnit = v; o.MemoryPerUnit == nil { + o.nullFields = append(o.nullFields, "MemoryPerUnit") + } + return o +} + +func (o *AutoScaleHeadroom) SetNumOfUnits(v *int) *AutoScaleHeadroom { + if o.NumOfUnits = v; o.NumOfUnits == nil { + o.nullFields = append(o.nullFields, "NumOfUnits") + } + return o +} + +// endregion + +// region AutoScaleLabel + +func (o AutoScaleLabel) 
MarshalJSON() ([]byte, error) { + type noMethod AutoScaleLabel + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *AutoScaleLabel) SetKey(v *string) *AutoScaleLabel { + if o.Key = v; o.Key == nil { + o.nullFields = append(o.nullFields, "Key") + } + return o +} + +func (o *AutoScaleLabel) SetValue(v *string) *AutoScaleLabel { + if o.Value = v; o.Value == nil { + o.nullFields = append(o.nullFields, "Value") + } + return o +} + +// endregion + +// endregion + +// region Capacity setters + +func (o Capacity) MarshalJSON() ([]byte, error) { + type noMethod Capacity + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +// SetMaximum sets the Maximum number of VMs in the group. +func (o *Capacity) SetMaximum(v *int) *Capacity { + if o.Maximum = v; o.Maximum == nil { + o.nullFields = append(o.nullFields, "Maximum") + } + return o +} + +// SetMinimum sets the minimum number of VMs in the group +func (o *Capacity) SetMinimum(v *int) *Capacity { + if o.Minimum = v; o.Minimum == nil { + o.nullFields = append(o.nullFields, "Minimum") + } + return o +} + +// SetTarget sets the desired number of running VMs in the group. +func (o *Capacity) SetTarget(v *int) *Capacity { + if o.Target = v; o.Target == nil { + o.nullFields = append(o.nullFields, "Target") + } + return o +} + +// endregion + +// region Compute setters + +func (o Compute) MarshalJSON() ([]byte, error) { + type noMethod Compute + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +// SetAvailabilityZones sets the list of availability zones for group resources. 
+func (o *Compute) SetAvailabilityZones(v []string) *Compute { + if o.AvailabilityZones = v; o.AvailabilityZones == nil { + o.nullFields = append(o.nullFields, "AvailabilityZones") + } + return o +} + +// SetGPU sets the GPU object +func (o *Compute) SetGPU(v *GPU) *Compute { + if o.GPU = v; o.GPU == nil { + o.nullFields = append(o.nullFields, "GPU") + } + return o +} + +// SetHealth sets the health check attributes for the group +func (o *Compute) SetHealth(v *Health) *Compute { + if o.Health = v; o.Health == nil { + o.nullFields = append(o.nullFields, "Health") + } + return o +} + +// SetInstanceTypes sets the instance types for the group. +func (o *Compute) SetInstanceTypes(v *InstanceTypes) *Compute { + if o.InstanceTypes = v; o.InstanceTypes == nil { + o.nullFields = append(o.nullFields, "InstanceTypes") + } + return o +} + +// SetLaunchSpecification sets the launch configuration of the group. +func (o *Compute) SetLaunchConfiguration(v *LaunchSpecification) *Compute { + if o.LaunchSpecification = v; o.LaunchSpecification == nil { + o.nullFields = append(o.nullFields, "LaunchSpecification") + } + return o +} + +// SetSubnets sets the subnets used by the group. 
+func (o *Compute) SetSubnets(v []*Subnet) *Compute { + if o.Subnets = v; o.Subnets == nil { + o.nullFields = append(o.nullFields, "Subnets") + } + return o +} + +// region GPU Setters + +func (o GPU) MarshalJSON() ([]byte, error) { + type noMethod GPU + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +// SetType sets the type of gpu +func (o *GPU) SetType(v *string) *GPU { + if o.Type = v; o.Type == nil { + o.nullFields = append(o.nullFields, "Type") + } + return o +} + +// SetCount sets the number of this type of gpu +func (o *GPU) SetCount(v *int) *GPU { + if o.Count = v; o.Count == nil { + o.nullFields = append(o.nullFields, "Count") + } + return o +} + +// endregion + +// region Health setters + +func (o Health) MarshalJSON() ([]byte, error) { + type noMethod Health + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +// SetGracePeriod sets the grace period time for the groups health check +func (o *Health) SetGracePeriod(v *int) *Health { + fmt.Printf("o: %v\n", o) + if o.GracePeriod = v; o.GracePeriod == nil { + o.nullFields = append(o.nullFields, "GracePeriod") + } + return o +} + +// SetHealthCheckType sets the type of helath check to perform +func (o *Health) SetHealthCheckType(v *string) *Health { + if o.HealthCheckType = v; o.HealthCheckType == nil { + o.nullFields = append(o.nullFields, "HealthCheckType") + } + return o +} + +// SetAutoHealing sets autohealing to true or false +func (o *Health) SetAutoHealing(v *bool) *Health { + if o.AutoHealing = v; o.AutoHealing == nil { + o.nullFields = append(o.nullFields, "AutoHealing") + } + return o +} + +func (o *Health) SetUnhealthyDuration(v *int) *Health { + if o.UnhealthyDuration = v; o.UnhealthyDuration == nil { + o.nullFields = append(o.nullFields, "UnhealthyDuration") + } + return o +} + +// endregion + +// region InstanceTypes setters + +func (o InstanceTypes) MarshalJSON() ([]byte, error) { + type noMethod 
InstanceTypes + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +// SetCustom sets the memory and vCPU attributes for Custom Instance types +func (o *InstanceTypes) SetCustom(v []*CustomInstance) *InstanceTypes { + if o.Custom = v; o.Custom == nil { + o.nullFields = append(o.nullFields, "Custom") + } + return o +} + +// SetMemoryGiB sets the memory amount for a Custom Instance in intervals of 2, min 10 +func (o *CustomInstance) SetMemoryGiB(v *int) *CustomInstance { + if o.MemoryGiB = v; o.MemoryGiB == nil { + o.nullFields = append(o.nullFields, "MemoryGiB") + } + return o +} + +// SetVCPU sets sets the number of vCPUs to use in a Custom instance type +func (o *CustomInstance) SetVCPU(v *int) *CustomInstance { + if o.VCPU = v; o.VCPU == nil { + o.nullFields = append(o.nullFields, "VCPU") + } + return o +} + +// SetOnDemand sets the kind of on demand instances to use for the group. +func (o *InstanceTypes) SetOnDemand(v *string) *InstanceTypes { + if o.OnDemand = v; o.OnDemand == nil { + o.nullFields = append(o.nullFields, "OnDemand") + } + return o +} + +// SetPreemptible sets the kind of premeptible instances to use with the group. +func (o *InstanceTypes) SetPreemptible(v []string) *InstanceTypes { + if o.Preemptible = v; o.Preemptible == nil { + o.nullFields = append(o.nullFields, "Preemptible") + } + return o +} + +// endregion + +// region LaunchSpecification setters + +func (o LaunchSpecification) MarshalJSON() ([]byte, error) { + type noMethod LaunchSpecification + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +// SetBackendServices sets the backend services to use with the group. 
+func (o *LaunchSpecification) SetBackendServiceConfig(v *BackendServiceConfig) *LaunchSpecification { + if o.BackendServiceConfig = v; o.BackendServiceConfig == nil { + o.nullFields = append(o.nullFields, "BackendServiceConfig") + } + return o +} + +// SetDisks sets the list of disks used by the group +func (o *LaunchSpecification) SetDisks(v []*Disk) *LaunchSpecification { + if o.Disks = v; o.Disks == nil { + o.nullFields = append(o.nullFields, "Disks") + } + return o +} + +// SetLabels sets the labels to be used with the group +func (o *LaunchSpecification) SetLabels(v []*Label) *LaunchSpecification { + if o.Labels = v; o.Labels == nil { + o.nullFields = append(o.nullFields, "Labels") + } + return o +} + +// SetIPForwarding sets whether to use IP forwarding for this group. +func (o *LaunchSpecification) SetIPForwarding(v *bool) *LaunchSpecification { + if o.IPForwarding = v; o.IPForwarding == nil { + o.nullFields = append(o.nullFields, "IPForwarding") + } + return o +} + +// SetNetworkInterfaces sets number and kinds of network interfaces used by the group. +func (o *LaunchSpecification) SetNetworkInterfaces(v []*NetworkInterface) *LaunchSpecification { + if o.NetworkInterfaces = v; o.NetworkInterfaces == nil { + o.nullFields = append(o.nullFields, "NetworkInterfaces") + } + return o +} + +// SetMetadata sets metadata for the group. +func (o *LaunchSpecification) SetMetadata(v []*Metadata) *LaunchSpecification { + if o.Metadata = v; o.Metadata == nil { + o.nullFields = append(o.nullFields, "Metadata") + } + return o +} + +// SetServiceAccount sets the service account used by the instances in the group +func (o *LaunchSpecification) SetServiceAccount(v *string) *LaunchSpecification { + if o.ServiceAccount = v; o.ServiceAccount == nil { + o.nullFields = append(o.nullFields, "ServiceAccount") + } + return o +} + +// SetStartupScript sets the startup script to be executed when the instance launches. 
+func (o *LaunchSpecification) SetStartupScript(v *string) *LaunchSpecification { + if o.StartupScript = v; o.StartupScript == nil { + o.nullFields = append(o.nullFields, "StartupScript") + } + return o +} + +// SetShutdownScript sets the script that will run when draining instances before termination +func (o *LaunchSpecification) SetShutdownScript(v *string) *LaunchSpecification { + if o.ShutdownScript = v; o.ShutdownScript == nil { + o.nullFields = append(o.nullFields, "ShutdownScript") + } + return o +} + +// SetTags sets the list of tags +func (o *LaunchSpecification) SetTags(v []string) *LaunchSpecification { + if o.Tags = v; o.Tags == nil { + o.nullFields = append(o.nullFields, "Tags") + } + return o +} + +// SetInstanceNamePrefix sets an instance name prefix to be used for all launched instances and their boot disk. +func (o *LaunchSpecification) SetInstanceNamePrefix(v *string) *LaunchSpecification { + if o.InstanceNamePrefix = v; o.InstanceNamePrefix == nil { + o.nullFields = append(o.nullFields, "InstanceNamePrefix") + } + return o +} + +// region BackendServiceConfig setters + +func (o BackendServiceConfig) MarshalJSON() ([]byte, error) { + type noMethod BackendServiceConfig + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +// SetBackendServices sets the backend service list +func (o *BackendServiceConfig) SetBackendServices(v []*BackendService) *BackendServiceConfig { + if o.BackendServices = v; o.BackendServices == nil { + o.nullFields = append(o.nullFields, "BackendServices") + } + return o +} + +// region Backend Service setters + +func (o BackendService) MarshalJSON() ([]byte, error) { + type noMethod BackendService + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +// SetBackendServiceName sets the name of the backend service. 
+func (o *BackendService) SetBackendServiceName(v *string) *BackendService { + if o.BackendServiceName = v; o.BackendServiceName == nil { + o.nullFields = append(o.nullFields, "BackendServiceName") + } + return o +} + +// SetLocationType sets the location type +func (o *BackendService) SetLocationType(v *string) *BackendService { + if o.LocationType = v; o.LocationType == nil { + o.nullFields = append(o.nullFields, "LocationType") + } + return o +} + +// SetScheme sets the scheme +func (o *BackendService) SetScheme(v *string) *BackendService { + if o.Scheme = v; o.Scheme == nil { + o.nullFields = append(o.nullFields, "Scheme") + } + return o +} + +// SetNamedPorts sets the named port object +func (o *BackendService) SetNamedPorts(v *NamedPorts) *BackendService { + if o.NamedPorts = v; o.NamedPorts == nil { + o.nullFields = append(o.nullFields, "NamedPort") + } + return o +} + +// region NamedPort setters + +func (o NamedPorts) MarshalJSON() ([]byte, error) { + type noMethod NamedPorts + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +// SetNamedPorts sets the name of the NamedPorts +func (o *NamedPorts) SetName(v *string) *NamedPorts { + if o.Name = v; o.Name == nil { + o.nullFields = append(o.nullFields, "Name") + } + return o +} + +// SetPorts sets the list of ports in the NamedPorts +func (o *NamedPorts) SetPorts(v []int) *NamedPorts { + if o.Ports = v; o.Ports == nil { + o.nullFields = append(o.nullFields, "Ports") + } + return o +} + +// endregion + +// endregion + +// endregion + +// region Disk setters + +func (o Disk) MarshalJSON() ([]byte, error) { + type noMethod Disk + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +// SetAutoDelete sets option to have disks autodelete +func (o *Disk) SetAutoDelete(v *bool) *Disk { + if o.AutoDelete = v; o.AutoDelete == nil { + o.nullFields = append(o.nullFields, "AutoDelete") + } + return o +} + +// SetBoot sets the boot option 
+func (o *Disk) SetBoot(v *bool) *Disk { + if o.Boot = v; o.Boot == nil { + o.nullFields = append(o.nullFields, "Boot") + } + return o +} + +// SetDeviceName sets the device name +func (o *Disk) SetDeviceName(v *string) *Disk { + if o.DeviceName = v; o.DeviceName == nil { + o.nullFields = append(o.nullFields, "DeviceName") + } + return o +} + +// SetInitializeParams sets the initialization paramters object +func (o *Disk) SetInitializeParams(v *InitializeParams) *Disk { + if o.InitializeParams = v; o.InitializeParams == nil { + o.nullFields = append(o.nullFields, "InitializeParams") + } + return o +} + +// SetInterface sets the interface +func (o *Disk) SetInterface(v *string) *Disk { + if o.Interface = v; o.Interface == nil { + o.nullFields = append(o.nullFields, "Interface") + } + return o +} + +// SetMode sets the mode +func (o *Disk) SetMode(v *string) *Disk { + if o.Mode = v; o.Mode == nil { + o.nullFields = append(o.nullFields, "Mode") + } + return o +} + +// SetSource sets the source +func (o *Disk) SetSource(v *string) *Disk { + if o.Source = v; o.Source == nil { + o.nullFields = append(o.nullFields, "Source") + } + return o +} + +// SetType sets the type of disk +func (o *Disk) SetType(v *string) *Disk { + if o.Type = v; o.Type == nil { + o.nullFields = append(o.nullFields, "Type") + } + return o +} + +// region InitializeParams setters + +func (o InitializeParams) MarshalJSON() ([]byte, error) { + type noMethod InitializeParams + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +// SetDiskSizeGB sets the disk size in gigabytes, in multiples of 2 +func (o *InitializeParams) SetDiskSizeGB(v *int) *InitializeParams { + if o.DiskSizeGB = v; o.DiskSizeGB == nil { + o.nullFields = append(o.nullFields, "DiskSizeGB") + } + return o +} + +// SetDiskType sets the type of disk +func (o *InitializeParams) SetDiskType(v *string) *InitializeParams { + if o.DiskType = v; o.DiskType == nil { + o.nullFields = append(o.nullFields, 
"DiskType") + } + return o +} + +// SetSourceImage sets the source image to use +func (o *InitializeParams) SetSourceImage(v *string) *InitializeParams { + if o.SourceImage = v; o.SourceImage == nil { + o.nullFields = append(o.nullFields, "SourceImage") + } + return o +} + +// endregion + +// endregion + +// region Label setters + +func (o Label) MarshalJSON() ([]byte, error) { + type noMethod Label + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +// SetKey sets the key for the label +func (o *Label) SetKey(v *string) *Label { + if o.Key = v; o.Key == nil { + o.nullFields = append(o.nullFields, "Key") + } + return o +} + +// SetValue sets the value for the label +func (o *Label) SetValue(v *string) *Label { + if o.Value = v; o.Value == nil { + o.nullFields = append(o.nullFields, "Value") + } + return o +} + +// endregion + +// region NetworkInterface setters + +func (o NetworkInterface) MarshalJSON() ([]byte, error) { + type noMethod NetworkInterface + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +// SetAccessConfigs creates a list of one or more access configuration objects +func (o *NetworkInterface) SetAccessConfigs(v []*AccessConfig) *NetworkInterface { + if o.AccessConfigs = v; o.AccessConfigs == nil { + o.nullFields = append(o.nullFields, "AccessConfigs") + } + return o +} + +// SetAliasIPRanges sets a list of alias IP range objects +func (o *NetworkInterface) SetAliasIPRanges(v []*AliasIPRange) *NetworkInterface { + if o.AliasIPRanges = v; o.AliasIPRanges == nil { + o.nullFields = append(o.nullFields, "AliasIPRanges") + } + return o +} + +// SetNetwork sets the name of the network +func (o *NetworkInterface) SetNetwork(v *string) *NetworkInterface { + if o.Network = v; o.Network == nil { + o.nullFields = append(o.nullFields, "Network") + } + return o +} + +// SetProjectId sets the project identifier of the network. 
+func (o *NetworkInterface) SetProjectId(v *string) *NetworkInterface { + if o.ProjectID = v; o.ProjectID == nil { + o.nullFields = append(o.nullFields, "ProjectID") + } + return o +} + +// region AccessConfig setters + +func (o AccessConfig) MarshalJSON() ([]byte, error) { + type noMethod AccessConfig + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +// SetName sets the name of the access configuration +func (o *AccessConfig) SetName(v *string) *AccessConfig { + if o.Name = v; o.Name == nil { + o.nullFields = append(o.nullFields, "Name") + } + return o +} + +// SetType sets the type of access configuration +func (o *AccessConfig) SetType(v *string) *AccessConfig { + if o.Type = v; o.Type == nil { + o.nullFields = append(o.nullFields, "Type") + } + return o +} + +// endregion + +// region AliasIPRange setters + +func (o AliasIPRange) MarshalJSON() ([]byte, error) { + type noMethod AliasIPRange + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +// SetIPCIDRRange sets the ip/cidr range +func (o *AliasIPRange) SetIPCIDRRange(v *string) *AliasIPRange { + if o.IPCIDRRange = v; o.IPCIDRRange == nil { + o.nullFields = append(o.nullFields, "IPCIDRRange") + } + return o +} + +// SetSubnetworkRangeName sets the name of the subnetwork range +func (o *AliasIPRange) SetSubnetworkRangeName(v *string) *AliasIPRange { + if o.SubnetworkRangeName = v; o.SubnetworkRangeName == nil { + o.nullFields = append(o.nullFields, "SubnetworkRangeName") + } + return o +} + +// endregion + +// endregion + +// region Metadata setters + +func (o Metadata) MarshalJSON() ([]byte, error) { + type noMethod Metadata + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +// SetKey sets the metadata key +func (o *Metadata) SetKey(v *string) *Metadata { + if o.Key = v; o.Key == nil { + o.nullFields = append(o.nullFields, "Key") + } + return o +} + +// SetValue sets the metadata 
value +func (o *Metadata) SetValue(v *string) *Metadata { + if o.Value = v; o.Value == nil { + o.nullFields = append(o.nullFields, "Value") + } + return o +} + +// endregion + +// endregion + +// region Subnet setters + +func (o Subnet) MarshalJSON() ([]byte, error) { + type noMethod Subnet + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +// SetRegion sets the region the subnet is in. +func (o *Subnet) SetRegion(v *string) *Subnet { + if o.Region = v; o.Region == nil { + o.nullFields = append(o.nullFields, "Region") + } + return o +} + +// SetSubnetNames sets the list of subnets names to use +func (o *Subnet) SetSubnetNames(v []string) *Subnet { + if o.SubnetNames = v; o.SubnetNames == nil { + o.nullFields = append(o.nullFields, "SubnetNames") + } + return o +} + +// endregion + +// endregion + +// region ImportGKE setters + +func (o ImportGKEGroup) MarshalJSON() ([]byte, error) { + type noMethod ImportGKEGroup + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +// SetAvailabilityZones sets the availability zones for the gke group +func (o *ImportGKEGroup) SetAvailabilityZones(v []string) *ImportGKEGroup { + if o.AvailabilityZones = v; o.AvailabilityZones == nil { + o.nullFields = append(o.nullFields, "AvailabilityZones") + } + return o +} + +// SetCapacity sets the capacity for a gke group +func (o *ImportGKEGroup) SetCapacity(v *CapacityGKE) *ImportGKEGroup { + if o.Capacity = v; o.Capacity == nil { + o.nullFields = append(o.nullFields, "Capacity") + } + return o +} + +// SetInstanceTypes sets the instance types for the group. 
+func (o *ImportGKEGroup) SetInstanceTypes(v *InstanceTypesGKE) *ImportGKEGroup { + if o.InstanceTypes = v; o.InstanceTypes == nil { + o.nullFields = append(o.nullFields, "InstanceTypes") + } + return o +} + +// SetName sets the group name +func (o *ImportGKEGroup) SetName(v *string) *ImportGKEGroup { + if o.Name = v; o.Name == nil { + o.nullFields = append(o.nullFields, "Name") + } + return o +} + +// SetPreemptiblePercentage sets the preemptible percentage when importing a gke cluster into Elastigroup. +func (o *ImportGKEGroup) SetPreemptiblePercentage(v *int) *ImportGKEGroup { + if o.PreemptiblePercentage = v; o.PreemptiblePercentage == nil { + o.nullFields = append(o.nullFields, "PreemptiblePercentage") + } + return o +} + +// SetNodeImage sets the node image for the imported gke group. +func (o *ImportGKEGroup) SetNodeImage(v *string) *ImportGKEGroup { + if o.NodeImage = v; o.NodeImage == nil { + o.nullFields = append(o.nullFields, "NodeImage") + } + return o +} + +func (o InstanceTypesGKE) MarshalJSON() ([]byte, error) { + type noMethod InstanceTypesGKE + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +// SetOnDemand sets the instance types when importing a gke group +func (o *InstanceTypesGKE) SetOnDemand(v *string) *InstanceTypesGKE { + if o.OnDemand = v; o.OnDemand == nil { + o.nullFields = append(o.nullFields, "OnDemand") + } + return o +} + +// SetPreemptible sets the list of preemptible instance types +func (o *InstanceTypesGKE) SetPreemptible(v []string) *InstanceTypesGKE { + if o.Preemptible = v; o.Preemptible == nil { + o.nullFields = append(o.nullFields, "Preemptible") + } + return o +} + +// endregion + +// region Integration setters + +func (o Integration) MarshalJSON() ([]byte, error) { + type noMethod Integration + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +// SetGKEIntegration sets the GKE integration +func (o *Integration) SetGKE(v *GKEIntegration) 
*Integration { + if o.GKE = v; o.GKE == nil { + o.nullFields = append(o.nullFields, "GKE") + } + return o +} + +// SetDockerSwarm sets the DockerSwarm integration +func (o *Integration) SetDockerSwarm(v *DockerSwarmIntegration) *Integration { + if o.DockerSwarm = v; o.DockerSwarm == nil { + o.nullFields = append(o.nullFields, "DockerSwarm") + } + return o +} + +// region GKE integration setters + +func (o GKEIntegration) MarshalJSON() ([]byte, error) { + type noMethod GKEIntegration + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +// SetAutoUpdate sets the autoupdate flag +func (o *GKEIntegration) SetAutoUpdate(v *bool) *GKEIntegration { + if o.AutoUpdate = v; o.AutoUpdate == nil { + o.nullFields = append(o.nullFields, "AutoUpdate") + } + return o +} + +// SetAutoScale sets the AutoScale configuration used with the GKE integration +func (o *GKEIntegration) SetAutoScale(v *AutoScaleGKE) *GKEIntegration { + if o.AutoScale = v; o.AutoScale == nil { + o.nullFields = append(o.nullFields, "AutoScale") + } + return o +} + +// SetLocation sets the location that the cluster is located in +func (o *GKEIntegration) SetLocation(v *string) *GKEIntegration { + if o.Location = v; o.Location == nil { + o.nullFields = append(o.nullFields, "Location") + } + return o +} + +// SetClusterID sets the cluster ID +func (o *GKEIntegration) SetClusterID(v *string) *GKEIntegration { + if o.ClusterID = v; o.ClusterID == nil { + o.nullFields = append(o.nullFields, "ClusterID") + } + return o +} + +// region GKE AutoScaling setters + +func (o AutoScaleGKE) MarshalJSON() ([]byte, error) { + type noMethod AutoScaleGKE + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +// SetLabels sets the AutoScale labels for the GKE integration +func (o *AutoScaleGKE) SetLabels(v []*AutoScaleLabel) *AutoScaleGKE { + if o.Labels = v; o.Labels == nil { + o.nullFields = append(o.nullFields, "Labels") + } + return o +} + +// 
endregion
+
+// endregion
+
+// region DockerSwarm integration setters
+
+func (o DockerSwarmIntegration) MarshalJSON() ([]byte, error) {
+	type noMethod DockerSwarmIntegration
+	raw := noMethod(o)
+	return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+// SetMasterPort sets the master port
+func (o *DockerSwarmIntegration) SetMasterPort(v *int) *DockerSwarmIntegration {
+	if o.MasterPort = v; o.MasterPort == nil {
+		o.nullFields = append(o.nullFields, "MasterPort")
+	}
+	return o
+}
+
+// SetMasterHost sets the master host
+func (o *DockerSwarmIntegration) SetMasterHost(v *string) *DockerSwarmIntegration {
+	if o.MasterHost = v; o.MasterHost == nil {
+		o.nullFields = append(o.nullFields, "MasterHost")
+	}
+	return o
+}
+
+// endregion
+
+// endregion
+
+// region Scaling Policy setters
+
+func (o Scaling) MarshalJSON() ([]byte, error) {
+	type noMethod Scaling
+	raw := noMethod(o)
+	return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+// SetUp sets the scaling policy to use when increasing the number of instances in a group.
+func (o *Scaling) SetUp(v []*ScalingPolicy) *Scaling {
+	if o.Up = v; o.Up == nil {
+		o.nullFields = append(o.nullFields, "Up")
+	}
+	return o
+}
+
+// SetDown sets the scaling policy to use when decreasing the number of instances in a group.
+func (o *Scaling) SetDown(v []*ScalingPolicy) *Scaling { + if o.Down = v; o.Down == nil { + o.nullFields = append(o.nullFields, "Down") + } + return o +} + +// region ScalingPolicy setters + +func (o ScalingPolicy) MarshalJSON() ([]byte, error) { + type noMethod ScalingPolicy + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +// SetAction sets the action to perform when scaling +func (o *ScalingPolicy) SetAction(v *Action) *ScalingPolicy { + if o.Action = v; o.Action == nil { + o.nullFields = append(o.nullFields, "Action") + } + return o +} + +// SetCooldown sets the cooldown time in seconds before triggered events can start +func (o *ScalingPolicy) SetCooldown(v *int) *ScalingPolicy { + if o.Cooldown = v; o.Cooldown == nil { + o.nullFields = append(o.nullFields, "Cooldown") + } + return o +} + +// SetDimensions sets the list of dimension objects +func (o *ScalingPolicy) SetDimensions(v []*Dimension) *ScalingPolicy { + if o.Dimensions = v; o.Dimensions == nil { + o.nullFields = append(o.nullFields, "Dimensions") + } + return o +} + +// SetEvaluationPeriods sets the number of periods over which data is compared +func (o *ScalingPolicy) SetEvaluationPeriods(v *int) *ScalingPolicy { + if o.EvaluationPeriods = v; o.EvaluationPeriods == nil { + o.nullFields = append(o.nullFields, "EvaluationPeriods") + } + return o +} + +// SetMetricName sets the name of the metric to compare +func (o *ScalingPolicy) SetMetricName(v *string) *ScalingPolicy { + if o.MetricName = v; o.MetricName == nil { + o.nullFields = append(o.nullFields, "MetricName") + } + return o +} + +// SetNamespace sets the namespace for the associated metric +func (o *ScalingPolicy) SetNamespace(v *string) *ScalingPolicy { + if o.Namespace = v; o.Namespace == nil { + o.nullFields = append(o.nullFields, "Namespace") + } + return o +} + +// SetOperator sets the operator (gte, lte) +func (o *ScalingPolicy) SetOperator(v *string) *ScalingPolicy { + if o.Operator = v; 
o.Operator == nil { + o.nullFields = append(o.nullFields, "Operator") + } + return o +} + +// SetPeriod sets the period in seconds over which the statistic is applied +func (o *ScalingPolicy) SetPeriod(v *int) *ScalingPolicy { + if o.Period = v; o.Period == nil { + o.nullFields = append(o.nullFields, "Period") + } + return o +} + +// SetPolicyName sets the name of the scaling policy +func (o *ScalingPolicy) SetPolicyName(v *string) *ScalingPolicy { + if o.PolicyName = v; o.PolicyName == nil { + o.nullFields = append(o.nullFields, "PolicyName") + } + return o +} + +// SetSource sets the source of the metric (spectrum, stackdriver) +func (o *ScalingPolicy) SetSource(v *string) *ScalingPolicy { + if o.Source = v; o.Source == nil { + o.nullFields = append(o.nullFields, "Source") + } + return o +} + +// SetStatistic sets the metric aggregator to return (average, sum, min, max) +func (o *ScalingPolicy) SetStatistic(v *string) *ScalingPolicy { + if o.Statistic = v; o.Statistic == nil { + o.nullFields = append(o.nullFields, "Statistic") + } + return o +} + +// SetThreshold sets the value against which the metric is compared +func (o *ScalingPolicy) SetThreshold(v *float64) *ScalingPolicy { + if o.Threshold = v; o.Threshold == nil { + o.nullFields = append(o.nullFields, "Threshold") + } + return o +} + +// SetUnit sets the unit for the associated metric +func (o *ScalingPolicy) SetUnit(v *string) *ScalingPolicy { + if o.Unit = v; o.Unit == nil { + o.nullFields = append(o.nullFields, "Unit") + } + return o +} + +// region Action setters + +func (o Action) MarshalJSON() ([]byte, error) { + type noMethod Action + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +// SetAdjustment sets the number associated with the action type +func (o *Action) SetAdjustment(v *int) *Action { + if o.Adjustment = v; o.Adjustment == nil { + o.nullFields = append(o.nullFields, "Adjustment") + } + return o +} + +// SetType sets the type of action to take 
when scaling (adjustment) +func (o *Action) SetType(v *string) *Action { + if o.Type = v; o.Type == nil { + o.nullFields = append(o.nullFields, "Type") + } + return o +} + +// endregion + +// region Dimension setters + +func (o Dimension) MarshalJSON() ([]byte, error) { + type noMethod Dimension + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +// SetName sets the name of the dimension +func (o *Dimension) SetName(v *string) *Dimension { + if o.Name = v; o.Name == nil { + o.nullFields = append(o.nullFields, "Name") + } + return o +} + +// SetValue sets the value of the dimension +func (o *Dimension) SetValue(v *string) *Dimension { + if o.Value = v; o.Value == nil { + o.nullFields = append(o.nullFields, "Value") + } + return o +} + +// endregion + +// endregion + +// endregion + +// region Scheduling + +func (o Scheduling) MarshalJSON() ([]byte, error) { + type noMethod Scheduling + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *Scheduling) SetTasks(v []*Task) *Scheduling { + if o.Tasks = v; o.Tasks == nil { + o.nullFields = append(o.nullFields, "Tasks") + } + return o +} + +// endregion + +// region Task + +func (o Task) MarshalJSON() ([]byte, error) { + type noMethod Task + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *Task) SetIsEnabled(v *bool) *Task { + if o.IsEnabled = v; o.IsEnabled == nil { + o.nullFields = append(o.nullFields, "IsEnabled") + } + return o +} + +func (o *Task) SetType(v *string) *Task { + if o.Type = v; o.Type == nil { + o.nullFields = append(o.nullFields, "Type") + } + return o +} + +func (o *Task) SetCronExpression(v *string) *Task { + if o.CronExpression = v; o.CronExpression == nil { + o.nullFields = append(o.nullFields, "CronExpression") + } + return o +} + +func (o *Task) SetTargetCapacity(v *int) *Task { + if o.TargetCapacity = v; o.TargetCapacity == nil { + o.nullFields = 
append(o.nullFields, "TargetCapacity") + } + return o +} + +func (o *Task) SetMinCapacity(v *int) *Task { + if o.MinCapacity = v; o.MinCapacity == nil { + o.nullFields = append(o.nullFields, "MinCapacity") + } + return o +} + +func (o *Task) SetMaxCapacity(v *int) *Task { + if o.MaxCapacity = v; o.MaxCapacity == nil { + o.nullFields = append(o.nullFields, "MaxCapacity") + } + return o +} + +// endregion + +// region Strategy setters + +func (o Strategy) MarshalJSON() ([]byte, error) { + type noMethod Strategy + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +// SetDrainingTimeout sets the time to keep an instance alive after detaching it from the group +func (o *Strategy) SetDrainingTimeout(v *int) *Strategy { + if o.DrainingTimeout = v; o.DrainingTimeout == nil { + o.nullFields = append(o.nullFields, "DrainingTimeout") + } + return o +} + +// SetFallbackToOnDemand sets the option to fall back to on demand instances if preemptible instances aren't available +func (o *Strategy) SetFallbackToOnDemand(v *bool) *Strategy { + if o.FallbackToOnDemand = v; o.FallbackToOnDemand == nil { + o.nullFields = append(o.nullFields, "FallbackToOnDemand") + } + return o +} + +// SetPreemptiblePercentage sets the ratio of preemptible instances to use in the group +func (o *Strategy) SetPreemptiblePercentage(v *int) *Strategy { + if o.PreemptiblePercentage = v; o.PreemptiblePercentage == nil { + o.nullFields = append(o.nullFields, "PreemptiblePercentage") + } + return o +} + +// SetOnDemandCount sets the number of on demand instances to use in the group. 
+func (o *Strategy) SetOnDemandCount(v *int) *Strategy { + if o.OnDemandCount = v; o.OnDemandCount == nil { + o.nullFields = append(o.nullFields, "OnDemandCount") + } + return o +} + +func (o *Strategy) SetProvisioningModel(v *string) *Strategy { + if o.ProvisioningModel = v; o.ProvisioningModel == nil { + o.nullFields = append(o.nullFields, "ProvisioningModel") + } + return o +} + +// endregion diff --git a/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/gcp/service.go b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/gcp/service.go new file mode 100644 index 000000000000..cb14107364f7 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/gcp/service.go @@ -0,0 +1,38 @@ +package gcp + +import ( + "context" + + "github.com/spotinst/spotinst-sdk-go/spotinst" + "github.com/spotinst/spotinst-sdk-go/spotinst/client" + "github.com/spotinst/spotinst-sdk-go/spotinst/session" +) + +// Service provides the API operation methods for making requests to endpoints +// of the Spotinst API. See this package's package overview docs for details on +// the service. +type Service interface { + Create(context.Context, *CreateGroupInput) (*CreateGroupOutput, error) + Read(context.Context, *ReadGroupInput) (*ReadGroupOutput, error) + Update(context.Context, *UpdateGroupInput) (*UpdateGroupOutput, error) + Delete(context.Context, *DeleteGroupInput) (*DeleteGroupOutput, error) + List(context.Context, *ListGroupsInput) (*ListGroupsOutput, error) + ImportGKECluster(context.Context, *ImportGKEClusterInput) (*ImportGKEClusterOutput, error) + Status(context.Context, *StatusGroupInput) (*StatusGroupOutput, error) +} + +type ServiceOp struct { + Client *client.Client +} + +var _ Service = &ServiceOp{} + +func New(sess *session.Session, cfgs ...*spotinst.Config) *ServiceOp { + cfg := &spotinst.Config{} + cfg.Merge(sess.Config) + cfg.Merge(cfgs...) 
+ + return &ServiceOp{ + Client: client.New(sess.Config), + } +} diff --git a/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/gcp/tag.go b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/gcp/tag.go new file mode 100644 index 000000000000..b39119548e03 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/gcp/tag.go @@ -0,0 +1,31 @@ +package gcp + +import "github.com/spotinst/spotinst-sdk-go/spotinst/util/jsonutil" + +type Tag struct { + Key *string `json:"tagKey,omitempty"` + Value *string `json:"tagValue,omitempty"` + + forceSendFields []string + nullFields []string +} + +func (o Tag) MarshalJSON() ([]byte, error) { + type noMethod Tag + raw := noMethod(o) + return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields) +} + +func (o *Tag) SetKey(v *string) *Tag { + if o.Key = v; o.Key == nil { + o.nullFields = append(o.nullFields, "Key") + } + return o +} + +func (o *Tag) SetValue(v *string) *Tag { + if o.Value = v; o.Value == nil { + o.nullFields = append(o.nullFields, "Value") + } + return o +} diff --git a/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/client/client.go b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/client/client.go new file mode 100644 index 000000000000..4e2db7c1cb7c --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/client/client.go @@ -0,0 +1,93 @@ +package client + +import ( + "context" + "net/http" + "net/http/httputil" + "net/url" + + "github.com/spotinst/spotinst-sdk-go/spotinst" +) + +// Client provides a client to the API. +type Client struct { + config *spotinst.Config +} + +// New returns a new client. +func New(cfg *spotinst.Config) *Client { + if cfg == nil { + cfg = spotinst.DefaultConfig() + } + return &Client{cfg} +} + +// NewRequest is used to create a new request. 
+func NewRequest(method, path string) *Request { + return &Request{ + method: method, + url: &url.URL{ + Path: path, + }, + header: make(http.Header), + Params: make(url.Values), + } +} + +// Do runs a request with our client. +func (c *Client) Do(ctx context.Context, r *Request) (*http.Response, error) { + req, err := r.toHTTP(ctx, c.config) + if err != nil { + return nil, err + } + c.logRequest(req) + resp, err := c.config.HTTPClient.Do(req) + c.logResponse(resp) + return resp, err +} + +func (c *Client) logf(format string, args ...interface{}) { + if c.config.Logger != nil { + c.config.Logger.Printf(format, args...) + } +} + +const logReqMsg = `SPOTINST: Request "%s %s" details: +---[ REQUEST ]--------------------------------------- +%s +-----------------------------------------------------` + +func (c *Client) logRequest(req *http.Request) { + if c.config.Logger != nil && req != nil { + out, err := httputil.DumpRequestOut(req, true) + if err == nil { + c.logf(logReqMsg, req.Method, req.URL, string(out)) + } + } +} + +const logRespMsg = `SPOTINST: Response "%s %s" details: +---[ RESPONSE ]---------------------------------------- +%s +-------------------------------------------------------` + +func (c *Client) logResponse(resp *http.Response) { + if c.config.Logger != nil && resp != nil { + out, err := httputil.DumpResponse(resp, true) + if err == nil { + c.logf(logRespMsg, resp.Request.Method, resp.Request.URL, string(out)) + } + } +} + +// Do runs a request with our client. 
+func (c *Client) DoOrg(ctx context.Context, r *Request) (*http.Response, error) { + req, err := r.toHTTPOrg(ctx, c.config) + if err != nil { + return nil, err + } + c.logRequest(req) + resp, err := c.config.HTTPClient.Do(req) + c.logResponse(resp) + return resp, err +} diff --git a/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/client/request.go b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/client/request.go new file mode 100644 index 000000000000..f00a68fcf8c9 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/client/request.go @@ -0,0 +1,119 @@ +package client + +import ( + "bytes" + "context" + "encoding/json" + "io" + "net/http" + "net/url" + + "github.com/spotinst/spotinst-sdk-go/spotinst" +) + +type Request struct { + Obj interface{} + Params url.Values + url *url.URL + method string + body io.Reader + header http.Header +} + +// toHTTP converts the request to an HTTP request. +func (r *Request) toHTTP(ctx context.Context, cfg *spotinst.Config) (*http.Request, error) { + // Set the user credentials. + creds, err := cfg.Credentials.Get() + if err != nil { + return nil, err + } + if creds.Token != "" { + r.header.Set("Authorization", "Bearer "+creds.Token) + } + if creds.Account != "" { + r.Params.Set("accountId", creds.Account) + } + + // Encode the query parameters. + r.url.RawQuery = r.Params.Encode() + + // Check if we should encode the body. + if r.body == nil && r.Obj != nil { + if b, err := EncodeBody(r.Obj); err != nil { + return nil, err + } else { + r.body = b + } + } + + // Create the HTTP request. + req, err := http.NewRequest(r.method, r.url.RequestURI(), r.body) + if err != nil { + return nil, err + } + + // Set request base URL. + req.URL.Host = cfg.BaseURL.Host + req.URL.Scheme = cfg.BaseURL.Scheme + + // Set request headers. 
+ req.Host = cfg.BaseURL.Host + req.Header = r.header + req.Header.Set("Content-Type", cfg.ContentType) + req.Header.Add("Accept", cfg.ContentType) + req.Header.Add("User-Agent", cfg.UserAgent) + + return req.WithContext(ctx), nil +} + +// EncodeBody is used to encode a request body +func EncodeBody(obj interface{}) (io.Reader, error) { + buf := bytes.NewBuffer(nil) + if err := json.NewEncoder(buf).Encode(obj); err != nil { + return nil, err + } + return buf, nil +} + +// toHTTPOrg converts the request to an HTTP request, omitting the account ID query parameter. +func (r *Request) toHTTPOrg(ctx context.Context, cfg *spotinst.Config) (*http.Request, error) { + // Set the user credentials. + creds, err := cfg.Credentials.Get() + if err != nil { + return nil, err + } + if creds.Token != "" { + r.header.Set("Authorization", "Bearer "+creds.Token) + } + + // Encode the query parameters. + r.url.RawQuery = r.Params.Encode() + + // Check if we should encode the body. + if r.body == nil && r.Obj != nil { + if b, err := EncodeBody(r.Obj); err != nil { + return nil, err + } else { + r.body = b + } + } + + // Create the HTTP request. + req, err := http.NewRequest(r.method, r.url.RequestURI(), r.body) + if err != nil { + return nil, err + } + + // Set request base URL. + req.URL.Host = cfg.BaseURL.Host + req.URL.Scheme = cfg.BaseURL.Scheme + + // Set request headers. 
+ req.Host = cfg.BaseURL.Host + req.Header = r.header + req.Header.Set("Content-Type", cfg.ContentType) + req.Header.Add("Accept", cfg.ContentType) + req.Header.Add("User-Agent", cfg.UserAgent) + + return req.WithContext(ctx), nil +} diff --git a/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/client/response.go b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/client/response.go new file mode 100644 index 000000000000..0956ea42b9d0 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/client/response.go @@ -0,0 +1,110 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "strconv" +) + +type Response struct { + Request struct { + ID string `json:"id"` + } `json:"request"` + Response struct { + Errors []responseError `json:"errors"` + Items []json.RawMessage `json:"items"` + } `json:"response"` +} + +type responseError struct { + Code string `json:"code"` + Message string `json:"message"` + Field string `json:"field"` +} + +type Error struct { + Response *http.Response `json:"-"` + Code string `json:"code"` + Message string `json:"message"` + Field string `json:"field"` + RequestID string `json:"requestId"` +} + +func (e Error) Error() string { + msg := fmt.Sprintf("%v %v: %d (request: %q) %v: %v", + e.Response.Request.Method, e.Response.Request.URL, + e.Response.StatusCode, e.RequestID, e.Code, e.Message) + + if e.Field != "" { + msg = fmt.Sprintf("%s (field: %v)", msg, e.Field) + } + + return msg +} + +type Errors []Error + +func (es Errors) Error() string { + var stack string + for _, e := range es { + stack += e.Error() + "\n" + } + return stack +} + +// DecodeBody is used to JSON decode a body +func DecodeBody(resp *http.Response, out interface{}) error { + return json.NewDecoder(resp.Body).Decode(out) +} + +// RequireOK is used to verify response status code is a successful one (200 OK) +func RequireOK(resp *http.Response, err 
error) (*http.Response, error) { + if err != nil { + return nil, err + } + if resp.StatusCode != http.StatusOK { + return nil, extractError(resp) + } + return resp, nil +} + +// extractError is used to extract inner/logical errors from the response +func extractError(resp *http.Response) error { + buf := bytes.NewBuffer(nil) + + // TeeReader returns a Reader that writes to b what it reads from r.Body. + reader := io.TeeReader(resp.Body, buf) + defer resp.Body.Close() + resp.Body = ioutil.NopCloser(buf) + + var out Response + if err := json.NewDecoder(reader).Decode(&out); err != nil { + return err + } + + var errors Errors + if errs := out.Response.Errors; len(errs) > 0 { + for _, err := range errs { + errors = append(errors, Error{ + Response: resp, + RequestID: out.Request.ID, + Code: err.Code, + Message: err.Message, + Field: err.Field, + }) + } + } else { + errors = append(errors, Error{ + Response: resp, + RequestID: out.Request.ID, + Code: strconv.Itoa(resp.StatusCode), + Message: http.StatusText(resp.StatusCode), + }) + } + + return errors +} diff --git a/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/config.go b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/config.go new file mode 100644 index 000000000000..7639fe3c3a07 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/config.go @@ -0,0 +1,183 @@ +package spotinst + +import ( + "net" + "net/http" + "net/url" + "runtime" + "strings" + "time" + + "github.com/spotinst/spotinst-sdk-go/spotinst/credentials" + "github.com/spotinst/spotinst-sdk-go/spotinst/log" + "github.com/spotinst/spotinst-sdk-go/spotinst/util/useragent" +) + +const ( + // defaultBaseURL is the default base URL of the Spotinst API. + // It is used e.g. when initializing a new Client without a specific address. + defaultBaseURL = "https://api.spotinst.io" + + // defaultContentType is the default content type to use when making HTTP calls. 
+ defaultContentType = "application/json" +) + +// A Config provides Configuration to a service client instance. +type Config struct { + // The base URL the SDK's HTTP client will use when invoking HTTP requests. + BaseURL *url.URL + + // The HTTP Client the SDK's API clients will use to invoke HTTP requests. + // + // Defaults to a DefaultHTTPClient allowing API clients to create copies of + // the HTTP client for service specific customizations. + HTTPClient *http.Client + + // The credentials object to use when signing requests. + // + // Defaults to a chain of credential providers to search for credentials in + // environment variables and shared credential file. + Credentials *credentials.Credentials + + // The logger writer interface to write logging messages to. + // + // Defaults to standard out. + Logger log.Logger + + // The User-Agent and Content-Type HTTP headers to set when invoking HTTP + // requests. + UserAgent, ContentType string +} + +// DefaultBaseURL returns the default base URL. +func DefaultBaseURL() *url.URL { + baseURL, _ := url.Parse(defaultBaseURL) + return baseURL +} + +// DefaultUserAgent returns the default User-Agent header. +func DefaultUserAgent() string { + return useragent.New( + SDKName, + SDKVersion, + runtime.Version(), + runtime.GOOS, + runtime.GOARCH).String() +} + +// DefaultContentType returns the default Content-Type header. +func DefaultContentType() string { + return defaultContentType +} + +// DefaultTransport returns a new http.Transport with similar default values to +// http.DefaultTransport. Do not use this for transient transports as it can +// leak file descriptors over time. Only use this for transports that will be +// re-used for the same host(s). 
+func DefaultTransport() *http.Transport { + return &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).DialContext, + TLSHandshakeTimeout: 10 * time.Second, + DisableKeepAlives: false, + MaxIdleConnsPerHost: 1, + } +} + +// DefaultHTTPClient returns a new http.Client with similar default values to +// http.Client, but with a non-shared Transport, idle connections disabled, and +// KeepAlives disabled. +func DefaultHTTPClient() *http.Client { + return &http.Client{ + Transport: DefaultTransport(), + } +} + +// DefaultConfig returns a default configuration for the client. By default this +// will pool and reuse idle connections to API. If you have a long-lived client +// object, this is the desired behavior and should make the most efficient use +// of the connections to API. +func DefaultConfig() *Config { + return &Config{ + BaseURL: DefaultBaseURL(), + HTTPClient: DefaultHTTPClient(), + UserAgent: DefaultUserAgent(), + ContentType: DefaultContentType(), + Credentials: credentials.NewChainCredentials( + new(credentials.EnvProvider), + new(credentials.FileProvider), + ), + } +} + +// WithBaseURL defines the base URL of the Spotinst API. +func (c *Config) WithBaseURL(rawurl string) *Config { + baseURL, _ := url.Parse(rawurl) + c.BaseURL = baseURL + return c +} + +// WithHTTPClient defines the HTTP client. +func (c *Config) WithHTTPClient(client *http.Client) *Config { + c.HTTPClient = client + return c +} + +// WithCredentials defines the credentials. +func (c *Config) WithCredentials(creds *credentials.Credentials) *Config { + c.Credentials = creds + return c +} + +// WithUserAgent defines the user agent. +func (c *Config) WithUserAgent(ua string) *Config { + c.UserAgent = strings.TrimSpace(strings.Join([]string{ua, c.UserAgent}, " ")) + return c +} + +// WithContentType defines the content type. 
+func (c *Config) WithContentType(ct string) *Config { + c.ContentType = ct + return c +} + +// WithLogger defines the logger for informational messages, e.g. requests +// and their response times. It is nil by default. +func (c *Config) WithLogger(logger log.Logger) *Config { + c.Logger = logger + return c +} + +// Merge merges the passed in configs into the existing config object. +func (c *Config) Merge(cfgs ...*Config) { + for _, cfg := range cfgs { + mergeConfigs(c, cfg) + } +} + +func mergeConfigs(c1, c2 *Config) { + if c2 == nil { + return + } + if c2.BaseURL != nil { + c1.BaseURL = c2.BaseURL + } + if c2.Credentials != nil { + c1.Credentials = c2.Credentials + } + if c2.HTTPClient != nil { + c1.HTTPClient = c2.HTTPClient + } + if c2.UserAgent != "" { + c1.UserAgent = c2.UserAgent + } + if c2.ContentType != "" { + c1.ContentType = c2.ContentType + } + if c2.Logger != nil { + c1.Logger = c2.Logger + } +} diff --git a/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/credentials/credentials.go b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/credentials/credentials.go new file mode 100644 index 000000000000..fcea42bd9e4b --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/credentials/credentials.go @@ -0,0 +1,67 @@ +package credentials + +import ( + "errors" + "sync" +) + +// ErrNoValidTokenFound is returned when there is no valid token. +var ErrNoValidTokenFound = errors.New("spotinst: no valid token found") + +// A Credentials provides synchronous safe retrieval of Spotinst credentials. +// Credentials will cache the credentials value. +// +// Credentials is safe to use across multiple goroutines and will manage the +// synchronous state so the Providers do not need to implement their own +// synchronization. +// +// The first Credentials.Get() will always call Provider.Retrieve() to get the +// first instance of the credentials Value. 
All calls to Get() after that will +// return the cached credentials Value. +type Credentials struct { + provider Provider + mu sync.Mutex + forceRefresh bool + creds Value +} + +// NewCredentials returns a pointer to a new Credentials with the provider set. +func NewCredentials(provider Provider) *Credentials { + return &Credentials{ + provider: provider, + forceRefresh: true, + } +} + +// Get returns the credentials value, or error if the credentials Value failed +// to be retrieved. +// +// Will return the cached credentials Value. If the credentials Value is empty +// the Provider's Retrieve() will be called to refresh the credentials. +func (c *Credentials) Get() (Value, error) { + c.mu.Lock() + defer c.mu.Unlock() + + if c.creds.Token == "" || c.forceRefresh { + creds, err := c.provider.Retrieve() + if err != nil { + return Value{}, err + } + if creds.Token == "" { + return Value{ProviderName: creds.ProviderName}, ErrNoValidTokenFound + } + c.creds = creds + c.forceRefresh = false + } + + return c.creds, nil +} + +// Refresh refreshes the credentials and forces them to be retrieved on the next +// call to Get(). +func (c *Credentials) Refresh() { + c.mu.Lock() + defer c.mu.Unlock() + + c.forceRefresh = true +} diff --git a/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/credentials/provider.go b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/credentials/provider.go new file mode 100644 index 000000000000..1468ff3fd85c --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/credentials/provider.go @@ -0,0 +1,44 @@ +package credentials + +import "fmt" + +// A Value is the Spotinst credentials value for individual credential fields. +type Value struct { + // Spotinst API token. + Token string `ini:"token" json:"token"` + + // Spotinst account ID. + Account string `ini:"account" json:"account"` + + // Provider used to get credentials. 
+ ProviderName string `ini:"-" json:"-"` +} + +// A Provider is the interface for any component which will provide credentials +// Value. +// +// The Provider should not need to implement its own mutexes, because that will +// be managed by Credentials. +type Provider interface { + fmt.Stringer + + // Retrieve returns nil if it successfully retrieved the value. Error is + // returned if the value was not obtainable, or empty. + Retrieve() (Value, error) +} + +// IsEmpty if all fields of a Value are empty. +func (v *Value) IsEmpty() bool { return v.Token == "" && v.Account == "" } + +// IsComplete if all fields of a Value are set. +func (v *Value) IsComplete() bool { return v.Token != "" && v.Account != "" } + +// Merge merges the passed in Value into the existing Value object. +func (v *Value) Merge(v2 Value) { + if v.Token == "" { + v.Token = v2.Token + } + if v.Account == "" { + v.Account = v2.Account + } +} diff --git a/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/credentials/provider_chain.go b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/credentials/provider_chain.go new file mode 100644 index 000000000000..32b568048838 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/credentials/provider_chain.go @@ -0,0 +1,117 @@ +package credentials + +import ( + "errors" + "fmt" + + "github.com/spotinst/spotinst-sdk-go/spotinst/featureflag" +) + +// ErrNoValidProvidersFoundInChain is returned when there are no valid credentials +// providers in the ChainProvider. +var ErrNoValidProvidersFoundInChain = errors.New("spotinst: no valid " + + "credentials providers in chain") + +// A ChainProvider will search for a provider which returns credentials. +// +// The ChainProvider provides a way of chaining multiple providers together which +// will pick the first available using priority order of the Providers in the list. 
+// +// If none of the Providers retrieve valid credentials Value, ChainProvider's +// Retrieve() will return the error ErrNoValidProvidersFoundInChain. +// +// If a Provider is found which returns valid credentials Value ChainProvider +// will cache that Provider for all calls until Retrieve is called again. +// +// Example of ChainProvider to be used with an EnvCredentialsProvider and +// FileCredentialsProvider. In this example EnvProvider will first check if any +// credentials are available via the environment variables. If there are none +// ChainProvider will check the next Provider in the list, FileProvider in this +// case. If FileCredentialsProvider does not return any credentials ChainProvider +// will return the error ErrNoValidProvidersFoundInChain. +// +// creds := credentials.NewChainCredentials( +// new(credentials.EnvProvider), +// new(credentials.FileProvider), +// ) +type ChainProvider struct { + Providers []Provider +} + +// NewChainCredentials returns a pointer to a new Credentials object +// wrapping a chain of providers. +func NewChainCredentials(providers ...Provider) *Credentials { + return NewCredentials(&ChainProvider{ + Providers: providers, + }) +} + +// Retrieve returns the credentials value or error if no provider returned +// without error. +func (c *ChainProvider) Retrieve() (Value, error) { + var value Value + var errs errorList + + for _, p := range c.Providers { + v, err := p.Retrieve() + if err == nil { + if featureflag.MergeCredentialsChain.Enabled() { + value.Merge(v) + if value.IsComplete() { + return value, nil + } + } else { + value = v + break + } + } else { + errs = append(errs, err) + } + } + + if value.Token == "" { + err := ErrNoValidProvidersFoundInChain + if len(errs) > 0 { + err = errs + } + + return Value{ProviderName: c.String()}, err + } + + return value, nil +} + +// String returns the string representation of the provider. 
+func (c *ChainProvider) String() string { + var out string + for i, provider := range c.Providers { + out += provider.String() + if i < len(c.Providers)-1 { + out += " " + } + } + return out +} + +// An error list that satisfies the error interface. +type errorList []error + +// Error returns the string representation of the error. +// +// Satisfies the error interface. +func (e errorList) Error() string { + msg := "" + if size := len(e); size > 0 { + for i := 0; i < size; i++ { + msg += fmt.Sprintf("%s", e[i].Error()) + + // Check the next index to see if it is within the slice. If it is, + // append a newline. We do this, because unit tests could be broken + // with the additional '\n'. + if i+1 < size { + msg += "\n" + } + } + } + return msg +} diff --git a/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/credentials/provider_env.go b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/credentials/provider_env.go new file mode 100644 index 000000000000..92b1088ee052 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/credentials/provider_env.go @@ -0,0 +1,56 @@ +package credentials + +import ( + "fmt" + "os" +) + +const ( + // EnvCredentialsProviderName specifies the name of the Env provider. + EnvCredentialsProviderName = "EnvCredentialsProvider" + + // EnvCredentialsVarToken specifies the name of the environment variable + // points to the Spotinst Token. + EnvCredentialsVarToken = "SPOTINST_TOKEN" + + // EnvCredentialsVarAccount specifies the name of the environment variable + // points to the Spotinst account ID. + EnvCredentialsVarAccount = "SPOTINST_ACCOUNT" +) + +// ErrEnvCredentialsNotFound is returned when no credentials can be found in the +// process's environment. 
+var ErrEnvCredentialsNotFound = fmt.Errorf("spotinst: %s and %s not found "+ + "in environment", EnvCredentialsVarToken, EnvCredentialsVarAccount) + +// A EnvProvider retrieves credentials from the environment variables of the +// running process. +// +// Environment variables used: +// * Token : SPOTINST_TOKEN +// * Account : SPOTINST_ACCOUNT +type EnvProvider struct{} + +// NewEnvCredentials returns a pointer to a new Credentials object wrapping the +// environment variable provider. +func NewEnvCredentials() *Credentials { + return NewCredentials(&EnvProvider{}) +} + +// Retrieve retrieves the keys from the environment. +func (e *EnvProvider) Retrieve() (Value, error) { + value := Value{ + Token: os.Getenv(EnvCredentialsVarToken), + Account: os.Getenv(EnvCredentialsVarAccount), + ProviderName: EnvCredentialsProviderName, + } + + if value.IsEmpty() { + return value, ErrEnvCredentialsNotFound + } + + return value, nil +} + +// String returns the string representation of the provider. +func (e *EnvProvider) String() string { return EnvCredentialsProviderName } diff --git a/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/credentials/provider_file.go b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/credentials/provider_file.go new file mode 100644 index 000000000000..6b7236de3b9d --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/credentials/provider_file.go @@ -0,0 +1,207 @@ +package credentials + +import ( + "encoding/json" + "errors" + "fmt" + "os" + "path/filepath" + "runtime" + + "gopkg.in/ini.v1" +) + +const ( + // FileCredentialsProviderName specifies the name of the File provider. + FileCredentialsProviderName = "FileCredentialsProvider" + + // FileCredentialsEnvVarFile specifies the name of the environment variable + // points to the location of the credentials file. 
+ FileCredentialsEnvVarFile = "SPOTINST_CREDENTIALS_FILE" + + // FileCredentialsEnvVarProfile specifies the name of the environment variable + // points to a profile name to use when loading credentials. + FileCredentialsEnvVarProfile = "SPOTINST_CREDENTIALS_PROFILE" +) + +var ( + // ErrFileCredentialsLoadFailed is returned when the provider is unable to load + // credentials from the credentials file. + ErrFileCredentialsLoadFailed = errors.New("spotinst: failed to load credentials file") + + // ErrFileCredentialsNotFound is returned when the loaded credentials + // are empty. + ErrFileCredentialsNotFound = errors.New("spotinst: credentials file or profile is empty") +) + +// DefaultProfile returns the SDK's default profile name to use when loading +// credentials. +func DefaultProfile() string { + return "default" +} + +// DefaultFilename returns the SDK's default file path for the credentials file. +// +// Builds the config file path based on the OS's platform. +// - Linux/Unix : $HOME/.spotinst/credentials +// - Windows : %USERPROFILE%\.spotinst\credentials +func DefaultFilename() string { + return filepath.Join(userHomeDir(), ".spotinst", "credentials") +} + +// A FileProvider retrieves credentials from the current user's home directory. +type FileProvider struct { + // Profile to load. + Profile string + + // Path to the credentials file. + // + // If empty will look for FileCredentialsEnvVarFile env variable. If the + // env value is empty will default to current user's home directory. + // - Linux/Unix : $HOME/.spotinst/credentials + // - Windows : %USERPROFILE%\.spotinst\credentials + Filename string + + // retrieved states if the credentials have been successfully retrieved. + retrieved bool +} + +// NewFileCredentials returns a pointer to a new Credentials object wrapping the +// file provider. 
+func NewFileCredentials(profile, filename string) *Credentials { + return NewCredentials(&FileProvider{ + Profile: profile, + Filename: filename, + }) +} + +// Retrieve reads and extracts the shared credentials from the current users home +// directory. +func (p *FileProvider) Retrieve() (Value, error) { + p.retrieved = false + + value, err := p.loadCredentials(p.profile(), p.filename()) + if err != nil { + return value, err + } + + if len(value.ProviderName) == 0 { + value.ProviderName = FileCredentialsProviderName + } + + p.retrieved = true + return value, nil +} + +// String returns the string representation of the provider. +func (p *FileProvider) String() string { return FileCredentialsProviderName } + +// profile returns the profile to use to read the user credentials. +func (p *FileProvider) profile() string { + if p.Profile == "" { + if p.Profile = os.Getenv(FileCredentialsEnvVarProfile); p.Profile != "" { + return p.Profile + } + + p.Profile = DefaultProfile() + } + + return p.Profile +} + +// filename returns the filename to use to read the user credentials. +func (p *FileProvider) filename() string { + if p.Filename == "" { + if p.Filename = os.Getenv(FileCredentialsEnvVarFile); p.Filename != "" { + return p.Filename + } + + p.Filename = DefaultFilename() + } + + return p.Filename +} + +// loadCredentials loads the credentials from the file pointed to by filename. +// The credentials retrieved from the profile will be returned or error. Error +// will be returned if it fails to read from the file, or the data is invalid. 
+func (p *FileProvider) loadCredentials(profile, filename string) (Value, error) { + var value Value + var iniErr, jsonErr error + + if value, iniErr = p.loadCredentialsINI(profile, filename); iniErr != nil { + if value, jsonErr = p.loadCredentialsJSON(profile, filename); jsonErr != nil { + return value, fmt.Errorf("%v: %v", ErrFileCredentialsLoadFailed, iniErr) + } + } + + if value.IsEmpty() { + return value, ErrFileCredentialsNotFound + } + + return value, nil +} + +func (p *FileProvider) loadCredentialsINI(profile, filename string) (Value, error) { + var value Value + + config, err := ini.Load(filename) + if err != nil { + return value, err + } + + value, err = getCredentialsFromINIProfile(profile, config) + if err != nil { + return value, err + } + + // Try to complete missing fields with default profile. + if profile != DefaultProfile() && !value.IsComplete() { + defaultValue, err := getCredentialsFromINIProfile(DefaultProfile(), config) + if err == nil { + value.Merge(defaultValue) + } + } + + return value, nil +} + +func getCredentialsFromINIProfile(profile string, config *ini.File) (Value, error) { + var value Value + + section, err := config.GetSection(profile) + if err != nil { + return value, err + } + + if err := section.StrictMapTo(&value); err != nil { + return value, err + } + + return value, nil +} + +func (p *FileProvider) loadCredentialsJSON(profile, filename string) (Value, error) { + var value Value + + f, err := os.Open(filename) + if err != nil { + return value, err + } + defer f.Close() + + if err := json.NewDecoder(f).Decode(&value); err != nil { + return value, err + } + + return value, nil +} + +func userHomeDir() string { + if runtime.GOOS == "windows" { // Windows + return os.Getenv("USERPROFILE") + } + + // *nix + return os.Getenv("HOME") +} diff --git a/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/credentials/provider_static.go 
b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/credentials/provider_static.go new file mode 100644 index 000000000000..2ed6b3db8572 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/credentials/provider_static.go @@ -0,0 +1,38 @@ +package credentials + +import ( + "errors" +) + +// StaticCredentialsProviderName specifies the name of the Static provider. +const StaticCredentialsProviderName = "StaticCredentialsProvider" + +// ErrStaticCredentialsEmpty is returned when static credentials are empty. +var ErrStaticCredentialsEmpty = errors.New("spotinst: static credentials are empty") + +// A StaticProvider is a set of credentials which are set programmatically. +type StaticProvider struct { + Value +} + +// NewStaticCredentials returns a pointer to a new Credentials object wrapping +// a static credentials value provider. +func NewStaticCredentials(token, account string) *Credentials { + return NewCredentials(&StaticProvider{Value: Value{ + ProviderName: StaticCredentialsProviderName, + Token: token, + Account: account, + }}) +} + +// Retrieve returns the credentials or error if the credentials are invalid. +func (s *StaticProvider) Retrieve() (Value, error) { + if s.IsEmpty() { + return s.Value, ErrStaticCredentialsEmpty + } + + return s.Value, nil +} + +// String returns the string representation of the provider. +func (s *StaticProvider) String() string { return StaticCredentialsProviderName } diff --git a/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/featureflag/featureflag.go b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/featureflag/featureflag.go new file mode 100644 index 000000000000..c065c35fd670 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/featureflag/featureflag.go @@ -0,0 +1,119 @@ +package featureflag + +import ( + "fmt" + "strconv" + "strings" + "sync" +) + +// All registered feature flags. 
// All registered feature flags, guarded by flagsMutex.
var (
	flagsMutex sync.Mutex
	flags      = make(map[string]FeatureFlag)
)

// FeatureFlag indicates whether a given feature is enabled or not.
type FeatureFlag interface {
	fmt.Stringer

	// Name returns the name of the feature flag.
	Name() string

	// Enabled returns true if the feature is enabled.
	Enabled() bool
}

// featureFlag represents a feature being gated.
type featureFlag struct {
	name    string
	enabled bool
}

// New registers a feature flag under name (reusing an existing registration
// if present), sets its enabled state, and returns it.
func New(name string, enabled bool) FeatureFlag {
	flagsMutex.Lock()
	defer flagsMutex.Unlock()

	existing, ok := flags[name]
	if !ok {
		existing = &featureFlag{name: name}
		flags[name] = existing
	}

	existing.(*featureFlag).enabled = enabled
	return existing
}

// Name returns the name of the feature flag.
func (f *featureFlag) Name() string { return f.name }

// Enabled returns true if the feature is enabled.
func (f *featureFlag) Enabled() bool { return f.enabled }

// String returns the string representation of the feature flag.
func (f *featureFlag) String() string { return fmt.Sprintf("%s=%t", f.name, f.enabled) }

// Set parses and stores features from a string like "feature1=true,feature2=false".
func Set(features string) {
	for _, entry := range strings.Split(strings.TrimSpace(features), ",") {
		if len(entry) == 0 {
			continue
		}

		parts := strings.SplitN(entry, "=", 2)
		name := strings.TrimSpace(parts[0])

		// A bare name (no "=value") means enabled.
		enabled := true
		if len(parts) > 1 {
			// Parse errors are deliberately ignored; an unparsable value
			// resolves to `false`.
			enabled, _ = strconv.ParseBool(strings.TrimSpace(parts[1]))
		}

		New(name, enabled)
	}
}

// Get returns a copy of a specific feature flag by name; an unknown name
// yields a disabled flag.
func Get(name string) FeatureFlag {
	flagsMutex.Lock()
	defer flagsMutex.Unlock()

	var enabled bool
	if f, ok := flags[name]; ok {
		enabled = f.Enabled()
	}

	return &featureFlag{
		name:    name,
		enabled: enabled,
	}
}
+func All() FeatureFlags { + flagsMutex.Lock() + defer flagsMutex.Unlock() + + features := make(FeatureFlags, 0, len(flags)) + for name, flag := range flags { + features = append(features, &featureFlag{ + name: name, + enabled: flag.Enabled(), + }) + } + + return features +} + +// FeatureFlags defines a list of feature flags. +type FeatureFlags []FeatureFlag + +// String returns the string representation of a list of feature flags. +func (f FeatureFlags) String() string { + features := make([]string, len(f)) + for i, ff := range f { + features[i] = ff.String() + } + return strings.Join(features, ",") +} diff --git a/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/featureflag/features.go b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/featureflag/features.go new file mode 100644 index 000000000000..04a5e132ff67 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/featureflag/features.go @@ -0,0 +1,24 @@ +package featureflag + +import "os" + +// Default features. +var ( + // Toggle the usage of merging credentials in chain provider. + // + // This feature allows users to configure their credentials using multiple + // providers. For example, a token can be statically configured using a file, + // while the account can be dynamically configured via environment variables. + MergeCredentialsChain = New("MergeCredentialsChain", false) +) + +// EnvVar is the name of the environment variable to read feature flags from. +// The value should be a comma-separated list of K=V flags, while V is optional. +const EnvVar = "SPOTINST_FEATURE_FLAGS" + +// setFromEnv reads an environment variable and sets features from its value. 
+func setFromEnv() { Set(os.Getenv(EnvVar)) } + +func init() { + setFromEnv() +} diff --git a/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/log/log.go b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/log/log.go new file mode 100644 index 000000000000..f207ac14f7fa --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/log/log.go @@ -0,0 +1,25 @@ +package log + +import ( + "log" + "os" +) + +// DefaultStdLogger represents the default logger which will write log messages +// to stdout, and use same formatting runes as the stdlib log.Logger. +var DefaultStdLogger Logger = log.New(os.Stderr, "", log.LstdFlags) + +// Logger specifies the interface for all log operations. +type Logger interface { + Printf(format string, args ...interface{}) +} + +// The LoggerFunc type is an adapter to allow the use of ordinary functions as +// Logger. If f is a function with the appropriate signature, LoggerFunc(f) is +// a Logger that calls f. +type LoggerFunc func(format string, args ...interface{}) + +// Printf calls f(format, args). +func (f LoggerFunc) Printf(format string, args ...interface{}) { + f(format, args...) +} diff --git a/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/session/session.go b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/session/session.go new file mode 100644 index 000000000000..1911a4553185 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/session/session.go @@ -0,0 +1,22 @@ +package session + +import ( + "github.com/spotinst/spotinst-sdk-go/spotinst" +) + +// A Session provides a central location to create service clients. +// +// Sessions are safe to create service clients concurrently, but it is not safe +// to mutate the Session concurrently. +type Session struct { + Config *spotinst.Config +} + +// New creates a new instance of Session. 
Once the Session is created it +// can be mutated to modify the Config. The Session is safe to be read +// concurrently, but it should not be written to concurrently. +func New(cfgs ...*spotinst.Config) *Session { + s := &Session{Config: spotinst.DefaultConfig()} + s.Config.Merge(cfgs...) + return s +} diff --git a/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/types.go b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/types.go new file mode 100644 index 000000000000..2c10b87c0a7a --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/types.go @@ -0,0 +1,357 @@ +package spotinst + +import "time" + +// String returns a pointer to of the string value passed in. +func String(v string) *string { + return &v +} + +// StringValue returns the value of the string pointer passed in or +// "" if the pointer is nil. +func StringValue(v *string) string { + if v != nil { + return *v + } + return "" +} + +// StringSlice converts a slice of string values into a slice of +// string pointers +func StringSlice(src []string) []*string { + dst := make([]*string, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// StringValueSlice converts a slice of string pointers into a slice of +// string values +func StringValueSlice(src []*string) []string { + dst := make([]string, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// StringMap converts a string map of string values into a string +// map of string pointers +func StringMap(src map[string]string) map[string]*string { + dst := make(map[string]*string) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// StringValueMap converts a string map of string pointers into a string +// map of string values +func StringValueMap(src map[string]*string) map[string]string { + dst := make(map[string]string) + for k, val := range src { + if 
val != nil { + dst[k] = *val + } + } + return dst +} + +// Bool returns a pointer to of the bool value passed in. +func Bool(v bool) *bool { + return &v +} + +// BoolValue returns the value of the bool pointer passed in or +// false if the pointer is nil. +func BoolValue(v *bool) bool { + if v != nil { + return *v + } + return false +} + +// BoolSlice converts a slice of bool values into a slice of +// bool pointers +func BoolSlice(src []bool) []*bool { + dst := make([]*bool, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// BoolValueSlice converts a slice of bool pointers into a slice of +// bool values +func BoolValueSlice(src []*bool) []bool { + dst := make([]bool, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// BoolMap converts a string map of bool values into a string +// map of bool pointers +func BoolMap(src map[string]bool) map[string]*bool { + dst := make(map[string]*bool) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// BoolValueMap converts a string map of bool pointers into a string +// map of bool values +func BoolValueMap(src map[string]*bool) map[string]bool { + dst := make(map[string]bool) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int returns a pointer to of the int value passed in. +func Int(v int) *int { + return &v +} + +// IntValue returns the value of the int pointer passed in or +// 0 if the pointer is nil. +func IntValue(v *int) int { + if v != nil { + return *v + } + return 0 +} + +// IntSlice converts a slice of int values into a slice of +// int pointers. +func IntSlice(src []int) []*int { + dst := make([]*int, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// IntValueSlice converts a slice of int pointers into a slice of +// int values. 
// IntValueSlice converts a slice of int pointers into a slice of int values;
// nil entries become 0.
func IntValueSlice(src []*int) []int {
	dst := make([]int, len(src))
	for i := range src {
		if src[i] != nil {
			dst[i] = *src[i]
		}
	}
	return dst
}

// IntMap converts a string map of int values into a string map of int
// pointers.
func IntMap(src map[string]int) map[string]*int {
	dst := make(map[string]*int, len(src))
	for k, v := range src {
		v := v // take the address of a per-iteration copy
		dst[k] = &v
	}
	return dst
}

// IntValueMap converts a string map of int pointers into a string map of int
// values; nil entries are dropped.
func IntValueMap(src map[string]*int) map[string]int {
	dst := make(map[string]int, len(src))
	for k, v := range src {
		if v != nil {
			dst[k] = *v
		}
	}
	return dst
}

// Int64 returns a pointer to the int64 value passed in.
func Int64(v int64) *int64 { return &v }

// Int64Value returns the value of the int64 pointer passed in or 0 if the
// pointer is nil.
func Int64Value(v *int64) int64 {
	if v == nil {
		return 0
	}
	return *v
}

// Int64Slice converts a slice of int64 values into a slice of int64 pointers.
func Int64Slice(src []int64) []*int64 {
	dst := make([]*int64, len(src))
	for i := range src {
		dst[i] = &src[i]
	}
	return dst
}

// Int64ValueSlice converts a slice of int64 pointers into a slice of int64
// values; nil entries become 0.
func Int64ValueSlice(src []*int64) []int64 {
	dst := make([]int64, len(src))
	for i := range src {
		if src[i] != nil {
			dst[i] = *src[i]
		}
	}
	return dst
}

// Int64Map converts a string map of int64 values into a string map of int64
// pointers.
func Int64Map(src map[string]int64) map[string]*int64 {
	dst := make(map[string]*int64, len(src))
	for k, v := range src {
		v := v
		dst[k] = &v
	}
	return dst
}

// Int64ValueMap converts a string map of int64 pointers into a string map of
// int64 values; nil entries are dropped.
func Int64ValueMap(src map[string]*int64) map[string]int64 {
	dst := make(map[string]int64, len(src))
	for k, v := range src {
		if v != nil {
			dst[k] = *v
		}
	}
	return dst
}

// Float64 returns a pointer to the float64 value passed in.
func Float64(v float64) *float64 { return &v }

// Float64Value returns the value of the float64 pointer passed in or 0 if
// the pointer is nil.
func Float64Value(v *float64) float64 {
	if v == nil {
		return 0
	}
	return *v
}

// Float64Slice converts a slice of float64 values into a slice of float64
// pointers.
func Float64Slice(src []float64) []*float64 {
	dst := make([]*float64, len(src))
	for i := range src {
		dst[i] = &src[i]
	}
	return dst
}

// Float64ValueSlice converts a slice of float64 pointers into a slice of
// float64 values; nil entries become 0.
func Float64ValueSlice(src []*float64) []float64 {
	dst := make([]float64, len(src))
	for i := range src {
		if src[i] != nil {
			dst[i] = *src[i]
		}
	}
	return dst
}

// Float64Map converts a string map of float64 values into a string map of
// float64 pointers.
func Float64Map(src map[string]float64) map[string]*float64 {
	dst := make(map[string]*float64, len(src))
	for k, v := range src {
		v := v
		dst[k] = &v
	}
	return dst
}

// Float64ValueMap converts a string map of float64 pointers into a string
// map of float64 values; nil entries are dropped.
func Float64ValueMap(src map[string]*float64) map[string]float64 {
	dst := make(map[string]float64, len(src))
	for k, v := range src {
		if v != nil {
			dst[k] = *v
		}
	}
	return dst
}

// Time returns a pointer to the time.Time value passed in.
func Time(v time.Time) *time.Time { return &v }
// TimeValue returns the value of the time.Time pointer passed in or
// time.Time{} if the pointer is nil.
func TimeValue(v *time.Time) time.Time {
	if v == nil {
		return time.Time{}
	}
	return *v
}

// TimeSlice converts a slice of time.Time values into a slice of time.Time
// pointers.
func TimeSlice(src []time.Time) []*time.Time {
	dst := make([]*time.Time, len(src))
	for i := range src {
		dst[i] = &src[i]
	}
	return dst
}

// TimeValueSlice converts a slice of time.Time pointers into a slice of
// time.Time values; nil entries become the zero time.
func TimeValueSlice(src []*time.Time) []time.Time {
	dst := make([]time.Time, len(src))
	for i := range src {
		if src[i] != nil {
			dst[i] = *src[i]
		}
	}
	return dst
}

// TimeMap converts a string map of time.Time values into a string map of
// time.Time pointers.
func TimeMap(src map[string]time.Time) map[string]*time.Time {
	dst := make(map[string]*time.Time, len(src))
	for k, v := range src {
		v := v // take the address of a per-iteration copy
		dst[k] = &v
	}
	return dst
}

// TimeValueMap converts a string map of time.Time pointers into a string map
// of time.Time values; nil entries are dropped.
func TimeValueMap(src map[string]*time.Time) map[string]time.Time {
	dst := make(map[string]time.Time, len(src))
	for k, v := range src {
		if v != nil {
			dst[k] = *v
		}
	}
	return dst
}
// MarshalJSON returns a JSON encoding of schema containing only selected fields.
// A field is selected if any of the following is true:
// - it has a non-empty value
// - its field name is present in forceSendFields and it is not a nil pointer or nil interface
// - its field name is present in nullFields.
//
// The JSON key for each selected field is taken from the field's json: struct tag.
func MarshalJSON(schema interface{}, forceSendFields, nullFields []string) ([]byte, error) {
	// Index both field-name lists as sets for O(1) membership checks.
	mustInclude := make(map[string]struct{})
	for _, f := range forceSendFields {
		mustInclude[f] = struct{}{}
	}

	useNull := make(map[string]struct{})
	for _, f := range nullFields {
		useNull[f] = struct{}{}
	}

	dataMap, err := schemaToMap(schema, mustInclude, useNull)
	if err != nil {
		return nil, err
	}

	return json.Marshal(dataMap)
}

// schemaToMap walks the struct fields of schema and builds the map that is
// ultimately marshaled, honoring the mustInclude/useNull sets and each
// field's json struct tag. Embedded (anonymous) fields are marshaled
// recursively and their keys merged into the result.
func schemaToMap(schema interface{}, mustInclude, useNull map[string]struct{}) (map[string]interface{}, error) {
	m := make(map[string]interface{})
	s := reflect.ValueOf(schema)
	st := s.Type()

	for i := 0; i < s.NumField(); i++ {
		sv := s.Field(i)
		sf := st.Field(i)

		// A non-empty PkgPath marks an unexported field.
		isUnexported := sf.PkgPath != ""
		if sf.Anonymous {
			t := sf.Type
			if t.Kind() == reflect.Ptr {
				t = t.Elem()
			}
			if isUnexported && t.Kind() != reflect.Struct {
				// Ignore embedded fields of unexported non-struct types.
				continue
			}

			// Allow access to unexported fields by creating an addressable copy.
			sfe := reflect.New(sf.Type).Elem()
			sfe.Set(sv)

			// Get a copy of `forceSendFields` slice.
			var forceSendFields []string
			if f := sfe.FieldByName("forceSendFields"); f.IsValid() {
				// reflect.NewAt + unsafe.Pointer bypasses the reflect
				// package's read restriction on unexported fields.
				i := reflect.NewAt(f.Type(), unsafe.Pointer(f.UnsafeAddr())).Elem().Interface()
				if v, ok := i.([]string); ok {
					forceSendFields = v
				}
			}

			// Get a copy of `nullFields` slice.
			var nullFields []string
			if f := sfe.FieldByName("nullFields"); f.IsValid() {
				i := reflect.NewAt(f.Type(), unsafe.Pointer(f.UnsafeAddr())).Elem().Interface()
				if v, ok := i.([]string); ok {
					nullFields = v
				}
			}

			// Marshal the embedded field.
			b, err := MarshalJSON(sv.Interface(), forceSendFields, nullFields)
			if err != nil {
				return nil, fmt.Errorf("failed to marshal anonymous field %q: %v", sf.Name, err)
			}

			// Append all fields to the output map. The Unmarshal error is
			// deliberately ignored: b was produced by json.Marshal above.
			im := make(map[string]interface{})
			json.Unmarshal(b, &im)
			for k, v := range im {
				m[k] = v
			}

			// Nothing else to do.
			continue
		} else if isUnexported {
			// Ignore unexported non-embedded fields.
			continue
		}

		jsonTag := sf.Tag.Get("json")
		if jsonTag == "" {
			continue
		}

		tag, err := parseJSONTag(jsonTag)
		if err != nil {
			return nil, err
		}
		if tag.ignore {
			continue
		}

		if _, ok := useNull[sf.Name]; ok {
			// Fields selected for nulling must not carry a value too.
			if !isEmptyValue(sv) {
				return nil, fmt.Errorf("field %q in `nullFields` has non-empty value", sf.Name)
			}
			m[tag.apiName] = nil
			continue
		}
		if !includeField(sv, sf, mustInclude) {
			continue
		}

		// nil maps are treated as empty maps.
		if sf.Type.Kind() == reflect.Map && sv.IsNil() {
			m[tag.apiName] = map[string]string{}
			continue
		}

		// nil slices are treated as empty slices. The element type is
		// irrelevant: an empty slice of any type encodes as "[]".
		if sf.Type.Kind() == reflect.Slice && sv.IsNil() {
			m[tag.apiName] = []bool{}
			continue
		}

		if tag.stringFormat {
			m[tag.apiName] = formatAsString(sv, sf.Type.Kind())
		} else {
			m[tag.apiName] = sv.Interface()
		}
	}

	return m, nil
}

// formatAsString returns a string representation of v, dereferencing it first if possible.
func formatAsString(v reflect.Value, kind reflect.Kind) string {
	if kind == reflect.Ptr && !v.IsNil() {
		v = v.Elem()
	}

	return fmt.Sprintf("%v", v.Interface())
}

// jsonTag represents a restricted version of the struct tag format used by encoding/json.
// It is used to describe the JSON encoding of fields in a Schema struct.
type jsonTag struct {
	apiName      string
	stringFormat bool
	ignore       bool
}

// parseJSONTag parses a restricted version of the struct tag format used by encoding/json.
// The format of the tag must match that generated by the Schema.writeSchemaStruct method
// in the api generator.
func parseJSONTag(val string) (*jsonTag, error) {
	if val == "-" {
		return &jsonTag{ignore: true}, nil
	}

	i := strings.Index(val, ",")
	if i == -1 || val[:i] == "" {
		return nil, fmt.Errorf("malformed json tag: %s", val)
	}

	tag := &jsonTag{
		apiName: val[:i],
	}

	switch val[i+1:] {
	case "omitempty":
		// Plain "omitempty" needs no extra handling.
	case "omitempty,string":
		tag.stringFormat = true
	default:
		return nil, fmt.Errorf("malformed json tag: %s", val)
	}

	return tag, nil
}

// Reports whether the struct field "f" with value "v" should be included in JSON output.
func includeField(v reflect.Value, f reflect.StructField, mustInclude map[string]struct{}) bool {
	// The regular JSON encoding of a nil pointer is "null", which means "delete this field".
	// Therefore, we could enable field deletion by honoring pointer fields' presence in the mustInclude set.
	// However, many fields are not pointers, so there would be no way to delete these fields.
	// Rather than partially supporting field deletion, we ignore mustInclude for nil pointer fields.
	// Deletion will be handled by a separate mechanism.
	if f.Type.Kind() == reflect.Ptr && v.IsNil() {
		return false
	}

	// The "any" type is represented as an interface{}. If this interface
	// is nil, there is no reasonable representation to send. We ignore
	// these fields, for the same reasons as given above for pointers.
	if f.Type.Kind() == reflect.Interface && v.IsNil() {
		return false
	}

	_, ok := mustInclude[f.Name]
	return ok || !isEmptyValue(v)
}

// isEmptyValue reports whether v is the empty value for its type.
This +// implementation is based on that of the encoding/json package, but its +// correctness does not depend on it being identical. What's important is that +// this function return false in situations where v should not be sent as part +// of a PATCH operation. +func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} diff --git a/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/stringutil/stringutil.go b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/stringutil/stringutil.go new file mode 100644 index 000000000000..87d89e7f6afb --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/stringutil/stringutil.go @@ -0,0 +1,69 @@ +package stringutil + +import ( + "bytes" + "fmt" + "io" + "reflect" +) + +// Stringify attempts to create a reasonable string representation of types. +// It does things like resolve pointers to their values and omits struct +// fields with nil values. +func Stringify(message interface{}) string { + var buf bytes.Buffer + v := reflect.ValueOf(message) + stringifyValue(&buf, v) + return buf.String() +} + +// stringifyValue was heavily inspired by the goprotobuf library. 
// stringifyValue was heavily inspired by the goprotobuf library.
//
// It writes a readable rendering of val to w: pointers are dereferenced,
// strings are quoted, slices render as "[a b c]", and named structs render as
// pkg.Type{Field:value, ...} with nil pointer/slice fields omitted.
func stringifyValue(w io.Writer, val reflect.Value) {
	if val.Kind() == reflect.Ptr && val.IsNil() {
		// NOTE(review): this copy renders a nil pointer as an empty string;
		// verify against the vendored original — the angle-bracketed "nil"
		// marker may have been lost in transit.
		w.Write([]byte(""))
		return
	}
	v := reflect.Indirect(val)
	switch v.Kind() {
	case reflect.String:
		fmt.Fprintf(w, `"%s"`, v)
	case reflect.Slice:
		w.Write([]byte{'['})
		for i := 0; i < v.Len(); i++ {
			if i > 0 {
				w.Write([]byte{' '})
			}
			stringifyValue(w, v.Index(i))
		}
		w.Write([]byte{']'})
		return
	case reflect.Struct:
		// Only named types carry a printable type prefix.
		if v.Type().Name() != "" {
			w.Write([]byte(v.Type().String()))
		}
		w.Write([]byte{'{'})
		var sep bool
		for i := 0; i < v.NumField(); i++ {
			fv := v.Field(i)
			// Skip nil pointer and nil slice fields entirely.
			if fv.Kind() == reflect.Ptr && fv.IsNil() {
				continue
			}
			if fv.Kind() == reflect.Slice && fv.IsNil() {
				continue
			}
			if sep {
				w.Write([]byte(", "))
			} else {
				sep = true
			}
			w.Write([]byte(v.Type().Field(i).Name))
			w.Write([]byte{':'})
			stringifyValue(w, fv)
		}
		w.Write([]byte{'}'})
	default:
		// Scalars and everything else fall through to fmt.
		if v.CanInterface() {
			fmt.Fprint(w, v.Interface())
		}
	}
}

Copyright (c) 2013 Joshua Tacoma

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/uritemplates/uritemplates.go b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/uritemplates/uritemplates.go new file mode 100644 index 000000000000..6f2e00ab20dc --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/uritemplates/uritemplates.go @@ -0,0 +1,360 @@ +// Copyright 2013 Joshua Tacoma. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package uritemplates is a level 4 implementation of RFC 6570 (URI +// Template, http://tools.ietf.org/html/rfc6570). 
+// +// To use uritemplates, parse a template string and expand it with a value +// map: +// +// template, _ := uritemplates.Parse("https://api.github.com/repos{/user,repo}") +// values := make(map[string]interface{}) +// values["user"] = "jtacoma" +// values["repo"] = "uritemplates" +// expanded, _ := template.ExpandString(values) +// fmt.Printf(expanded) +package uritemplates + +import ( + "bytes" + "errors" + "fmt" + "reflect" + "regexp" + "strconv" + "strings" +) + +var ( + unreserved = regexp.MustCompile("[^A-Za-z0-9\\-._~]") + reserved = regexp.MustCompile("[^A-Za-z0-9\\-._~:/?#[\\]@!$&'()*+,;=]") + validname = regexp.MustCompile("^([A-Za-z0-9_\\.]|%[0-9A-Fa-f][0-9A-Fa-f])+$") + hex = []byte("0123456789ABCDEF") +) + +func pctEncode(src []byte) []byte { + dst := make([]byte, len(src)*3) + for i, b := range src { + buf := dst[i*3 : i*3+3] + buf[0] = 0x25 + buf[1] = hex[b/16] + buf[2] = hex[b%16] + } + return dst +} + +func escape(s string, allowReserved bool) (escaped string) { + if allowReserved { + escaped = string(reserved.ReplaceAllFunc([]byte(s), pctEncode)) + } else { + escaped = string(unreserved.ReplaceAllFunc([]byte(s), pctEncode)) + } + return escaped +} + +type Values map[string]interface{} + +// A Template is a parsed representation of a URI template. +type Template struct { + raw string + parts []templatePart +} + +// Parse parses a URI template string into a Template object. 
+func Parse(rawtemplate string) (template *Template, err error) { + template = new(Template) + template.raw = rawtemplate + split := strings.Split(rawtemplate, "{") + template.parts = make([]templatePart, len(split)*2-1) + for i, s := range split { + if i == 0 { + if strings.Contains(s, "}") { + err = errors.New("unexpected }") + break + } + template.parts[i].raw = s + } else { + subsplit := strings.Split(s, "}") + if len(subsplit) != 2 { + err = errors.New("malformed template") + break + } + expression := subsplit[0] + template.parts[i*2-1], err = parseExpression(expression) + if err != nil { + break + } + template.parts[i*2].raw = subsplit[1] + } + } + if err != nil { + template = nil + } + return template, err +} + +type templatePart struct { + raw string + terms []templateTerm + first string + sep string + named bool + ifemp string + allowReserved bool +} + +type templateTerm struct { + name string + explode bool + truncate int +} + +func parseExpression(expression string) (result templatePart, err error) { + switch expression[0] { + case '+': + result.sep = "," + result.allowReserved = true + expression = expression[1:] + case '.': + result.first = "." + result.sep = "." + expression = expression[1:] + case '/': + result.first = "/" + result.sep = "/" + expression = expression[1:] + case ';': + result.first = ";" + result.sep = ";" + result.named = true + expression = expression[1:] + case '?': + result.first = "?" 
+ result.sep = "&" + result.named = true + result.ifemp = "=" + expression = expression[1:] + case '&': + result.first = "&" + result.sep = "&" + result.named = true + result.ifemp = "=" + expression = expression[1:] + case '#': + result.first = "#" + result.sep = "," + result.allowReserved = true + expression = expression[1:] + default: + result.sep = "," + } + rawterms := strings.Split(expression, ",") + result.terms = make([]templateTerm, len(rawterms)) + for i, raw := range rawterms { + result.terms[i], err = parseTerm(raw) + if err != nil { + break + } + } + return result, err +} + +func parseTerm(term string) (result templateTerm, err error) { + if strings.HasSuffix(term, "*") { + result.explode = true + term = term[:len(term)-1] + } + split := strings.Split(term, ":") + if len(split) == 1 { + result.name = term + } else if len(split) == 2 { + result.name = split[0] + var parsed int64 + parsed, err = strconv.ParseInt(split[1], 10, 0) + result.truncate = int(parsed) + } else { + err = errors.New("multiple colons in same term") + } + if !validname.MatchString(result.name) { + err = errors.New("not a valid name: " + result.name) + } + if result.explode && result.truncate > 0 { + err = errors.New("both explode and prefix modifers on same term") + } + return result, err +} + +// Expand expands a URI template with a set of values to produce a string. 
+func (self *Template) Expand(value interface{}) (string, error) { + values, ismap := value.(Values) + if !ismap { + if m, ismap := struct2map(value); !ismap { + return "", errors.New("expected Values, struct, or pointer to struct") + } else { + return self.Expand(m) + } + } + var buf bytes.Buffer + for _, p := range self.parts { + err := p.expand(&buf, values) + if err != nil { + return "", err + } + } + return buf.String(), nil +} + +func (self *templatePart) expand(buf *bytes.Buffer, values Values) error { + if len(self.raw) > 0 { + buf.WriteString(self.raw) + return nil + } + var zeroLen = buf.Len() + buf.WriteString(self.first) + var firstLen = buf.Len() + for _, term := range self.terms { + value, exists := values[term.name] + if !exists { + continue + } + if buf.Len() != firstLen { + buf.WriteString(self.sep) + } + switch v := value.(type) { + case string: + self.expandString(buf, term, v) + case []interface{}: + self.expandArray(buf, term, v) + case map[string]interface{}: + if term.truncate > 0 { + return errors.New("cannot truncate a map expansion") + } + self.expandMap(buf, term, v) + default: + if m, ismap := struct2map(value); ismap { + if term.truncate > 0 { + return errors.New("cannot truncate a map expansion") + } + self.expandMap(buf, term, m) + } else { + str := fmt.Sprintf("%v", value) + self.expandString(buf, term, str) + } + } + } + if buf.Len() == firstLen { + original := buf.Bytes()[:zeroLen] + buf.Reset() + buf.Write(original) + } + return nil +} + +func (self *templatePart) expandName(buf *bytes.Buffer, name string, empty bool) { + if self.named { + buf.WriteString(name) + if empty { + buf.WriteString(self.ifemp) + } else { + buf.WriteString("=") + } + } +} + +func (self *templatePart) expandString(buf *bytes.Buffer, t templateTerm, s string) { + if len(s) > t.truncate && t.truncate > 0 { + s = s[:t.truncate] + } + self.expandName(buf, t.name, len(s) == 0) + buf.WriteString(escape(s, self.allowReserved)) +} + +func (self *templatePart) 
expandArray(buf *bytes.Buffer, t templateTerm, a []interface{}) { + if len(a) == 0 { + return + } else if !t.explode { + self.expandName(buf, t.name, false) + } + for i, value := range a { + if t.explode && i > 0 { + buf.WriteString(self.sep) + } else if i > 0 { + buf.WriteString(",") + } + var s string + switch v := value.(type) { + case string: + s = v + default: + s = fmt.Sprintf("%v", v) + } + if len(s) > t.truncate && t.truncate > 0 { + s = s[:t.truncate] + } + if self.named && t.explode { + self.expandName(buf, t.name, len(s) == 0) + } + buf.WriteString(escape(s, self.allowReserved)) + } +} + +func (self *templatePart) expandMap(buf *bytes.Buffer, t templateTerm, m map[string]interface{}) { + if len(m) == 0 { + return + } + if !t.explode { + self.expandName(buf, t.name, len(m) == 0) + } + var firstLen = buf.Len() + for k, value := range m { + if firstLen != buf.Len() { + if t.explode { + buf.WriteString(self.sep) + } else { + buf.WriteString(",") + } + } + var s string + switch v := value.(type) { + case string: + s = v + default: + s = fmt.Sprintf("%v", v) + } + if t.explode { + buf.WriteString(escape(k, self.allowReserved)) + buf.WriteRune('=') + buf.WriteString(escape(s, self.allowReserved)) + } else { + buf.WriteString(escape(k, self.allowReserved)) + buf.WriteRune(',') + buf.WriteString(escape(s, self.allowReserved)) + } + } +} + +func struct2map(v interface{}) (map[string]interface{}, bool) { + value := reflect.ValueOf(v) + switch value.Type().Kind() { + case reflect.Ptr: + return struct2map(value.Elem().Interface()) + case reflect.Struct: + m := make(map[string]interface{}) + for i := 0; i < value.NumField(); i++ { + tag := value.Type().Field(i).Tag + var name string + if strings.Contains(string(tag), ":") { + name = tag.Get("uri") + } else { + name = strings.TrimSpace(string(tag)) + } + if len(name) == 0 { + name = value.Type().Field(i).Name + } + m[name] = value.Field(i).Interface() + } + return m, true + } + return nil, false +} diff --git 
a/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/uritemplates/utils.go b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/uritemplates/utils.go new file mode 100644 index 000000000000..bde51c857bf6 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/uritemplates/utils.go @@ -0,0 +1,9 @@ +package uritemplates + +func Expand(path string, values Values) (string, error) { + template, err := Parse(path) + if err != nil { + return "", err + } + return template.Expand(values) +} diff --git a/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/useragent/useragent.go b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/useragent/useragent.go new file mode 100644 index 000000000000..7fd9561dfe6d --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/useragent/useragent.go @@ -0,0 +1,50 @@ +package useragent + +import ( + "fmt" + "strings" +) + +// UserAgent represents a User-Agent header. +type UserAgent struct { + // Product identifier; its name or development codename. + Product string `json:"product"` + // Version number of the product. + Version string `json:"version"` + // Zero or more comments containing more details. + Comment []string `json:"comment"` +} + +// UserAgents represents one or more UserAgents. +type UserAgents []UserAgent + +// New returns a UserAgent. +func New(product, version string, comment ...string) UserAgent { + return UserAgent{ + Product: product, + Version: version, + Comment: comment, + } +} + +// String returns the string representation of UserAgent. +func (ua UserAgent) String() string { + s := fmt.Sprintf("%s/%s", ua.Product, ua.Version) + + if len(ua.Comment) > 0 { + s += fmt.Sprintf(" (%s)", strings.Join(ua.Comment, "; ")) + } + + return s +} + +// String concatenates all the user-defined UserAgents. 
+func (uas UserAgents) String() string { + ss := make([]string, len(uas)) + + for i, ua := range uas { + ss[i] = ua.String() + } + + return strings.Join(ss, " ") +} diff --git a/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/version.go b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/version.go new file mode 100644 index 000000000000..166f3f233ac0 --- /dev/null +++ b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/version.go @@ -0,0 +1,7 @@ +package spotinst + +// SDKVersion is the current version of the SDK. +const SDKVersion = "1.357.0" + +// SDKName is the name of the SDK. +const SDKName = "spotinst-sdk-go" diff --git a/cluster-autoscaler/vendor/github.com/stretchr/objx/README.md b/cluster-autoscaler/vendor/github.com/stretchr/objx/README.md index 246660b21a9f..78dc1f8b03ed 100644 --- a/cluster-autoscaler/vendor/github.com/stretchr/objx/README.md +++ b/cluster-autoscaler/vendor/github.com/stretchr/objx/README.md @@ -4,20 +4,20 @@ [![Maintainability](https://api.codeclimate.com/v1/badges/1d64bc6c8474c2074f2b/maintainability)](https://codeclimate.com/github/stretchr/objx/maintainability) [![Test Coverage](https://api.codeclimate.com/v1/badges/1d64bc6c8474c2074f2b/test_coverage)](https://codeclimate.com/github/stretchr/objx/test_coverage) [![Sourcegraph](https://sourcegraph.com/github.com/stretchr/objx/-/badge.svg)](https://sourcegraph.com/github.com/stretchr/objx) -[![GoDoc](https://godoc.org/github.com/stretchr/objx?status.svg)](https://godoc.org/github.com/stretchr/objx) +[![GoDoc](https://pkg.go.dev/badge/github.com/stretchr/objx?utm_source=godoc)](https://pkg.go.dev/github.com/stretchr/objx) Objx - Go package for dealing with maps, slices, JSON and other data. 
Get started: - Install Objx with [one line of code](#installation), or [update it with another](#staying-up-to-date) -- Check out the API Documentation http://godoc.org/github.com/stretchr/objx +- Check out the API Documentation http://pkg.go.dev/github.com/stretchr/objx ## Overview Objx provides the `objx.Map` type, which is a `map[string]interface{}` that exposes a powerful `Get` method (among others) that allows you to easily and quickly get access to data within the map, without having to worry too much about type assertions, missing data, default values etc. ### Pattern -Objx uses a preditable pattern to make access data from within `map[string]interface{}` easy. Call one of the `objx.` functions to create your `objx.Map` to get going: +Objx uses a predictable pattern to make access data from within `map[string]interface{}` easy. Call one of the `objx.` functions to create your `objx.Map` to get going: m, err := objx.FromJSON(json) @@ -74,7 +74,7 @@ To update Objx to the latest version, run: go get -u github.com/stretchr/objx ### Supported go versions -We support the lastest three major Go versions, which are 1.10, 1.11 and 1.12 at the moment. +We currently support the three recent major Go versions. ## Contributing Please feel free to submit issues, fork the repository and send pull requests! 
diff --git a/cluster-autoscaler/vendor/github.com/stretchr/objx/Taskfile.yml b/cluster-autoscaler/vendor/github.com/stretchr/objx/Taskfile.yml index 7746f516da20..8a79e8d674c0 100644 --- a/cluster-autoscaler/vendor/github.com/stretchr/objx/Taskfile.yml +++ b/cluster-autoscaler/vendor/github.com/stretchr/objx/Taskfile.yml @@ -1,7 +1,4 @@ -version: '2' - -env: - GOFLAGS: -mod=vendor +version: '3' tasks: default: diff --git a/cluster-autoscaler/vendor/github.com/stretchr/objx/accessors.go b/cluster-autoscaler/vendor/github.com/stretchr/objx/accessors.go index 4c6045588637..72f1d1c1ce3d 100644 --- a/cluster-autoscaler/vendor/github.com/stretchr/objx/accessors.go +++ b/cluster-autoscaler/vendor/github.com/stretchr/objx/accessors.go @@ -14,17 +14,17 @@ const ( // For example, `location.address.city` PathSeparator string = "." - // arrayAccesRegexString is the regex used to extract the array number + // arrayAccessRegexString is the regex used to extract the array number // from the access path - arrayAccesRegexString = `^(.+)\[([0-9]+)\]$` + arrayAccessRegexString = `^(.+)\[([0-9]+)\]$` // mapAccessRegexString is the regex used to extract the map key // from the access path mapAccessRegexString = `^([^\[]*)\[([^\]]+)\](.*)$` ) -// arrayAccesRegex is the compiled arrayAccesRegexString -var arrayAccesRegex = regexp.MustCompile(arrayAccesRegexString) +// arrayAccessRegex is the compiled arrayAccessRegexString +var arrayAccessRegex = regexp.MustCompile(arrayAccessRegexString) // mapAccessRegex is the compiled mapAccessRegexString var mapAccessRegex = regexp.MustCompile(mapAccessRegexString) @@ -37,11 +37,11 @@ var mapAccessRegex = regexp.MustCompile(mapAccessRegexString) // // Get can only operate directly on map[string]interface{} and []interface. 
// -// Example +// # Example // // To access the title of the third chapter of the second book, do: // -// o.Get("books[1].chapters[2].title") +// o.Get("books[1].chapters[2].title") func (m Map) Get(selector string) *Value { rawObj := access(m, selector, nil, false) return &Value{data: rawObj} @@ -52,26 +52,26 @@ func (m Map) Get(selector string) *Value { // // Set can only operate directly on map[string]interface{} and []interface // -// Example +// # Example // // To set the title of the third chapter of the second book, do: // -// o.Set("books[1].chapters[2].title","Time to Go") +// o.Set("books[1].chapters[2].title","Time to Go") func (m Map) Set(selector string, value interface{}) Map { access(m, selector, value, true) return m } -// getIndex returns the index, which is hold in s by two braches. -// It also returns s withour the index part, e.g. name[1] will return (1, name). +// getIndex returns the index, which is hold in s by two branches. +// It also returns s without the index part, e.g. name[1] will return (1, name). 
// If no index is found, -1 is returned func getIndex(s string) (int, string) { - arrayMatches := arrayAccesRegex.FindStringSubmatch(s) + arrayMatches := arrayAccessRegex.FindStringSubmatch(s) if len(arrayMatches) > 0 { // Get the key into the map selector := arrayMatches[1] // Get the index into the array at the key - // We know this cannt fail because arrayMatches[2] is an int for sure + // We know this can't fail because arrayMatches[2] is an int for sure index, _ := strconv.Atoi(arrayMatches[2]) return index, selector } diff --git a/cluster-autoscaler/vendor/github.com/stretchr/objx/conversions.go b/cluster-autoscaler/vendor/github.com/stretchr/objx/conversions.go index 080aa46e4723..01c63d7d3bbf 100644 --- a/cluster-autoscaler/vendor/github.com/stretchr/objx/conversions.go +++ b/cluster-autoscaler/vendor/github.com/stretchr/objx/conversions.go @@ -15,7 +15,7 @@ import ( const SignatureSeparator = "_" // URLValuesSliceKeySuffix is the character that is used to -// specify a suffic for slices parsed by URLValues. +// specify a suffix for slices parsed by URLValues. // If the suffix is set to "[i]", then the index of the slice // is used in place of i // Ex: Suffix "[]" would have the form a[]=b&a[]=c @@ -30,7 +30,7 @@ const ( ) // SetURLValuesSliceKeySuffix sets the character that is used to -// specify a suffic for slices parsed by URLValues. +// specify a suffix for slices parsed by URLValues. // If the suffix is set to "[i]", then the index of the slice // is used in place of i // Ex: Suffix "[]" would have the form a[]=b&a[]=c diff --git a/cluster-autoscaler/vendor/github.com/stretchr/objx/doc.go b/cluster-autoscaler/vendor/github.com/stretchr/objx/doc.go index 6d6af1a83abf..b170af74b39a 100644 --- a/cluster-autoscaler/vendor/github.com/stretchr/objx/doc.go +++ b/cluster-autoscaler/vendor/github.com/stretchr/objx/doc.go @@ -1,19 +1,19 @@ /* -Objx - Go package for dealing with maps, slices, JSON and other data. 
+Package objx provides utilities for dealing with maps, slices, JSON and other data. -Overview +# Overview Objx provides the `objx.Map` type, which is a `map[string]interface{}` that exposes a powerful `Get` method (among others) that allows you to easily and quickly get access to data within the map, without having to worry too much about type assertions, missing data, default values etc. -Pattern +# Pattern -Objx uses a preditable pattern to make access data from within `map[string]interface{}` easy. +Objx uses a predictable pattern to make access data from within `map[string]interface{}` easy. Call one of the `objx.` functions to create your `objx.Map` to get going: - m, err := objx.FromJSON(json) + m, err := objx.FromJSON(json) NOTE: Any methods or functions with the `Must` prefix will panic if something goes wrong, the rest will be optimistic and try to figure things out without panicking. @@ -21,46 +21,46 @@ the rest will be optimistic and try to figure things out without panicking. Use `Get` to access the value you're interested in. You can use dot and array notation too: - m.Get("places[0].latlng") + m.Get("places[0].latlng") Once you have sought the `Value` you're interested in, you can use the `Is*` methods to determine its type. - if m.Get("code").IsStr() { // Your code... } + if m.Get("code").IsStr() { // Your code... } Or you can just assume the type, and use one of the strong type methods to extract the real value: - m.Get("code").Int() + m.Get("code").Int() If there's no value there (or if it's the wrong type) then a default value will be returned, or you can be explicit about the default value. - Get("code").Int(-1) + Get("code").Int(-1) If you're dealing with a slice of data as a value, Objx provides many useful methods for iterating, manipulating and selecting that data. You can find out more by exploring the index below. 
-Reading data +# Reading data A simple example of how to use Objx: - // Use MustFromJSON to make an objx.Map from some JSON - m := objx.MustFromJSON(`{"name": "Mat", "age": 30}`) + // Use MustFromJSON to make an objx.Map from some JSON + m := objx.MustFromJSON(`{"name": "Mat", "age": 30}`) - // Get the details - name := m.Get("name").Str() - age := m.Get("age").Int() + // Get the details + name := m.Get("name").Str() + age := m.Get("age").Int() - // Get their nickname (or use their name if they don't have one) - nickname := m.Get("nickname").Str(name) + // Get their nickname (or use their name if they don't have one) + nickname := m.Get("nickname").Str(name) -Ranging +# Ranging Since `objx.Map` is a `map[string]interface{}` you can treat it as such. For example, to `range` the data, do what you would expect: - m := objx.MustFromJSON(json) - for key, value := range m { - // Your code... - } + m := objx.MustFromJSON(json) + for key, value := range m { + // Your code... + } */ package objx diff --git a/cluster-autoscaler/vendor/github.com/stretchr/objx/map.go b/cluster-autoscaler/vendor/github.com/stretchr/objx/map.go index a64712a08b50..ab9f9ae67cbf 100644 --- a/cluster-autoscaler/vendor/github.com/stretchr/objx/map.go +++ b/cluster-autoscaler/vendor/github.com/stretchr/objx/map.go @@ -47,17 +47,16 @@ func New(data interface{}) Map { // // The arguments follow a key, value pattern. // -// // Returns nil if any key argument is non-string or if there are an odd number of arguments. 
// -// Example +// # Example // // To easily create Maps: // -// m := objx.MSI("name", "Mat", "age", 29, "subobj", objx.MSI("active", true)) +// m := objx.MSI("name", "Mat", "age", 29, "subobj", objx.MSI("active", true)) // -// // creates an Map equivalent to -// m := objx.Map{"name": "Mat", "age": 29, "subobj": objx.Map{"active": true}} +// // creates an Map equivalent to +// m := objx.Map{"name": "Mat", "age": 29, "subobj": objx.Map{"active": true}} func MSI(keyAndValuePairs ...interface{}) Map { newMap := Map{} keyAndValuePairsLen := len(keyAndValuePairs) diff --git a/cluster-autoscaler/vendor/github.com/stretchr/testify/assert/assertion_compare.go b/cluster-autoscaler/vendor/github.com/stretchr/testify/assert/assertion_compare.go index b774da88d86c..4d4b4aad6fe8 100644 --- a/cluster-autoscaler/vendor/github.com/stretchr/testify/assert/assertion_compare.go +++ b/cluster-autoscaler/vendor/github.com/stretchr/testify/assert/assertion_compare.go @@ -28,6 +28,8 @@ var ( uint32Type = reflect.TypeOf(uint32(1)) uint64Type = reflect.TypeOf(uint64(1)) + uintptrType = reflect.TypeOf(uintptr(1)) + float32Type = reflect.TypeOf(float32(1)) float64Type = reflect.TypeOf(float64(1)) @@ -308,11 +310,11 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { case reflect.Struct: { // All structs enter here. We're not interested in most types. - if !canConvert(obj1Value, timeType) { + if !obj1Value.CanConvert(timeType) { break } - // time.Time can compared! + // time.Time can be compared! timeObj1, ok := obj1.(time.Time) if !ok { timeObj1 = obj1Value.Convert(timeType).Interface().(time.Time) @@ -328,7 +330,7 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { case reflect.Slice: { // We only care about the []byte type. 
- if !canConvert(obj1Value, bytesType) { + if !obj1Value.CanConvert(bytesType) { break } @@ -345,6 +347,26 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { return CompareType(bytes.Compare(bytesObj1, bytesObj2)), true } + case reflect.Uintptr: + { + uintptrObj1, ok := obj1.(uintptr) + if !ok { + uintptrObj1 = obj1Value.Convert(uintptrType).Interface().(uintptr) + } + uintptrObj2, ok := obj2.(uintptr) + if !ok { + uintptrObj2 = obj2Value.Convert(uintptrType).Interface().(uintptr) + } + if uintptrObj1 > uintptrObj2 { + return compareGreater, true + } + if uintptrObj1 == uintptrObj2 { + return compareEqual, true + } + if uintptrObj1 < uintptrObj2 { + return compareLess, true + } + } } return compareEqual, false diff --git a/cluster-autoscaler/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go b/cluster-autoscaler/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go deleted file mode 100644 index da867903e2fa..000000000000 --- a/cluster-autoscaler/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build go1.17 -// +build go1.17 - -// TODO: once support for Go 1.16 is dropped, this file can be -// merged/removed with assertion_compare_go1.17_test.go and -// assertion_compare_legacy.go - -package assert - -import "reflect" - -// Wrapper around reflect.Value.CanConvert, for compatibility -// reasons. 
-func canConvert(value reflect.Value, to reflect.Type) bool { - return value.CanConvert(to) -} diff --git a/cluster-autoscaler/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go b/cluster-autoscaler/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go deleted file mode 100644 index 1701af2a3c89..000000000000 --- a/cluster-autoscaler/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build !go1.17 -// +build !go1.17 - -// TODO: once support for Go 1.16 is dropped, this file can be -// merged/removed with assertion_compare_go1.17_test.go and -// assertion_compare_can_convert.go - -package assert - -import "reflect" - -// Older versions of Go does not have the reflect.Value.CanConvert -// method. -func canConvert(value reflect.Value, to reflect.Type) bool { - return false -} diff --git a/cluster-autoscaler/vendor/github.com/stretchr/testify/assert/assertion_format.go b/cluster-autoscaler/vendor/github.com/stretchr/testify/assert/assertion_format.go index 84dbd6c790b9..3ddab109ad9e 100644 --- a/cluster-autoscaler/vendor/github.com/stretchr/testify/assert/assertion_format.go +++ b/cluster-autoscaler/vendor/github.com/stretchr/testify/assert/assertion_format.go @@ -1,7 +1,4 @@ -/* -* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen -* THIS FILE MUST NOT BE EDITED BY HAND - */ +// Code generated with github.com/stretchr/testify/_codegen; DO NOT EDIT. package assert @@ -107,7 +104,7 @@ func EqualExportedValuesf(t TestingT, expected interface{}, actual interface{}, return EqualExportedValues(t, expected, actual, append([]interface{}{msg}, args...)...) } -// EqualValuesf asserts that two objects are equal or convertable to the same types +// EqualValuesf asserts that two objects are equal or convertible to the same types // and equal. 
// // assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted") @@ -616,6 +613,16 @@ func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interf return NotErrorIs(t, err, target, append([]interface{}{msg}, args...)...) } +// NotImplementsf asserts that an object does not implement the specified interface. +// +// assert.NotImplementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted") +func NotImplementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NotImplements(t, interfaceObject, object, append([]interface{}{msg}, args...)...) +} + // NotNilf asserts that the specified object is not nil. // // assert.NotNilf(t, err, "error message %s", "formatted") @@ -660,10 +667,12 @@ func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string, return NotSame(t, expected, actual, append([]interface{}{msg}, args...)...) } -// NotSubsetf asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). +// NotSubsetf asserts that the specified list(array, slice...) or map does NOT +// contain all elements given in the specified subset list(array, slice...) or +// map. // -// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") +// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted") +// assert.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -747,10 +756,11 @@ func Samef(t TestingT, expected interface{}, actual interface{}, msg string, arg return Same(t, expected, actual, append([]interface{}{msg}, args...)...) } -// Subsetf asserts that the specified list(array, slice...) 
contains all -// elements given in the specified subset(array, slice...). +// Subsetf asserts that the specified list(array, slice...) or map contains all +// elements given in the specified subset list(array, slice...) or map. // -// assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") +// assert.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted") +// assert.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() diff --git a/cluster-autoscaler/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/cluster-autoscaler/vendor/github.com/stretchr/testify/assert/assertion_forward.go index b1d94aec53cc..a84e09bd4090 100644 --- a/cluster-autoscaler/vendor/github.com/stretchr/testify/assert/assertion_forward.go +++ b/cluster-autoscaler/vendor/github.com/stretchr/testify/assert/assertion_forward.go @@ -1,7 +1,4 @@ -/* -* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen -* THIS FILE MUST NOT BE EDITED BY HAND - */ +// Code generated with github.com/stretchr/testify/_codegen; DO NOT EDIT. package assert @@ -189,7 +186,7 @@ func (a *Assertions) EqualExportedValuesf(expected interface{}, actual interface return EqualExportedValuesf(a.t, expected, actual, msg, args...) } -// EqualValues asserts that two objects are equal or convertable to the same types +// EqualValues asserts that two objects are equal or convertible to the same types // and equal. // // a.EqualValues(uint32(123), int32(123)) @@ -200,7 +197,7 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn return EqualValues(a.t, expected, actual, msgAndArgs...) 
} -// EqualValuesf asserts that two objects are equal or convertable to the same types +// EqualValuesf asserts that two objects are equal or convertible to the same types // and equal. // // a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted") @@ -1221,6 +1218,26 @@ func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...in return NotErrorIsf(a.t, err, target, msg, args...) } +// NotImplements asserts that an object does not implement the specified interface. +// +// a.NotImplements((*MyInterface)(nil), new(MyObject)) +func (a *Assertions) NotImplements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotImplements(a.t, interfaceObject, object, msgAndArgs...) +} + +// NotImplementsf asserts that an object does not implement the specified interface. +// +// a.NotImplementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted") +func (a *Assertions) NotImplementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotImplementsf(a.t, interfaceObject, object, msg, args...) +} + // NotNil asserts that the specified object is not nil. // // a.NotNil(err) @@ -1309,10 +1326,12 @@ func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg stri return NotSamef(a.t, expected, actual, msg, args...) } -// NotSubset asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). +// NotSubset asserts that the specified list(array, slice...) or map does NOT +// contain all elements given in the specified subset list(array, slice...) or +// map. 
// -// a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") +// a.NotSubset([1, 3, 4], [1, 2]) +// a.NotSubset({"x": 1, "y": 2}, {"z": 3}) func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1320,10 +1339,12 @@ func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs return NotSubset(a.t, list, subset, msgAndArgs...) } -// NotSubsetf asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). +// NotSubsetf asserts that the specified list(array, slice...) or map does NOT +// contain all elements given in the specified subset list(array, slice...) or +// map. // -// a.NotSubsetf([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") +// a.NotSubsetf([1, 3, 4], [1, 2], "error message %s", "formatted") +// a.NotSubsetf({"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1483,10 +1504,11 @@ func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string, return Samef(a.t, expected, actual, msg, args...) } -// Subset asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). +// Subset asserts that the specified list(array, slice...) or map contains all +// elements given in the specified subset list(array, slice...) or map. 
// -// a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") +// a.Subset([1, 2, 3], [1, 2]) +// a.Subset({"x": 1, "y": 2}, {"x": 1}) func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1494,10 +1516,11 @@ func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ... return Subset(a.t, list, subset, msgAndArgs...) } -// Subsetf asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). +// Subsetf asserts that the specified list(array, slice...) or map contains all +// elements given in the specified subset list(array, slice...) or map. // -// a.Subsetf([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") +// a.Subsetf([1, 2, 3], [1, 2], "error message %s", "formatted") +// a.Subsetf({"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() diff --git a/cluster-autoscaler/vendor/github.com/stretchr/testify/assert/assertions.go b/cluster-autoscaler/vendor/github.com/stretchr/testify/assert/assertions.go index a55d1bba926c..0b7570f21c63 100644 --- a/cluster-autoscaler/vendor/github.com/stretchr/testify/assert/assertions.go +++ b/cluster-autoscaler/vendor/github.com/stretchr/testify/assert/assertions.go @@ -19,7 +19,7 @@ import ( "github.com/davecgh/go-spew/spew" "github.com/pmezard/go-difflib/difflib" - yaml "gopkg.in/yaml.v3" + "gopkg.in/yaml.v3" ) //go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=assert -template=assertion_format.go.tmpl" @@ -110,7 +110,12 @@ func copyExportedFields(expected interface{}) interface{} { return result.Interface() case reflect.Array, reflect.Slice: - result := reflect.MakeSlice(expectedType, expectedValue.Len(), 
expectedValue.Len()) + var result reflect.Value + if expectedKind == reflect.Array { + result = reflect.New(reflect.ArrayOf(expectedValue.Len(), expectedType.Elem())).Elem() + } else { + result = reflect.MakeSlice(expectedType, expectedValue.Len(), expectedValue.Len()) + } for i := 0; i < expectedValue.Len(); i++ { index := expectedValue.Index(i) if isNil(index) { @@ -140,6 +145,8 @@ func copyExportedFields(expected interface{}) interface{} { // structures. // // This function does no assertion of any kind. +// +// Deprecated: Use [EqualExportedValues] instead. func ObjectsExportedFieldsAreEqual(expected, actual interface{}) bool { expectedCleaned := copyExportedFields(expected) actualCleaned := copyExportedFields(actual) @@ -153,17 +160,40 @@ func ObjectsAreEqualValues(expected, actual interface{}) bool { return true } - actualType := reflect.TypeOf(actual) - if actualType == nil { + expectedValue := reflect.ValueOf(expected) + actualValue := reflect.ValueOf(actual) + if !expectedValue.IsValid() || !actualValue.IsValid() { return false } - expectedValue := reflect.ValueOf(expected) - if expectedValue.IsValid() && expectedValue.Type().ConvertibleTo(actualType) { + + expectedType := expectedValue.Type() + actualType := actualValue.Type() + if !expectedType.ConvertibleTo(actualType) { + return false + } + + if !isNumericType(expectedType) || !isNumericType(actualType) { // Attempt comparison after type conversion - return reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), actual) + return reflect.DeepEqual( + expectedValue.Convert(actualType).Interface(), actual, + ) } - return false + // If BOTH values are numeric, there are chances of false positives due + // to overflow or underflow. So, we need to make sure to always convert + // the smaller type to a larger type before comparing. 
+ if expectedType.Size() >= actualType.Size() { + return actualValue.Convert(expectedType).Interface() == expected + } + + return expectedValue.Convert(actualType).Interface() == actual +} + +// isNumericType returns true if the type is one of: +// int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, +// float32, float64, complex64, complex128 +func isNumericType(t reflect.Type) bool { + return t.Kind() >= reflect.Int && t.Kind() <= reflect.Complex128 } /* CallerInfo is necessary because the assert functions use the testing object @@ -266,7 +296,7 @@ func messageFromMsgAndArgs(msgAndArgs ...interface{}) string { // Aligns the provided message so that all lines after the first line start at the same location as the first line. // Assumes that the first line starts at the correct location (after carriage return, tab, label, spacer and tab). -// The longestLabelLen parameter specifies the length of the longest label in the output (required becaues this is the +// The longestLabelLen parameter specifies the length of the longest label in the output (required because this is the // basis on which the alignment occurs). func indentMessageLines(message string, longestLabelLen int) string { outBuf := new(bytes.Buffer) @@ -382,6 +412,25 @@ func Implements(t TestingT, interfaceObject interface{}, object interface{}, msg return true } +// NotImplements asserts that an object does not implement the specified interface. +// +// assert.NotImplements(t, (*MyInterface)(nil), new(MyObject)) +func NotImplements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + interfaceType := reflect.TypeOf(interfaceObject).Elem() + + if object == nil { + return Fail(t, fmt.Sprintf("Cannot check if nil does not implement %v", interfaceType), msgAndArgs...) 
+ } + if reflect.TypeOf(object).Implements(interfaceType) { + return Fail(t, fmt.Sprintf("%T implements %v", object, interfaceType), msgAndArgs...) + } + + return true +} + // IsType asserts that the specified objects are of the same type. func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { @@ -496,7 +545,7 @@ func samePointers(first, second interface{}) bool { // representations appropriate to be presented to the user. // // If the values are not of like type, the returned strings will be prefixed -// with the type name, and the value will be enclosed in parenthesis similar +// with the type name, and the value will be enclosed in parentheses similar // to a type conversion in the Go grammar. func formatUnequalValues(expected, actual interface{}) (e string, a string) { if reflect.TypeOf(expected) != reflect.TypeOf(actual) { @@ -523,7 +572,7 @@ func truncatingFormat(data interface{}) string { return value } -// EqualValues asserts that two objects are equal or convertable to the same types +// EqualValues asserts that two objects are equal or convertible to the same types // and equal. // // assert.EqualValues(t, uint32(123), int32(123)) @@ -566,12 +615,19 @@ func EqualExportedValues(t TestingT, expected, actual interface{}, msgAndArgs .. return Fail(t, fmt.Sprintf("Types expected to match exactly\n\t%v != %v", aType, bType), msgAndArgs...) } + if aType.Kind() == reflect.Ptr { + aType = aType.Elem() + } + if bType.Kind() == reflect.Ptr { + bType = bType.Elem() + } + if aType.Kind() != reflect.Struct { - return Fail(t, fmt.Sprintf("Types expected to both be struct \n\t%v != %v", aType.Kind(), reflect.Struct), msgAndArgs...) + return Fail(t, fmt.Sprintf("Types expected to both be struct or pointer to struct \n\t%v != %v", aType.Kind(), reflect.Struct), msgAndArgs...) 
} if bType.Kind() != reflect.Struct { - return Fail(t, fmt.Sprintf("Types expected to both be struct \n\t%v != %v", bType.Kind(), reflect.Struct), msgAndArgs...) + return Fail(t, fmt.Sprintf("Types expected to both be struct or pointer to struct \n\t%v != %v", bType.Kind(), reflect.Struct), msgAndArgs...) } expected = copyExportedFields(expected) @@ -620,17 +676,6 @@ func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { return Fail(t, "Expected value not to be nil.", msgAndArgs...) } -// containsKind checks if a specified kind in the slice of kinds. -func containsKind(kinds []reflect.Kind, kind reflect.Kind) bool { - for i := 0; i < len(kinds); i++ { - if kind == kinds[i] { - return true - } - } - - return false -} - // isNil checks if a specified object is nil or not, without Failing. func isNil(object interface{}) bool { if object == nil { @@ -638,16 +683,13 @@ func isNil(object interface{}) bool { } value := reflect.ValueOf(object) - kind := value.Kind() - isNilableKind := containsKind( - []reflect.Kind{ - reflect.Chan, reflect.Func, - reflect.Interface, reflect.Map, - reflect.Ptr, reflect.Slice, reflect.UnsafePointer}, - kind) - - if isNilableKind && value.IsNil() { - return true + switch value.Kind() { + case + reflect.Chan, reflect.Func, + reflect.Interface, reflect.Map, + reflect.Ptr, reflect.Slice, reflect.UnsafePointer: + + return value.IsNil() } return false @@ -731,16 +773,14 @@ func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { } -// getLen try to get length of object. -// return (false, 0) if impossible. -func getLen(x interface{}) (ok bool, length int) { +// getLen tries to get the length of an object. +// It returns (0, false) if impossible. 
+func getLen(x interface{}) (length int, ok bool) { v := reflect.ValueOf(x) defer func() { - if e := recover(); e != nil { - ok = false - } + ok = recover() == nil }() - return true, v.Len() + return v.Len(), true } // Len asserts that the specified object has specific length. @@ -751,13 +791,13 @@ func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) if h, ok := t.(tHelper); ok { h.Helper() } - ok, l := getLen(object) + l, ok := getLen(object) if !ok { - return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", object), msgAndArgs...) + return Fail(t, fmt.Sprintf("\"%v\" could not be applied builtin len()", object), msgAndArgs...) } if l != length { - return Fail(t, fmt.Sprintf("\"%s\" should have %d item(s), but has %d", object, length, l), msgAndArgs...) + return Fail(t, fmt.Sprintf("\"%v\" should have %d item(s), but has %d", object, length, l), msgAndArgs...) } return true } @@ -919,10 +959,11 @@ func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) } -// Subset asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). +// Subset asserts that the specified list(array, slice...) or map contains all +// elements given in the specified subset list(array, slice...) or map. // -// assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") +// assert.Subset(t, [1, 2, 3], [1, 2]) +// assert.Subset(t, {"x": 1, "y": 2}, {"x": 1}) func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { if h, ok := t.(tHelper); ok { h.Helper() @@ -975,10 +1016,12 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok return true } -// NotSubset asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). +// NotSubset asserts that the specified list(array, slice...) 
or map does NOT +// contain all elements given in the specified subset list(array, slice...) or +// map. // -// assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") +// assert.NotSubset(t, [1, 3, 4], [1, 2]) +// assert.NotSubset(t, {"x": 1, "y": 2}, {"z": 3}) func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1439,7 +1482,7 @@ func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAnd h.Helper() } if math.IsNaN(epsilon) { - return Fail(t, "epsilon must not be NaN") + return Fail(t, "epsilon must not be NaN", msgAndArgs...) } actualEpsilon, err := calcRelativeError(expected, actual) if err != nil { @@ -1458,19 +1501,26 @@ func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, m if h, ok := t.(tHelper); ok { h.Helper() } - if expected == nil || actual == nil || - reflect.TypeOf(actual).Kind() != reflect.Slice || - reflect.TypeOf(expected).Kind() != reflect.Slice { + + if expected == nil || actual == nil { return Fail(t, "Parameters must be slice", msgAndArgs...) } - actualSlice := reflect.ValueOf(actual) expectedSlice := reflect.ValueOf(expected) + actualSlice := reflect.ValueOf(actual) - for i := 0; i < actualSlice.Len(); i++ { - result := InEpsilon(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), epsilon) - if !result { - return result + if expectedSlice.Type().Kind() != reflect.Slice { + return Fail(t, "Expected value must be slice", msgAndArgs...) + } + + expectedLen := expectedSlice.Len() + if !IsType(t, expected, actual) || !Len(t, actual, expectedLen) { + return false + } + + for i := 0; i < expectedLen; i++ { + if !InEpsilon(t, expectedSlice.Index(i).Interface(), actualSlice.Index(i).Interface(), epsilon, "at index %d", i) { + return false } } @@ -1870,23 +1920,18 @@ func (c *CollectT) Errorf(format string, args ...interface{}) { } // FailNow panics. 
-func (c *CollectT) FailNow() { +func (*CollectT) FailNow() { panic("Assertion failed") } -// Reset clears the collected errors. -func (c *CollectT) Reset() { - c.errors = nil +// Deprecated: That was a method for internal usage that should not have been published. Now just panics. +func (*CollectT) Reset() { + panic("Reset() is deprecated") } -// Copy copies the collected errors to the supplied t. -func (c *CollectT) Copy(t TestingT) { - if tt, ok := t.(tHelper); ok { - tt.Helper() - } - for _, err := range c.errors { - t.Errorf("%v", err) - } +// Deprecated: That was a method for internal usage that should not have been published. Now just panics. +func (*CollectT) Copy(TestingT) { + panic("Copy() is deprecated") } // EventuallyWithT asserts that given condition will be met in waitFor time, @@ -1912,8 +1957,8 @@ func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time h.Helper() } - collect := new(CollectT) - ch := make(chan bool, 1) + var lastFinishedTickErrs []error + ch := make(chan []error, 1) timer := time.NewTimer(waitFor) defer timer.Stop() @@ -1924,19 +1969,25 @@ func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time for tick := ticker.C; ; { select { case <-timer.C: - collect.Copy(t) + for _, err := range lastFinishedTickErrs { + t.Errorf("%v", err) + } return Fail(t, "Condition never satisfied", msgAndArgs...) case <-tick: tick = nil - collect.Reset() go func() { + collect := new(CollectT) + defer func() { + ch <- collect.errors + }() condition(collect) - ch <- len(collect.errors) == 0 }() - case v := <-ch: - if v { + case errs := <-ch: + if len(errs) == 0 { return true } + // Keep the errors from the last ended condition, so that they can be copied to t if timeout is reached. 
+ lastFinishedTickErrs = errs tick = ticker.C } } diff --git a/cluster-autoscaler/vendor/github.com/stretchr/testify/assert/http_assertions.go b/cluster-autoscaler/vendor/github.com/stretchr/testify/assert/http_assertions.go index d8038c28a758..861ed4b7ced0 100644 --- a/cluster-autoscaler/vendor/github.com/stretchr/testify/assert/http_assertions.go +++ b/cluster-autoscaler/vendor/github.com/stretchr/testify/assert/http_assertions.go @@ -12,7 +12,7 @@ import ( // an error if building a new request fails. func httpCode(handler http.HandlerFunc, method, url string, values url.Values) (int, error) { w := httptest.NewRecorder() - req, err := http.NewRequest(method, url, nil) + req, err := http.NewRequest(method, url, http.NoBody) if err != nil { return -1, err } @@ -32,12 +32,12 @@ func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, value } code, err := httpCode(handler, method, url, values) if err != nil { - Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) + Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...) } isSuccessCode := code >= http.StatusOK && code <= http.StatusPartialContent if !isSuccessCode { - Fail(t, fmt.Sprintf("Expected HTTP success status code for %q but received %d", url+"?"+values.Encode(), code)) + Fail(t, fmt.Sprintf("Expected HTTP success status code for %q but received %d", url+"?"+values.Encode(), code), msgAndArgs...) } return isSuccessCode @@ -54,12 +54,12 @@ func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, valu } code, err := httpCode(handler, method, url, values) if err != nil { - Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) + Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...) 
} isRedirectCode := code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect if !isRedirectCode { - Fail(t, fmt.Sprintf("Expected HTTP redirect status code for %q but received %d", url+"?"+values.Encode(), code)) + Fail(t, fmt.Sprintf("Expected HTTP redirect status code for %q but received %d", url+"?"+values.Encode(), code), msgAndArgs...) } return isRedirectCode @@ -76,12 +76,12 @@ func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values } code, err := httpCode(handler, method, url, values) if err != nil { - Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) + Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...) } isErrorCode := code >= http.StatusBadRequest if !isErrorCode { - Fail(t, fmt.Sprintf("Expected HTTP error status code for %q but received %d", url+"?"+values.Encode(), code)) + Fail(t, fmt.Sprintf("Expected HTTP error status code for %q but received %d", url+"?"+values.Encode(), code), msgAndArgs...) } return isErrorCode @@ -98,12 +98,12 @@ func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method, url string, va } code, err := httpCode(handler, method, url, values) if err != nil { - Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) + Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...) } successful := code == statuscode if !successful { - Fail(t, fmt.Sprintf("Expected HTTP status code %d for %q but received %d", statuscode, url+"?"+values.Encode(), code)) + Fail(t, fmt.Sprintf("Expected HTTP status code %d for %q but received %d", statuscode, url+"?"+values.Encode(), code), msgAndArgs...) } return successful @@ -113,7 +113,10 @@ func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method, url string, va // empty string if building a new request fails. 
func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) string { w := httptest.NewRecorder() - req, err := http.NewRequest(method, url+"?"+values.Encode(), nil) + if len(values) > 0 { + url += "?" + values.Encode() + } + req, err := http.NewRequest(method, url, http.NoBody) if err != nil { return "" } @@ -135,7 +138,7 @@ func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, contains := strings.Contains(body, fmt.Sprint(str)) if !contains { - Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body)) + Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body), msgAndArgs...) } return contains @@ -155,7 +158,7 @@ func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url strin contains := strings.Contains(body, fmt.Sprint(str)) if contains { - Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body)) + Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body), msgAndArgs...) 
} return !contains diff --git a/cluster-autoscaler/vendor/github.com/stretchr/testify/mock/mock.go b/cluster-autoscaler/vendor/github.com/stretchr/testify/mock/mock.go index f4b42e44ffe9..213bde2ea636 100644 --- a/cluster-autoscaler/vendor/github.com/stretchr/testify/mock/mock.go +++ b/cluster-autoscaler/vendor/github.com/stretchr/testify/mock/mock.go @@ -18,6 +18,9 @@ import ( "github.com/stretchr/testify/assert" ) +// regex for GCCGO functions +var gccgoRE = regexp.MustCompile(`\.pN\d+_`) + // TestingT is an interface wrapper around *testing.T type TestingT interface { Logf(format string, args ...interface{}) @@ -111,7 +114,7 @@ func (c *Call) Return(returnArguments ...interface{}) *Call { return c } -// Panic specifies if the functon call should fail and the panic message +// Panic specifies if the function call should fail and the panic message // // Mock.On("DoSomething").Panic("test panic") func (c *Call) Panic(msg string) *Call { @@ -123,21 +126,21 @@ func (c *Call) Panic(msg string) *Call { return c } -// Once indicates that that the mock should only return the value once. +// Once indicates that the mock should only return the value once. // // Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Once() func (c *Call) Once() *Call { return c.Times(1) } -// Twice indicates that that the mock should only return the value twice. +// Twice indicates that the mock should only return the value twice. // // Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Twice() func (c *Call) Twice() *Call { return c.Times(2) } -// Times indicates that that the mock should only return the indicated number +// Times indicates that the mock should only return the indicated number // of times. 
// // Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Times(5) @@ -455,9 +458,8 @@ func (m *Mock) Called(arguments ...interface{}) Arguments { // For Ex: github_com_docker_libkv_store_mock.WatchTree.pN39_github_com_docker_libkv_store_mock.Mock // uses interface information unlike golang github.com/docker/libkv/store/mock.(*Mock).WatchTree // With GCCGO we need to remove interface information starting from pN
. - re := regexp.MustCompile("\\.pN\\d+_") - if re.MatchString(functionPath) { - functionPath = re.Split(functionPath, -1)[0] + if gccgoRE.MatchString(functionPath) { + functionPath = gccgoRE.Split(functionPath, -1)[0] } parts := strings.Split(functionPath, ".") functionName := parts[len(parts)-1] @@ -474,7 +476,7 @@ func (m *Mock) MethodCalled(methodName string, arguments ...interface{}) Argumen found, call := m.findExpectedCall(methodName, arguments...) if found < 0 { - // expected call found but it has already been called with repeatable times + // expected call found, but it has already been called with repeatable times if call != nil { m.mutex.Unlock() m.fail("\nassert: mock: The method has been called over %d times.\n\tEither do one more Mock.On(\"%s\").Return(...), or remove extra call.\n\tThis call was unexpected:\n\t\t%s\n\tat: %s", call.totalCalls, methodName, callString(methodName, arguments, true), assert.CallerInfo()) @@ -563,7 +565,7 @@ func (m *Mock) MethodCalled(methodName string, arguments ...interface{}) Argumen Assertions */ -type assertExpectationser interface { +type assertExpectationiser interface { AssertExpectations(TestingT) bool } @@ -580,7 +582,7 @@ func AssertExpectationsForObjects(t TestingT, testObjects ...interface{}) bool { t.Logf("Deprecated mock.AssertExpectationsForObjects(myMock.Mock) use mock.AssertExpectationsForObjects(myMock)") obj = m } - m := obj.(assertExpectationser) + m := obj.(assertExpectationiser) if !m.AssertExpectations(t) { t.Logf("Expectations didn't match for Mock: %+v", reflect.TypeOf(m)) return false @@ -592,6 +594,9 @@ func AssertExpectationsForObjects(t TestingT, testObjects ...interface{}) bool { // AssertExpectations asserts that everything specified with On and Return was // in fact called as expected. Calls may have occurred in any order. 
func (m *Mock) AssertExpectations(t TestingT) bool { + if s, ok := t.(interface{ Skipped() bool }); ok && s.Skipped() { + return true + } if h, ok := t.(tHelper); ok { h.Helper() } @@ -606,8 +611,8 @@ func (m *Mock) AssertExpectations(t TestingT) bool { satisfied, reason := m.checkExpectation(expectedCall) if !satisfied { failedExpectations++ + t.Logf(reason) } - t.Logf(reason) } if failedExpectations != 0 { @@ -758,25 +763,33 @@ const ( Anything = "mock.Anything" ) -// AnythingOfTypeArgument is a string that contains the type of an argument +// AnythingOfTypeArgument contains the type of an argument +// for use when type checking. Used in Diff and Assert. +// +// Deprecated: this is an implementation detail that must not be used. Use [AnythingOfType] instead. +type AnythingOfTypeArgument = anythingOfTypeArgument + +// anythingOfTypeArgument is a string that contains the type of an argument // for use when type checking. Used in Diff and Assert. -type AnythingOfTypeArgument string +type anythingOfTypeArgument string -// AnythingOfType returns an AnythingOfTypeArgument object containing the -// name of the type to check for. Used in Diff and Assert. +// AnythingOfType returns a special value containing the +// name of the type to check for. The type name will be matched against the type name returned by [reflect.Type.String]. +// +// Used in Diff and Assert. // // For example: // // Assert(t, AnythingOfType("string"), AnythingOfType("int")) func AnythingOfType(t string) AnythingOfTypeArgument { - return AnythingOfTypeArgument(t) + return anythingOfTypeArgument(t) } // IsTypeArgument is a struct that contains the type of an argument // for use when type checking. This is an alternative to AnythingOfType. // Used in Diff and Assert. type IsTypeArgument struct { - t interface{} + t reflect.Type } // IsType returns an IsTypeArgument object containing the type to check for. 
@@ -786,7 +799,7 @@ type IsTypeArgument struct { // For example: // Assert(t, IsType(""), IsType(0)) func IsType(t interface{}) *IsTypeArgument { - return &IsTypeArgument{t: t} + return &IsTypeArgument{t: reflect.TypeOf(t)} } // FunctionalOptionsArgument is a struct that contains the type and value of an functional option argument @@ -950,53 +963,55 @@ func (args Arguments) Diff(objects []interface{}) (string, int) { differences++ output = fmt.Sprintf("%s\t%d: FAIL: %s not matched by %s\n", output, i, actualFmt, matcher) } - } else if reflect.TypeOf(expected) == reflect.TypeOf((*AnythingOfTypeArgument)(nil)).Elem() { - // type checking - if reflect.TypeOf(actual).Name() != string(expected.(AnythingOfTypeArgument)) && reflect.TypeOf(actual).String() != string(expected.(AnythingOfTypeArgument)) { - // not match - differences++ - output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", output, i, expected, reflect.TypeOf(actual).Name(), actualFmt) - } - } else if reflect.TypeOf(expected) == reflect.TypeOf((*IsTypeArgument)(nil)) { - t := expected.(*IsTypeArgument).t - if reflect.TypeOf(t) != reflect.TypeOf(actual) { - differences++ - output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", output, i, reflect.TypeOf(t).Name(), reflect.TypeOf(actual).Name(), actualFmt) - } - } else if reflect.TypeOf(expected) == reflect.TypeOf((*FunctionalOptionsArgument)(nil)) { - t := expected.(*FunctionalOptionsArgument).value + } else { + switch expected := expected.(type) { + case anythingOfTypeArgument: + // type checking + if reflect.TypeOf(actual).Name() != string(expected) && reflect.TypeOf(actual).String() != string(expected) { + // not match + differences++ + output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", output, i, expected, reflect.TypeOf(actual).Name(), actualFmt) + } + case *IsTypeArgument: + actualT := reflect.TypeOf(actual) + if actualT != expected.t { + differences++ + output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", 
output, i, expected.t.Name(), actualT.Name(), actualFmt) + } + case *FunctionalOptionsArgument: + t := expected.value - var name string - tValue := reflect.ValueOf(t) - if tValue.Len() > 0 { - name = "[]" + reflect.TypeOf(tValue.Index(0).Interface()).String() - } + var name string + tValue := reflect.ValueOf(t) + if tValue.Len() > 0 { + name = "[]" + reflect.TypeOf(tValue.Index(0).Interface()).String() + } - tName := reflect.TypeOf(t).Name() - if name != reflect.TypeOf(actual).String() && tValue.Len() != 0 { - differences++ - output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", output, i, tName, reflect.TypeOf(actual).Name(), actualFmt) - } else { - if ef, af := assertOpts(t, actual); ef == "" && af == "" { + tName := reflect.TypeOf(t).Name() + if name != reflect.TypeOf(actual).String() && tValue.Len() != 0 { + differences++ + output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", output, i, tName, reflect.TypeOf(actual).Name(), actualFmt) + } else { + if ef, af := assertOpts(t, actual); ef == "" && af == "" { + // match + output = fmt.Sprintf("%s\t%d: PASS: %s == %s\n", output, i, tName, tName) + } else { + // not match + differences++ + output = fmt.Sprintf("%s\t%d: FAIL: %s != %s\n", output, i, af, ef) + } + } + + default: + if assert.ObjectsAreEqual(expected, Anything) || assert.ObjectsAreEqual(actual, Anything) || assert.ObjectsAreEqual(actual, expected) { // match - output = fmt.Sprintf("%s\t%d: PASS: %s == %s\n", output, i, tName, tName) + output = fmt.Sprintf("%s\t%d: PASS: %s == %s\n", output, i, actualFmt, expectedFmt) } else { // not match differences++ - output = fmt.Sprintf("%s\t%d: FAIL: %s != %s\n", output, i, af, ef) + output = fmt.Sprintf("%s\t%d: FAIL: %s != %s\n", output, i, actualFmt, expectedFmt) } } - } else { - // normal checking - - if assert.ObjectsAreEqual(expected, Anything) || assert.ObjectsAreEqual(actual, Anything) || assert.ObjectsAreEqual(actual, expected) { - // match - output = fmt.Sprintf("%s\t%d: PASS: %s == 
%s\n", output, i, actualFmt, expectedFmt) - } else { - // not match - differences++ - output = fmt.Sprintf("%s\t%d: FAIL: %s != %s\n", output, i, actualFmt, expectedFmt) - } } } diff --git a/cluster-autoscaler/vendor/github.com/stretchr/testify/require/require.go b/cluster-autoscaler/vendor/github.com/stretchr/testify/require/require.go index 63f852147675..506a82f80777 100644 --- a/cluster-autoscaler/vendor/github.com/stretchr/testify/require/require.go +++ b/cluster-autoscaler/vendor/github.com/stretchr/testify/require/require.go @@ -1,7 +1,4 @@ -/* -* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen -* THIS FILE MUST NOT BE EDITED BY HAND - */ +// Code generated with github.com/stretchr/testify/_codegen; DO NOT EDIT. package require @@ -235,7 +232,7 @@ func EqualExportedValuesf(t TestingT, expected interface{}, actual interface{}, t.FailNow() } -// EqualValues asserts that two objects are equal or convertable to the same types +// EqualValues asserts that two objects are equal or convertible to the same types // and equal. // // assert.EqualValues(t, uint32(123), int32(123)) @@ -249,7 +246,7 @@ func EqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArg t.FailNow() } -// EqualValuesf asserts that two objects are equal or convertable to the same types +// EqualValuesf asserts that two objects are equal or convertible to the same types // and equal. // // assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted") @@ -1546,6 +1543,32 @@ func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interf t.FailNow() } +// NotImplements asserts that an object does not implement the specified interface. 
+// +// assert.NotImplements(t, (*MyInterface)(nil), new(MyObject)) +func NotImplements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.NotImplements(t, interfaceObject, object, msgAndArgs...) { + return + } + t.FailNow() +} + +// NotImplementsf asserts that an object does not implement the specified interface. +// +// assert.NotImplementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted") +func NotImplementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.NotImplementsf(t, interfaceObject, object, msg, args...) { + return + } + t.FailNow() +} + // NotNil asserts that the specified object is not nil. // // assert.NotNil(t, err) @@ -1658,10 +1681,12 @@ func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string, t.FailNow() } -// NotSubset asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). +// NotSubset asserts that the specified list(array, slice...) or map does NOT +// contain all elements given in the specified subset list(array, slice...) or +// map. // -// assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") +// assert.NotSubset(t, [1, 3, 4], [1, 2]) +// assert.NotSubset(t, {"x": 1, "y": 2}, {"z": 3}) func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1672,10 +1697,12 @@ func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...i t.FailNow() } -// NotSubsetf asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). +// NotSubsetf asserts that the specified list(array, slice...) 
or map does NOT +// contain all elements given in the specified subset list(array, slice...) or +// map. // -// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") +// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted") +// assert.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1880,10 +1907,11 @@ func Samef(t TestingT, expected interface{}, actual interface{}, msg string, arg t.FailNow() } -// Subset asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). +// Subset asserts that the specified list(array, slice...) or map contains all +// elements given in the specified subset list(array, slice...) or map. // -// assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") +// assert.Subset(t, [1, 2, 3], [1, 2]) +// assert.Subset(t, {"x": 1, "y": 2}, {"x": 1}) func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1894,10 +1922,11 @@ func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...inte t.FailNow() } -// Subsetf asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). +// Subsetf asserts that the specified list(array, slice...) or map contains all +// elements given in the specified subset list(array, slice...) or map. 
// -// assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") +// assert.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted") +// assert.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() diff --git a/cluster-autoscaler/vendor/github.com/stretchr/testify/require/require_forward.go b/cluster-autoscaler/vendor/github.com/stretchr/testify/require/require_forward.go index 3b5b09330a43..eee8310a5fa9 100644 --- a/cluster-autoscaler/vendor/github.com/stretchr/testify/require/require_forward.go +++ b/cluster-autoscaler/vendor/github.com/stretchr/testify/require/require_forward.go @@ -1,7 +1,4 @@ -/* -* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen -* THIS FILE MUST NOT BE EDITED BY HAND - */ +// Code generated with github.com/stretchr/testify/_codegen; DO NOT EDIT. package require @@ -190,7 +187,7 @@ func (a *Assertions) EqualExportedValuesf(expected interface{}, actual interface EqualExportedValuesf(a.t, expected, actual, msg, args...) } -// EqualValues asserts that two objects are equal or convertable to the same types +// EqualValues asserts that two objects are equal or convertible to the same types // and equal. // // a.EqualValues(uint32(123), int32(123)) @@ -201,7 +198,7 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn EqualValues(a.t, expected, actual, msgAndArgs...) } -// EqualValuesf asserts that two objects are equal or convertable to the same types +// EqualValuesf asserts that two objects are equal or convertible to the same types // and equal. // // a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted") @@ -1222,6 +1219,26 @@ func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...in NotErrorIsf(a.t, err, target, msg, args...) 
} +// NotImplements asserts that an object does not implement the specified interface. +// +// a.NotImplements((*MyInterface)(nil), new(MyObject)) +func (a *Assertions) NotImplements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + NotImplements(a.t, interfaceObject, object, msgAndArgs...) +} + +// NotImplementsf asserts that an object does not implement the specified interface. +// +// a.NotImplementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted") +func (a *Assertions) NotImplementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + NotImplementsf(a.t, interfaceObject, object, msg, args...) +} + // NotNil asserts that the specified object is not nil. // // a.NotNil(err) @@ -1310,10 +1327,12 @@ func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg stri NotSamef(a.t, expected, actual, msg, args...) } -// NotSubset asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). +// NotSubset asserts that the specified list(array, slice...) or map does NOT +// contain all elements given in the specified subset list(array, slice...) or +// map. // -// a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") +// a.NotSubset([1, 3, 4], [1, 2]) +// a.NotSubset({"x": 1, "y": 2}, {"z": 3}) func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1321,10 +1340,12 @@ func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs NotSubset(a.t, list, subset, msgAndArgs...) } -// NotSubsetf asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). +// NotSubsetf asserts that the specified list(array, slice...) 
or map does NOT +// contain all elements given in the specified subset list(array, slice...) or +// map. // -// a.NotSubsetf([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") +// a.NotSubsetf([1, 3, 4], [1, 2], "error message %s", "formatted") +// a.NotSubsetf({"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1484,10 +1505,11 @@ func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string, Samef(a.t, expected, actual, msg, args...) } -// Subset asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). +// Subset asserts that the specified list(array, slice...) or map contains all +// elements given in the specified subset list(array, slice...) or map. // -// a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") +// a.Subset([1, 2, 3], [1, 2]) +// a.Subset({"x": 1, "y": 2}, {"x": 1}) func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1495,10 +1517,11 @@ func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ... Subset(a.t, list, subset, msgAndArgs...) } -// Subsetf asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). +// Subsetf asserts that the specified list(array, slice...) or map contains all +// elements given in the specified subset list(array, slice...) or map. 
// -// a.Subsetf([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") +// a.Subsetf([1, 2, 3], [1, 2], "error message %s", "formatted") +// a.Subsetf({"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() diff --git a/cluster-autoscaler/vendor/github.com/stretchr/testify/suite/suite.go b/cluster-autoscaler/vendor/github.com/stretchr/testify/suite/suite.go index 8b4202d8906d..18443a91c85d 100644 --- a/cluster-autoscaler/vendor/github.com/stretchr/testify/suite/suite.go +++ b/cluster-autoscaler/vendor/github.com/stretchr/testify/suite/suite.go @@ -58,7 +58,7 @@ func (suite *Suite) Require() *require.Assertions { suite.mu.Lock() defer suite.mu.Unlock() if suite.require == nil { - suite.require = require.New(suite.T()) + panic("'Require' must not be called before 'Run' or 'SetT'") } return suite.require } @@ -72,17 +72,19 @@ func (suite *Suite) Assert() *assert.Assertions { suite.mu.Lock() defer suite.mu.Unlock() if suite.Assertions == nil { - suite.Assertions = assert.New(suite.T()) + panic("'Assert' must not be called before 'Run' or 'SetT'") } return suite.Assertions } func recoverAndFailOnPanic(t *testing.T) { + t.Helper() r := recover() failOnPanic(t, r) } func failOnPanic(t *testing.T, r interface{}) { + t.Helper() if r != nil { t.Errorf("test panicked: %v\n%s", r, debug.Stack()) t.FailNow() @@ -96,19 +98,20 @@ func failOnPanic(t *testing.T, r interface{}) { func (suite *Suite) Run(name string, subtest func()) bool { oldT := suite.T() - if setupSubTest, ok := suite.s.(SetupSubTest); ok { - setupSubTest.SetupSubTest() - } + return oldT.Run(name, func(t *testing.T) { + suite.SetT(t) + defer suite.SetT(oldT) + + defer recoverAndFailOnPanic(t) + + if setupSubTest, ok := suite.s.(SetupSubTest); ok { + setupSubTest.SetupSubTest() + } - defer func() { - suite.SetT(oldT) if 
tearDownSubTest, ok := suite.s.(TearDownSubTest); ok { - tearDownSubTest.TearDownSubTest() + defer tearDownSubTest.TearDownSubTest() } - }() - return oldT.Run(name, func(t *testing.T) { - suite.SetT(t) subtest() }) } @@ -164,6 +167,8 @@ func Run(t *testing.T, suite TestingSuite) { suite.SetT(t) defer recoverAndFailOnPanic(t) defer func() { + t.Helper() + r := recover() if stats != nil { diff --git a/cluster-autoscaler/vendor/gopkg.in/ini.v1/.editorconfig b/cluster-autoscaler/vendor/gopkg.in/ini.v1/.editorconfig new file mode 100644 index 000000000000..4a2d9180f96f --- /dev/null +++ b/cluster-autoscaler/vendor/gopkg.in/ini.v1/.editorconfig @@ -0,0 +1,12 @@ +# http://editorconfig.org + +root = true + +[*] +charset = utf-8 +end_of_line = lf +insert_final_newline = true +trim_trailing_whitespace = true + +[*_test.go] +trim_trailing_whitespace = false diff --git a/cluster-autoscaler/vendor/gopkg.in/ini.v1/.gitignore b/cluster-autoscaler/vendor/gopkg.in/ini.v1/.gitignore new file mode 100644 index 000000000000..588388bda28d --- /dev/null +++ b/cluster-autoscaler/vendor/gopkg.in/ini.v1/.gitignore @@ -0,0 +1,7 @@ +testdata/conf_out.ini +ini.sublime-project +ini.sublime-workspace +testdata/conf_reflect.ini +.idea +/.vscode +.DS_Store diff --git a/cluster-autoscaler/vendor/gopkg.in/ini.v1/.golangci.yml b/cluster-autoscaler/vendor/gopkg.in/ini.v1/.golangci.yml new file mode 100644 index 000000000000..631e369254d3 --- /dev/null +++ b/cluster-autoscaler/vendor/gopkg.in/ini.v1/.golangci.yml @@ -0,0 +1,27 @@ +linters-settings: + staticcheck: + checks: [ + "all", + "-SA1019" # There are valid use cases of strings.Title + ] + nakedret: + max-func-lines: 0 # Disallow any unnamed return statement + +linters: + enable: + - deadcode + - errcheck + - gosimple + - govet + - ineffassign + - staticcheck + - structcheck + - typecheck + - unused + - varcheck + - nakedret + - gofmt + - rowserrcheck + - unconvert + - goimports + - unparam diff --git 
a/cluster-autoscaler/vendor/gopkg.in/ini.v1/LICENSE b/cluster-autoscaler/vendor/gopkg.in/ini.v1/LICENSE new file mode 100644 index 000000000000..d361bbcdf5c9 --- /dev/null +++ b/cluster-autoscaler/vendor/gopkg.in/ini.v1/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). 
+ +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. 
+ +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file 
distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. 
+ +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright 2014 Unknwon + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/cluster-autoscaler/vendor/gopkg.in/ini.v1/Makefile b/cluster-autoscaler/vendor/gopkg.in/ini.v1/Makefile new file mode 100644 index 000000000000..f3b0dae2d298 --- /dev/null +++ b/cluster-autoscaler/vendor/gopkg.in/ini.v1/Makefile @@ -0,0 +1,15 @@ +.PHONY: build test bench vet coverage + +build: vet bench + +test: + go test -v -cover -race + +bench: + go test -v -cover -test.bench=. 
-test.benchmem + +vet: + go vet + +coverage: + go test -coverprofile=c.out && go tool cover -html=c.out && rm c.out diff --git a/cluster-autoscaler/vendor/gopkg.in/ini.v1/README.md b/cluster-autoscaler/vendor/gopkg.in/ini.v1/README.md new file mode 100644 index 000000000000..30606d9700a8 --- /dev/null +++ b/cluster-autoscaler/vendor/gopkg.in/ini.v1/README.md @@ -0,0 +1,43 @@ +# INI + +[![GitHub Workflow Status](https://img.shields.io/github/checks-status/go-ini/ini/main?logo=github&style=for-the-badge)](https://github.com/go-ini/ini/actions?query=branch%3Amain) +[![codecov](https://img.shields.io/codecov/c/github/go-ini/ini/master?logo=codecov&style=for-the-badge)](https://codecov.io/gh/go-ini/ini) +[![GoDoc](https://img.shields.io/badge/GoDoc-Reference-blue?style=for-the-badge&logo=go)](https://pkg.go.dev/github.com/go-ini/ini?tab=doc) +[![Sourcegraph](https://img.shields.io/badge/view%20on-Sourcegraph-brightgreen.svg?style=for-the-badge&logo=sourcegraph)](https://sourcegraph.com/github.com/go-ini/ini) + +![](https://avatars0.githubusercontent.com/u/10216035?v=3&s=200) + +Package ini provides INI file read and write functionality in Go. + +## Features + +- Load from multiple data sources(file, `[]byte`, `io.Reader` and `io.ReadCloser`) with overwrites. +- Read with recursion values. +- Read with parent-child sections. +- Read with auto-increment key names. +- Read with multiple-line values. +- Read with tons of helper methods. +- Read and convert values to Go types. +- Read and **WRITE** comments of sections and keys. +- Manipulate sections, keys and comments with ease. +- Keep sections and keys in order as you parse and save. + +## Installation + +The minimum requirement of Go is **1.13**. + +```sh +$ go get gopkg.in/ini.v1 +``` + +Please add `-u` flag to update in the future. 
+ +## Getting Help + +- [Getting Started](https://ini.unknwon.io/docs/intro/getting_started) +- [API Documentation](https://gowalker.org/gopkg.in/ini.v1) +- 中国大陆镜像:https://ini.unknwon.cn + +## License + +This project is under Apache v2 License. See the [LICENSE](LICENSE) file for the full license text. diff --git a/cluster-autoscaler/vendor/gopkg.in/ini.v1/codecov.yml b/cluster-autoscaler/vendor/gopkg.in/ini.v1/codecov.yml new file mode 100644 index 000000000000..e02ec84bc05f --- /dev/null +++ b/cluster-autoscaler/vendor/gopkg.in/ini.v1/codecov.yml @@ -0,0 +1,16 @@ +coverage: + range: "60...95" + status: + project: + default: + threshold: 1% + informational: true + patch: + defualt: + only_pulls: true + informational: true + +comment: + layout: 'diff' + +github_checks: false diff --git a/cluster-autoscaler/vendor/gopkg.in/ini.v1/data_source.go b/cluster-autoscaler/vendor/gopkg.in/ini.v1/data_source.go new file mode 100644 index 000000000000..c3a541f1d1b5 --- /dev/null +++ b/cluster-autoscaler/vendor/gopkg.in/ini.v1/data_source.go @@ -0,0 +1,76 @@ +// Copyright 2019 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "os" +) + +var ( + _ dataSource = (*sourceFile)(nil) + _ dataSource = (*sourceData)(nil) + _ dataSource = (*sourceReadCloser)(nil) +) + +// dataSource is an interface that returns object which can be read and closed. 
+type dataSource interface { + ReadCloser() (io.ReadCloser, error) +} + +// sourceFile represents an object that contains content on the local file system. +type sourceFile struct { + name string +} + +func (s sourceFile) ReadCloser() (_ io.ReadCloser, err error) { + return os.Open(s.name) +} + +// sourceData represents an object that contains content in memory. +type sourceData struct { + data []byte +} + +func (s *sourceData) ReadCloser() (io.ReadCloser, error) { + return ioutil.NopCloser(bytes.NewReader(s.data)), nil +} + +// sourceReadCloser represents an input stream with Close method. +type sourceReadCloser struct { + reader io.ReadCloser +} + +func (s *sourceReadCloser) ReadCloser() (io.ReadCloser, error) { + return s.reader, nil +} + +func parseDataSource(source interface{}) (dataSource, error) { + switch s := source.(type) { + case string: + return sourceFile{s}, nil + case []byte: + return &sourceData{s}, nil + case io.ReadCloser: + return &sourceReadCloser{s}, nil + case io.Reader: + return &sourceReadCloser{ioutil.NopCloser(s)}, nil + default: + return nil, fmt.Errorf("error parsing data source: unknown type %q", s) + } +} diff --git a/cluster-autoscaler/vendor/gopkg.in/ini.v1/deprecated.go b/cluster-autoscaler/vendor/gopkg.in/ini.v1/deprecated.go new file mode 100644 index 000000000000..48b8e66d6d6f --- /dev/null +++ b/cluster-autoscaler/vendor/gopkg.in/ini.v1/deprecated.go @@ -0,0 +1,22 @@ +// Copyright 2019 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +var ( + // Deprecated: Use "DefaultSection" instead. + DEFAULT_SECTION = DefaultSection + // Deprecated: AllCapsUnderscore converts to format ALL_CAPS_UNDERSCORE. + AllCapsUnderscore = SnackCase +) diff --git a/cluster-autoscaler/vendor/gopkg.in/ini.v1/error.go b/cluster-autoscaler/vendor/gopkg.in/ini.v1/error.go new file mode 100644 index 000000000000..f66bc94b8b69 --- /dev/null +++ b/cluster-autoscaler/vendor/gopkg.in/ini.v1/error.go @@ -0,0 +1,49 @@ +// Copyright 2016 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "fmt" +) + +// ErrDelimiterNotFound indicates the error type of no delimiter is found which there should be one. +type ErrDelimiterNotFound struct { + Line string +} + +// IsErrDelimiterNotFound returns true if the given error is an instance of ErrDelimiterNotFound. +func IsErrDelimiterNotFound(err error) bool { + _, ok := err.(ErrDelimiterNotFound) + return ok +} + +func (err ErrDelimiterNotFound) Error() string { + return fmt.Sprintf("key-value delimiter not found: %s", err.Line) +} + +// ErrEmptyKeyName indicates the error type of no key name is found which there should be one. +type ErrEmptyKeyName struct { + Line string +} + +// IsErrEmptyKeyName returns true if the given error is an instance of ErrEmptyKeyName. 
+func IsErrEmptyKeyName(err error) bool { + _, ok := err.(ErrEmptyKeyName) + return ok +} + +func (err ErrEmptyKeyName) Error() string { + return fmt.Sprintf("empty key name: %s", err.Line) +} diff --git a/cluster-autoscaler/vendor/gopkg.in/ini.v1/file.go b/cluster-autoscaler/vendor/gopkg.in/ini.v1/file.go new file mode 100644 index 000000000000..f8b22408be51 --- /dev/null +++ b/cluster-autoscaler/vendor/gopkg.in/ini.v1/file.go @@ -0,0 +1,541 @@ +// Copyright 2017 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "strings" + "sync" +) + +// File represents a combination of one or more INI files in memory. +type File struct { + options LoadOptions + dataSources []dataSource + + // Should make things safe, but sometimes doesn't matter. + BlockMode bool + lock sync.RWMutex + + // To keep data in order. + sectionList []string + // To keep track of the index of a section with same name. + // This meta list is only used with non-unique section names are allowed. + sectionIndexes []int + + // Actual data is stored here. + sections map[string][]*Section + + NameMapper + ValueMapper +} + +// newFile initializes File object with given data sources. 
+func newFile(dataSources []dataSource, opts LoadOptions) *File { + if len(opts.KeyValueDelimiters) == 0 { + opts.KeyValueDelimiters = "=:" + } + if len(opts.KeyValueDelimiterOnWrite) == 0 { + opts.KeyValueDelimiterOnWrite = "=" + } + if len(opts.ChildSectionDelimiter) == 0 { + opts.ChildSectionDelimiter = "." + } + + return &File{ + BlockMode: true, + dataSources: dataSources, + sections: make(map[string][]*Section), + options: opts, + } +} + +// Empty returns an empty file object. +func Empty(opts ...LoadOptions) *File { + var opt LoadOptions + if len(opts) > 0 { + opt = opts[0] + } + + // Ignore error here, we are sure our data is good. + f, _ := LoadSources(opt, []byte("")) + return f +} + +// NewSection creates a new section. +func (f *File) NewSection(name string) (*Section, error) { + if len(name) == 0 { + return nil, errors.New("empty section name") + } + + if (f.options.Insensitive || f.options.InsensitiveSections) && name != DefaultSection { + name = strings.ToLower(name) + } + + if f.BlockMode { + f.lock.Lock() + defer f.lock.Unlock() + } + + if !f.options.AllowNonUniqueSections && inSlice(name, f.sectionList) { + return f.sections[name][0], nil + } + + f.sectionList = append(f.sectionList, name) + + // NOTE: Append to indexes must happen before appending to sections, + // otherwise index will have off-by-one problem. + f.sectionIndexes = append(f.sectionIndexes, len(f.sections[name])) + + sec := newSection(f, name) + f.sections[name] = append(f.sections[name], sec) + + return sec, nil +} + +// NewRawSection creates a new section with an unparseable body. +func (f *File) NewRawSection(name, body string) (*Section, error) { + section, err := f.NewSection(name) + if err != nil { + return nil, err + } + + section.isRawSection = true + section.rawBody = body + return section, nil +} + +// NewSections creates a list of sections. 
+func (f *File) NewSections(names ...string) (err error) { + for _, name := range names { + if _, err = f.NewSection(name); err != nil { + return err + } + } + return nil +} + +// GetSection returns section by given name. +func (f *File) GetSection(name string) (*Section, error) { + secs, err := f.SectionsByName(name) + if err != nil { + return nil, err + } + + return secs[0], err +} + +// HasSection returns true if the file contains a section with given name. +func (f *File) HasSection(name string) bool { + section, _ := f.GetSection(name) + return section != nil +} + +// SectionsByName returns all sections with given name. +func (f *File) SectionsByName(name string) ([]*Section, error) { + if len(name) == 0 { + name = DefaultSection + } + if f.options.Insensitive || f.options.InsensitiveSections { + name = strings.ToLower(name) + } + + if f.BlockMode { + f.lock.RLock() + defer f.lock.RUnlock() + } + + secs := f.sections[name] + if len(secs) == 0 { + return nil, fmt.Errorf("section %q does not exist", name) + } + + return secs, nil +} + +// Section assumes named section exists and returns a zero-value when not. +func (f *File) Section(name string) *Section { + sec, err := f.GetSection(name) + if err != nil { + if name == "" { + name = DefaultSection + } + sec, _ = f.NewSection(name) + return sec + } + return sec +} + +// SectionWithIndex assumes named section exists and returns a new section when not. +func (f *File) SectionWithIndex(name string, index int) *Section { + secs, err := f.SectionsByName(name) + if err != nil || len(secs) <= index { + // NOTE: It's OK here because the only possible error is empty section name, + // but if it's empty, this piece of code won't be executed. + newSec, _ := f.NewSection(name) + return newSec + } + + return secs[index] +} + +// Sections returns a list of Section stored in the current instance. 
+func (f *File) Sections() []*Section { + if f.BlockMode { + f.lock.RLock() + defer f.lock.RUnlock() + } + + sections := make([]*Section, len(f.sectionList)) + for i, name := range f.sectionList { + sections[i] = f.sections[name][f.sectionIndexes[i]] + } + return sections +} + +// ChildSections returns a list of child sections of given section name. +func (f *File) ChildSections(name string) []*Section { + return f.Section(name).ChildSections() +} + +// SectionStrings returns list of section names. +func (f *File) SectionStrings() []string { + list := make([]string, len(f.sectionList)) + copy(list, f.sectionList) + return list +} + +// DeleteSection deletes a section or all sections with given name. +func (f *File) DeleteSection(name string) { + secs, err := f.SectionsByName(name) + if err != nil { + return + } + + for i := 0; i < len(secs); i++ { + // For non-unique sections, it is always needed to remove the first one so + // in the next iteration, the subsequent section continue having index 0. + // Ignoring the error as index 0 never returns an error. + _ = f.DeleteSectionWithIndex(name, 0) + } +} + +// DeleteSectionWithIndex deletes a section with given name and index. 
+func (f *File) DeleteSectionWithIndex(name string, index int) error { + if !f.options.AllowNonUniqueSections && index != 0 { + return fmt.Errorf("delete section with non-zero index is only allowed when non-unique sections is enabled") + } + + if len(name) == 0 { + name = DefaultSection + } + if f.options.Insensitive || f.options.InsensitiveSections { + name = strings.ToLower(name) + } + + if f.BlockMode { + f.lock.Lock() + defer f.lock.Unlock() + } + + // Count occurrences of the sections + occurrences := 0 + + sectionListCopy := make([]string, len(f.sectionList)) + copy(sectionListCopy, f.sectionList) + + for i, s := range sectionListCopy { + if s != name { + continue + } + + if occurrences == index { + if len(f.sections[name]) <= 1 { + delete(f.sections, name) // The last one in the map + } else { + f.sections[name] = append(f.sections[name][:index], f.sections[name][index+1:]...) + } + + // Fix section lists + f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...) + f.sectionIndexes = append(f.sectionIndexes[:i], f.sectionIndexes[i+1:]...) + + } else if occurrences > index { + // Fix the indices of all following sections with this name. + f.sectionIndexes[i-1]-- + } + + occurrences++ + } + + return nil +} + +func (f *File) reload(s dataSource) error { + r, err := s.ReadCloser() + if err != nil { + return err + } + defer r.Close() + + return f.parse(r) +} + +// Reload reloads and parses all data sources. +func (f *File) Reload() (err error) { + for _, s := range f.dataSources { + if err = f.reload(s); err != nil { + // In loose mode, we create an empty default section for nonexistent files. + if os.IsNotExist(err) && f.options.Loose { + _ = f.parse(bytes.NewBuffer(nil)) + continue + } + return err + } + if f.options.ShortCircuit { + return nil + } + } + return nil +} + +// Append appends one or more data sources and reloads automatically. 
+func (f *File) Append(source interface{}, others ...interface{}) error { + ds, err := parseDataSource(source) + if err != nil { + return err + } + f.dataSources = append(f.dataSources, ds) + for _, s := range others { + ds, err = parseDataSource(s) + if err != nil { + return err + } + f.dataSources = append(f.dataSources, ds) + } + return f.Reload() +} + +func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) { + equalSign := DefaultFormatLeft + f.options.KeyValueDelimiterOnWrite + DefaultFormatRight + + if PrettyFormat || PrettyEqual { + equalSign = fmt.Sprintf(" %s ", f.options.KeyValueDelimiterOnWrite) + } + + // Use buffer to make sure target is safe until finish encoding. + buf := bytes.NewBuffer(nil) + lastSectionIdx := len(f.sectionList) - 1 + for i, sname := range f.sectionList { + sec := f.SectionWithIndex(sname, f.sectionIndexes[i]) + if len(sec.Comment) > 0 { + // Support multiline comments + lines := strings.Split(sec.Comment, LineBreak) + for i := range lines { + if lines[i][0] != '#' && lines[i][0] != ';' { + lines[i] = "; " + lines[i] + } else { + lines[i] = lines[i][:1] + " " + strings.TrimSpace(lines[i][1:]) + } + + if _, err := buf.WriteString(lines[i] + LineBreak); err != nil { + return nil, err + } + } + } + + if i > 0 || DefaultHeader || (i == 0 && strings.ToUpper(sec.name) != DefaultSection) { + if _, err := buf.WriteString("[" + sname + "]" + LineBreak); err != nil { + return nil, err + } + } else { + // Write nothing if default section is empty + if len(sec.keyList) == 0 { + continue + } + } + + isLastSection := i == lastSectionIdx + if sec.isRawSection { + if _, err := buf.WriteString(sec.rawBody); err != nil { + return nil, err + } + + if PrettySection && !isLastSection { + // Put a line between sections + if _, err := buf.WriteString(LineBreak); err != nil { + return nil, err + } + } + continue + } + + // Count and generate alignment length and buffer spaces using the + // longest key. 
Keys may be modified if they contain certain characters so + // we need to take that into account in our calculation. + alignLength := 0 + if PrettyFormat { + for _, kname := range sec.keyList { + keyLength := len(kname) + // First case will surround key by ` and second by """ + if strings.Contains(kname, "\"") || strings.ContainsAny(kname, f.options.KeyValueDelimiters) { + keyLength += 2 + } else if strings.Contains(kname, "`") { + keyLength += 6 + } + + if keyLength > alignLength { + alignLength = keyLength + } + } + } + alignSpaces := bytes.Repeat([]byte(" "), alignLength) + + KeyList: + for _, kname := range sec.keyList { + key := sec.Key(kname) + if len(key.Comment) > 0 { + if len(indent) > 0 && sname != DefaultSection { + buf.WriteString(indent) + } + + // Support multiline comments + lines := strings.Split(key.Comment, LineBreak) + for i := range lines { + if lines[i][0] != '#' && lines[i][0] != ';' { + lines[i] = "; " + strings.TrimSpace(lines[i]) + } else { + lines[i] = lines[i][:1] + " " + strings.TrimSpace(lines[i][1:]) + } + + if _, err := buf.WriteString(lines[i] + LineBreak); err != nil { + return nil, err + } + } + } + + if len(indent) > 0 && sname != DefaultSection { + buf.WriteString(indent) + } + + switch { + case key.isAutoIncrement: + kname = "-" + case strings.Contains(kname, "\"") || strings.ContainsAny(kname, f.options.KeyValueDelimiters): + kname = "`" + kname + "`" + case strings.Contains(kname, "`"): + kname = `"""` + kname + `"""` + } + + writeKeyValue := func(val string) (bool, error) { + if _, err := buf.WriteString(kname); err != nil { + return false, err + } + + if key.isBooleanType { + buf.WriteString(LineBreak) + return true, nil + } + + // Write out alignment spaces before "=" sign + if PrettyFormat { + buf.Write(alignSpaces[:alignLength-len(kname)]) + } + + // In case key value contains "\n", "`", "\"", "#" or ";" + if strings.ContainsAny(val, "\n`") { + val = `"""` + val + `"""` + } else if !f.options.IgnoreInlineComment && 
strings.ContainsAny(val, "#;") { + val = "`" + val + "`" + } else if len(strings.TrimSpace(val)) != len(val) { + val = `"` + val + `"` + } + if _, err := buf.WriteString(equalSign + val + LineBreak); err != nil { + return false, err + } + return false, nil + } + + shadows := key.ValueWithShadows() + if len(shadows) == 0 { + if _, err := writeKeyValue(""); err != nil { + return nil, err + } + } + + for _, val := range shadows { + exitLoop, err := writeKeyValue(val) + if err != nil { + return nil, err + } else if exitLoop { + continue KeyList + } + } + + for _, val := range key.nestedValues { + if _, err := buf.WriteString(indent + " " + val + LineBreak); err != nil { + return nil, err + } + } + } + + if PrettySection && !isLastSection { + // Put a line between sections + if _, err := buf.WriteString(LineBreak); err != nil { + return nil, err + } + } + } + + return buf, nil +} + +// WriteToIndent writes content into io.Writer with given indention. +// If PrettyFormat has been set to be true, +// it will align "=" sign with spaces under each section. +func (f *File) WriteToIndent(w io.Writer, indent string) (int64, error) { + buf, err := f.writeToBuffer(indent) + if err != nil { + return 0, err + } + return buf.WriteTo(w) +} + +// WriteTo writes file content into io.Writer. +func (f *File) WriteTo(w io.Writer) (int64, error) { + return f.WriteToIndent(w, "") +} + +// SaveToIndent writes content to file system with given value indention. +func (f *File) SaveToIndent(filename, indent string) error { + // Note: Because we are truncating with os.Create, + // so it's safer to save to a temporary file location and rename after done. + buf, err := f.writeToBuffer(indent) + if err != nil { + return err + } + + return ioutil.WriteFile(filename, buf.Bytes(), 0666) +} + +// SaveTo writes content to file system. 
+func (f *File) SaveTo(filename string) error { + return f.SaveToIndent(filename, "") +} diff --git a/cluster-autoscaler/vendor/gopkg.in/ini.v1/helper.go b/cluster-autoscaler/vendor/gopkg.in/ini.v1/helper.go new file mode 100644 index 000000000000..f9d80a682a55 --- /dev/null +++ b/cluster-autoscaler/vendor/gopkg.in/ini.v1/helper.go @@ -0,0 +1,24 @@ +// Copyright 2019 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +func inSlice(str string, s []string) bool { + for _, v := range s { + if str == v { + return true + } + } + return false +} diff --git a/cluster-autoscaler/vendor/gopkg.in/ini.v1/ini.go b/cluster-autoscaler/vendor/gopkg.in/ini.v1/ini.go new file mode 100644 index 000000000000..99e7f86511a4 --- /dev/null +++ b/cluster-autoscaler/vendor/gopkg.in/ini.v1/ini.go @@ -0,0 +1,176 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. 
+ +// Package ini provides INI file read and write functionality in Go. +package ini + +import ( + "os" + "regexp" + "runtime" + "strings" +) + +const ( + // Maximum allowed depth when recursively substituing variable names. + depthValues = 99 +) + +var ( + // DefaultSection is the name of default section. You can use this var or the string literal. + // In most of cases, an empty string is all you need to access the section. + DefaultSection = "DEFAULT" + + // LineBreak is the delimiter to determine or compose a new line. + // This variable will be changed to "\r\n" automatically on Windows at package init time. + LineBreak = "\n" + + // Variable regexp pattern: %(variable)s + varPattern = regexp.MustCompile(`%\(([^)]+)\)s`) + + // DefaultHeader explicitly writes default section header. + DefaultHeader = false + + // PrettySection indicates whether to put a line between sections. + PrettySection = true + // PrettyFormat indicates whether to align "=" sign with spaces to produce pretty output + // or reduce all possible spaces for compact format. + PrettyFormat = true + // PrettyEqual places spaces around "=" sign even when PrettyFormat is false. + PrettyEqual = false + // DefaultFormatLeft places custom spaces on the left when PrettyFormat and PrettyEqual are both disabled. + DefaultFormatLeft = "" + // DefaultFormatRight places custom spaces on the right when PrettyFormat and PrettyEqual are both disabled. + DefaultFormatRight = "" +) + +var inTest = len(os.Args) > 0 && strings.HasSuffix(strings.TrimSuffix(os.Args[0], ".exe"), ".test") + +func init() { + if runtime.GOOS == "windows" && !inTest { + LineBreak = "\r\n" + } +} + +// LoadOptions contains all customized options used for load data source(s). +type LoadOptions struct { + // Loose indicates whether the parser should ignore nonexistent files or return error. + Loose bool + // Insensitive indicates whether the parser forces all section and key names to lowercase. 
+ Insensitive bool + // InsensitiveSections indicates whether the parser forces all section to lowercase. + InsensitiveSections bool + // InsensitiveKeys indicates whether the parser forces all key names to lowercase. + InsensitiveKeys bool + // IgnoreContinuation indicates whether to ignore continuation lines while parsing. + IgnoreContinuation bool + // IgnoreInlineComment indicates whether to ignore comments at the end of value and treat it as part of value. + IgnoreInlineComment bool + // SkipUnrecognizableLines indicates whether to skip unrecognizable lines that do not conform to key/value pairs. + SkipUnrecognizableLines bool + // ShortCircuit indicates whether to ignore other configuration sources after loaded the first available configuration source. + ShortCircuit bool + // AllowBooleanKeys indicates whether to allow boolean type keys or treat as value is missing. + // This type of keys are mostly used in my.cnf. + AllowBooleanKeys bool + // AllowShadows indicates whether to keep track of keys with same name under same section. + AllowShadows bool + // AllowNestedValues indicates whether to allow AWS-like nested values. + // Docs: http://docs.aws.amazon.com/cli/latest/topic/config-vars.html#nested-values + AllowNestedValues bool + // AllowPythonMultilineValues indicates whether to allow Python-like multi-line values. + // Docs: https://docs.python.org/3/library/configparser.html#supported-ini-file-structure + // Relevant quote: Values can also span multiple lines, as long as they are indented deeper + // than the first line of the value. + AllowPythonMultilineValues bool + // SpaceBeforeInlineComment indicates whether to allow comment symbols (\# and \;) inside value. + // Docs: https://docs.python.org/2/library/configparser.html + // Quote: Comments may appear on their own in an otherwise empty line, or may be entered in lines holding values or section names. 
+ // In the latter case, they need to be preceded by a whitespace character to be recognized as a comment. + SpaceBeforeInlineComment bool + // UnescapeValueDoubleQuotes indicates whether to unescape double quotes inside value to regular format + // when value is surrounded by double quotes, e.g. key="a \"value\"" => key=a "value" + UnescapeValueDoubleQuotes bool + // UnescapeValueCommentSymbols indicates to unescape comment symbols (\# and \;) inside value to regular format + // when value is NOT surrounded by any quotes. + // Note: UNSTABLE, behavior might change to only unescape inside double quotes but may noy necessary at all. + UnescapeValueCommentSymbols bool + // UnparseableSections stores a list of blocks that are allowed with raw content which do not otherwise + // conform to key/value pairs. Specify the names of those blocks here. + UnparseableSections []string + // KeyValueDelimiters is the sequence of delimiters that are used to separate key and value. By default, it is "=:". + KeyValueDelimiters string + // KeyValueDelimiterOnWrite is the delimiter that are used to separate key and value output. By default, it is "=". + KeyValueDelimiterOnWrite string + // ChildSectionDelimiter is the delimiter that is used to separate child sections. By default, it is ".". + ChildSectionDelimiter string + // PreserveSurroundedQuote indicates whether to preserve surrounded quote (single and double quotes). + PreserveSurroundedQuote bool + // DebugFunc is called to collect debug information (currently only useful to debug parsing Python-style multiline values). + DebugFunc DebugFunc + // ReaderBufferSize is the buffer size of the reader in bytes. + ReaderBufferSize int + // AllowNonUniqueSections indicates whether to allow sections with the same name multiple times. + AllowNonUniqueSections bool + // AllowDuplicateShadowValues indicates whether values for shadowed keys should be deduplicated. 
+ AllowDuplicateShadowValues bool +} + +// DebugFunc is the type of function called to log parse events. +type DebugFunc func(message string) + +// LoadSources allows caller to apply customized options for loading from data source(s). +func LoadSources(opts LoadOptions, source interface{}, others ...interface{}) (_ *File, err error) { + sources := make([]dataSource, len(others)+1) + sources[0], err = parseDataSource(source) + if err != nil { + return nil, err + } + for i := range others { + sources[i+1], err = parseDataSource(others[i]) + if err != nil { + return nil, err + } + } + f := newFile(sources, opts) + if err = f.Reload(); err != nil { + return nil, err + } + return f, nil +} + +// Load loads and parses from INI data sources. +// Arguments can be mixed of file name with string type, or raw data in []byte. +// It will return error if list contains nonexistent files. +func Load(source interface{}, others ...interface{}) (*File, error) { + return LoadSources(LoadOptions{}, source, others...) +} + +// LooseLoad has exactly same functionality as Load function +// except it ignores nonexistent files instead of returning error. +func LooseLoad(source interface{}, others ...interface{}) (*File, error) { + return LoadSources(LoadOptions{Loose: true}, source, others...) +} + +// InsensitiveLoad has exactly same functionality as Load function +// except it forces all section and key names to be lowercased. +func InsensitiveLoad(source interface{}, others ...interface{}) (*File, error) { + return LoadSources(LoadOptions{Insensitive: true}, source, others...) +} + +// ShadowLoad has exactly same functionality as Load function +// except it allows have shadow keys. +func ShadowLoad(source interface{}, others ...interface{}) (*File, error) { + return LoadSources(LoadOptions{AllowShadows: true}, source, others...) 
+} diff --git a/cluster-autoscaler/vendor/gopkg.in/ini.v1/key.go b/cluster-autoscaler/vendor/gopkg.in/ini.v1/key.go new file mode 100644 index 000000000000..a19d9f38ef14 --- /dev/null +++ b/cluster-autoscaler/vendor/gopkg.in/ini.v1/key.go @@ -0,0 +1,837 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "bytes" + "errors" + "fmt" + "strconv" + "strings" + "time" +) + +// Key represents a key under a section. +type Key struct { + s *Section + Comment string + name string + value string + isAutoIncrement bool + isBooleanType bool + + isShadow bool + shadows []*Key + + nestedValues []string +} + +// newKey simply return a key object with given values. +func newKey(s *Section, name, val string) *Key { + return &Key{ + s: s, + name: name, + value: val, + } +} + +func (k *Key) addShadow(val string) error { + if k.isShadow { + return errors.New("cannot add shadow to another shadow key") + } else if k.isAutoIncrement || k.isBooleanType { + return errors.New("cannot add shadow to auto-increment or boolean key") + } + + if !k.s.f.options.AllowDuplicateShadowValues { + // Deduplicate shadows based on their values. + if k.value == val { + return nil + } + for i := range k.shadows { + if k.shadows[i].value == val { + return nil + } + } + } + + shadow := newKey(k.s, k.name, val) + shadow.isShadow = true + k.shadows = append(k.shadows, shadow) + return nil +} + +// AddShadow adds a new shadow key to itself. 
+func (k *Key) AddShadow(val string) error { + if !k.s.f.options.AllowShadows { + return errors.New("shadow key is not allowed") + } + return k.addShadow(val) +} + +func (k *Key) addNestedValue(val string) error { + if k.isAutoIncrement || k.isBooleanType { + return errors.New("cannot add nested value to auto-increment or boolean key") + } + + k.nestedValues = append(k.nestedValues, val) + return nil +} + +// AddNestedValue adds a nested value to the key. +func (k *Key) AddNestedValue(val string) error { + if !k.s.f.options.AllowNestedValues { + return errors.New("nested value is not allowed") + } + return k.addNestedValue(val) +} + +// ValueMapper represents a mapping function for values, e.g. os.ExpandEnv +type ValueMapper func(string) string + +// Name returns name of key. +func (k *Key) Name() string { + return k.name +} + +// Value returns raw value of key for performance purpose. +func (k *Key) Value() string { + return k.value +} + +// ValueWithShadows returns raw values of key and its shadows if any. Shadow +// keys with empty values are ignored from the returned list. +func (k *Key) ValueWithShadows() []string { + if len(k.shadows) == 0 { + if k.value == "" { + return []string{} + } + return []string{k.value} + } + + vals := make([]string, 0, len(k.shadows)+1) + if k.value != "" { + vals = append(vals, k.value) + } + for _, s := range k.shadows { + if s.value != "" { + vals = append(vals, s.value) + } + } + return vals +} + +// NestedValues returns nested values stored in the key. +// It is possible returned value is nil if no nested values stored in the key. +func (k *Key) NestedValues() []string { + return k.nestedValues +} + +// transformValue takes a raw value and transforms to its final string. 
+func (k *Key) transformValue(val string) string { + if k.s.f.ValueMapper != nil { + val = k.s.f.ValueMapper(val) + } + + // Fail-fast if no indicate char found for recursive value + if !strings.Contains(val, "%") { + return val + } + for i := 0; i < depthValues; i++ { + vr := varPattern.FindString(val) + if len(vr) == 0 { + break + } + + // Take off leading '%(' and trailing ')s'. + noption := vr[2 : len(vr)-2] + + // Search in the same section. + // If not found or found the key itself, then search again in default section. + nk, err := k.s.GetKey(noption) + if err != nil || k == nk { + nk, _ = k.s.f.Section("").GetKey(noption) + if nk == nil { + // Stop when no results found in the default section, + // and returns the value as-is. + break + } + } + + // Substitute by new value and take off leading '%(' and trailing ')s'. + val = strings.Replace(val, vr, nk.value, -1) + } + return val +} + +// String returns string representation of value. +func (k *Key) String() string { + return k.transformValue(k.value) +} + +// Validate accepts a validate function which can +// return modifed result as key value. +func (k *Key) Validate(fn func(string) string) string { + return fn(k.String()) +} + +// parseBool returns the boolean value represented by the string. +// +// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On, +// 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off. +// Any other value returns an error. +func parseBool(str string) (value bool, err error) { + switch str { + case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "y", "ON", "on", "On": + return true, nil + case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "n", "OFF", "off", "Off": + return false, nil + } + return false, fmt.Errorf("parsing \"%s\": invalid syntax", str) +} + +// Bool returns bool type value. +func (k *Key) Bool() (bool, error) { + return parseBool(k.String()) +} + +// Float64 returns float64 type value. 
+func (k *Key) Float64() (float64, error) { + return strconv.ParseFloat(k.String(), 64) +} + +// Int returns int type value. +func (k *Key) Int() (int, error) { + v, err := strconv.ParseInt(k.String(), 0, 64) + return int(v), err +} + +// Int64 returns int64 type value. +func (k *Key) Int64() (int64, error) { + return strconv.ParseInt(k.String(), 0, 64) +} + +// Uint returns uint type valued. +func (k *Key) Uint() (uint, error) { + u, e := strconv.ParseUint(k.String(), 0, 64) + return uint(u), e +} + +// Uint64 returns uint64 type value. +func (k *Key) Uint64() (uint64, error) { + return strconv.ParseUint(k.String(), 0, 64) +} + +// Duration returns time.Duration type value. +func (k *Key) Duration() (time.Duration, error) { + return time.ParseDuration(k.String()) +} + +// TimeFormat parses with given format and returns time.Time type value. +func (k *Key) TimeFormat(format string) (time.Time, error) { + return time.Parse(format, k.String()) +} + +// Time parses with RFC3339 format and returns time.Time type value. +func (k *Key) Time() (time.Time, error) { + return k.TimeFormat(time.RFC3339) +} + +// MustString returns default value if key value is empty. +func (k *Key) MustString(defaultVal string) string { + val := k.String() + if len(val) == 0 { + k.value = defaultVal + return defaultVal + } + return val +} + +// MustBool always returns value without error, +// it returns false if error occurs. +func (k *Key) MustBool(defaultVal ...bool) bool { + val, err := k.Bool() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatBool(defaultVal[0]) + return defaultVal[0] + } + return val +} + +// MustFloat64 always returns value without error, +// it returns 0.0 if error occurs. 
+func (k *Key) MustFloat64(defaultVal ...float64) float64 { + val, err := k.Float64() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatFloat(defaultVal[0], 'f', -1, 64) + return defaultVal[0] + } + return val +} + +// MustInt always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustInt(defaultVal ...int) int { + val, err := k.Int() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatInt(int64(defaultVal[0]), 10) + return defaultVal[0] + } + return val +} + +// MustInt64 always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustInt64(defaultVal ...int64) int64 { + val, err := k.Int64() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatInt(defaultVal[0], 10) + return defaultVal[0] + } + return val +} + +// MustUint always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustUint(defaultVal ...uint) uint { + val, err := k.Uint() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatUint(uint64(defaultVal[0]), 10) + return defaultVal[0] + } + return val +} + +// MustUint64 always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustUint64(defaultVal ...uint64) uint64 { + val, err := k.Uint64() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatUint(defaultVal[0], 10) + return defaultVal[0] + } + return val +} + +// MustDuration always returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration { + val, err := k.Duration() + if len(defaultVal) > 0 && err != nil { + k.value = defaultVal[0].String() + return defaultVal[0] + } + return val +} + +// MustTimeFormat always parses with given format and returns value without error, +// it returns zero value if error occurs. 
+func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time { + val, err := k.TimeFormat(format) + if len(defaultVal) > 0 && err != nil { + k.value = defaultVal[0].Format(format) + return defaultVal[0] + } + return val +} + +// MustTime always parses with RFC3339 format and returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustTime(defaultVal ...time.Time) time.Time { + return k.MustTimeFormat(time.RFC3339, defaultVal...) +} + +// In always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) In(defaultVal string, candidates []string) string { + val := k.String() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InFloat64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 { + val := k.MustFloat64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InInt always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InInt(defaultVal int, candidates []int) int { + val := k.MustInt() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InInt64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 { + val := k.MustInt64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InUint always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. 
+func (k *Key) InUint(defaultVal uint, candidates []uint) uint { + val := k.MustUint() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InUint64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 { + val := k.MustUint64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InTimeFormat always parses with given format and returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time { + val := k.MustTimeFormat(format) + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InTime always parses with RFC3339 format and returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time { + return k.InTimeFormat(time.RFC3339, defaultVal, candidates) +} + +// RangeFloat64 checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 { + val := k.MustFloat64() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeInt checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeInt(defaultVal, min, max int) int { + val := k.MustInt() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeInt64 checks if value is in given range inclusively, +// and returns default value if it's not. 
+func (k *Key) RangeInt64(defaultVal, min, max int64) int64 { + val := k.MustInt64() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeTimeFormat checks if value with given format is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time { + val := k.MustTimeFormat(format) + if val.Unix() < min.Unix() || val.Unix() > max.Unix() { + return defaultVal + } + return val +} + +// RangeTime checks if value with RFC3339 format is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time { + return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max) +} + +// Strings returns list of string divided by given delimiter. +func (k *Key) Strings(delim string) []string { + str := k.String() + if len(str) == 0 { + return []string{} + } + + runes := []rune(str) + vals := make([]string, 0, 2) + var buf bytes.Buffer + escape := false + idx := 0 + for { + if escape { + escape = false + if runes[idx] != '\\' && !strings.HasPrefix(string(runes[idx:]), delim) { + buf.WriteRune('\\') + } + buf.WriteRune(runes[idx]) + } else { + if runes[idx] == '\\' { + escape = true + } else if strings.HasPrefix(string(runes[idx:]), delim) { + idx += len(delim) - 1 + vals = append(vals, strings.TrimSpace(buf.String())) + buf.Reset() + } else { + buf.WriteRune(runes[idx]) + } + } + idx++ + if idx == len(runes) { + break + } + } + + if buf.Len() > 0 { + vals = append(vals, strings.TrimSpace(buf.String())) + } + + return vals +} + +// StringsWithShadows returns list of string divided by given delimiter. +// Shadows will also be appended if any. 
+func (k *Key) StringsWithShadows(delim string) []string {
+	vals := k.ValueWithShadows()
+	results := make([]string, 0, len(vals)*2)
+	for i := range vals {
+		if len(vals) == 0 { // NOTE(review): dead check — len(vals) is loop-invariant and non-zero whenever this body runs; upstream presumably meant len(vals[i]) == 0 to skip empty shadow values. Left byte-identical to stay in sync with upstream vendored code.
+			continue
+		}
+
+		results = append(results, strings.Split(vals[i], delim)...)
+	}
+
+	for i := range results {
+		results[i] = k.transformValue(strings.TrimSpace(results[i]))
+	}
+	return results
+}
+
+// Float64s returns list of float64 divided by given delimiter. Any invalid input will be treated as zero value.
+func (k *Key) Float64s(delim string) []float64 {
+	vals, _ := k.parseFloat64s(k.Strings(delim), true, false)
+	return vals
+}
+
+// Ints returns list of int divided by given delimiter. Any invalid input will be treated as zero value.
+func (k *Key) Ints(delim string) []int {
+	vals, _ := k.parseInts(k.Strings(delim), true, false)
+	return vals
+}
+
+// Int64s returns list of int64 divided by given delimiter. Any invalid input will be treated as zero value.
+func (k *Key) Int64s(delim string) []int64 {
+	vals, _ := k.parseInt64s(k.Strings(delim), true, false)
+	return vals
+}
+
+// Uints returns list of uint divided by given delimiter. Any invalid input will be treated as zero value.
+func (k *Key) Uints(delim string) []uint {
+	vals, _ := k.parseUints(k.Strings(delim), true, false)
+	return vals
+}
+
+// Uint64s returns list of uint64 divided by given delimiter. Any invalid input will be treated as zero value.
+func (k *Key) Uint64s(delim string) []uint64 {
+	vals, _ := k.parseUint64s(k.Strings(delim), true, false)
+	return vals
+}
+
+// Bools returns list of bool divided by given delimiter. Any invalid input will be treated as zero value.
+func (k *Key) Bools(delim string) []bool {
+	vals, _ := k.parseBools(k.Strings(delim), true, false)
+	return vals
+}
+
+// TimesFormat parses with given format and returns list of time.Time divided by given delimiter.
+// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC).
+func (k *Key) TimesFormat(format, delim string) []time.Time { + vals, _ := k.parseTimesFormat(format, k.Strings(delim), true, false) + return vals +} + +// Times parses with RFC3339 format and returns list of time.Time divided by given delimiter. +// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC). +func (k *Key) Times(delim string) []time.Time { + return k.TimesFormat(time.RFC3339, delim) +} + +// ValidFloat64s returns list of float64 divided by given delimiter. If some value is not float, then +// it will not be included to result list. +func (k *Key) ValidFloat64s(delim string) []float64 { + vals, _ := k.parseFloat64s(k.Strings(delim), false, false) + return vals +} + +// ValidInts returns list of int divided by given delimiter. If some value is not integer, then it will +// not be included to result list. +func (k *Key) ValidInts(delim string) []int { + vals, _ := k.parseInts(k.Strings(delim), false, false) + return vals +} + +// ValidInt64s returns list of int64 divided by given delimiter. If some value is not 64-bit integer, +// then it will not be included to result list. +func (k *Key) ValidInt64s(delim string) []int64 { + vals, _ := k.parseInt64s(k.Strings(delim), false, false) + return vals +} + +// ValidUints returns list of uint divided by given delimiter. If some value is not unsigned integer, +// then it will not be included to result list. +func (k *Key) ValidUints(delim string) []uint { + vals, _ := k.parseUints(k.Strings(delim), false, false) + return vals +} + +// ValidUint64s returns list of uint64 divided by given delimiter. If some value is not 64-bit unsigned +// integer, then it will not be included to result list. +func (k *Key) ValidUint64s(delim string) []uint64 { + vals, _ := k.parseUint64s(k.Strings(delim), false, false) + return vals +} + +// ValidBools returns list of bool divided by given delimiter. If some value is not 64-bit unsigned +// integer, then it will not be included to result list. 
+func (k *Key) ValidBools(delim string) []bool { + vals, _ := k.parseBools(k.Strings(delim), false, false) + return vals +} + +// ValidTimesFormat parses with given format and returns list of time.Time divided by given delimiter. +func (k *Key) ValidTimesFormat(format, delim string) []time.Time { + vals, _ := k.parseTimesFormat(format, k.Strings(delim), false, false) + return vals +} + +// ValidTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter. +func (k *Key) ValidTimes(delim string) []time.Time { + return k.ValidTimesFormat(time.RFC3339, delim) +} + +// StrictFloat64s returns list of float64 divided by given delimiter or error on first invalid input. +func (k *Key) StrictFloat64s(delim string) ([]float64, error) { + return k.parseFloat64s(k.Strings(delim), false, true) +} + +// StrictInts returns list of int divided by given delimiter or error on first invalid input. +func (k *Key) StrictInts(delim string) ([]int, error) { + return k.parseInts(k.Strings(delim), false, true) +} + +// StrictInt64s returns list of int64 divided by given delimiter or error on first invalid input. +func (k *Key) StrictInt64s(delim string) ([]int64, error) { + return k.parseInt64s(k.Strings(delim), false, true) +} + +// StrictUints returns list of uint divided by given delimiter or error on first invalid input. +func (k *Key) StrictUints(delim string) ([]uint, error) { + return k.parseUints(k.Strings(delim), false, true) +} + +// StrictUint64s returns list of uint64 divided by given delimiter or error on first invalid input. +func (k *Key) StrictUint64s(delim string) ([]uint64, error) { + return k.parseUint64s(k.Strings(delim), false, true) +} + +// StrictBools returns list of bool divided by given delimiter or error on first invalid input. 
+func (k *Key) StrictBools(delim string) ([]bool, error) { + return k.parseBools(k.Strings(delim), false, true) +} + +// StrictTimesFormat parses with given format and returns list of time.Time divided by given delimiter +// or error on first invalid input. +func (k *Key) StrictTimesFormat(format, delim string) ([]time.Time, error) { + return k.parseTimesFormat(format, k.Strings(delim), false, true) +} + +// StrictTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter +// or error on first invalid input. +func (k *Key) StrictTimes(delim string) ([]time.Time, error) { + return k.StrictTimesFormat(time.RFC3339, delim) +} + +// parseBools transforms strings to bools. +func (k *Key) parseBools(strs []string, addInvalid, returnOnInvalid bool) ([]bool, error) { + vals := make([]bool, 0, len(strs)) + parser := func(str string) (interface{}, error) { + val, err := parseBool(str) + return val, err + } + rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) + if err == nil { + for _, val := range rawVals { + vals = append(vals, val.(bool)) + } + } + return vals, err +} + +// parseFloat64s transforms strings to float64s. +func (k *Key) parseFloat64s(strs []string, addInvalid, returnOnInvalid bool) ([]float64, error) { + vals := make([]float64, 0, len(strs)) + parser := func(str string) (interface{}, error) { + val, err := strconv.ParseFloat(str, 64) + return val, err + } + rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) + if err == nil { + for _, val := range rawVals { + vals = append(vals, val.(float64)) + } + } + return vals, err +} + +// parseInts transforms strings to ints. 
+func (k *Key) parseInts(strs []string, addInvalid, returnOnInvalid bool) ([]int, error) { + vals := make([]int, 0, len(strs)) + parser := func(str string) (interface{}, error) { + val, err := strconv.ParseInt(str, 0, 64) + return val, err + } + rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) + if err == nil { + for _, val := range rawVals { + vals = append(vals, int(val.(int64))) + } + } + return vals, err +} + +// parseInt64s transforms strings to int64s. +func (k *Key) parseInt64s(strs []string, addInvalid, returnOnInvalid bool) ([]int64, error) { + vals := make([]int64, 0, len(strs)) + parser := func(str string) (interface{}, error) { + val, err := strconv.ParseInt(str, 0, 64) + return val, err + } + + rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) + if err == nil { + for _, val := range rawVals { + vals = append(vals, val.(int64)) + } + } + return vals, err +} + +// parseUints transforms strings to uints. +func (k *Key) parseUints(strs []string, addInvalid, returnOnInvalid bool) ([]uint, error) { + vals := make([]uint, 0, len(strs)) + parser := func(str string) (interface{}, error) { + val, err := strconv.ParseUint(str, 0, 64) + return val, err + } + + rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) + if err == nil { + for _, val := range rawVals { + vals = append(vals, uint(val.(uint64))) + } + } + return vals, err +} + +// parseUint64s transforms strings to uint64s. 
+func (k *Key) parseUint64s(strs []string, addInvalid, returnOnInvalid bool) ([]uint64, error) { + vals := make([]uint64, 0, len(strs)) + parser := func(str string) (interface{}, error) { + val, err := strconv.ParseUint(str, 0, 64) + return val, err + } + rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) + if err == nil { + for _, val := range rawVals { + vals = append(vals, val.(uint64)) + } + } + return vals, err +} + +type Parser func(str string) (interface{}, error) + +// parseTimesFormat transforms strings to times in given format. +func (k *Key) parseTimesFormat(format string, strs []string, addInvalid, returnOnInvalid bool) ([]time.Time, error) { + vals := make([]time.Time, 0, len(strs)) + parser := func(str string) (interface{}, error) { + val, err := time.Parse(format, str) + return val, err + } + rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) + if err == nil { + for _, val := range rawVals { + vals = append(vals, val.(time.Time)) + } + } + return vals, err +} + +// doParse transforms strings to different types +func (k *Key) doParse(strs []string, addInvalid, returnOnInvalid bool, parser Parser) ([]interface{}, error) { + vals := make([]interface{}, 0, len(strs)) + for _, str := range strs { + val, err := parser(str) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// SetValue changes key value. 
+func (k *Key) SetValue(v string) {
+	if k.s.f.BlockMode { // BlockMode guards concurrent access: take the file-wide write lock, as the other mutators in this package do
+		k.s.f.lock.Lock()
+		defer k.s.f.lock.Unlock()
+	}
+
+	k.value = v
+	k.s.keysHash[k.name] = v // keep the section's name->value hash in sync with the key itself
+}
diff --git a/cluster-autoscaler/vendor/gopkg.in/ini.v1/parser.go b/cluster-autoscaler/vendor/gopkg.in/ini.v1/parser.go
new file mode 100644
index 000000000000..44fc526c2cb6
--- /dev/null
+++ b/cluster-autoscaler/vendor/gopkg.in/ini.v1/parser.go
@@ -0,0 +1,520 @@
+// Copyright 2015 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+ +package ini + +import ( + "bufio" + "bytes" + "fmt" + "io" + "regexp" + "strconv" + "strings" + "unicode" +) + +const minReaderBufferSize = 4096 + +var pythonMultiline = regexp.MustCompile(`^([\t\f ]+)(.*)`) + +type parserOptions struct { + IgnoreContinuation bool + IgnoreInlineComment bool + AllowPythonMultilineValues bool + SpaceBeforeInlineComment bool + UnescapeValueDoubleQuotes bool + UnescapeValueCommentSymbols bool + PreserveSurroundedQuote bool + DebugFunc DebugFunc + ReaderBufferSize int +} + +type parser struct { + buf *bufio.Reader + options parserOptions + + isEOF bool + count int + comment *bytes.Buffer +} + +func (p *parser) debug(format string, args ...interface{}) { + if p.options.DebugFunc != nil { + p.options.DebugFunc(fmt.Sprintf(format, args...)) + } +} + +func newParser(r io.Reader, opts parserOptions) *parser { + size := opts.ReaderBufferSize + if size < minReaderBufferSize { + size = minReaderBufferSize + } + + return &parser{ + buf: bufio.NewReaderSize(r, size), + options: opts, + count: 1, + comment: &bytes.Buffer{}, + } +} + +// BOM handles header of UTF-8, UTF-16 LE and UTF-16 BE's BOM format. 
+// http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding +func (p *parser) BOM() error { + mask, err := p.buf.Peek(2) + if err != nil && err != io.EOF { + return err + } else if len(mask) < 2 { + return nil + } + + switch { + case mask[0] == 254 && mask[1] == 255: + fallthrough + case mask[0] == 255 && mask[1] == 254: + _, err = p.buf.Read(mask) + if err != nil { + return err + } + case mask[0] == 239 && mask[1] == 187: + mask, err := p.buf.Peek(3) + if err != nil && err != io.EOF { + return err + } else if len(mask) < 3 { + return nil + } + if mask[2] == 191 { + _, err = p.buf.Read(mask) + if err != nil { + return err + } + } + } + return nil +} + +func (p *parser) readUntil(delim byte) ([]byte, error) { + data, err := p.buf.ReadBytes(delim) + if err != nil { + if err == io.EOF { + p.isEOF = true + } else { + return nil, err + } + } + return data, nil +} + +func cleanComment(in []byte) ([]byte, bool) { + i := bytes.IndexAny(in, "#;") + if i == -1 { + return nil, false + } + return in[i:], true +} + +func readKeyName(delimiters string, in []byte) (string, int, error) { + line := string(in) + + // Check if key name surrounded by quotes. 
+ var keyQuote string + if line[0] == '"' { + if len(line) > 6 && line[0:3] == `"""` { + keyQuote = `"""` + } else { + keyQuote = `"` + } + } else if line[0] == '`' { + keyQuote = "`" + } + + // Get out key name + var endIdx int + if len(keyQuote) > 0 { + startIdx := len(keyQuote) + // FIXME: fail case -> """"""name"""=value + pos := strings.Index(line[startIdx:], keyQuote) + if pos == -1 { + return "", -1, fmt.Errorf("missing closing key quote: %s", line) + } + pos += startIdx + + // Find key-value delimiter + i := strings.IndexAny(line[pos+startIdx:], delimiters) + if i < 0 { + return "", -1, ErrDelimiterNotFound{line} + } + endIdx = pos + i + return strings.TrimSpace(line[startIdx:pos]), endIdx + startIdx + 1, nil + } + + endIdx = strings.IndexAny(line, delimiters) + if endIdx < 0 { + return "", -1, ErrDelimiterNotFound{line} + } + if endIdx == 0 { + return "", -1, ErrEmptyKeyName{line} + } + + return strings.TrimSpace(line[0:endIdx]), endIdx + 1, nil +} + +func (p *parser) readMultilines(line, val, valQuote string) (string, error) { + for { + data, err := p.readUntil('\n') + if err != nil { + return "", err + } + next := string(data) + + pos := strings.LastIndex(next, valQuote) + if pos > -1 { + val += next[:pos] + + comment, has := cleanComment([]byte(next[pos:])) + if has { + p.comment.Write(bytes.TrimSpace(comment)) + } + break + } + val += next + if p.isEOF { + return "", fmt.Errorf("missing closing key quote from %q to %q", line, next) + } + } + return val, nil +} + +func (p *parser) readContinuationLines(val string) (string, error) { + for { + data, err := p.readUntil('\n') + if err != nil { + return "", err + } + next := strings.TrimSpace(string(data)) + + if len(next) == 0 { + break + } + val += next + if val[len(val)-1] != '\\' { + break + } + val = val[:len(val)-1] + } + return val, nil +} + +// hasSurroundedQuote check if and only if the first and last characters +// are quotes \" or \'. 
+// It returns false if any other parts also contain same kind of quotes. +func hasSurroundedQuote(in string, quote byte) bool { + return len(in) >= 2 && in[0] == quote && in[len(in)-1] == quote && + strings.IndexByte(in[1:], quote) == len(in)-2 +} + +func (p *parser) readValue(in []byte, bufferSize int) (string, error) { + + line := strings.TrimLeftFunc(string(in), unicode.IsSpace) + if len(line) == 0 { + if p.options.AllowPythonMultilineValues && len(in) > 0 && in[len(in)-1] == '\n' { + return p.readPythonMultilines(line, bufferSize) + } + return "", nil + } + + var valQuote string + if len(line) > 3 && line[0:3] == `"""` { + valQuote = `"""` + } else if line[0] == '`' { + valQuote = "`" + } else if p.options.UnescapeValueDoubleQuotes && line[0] == '"' { + valQuote = `"` + } + + if len(valQuote) > 0 { + startIdx := len(valQuote) + pos := strings.LastIndex(line[startIdx:], valQuote) + // Check for multi-line value + if pos == -1 { + return p.readMultilines(line, line[startIdx:], valQuote) + } + + if p.options.UnescapeValueDoubleQuotes && valQuote == `"` { + return strings.Replace(line[startIdx:pos+startIdx], `\"`, `"`, -1), nil + } + return line[startIdx : pos+startIdx], nil + } + + lastChar := line[len(line)-1] + // Won't be able to reach here if value only contains whitespace + line = strings.TrimSpace(line) + trimmedLastChar := line[len(line)-1] + + // Check continuation lines when desired + if !p.options.IgnoreContinuation && trimmedLastChar == '\\' { + return p.readContinuationLines(line[:len(line)-1]) + } + + // Check if ignore inline comment + if !p.options.IgnoreInlineComment { + var i int + if p.options.SpaceBeforeInlineComment { + i = strings.Index(line, " #") + if i == -1 { + i = strings.Index(line, " ;") + } + + } else { + i = strings.IndexAny(line, "#;") + } + + if i > -1 { + p.comment.WriteString(line[i:]) + line = strings.TrimSpace(line[:i]) + } + + } + + // Trim single and double quotes + if (hasSurroundedQuote(line, '\'') || + 
hasSurroundedQuote(line, '"')) && !p.options.PreserveSurroundedQuote { + line = line[1 : len(line)-1] + } else if len(valQuote) == 0 && p.options.UnescapeValueCommentSymbols { + line = strings.ReplaceAll(line, `\;`, ";") + line = strings.ReplaceAll(line, `\#`, "#") + } else if p.options.AllowPythonMultilineValues && lastChar == '\n' { + return p.readPythonMultilines(line, bufferSize) + } + + return line, nil +} + +func (p *parser) readPythonMultilines(line string, bufferSize int) (string, error) { + parserBufferPeekResult, _ := p.buf.Peek(bufferSize) + peekBuffer := bytes.NewBuffer(parserBufferPeekResult) + + for { + peekData, peekErr := peekBuffer.ReadBytes('\n') + if peekErr != nil && peekErr != io.EOF { + p.debug("readPythonMultilines: failed to peek with error: %v", peekErr) + return "", peekErr + } + + p.debug("readPythonMultilines: parsing %q", string(peekData)) + + peekMatches := pythonMultiline.FindStringSubmatch(string(peekData)) + p.debug("readPythonMultilines: matched %d parts", len(peekMatches)) + for n, v := range peekMatches { + p.debug(" %d: %q", n, v) + } + + // Return if not a Python multiline value. + if len(peekMatches) != 3 { + p.debug("readPythonMultilines: end of value, got: %q", line) + return line, nil + } + + // Advance the parser reader (buffer) in-sync with the peek buffer. + _, err := p.buf.Discard(len(peekData)) + if err != nil { + p.debug("readPythonMultilines: failed to skip to the end, returning error") + return "", err + } + + line += "\n" + peekMatches[0] + } +} + +// parse parses data through an io.Reader. 
+func (f *File) parse(reader io.Reader) (err error) { + p := newParser(reader, parserOptions{ + IgnoreContinuation: f.options.IgnoreContinuation, + IgnoreInlineComment: f.options.IgnoreInlineComment, + AllowPythonMultilineValues: f.options.AllowPythonMultilineValues, + SpaceBeforeInlineComment: f.options.SpaceBeforeInlineComment, + UnescapeValueDoubleQuotes: f.options.UnescapeValueDoubleQuotes, + UnescapeValueCommentSymbols: f.options.UnescapeValueCommentSymbols, + PreserveSurroundedQuote: f.options.PreserveSurroundedQuote, + DebugFunc: f.options.DebugFunc, + ReaderBufferSize: f.options.ReaderBufferSize, + }) + if err = p.BOM(); err != nil { + return fmt.Errorf("BOM: %v", err) + } + + // Ignore error because default section name is never empty string. + name := DefaultSection + if f.options.Insensitive || f.options.InsensitiveSections { + name = strings.ToLower(DefaultSection) + } + section, _ := f.NewSection(name) + + // This "last" is not strictly equivalent to "previous one" if current key is not the first nested key + var isLastValueEmpty bool + var lastRegularKey *Key + + var line []byte + var inUnparseableSection bool + + // NOTE: Iterate and increase `currentPeekSize` until + // the size of the parser buffer is found. + // TODO(unknwon): When Golang 1.10 is the lowest version supported, replace with `parserBufferSize := p.buf.Size()`. + parserBufferSize := 0 + // NOTE: Peek 4kb at a time. 
+ currentPeekSize := minReaderBufferSize + + if f.options.AllowPythonMultilineValues { + for { + peekBytes, _ := p.buf.Peek(currentPeekSize) + peekBytesLength := len(peekBytes) + + if parserBufferSize >= peekBytesLength { + break + } + + currentPeekSize *= 2 + parserBufferSize = peekBytesLength + } + } + + for !p.isEOF { + line, err = p.readUntil('\n') + if err != nil { + return err + } + + if f.options.AllowNestedValues && + isLastValueEmpty && len(line) > 0 { + if line[0] == ' ' || line[0] == '\t' { + err = lastRegularKey.addNestedValue(string(bytes.TrimSpace(line))) + if err != nil { + return err + } + continue + } + } + + line = bytes.TrimLeftFunc(line, unicode.IsSpace) + if len(line) == 0 { + continue + } + + // Comments + if line[0] == '#' || line[0] == ';' { + // Note: we do not care ending line break, + // it is needed for adding second line, + // so just clean it once at the end when set to value. + p.comment.Write(line) + continue + } + + // Section + if line[0] == '[' { + // Read to the next ']' (TODO: support quoted strings) + closeIdx := bytes.LastIndexByte(line, ']') + if closeIdx == -1 { + return fmt.Errorf("unclosed section: %s", line) + } + + name := string(line[1:closeIdx]) + section, err = f.NewSection(name) + if err != nil { + return err + } + + comment, has := cleanComment(line[closeIdx+1:]) + if has { + p.comment.Write(comment) + } + + section.Comment = strings.TrimSpace(p.comment.String()) + + // Reset auto-counter and comments + p.comment.Reset() + p.count = 1 + // Nested values can't span sections + isLastValueEmpty = false + + inUnparseableSection = false + for i := range f.options.UnparseableSections { + if f.options.UnparseableSections[i] == name || + ((f.options.Insensitive || f.options.InsensitiveSections) && strings.EqualFold(f.options.UnparseableSections[i], name)) { + inUnparseableSection = true + continue + } + } + continue + } + + if inUnparseableSection { + section.isRawSection = true + section.rawBody += string(line) + continue 
+ } + + kname, offset, err := readKeyName(f.options.KeyValueDelimiters, line) + if err != nil { + switch { + // Treat as boolean key when desired, and whole line is key name. + case IsErrDelimiterNotFound(err): + switch { + case f.options.AllowBooleanKeys: + kname, err := p.readValue(line, parserBufferSize) + if err != nil { + return err + } + key, err := section.NewBooleanKey(kname) + if err != nil { + return err + } + key.Comment = strings.TrimSpace(p.comment.String()) + p.comment.Reset() + continue + + case f.options.SkipUnrecognizableLines: + continue + } + case IsErrEmptyKeyName(err) && f.options.SkipUnrecognizableLines: + continue + } + return err + } + + // Auto increment. + isAutoIncr := false + if kname == "-" { + isAutoIncr = true + kname = "#" + strconv.Itoa(p.count) + p.count++ + } + + value, err := p.readValue(line[offset:], parserBufferSize) + if err != nil { + return err + } + isLastValueEmpty = len(value) == 0 + + key, err := section.NewKey(kname, value) + if err != nil { + return err + } + key.isAutoIncrement = isAutoIncr + key.Comment = strings.TrimSpace(p.comment.String()) + p.comment.Reset() + lastRegularKey = key + } + return nil +} diff --git a/cluster-autoscaler/vendor/gopkg.in/ini.v1/section.go b/cluster-autoscaler/vendor/gopkg.in/ini.v1/section.go new file mode 100644 index 000000000000..a3615d820b7a --- /dev/null +++ b/cluster-autoscaler/vendor/gopkg.in/ini.v1/section.go @@ -0,0 +1,256 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "errors" + "fmt" + "strings" +) + +// Section represents a config section. +type Section struct { + f *File + Comment string + name string + keys map[string]*Key + keyList []string + keysHash map[string]string + + isRawSection bool + rawBody string +} + +func newSection(f *File, name string) *Section { + return &Section{ + f: f, + name: name, + keys: make(map[string]*Key), + keyList: make([]string, 0, 10), + keysHash: make(map[string]string), + } +} + +// Name returns name of Section. +func (s *Section) Name() string { + return s.name +} + +// Body returns rawBody of Section if the section was marked as unparseable. +// It still follows the other rules of the INI format surrounding leading/trailing whitespace. +func (s *Section) Body() string { + return strings.TrimSpace(s.rawBody) +} + +// SetBody updates body content only if section is raw. +func (s *Section) SetBody(body string) { + if !s.isRawSection { + return + } + s.rawBody = body +} + +// NewKey creates a new key to given section. +func (s *Section) NewKey(name, val string) (*Key, error) { + if len(name) == 0 { + return nil, errors.New("error creating new key: empty key name") + } else if s.f.options.Insensitive || s.f.options.InsensitiveKeys { + name = strings.ToLower(name) + } + + if s.f.BlockMode { + s.f.lock.Lock() + defer s.f.lock.Unlock() + } + + if inSlice(name, s.keyList) { + if s.f.options.AllowShadows { + if err := s.keys[name].addShadow(val); err != nil { + return nil, err + } + } else { + s.keys[name].value = val + s.keysHash[name] = val + } + return s.keys[name], nil + } + + s.keyList = append(s.keyList, name) + s.keys[name] = newKey(s, name, val) + s.keysHash[name] = val + return s.keys[name], nil +} + +// NewBooleanKey creates a new boolean type key to given section. 
+func (s *Section) NewBooleanKey(name string) (*Key, error) { + key, err := s.NewKey(name, "true") + if err != nil { + return nil, err + } + + key.isBooleanType = true + return key, nil +} + +// GetKey returns key in section by given name. +func (s *Section) GetKey(name string) (*Key, error) { + if s.f.BlockMode { + s.f.lock.RLock() + } + if s.f.options.Insensitive || s.f.options.InsensitiveKeys { + name = strings.ToLower(name) + } + key := s.keys[name] + if s.f.BlockMode { + s.f.lock.RUnlock() + } + + if key == nil { + // Check if it is a child-section. + sname := s.name + for { + if i := strings.LastIndex(sname, s.f.options.ChildSectionDelimiter); i > -1 { + sname = sname[:i] + sec, err := s.f.GetSection(sname) + if err != nil { + continue + } + return sec.GetKey(name) + } + break + } + return nil, fmt.Errorf("error when getting key of section %q: key %q not exists", s.name, name) + } + return key, nil +} + +// HasKey returns true if section contains a key with given name. +func (s *Section) HasKey(name string) bool { + key, _ := s.GetKey(name) + return key != nil +} + +// Deprecated: Use "HasKey" instead. +func (s *Section) Haskey(name string) bool { + return s.HasKey(name) +} + +// HasValue returns true if section contains given raw value. +func (s *Section) HasValue(value string) bool { + if s.f.BlockMode { + s.f.lock.RLock() + defer s.f.lock.RUnlock() + } + + for _, k := range s.keys { + if value == k.value { + return true + } + } + return false +} + +// Key assumes named Key exists in section and returns a zero-value when not. +func (s *Section) Key(name string) *Key { + key, err := s.GetKey(name) + if err != nil { + // It's OK here because the only possible error is empty key name, + // but if it's empty, this piece of code won't be executed. + key, _ = s.NewKey(name, "") + return key + } + return key +} + +// Keys returns list of keys of section. 
+func (s *Section) Keys() []*Key { + keys := make([]*Key, len(s.keyList)) + for i := range s.keyList { + keys[i] = s.Key(s.keyList[i]) + } + return keys +} + +// ParentKeys returns list of keys of parent section. +func (s *Section) ParentKeys() []*Key { + var parentKeys []*Key + sname := s.name + for { + if i := strings.LastIndex(sname, s.f.options.ChildSectionDelimiter); i > -1 { + sname = sname[:i] + sec, err := s.f.GetSection(sname) + if err != nil { + continue + } + parentKeys = append(parentKeys, sec.Keys()...) + } else { + break + } + + } + return parentKeys +} + +// KeyStrings returns list of key names of section. +func (s *Section) KeyStrings() []string { + list := make([]string, len(s.keyList)) + copy(list, s.keyList) + return list +} + +// KeysHash returns keys hash consisting of names and values. +func (s *Section) KeysHash() map[string]string { + if s.f.BlockMode { + s.f.lock.RLock() + defer s.f.lock.RUnlock() + } + + hash := make(map[string]string, len(s.keysHash)) + for key, value := range s.keysHash { + hash[key] = value + } + return hash +} + +// DeleteKey deletes a key from section. +func (s *Section) DeleteKey(name string) { + if s.f.BlockMode { + s.f.lock.Lock() + defer s.f.lock.Unlock() + } + + for i, k := range s.keyList { + if k == name { + s.keyList = append(s.keyList[:i], s.keyList[i+1:]...) + delete(s.keys, name) + delete(s.keysHash, name) + return + } + } +} + +// ChildSections returns a list of child sections of current section. +// For example, "[parent.child1]" and "[parent.child12]" are child sections +// of section "[parent]". +func (s *Section) ChildSections() []*Section { + prefix := s.name + s.f.options.ChildSectionDelimiter + children := make([]*Section, 0, 3) + for _, name := range s.f.sectionList { + if strings.HasPrefix(name, prefix) { + children = append(children, s.f.sections[name]...) 
+ } + } + return children +} diff --git a/cluster-autoscaler/vendor/gopkg.in/ini.v1/struct.go b/cluster-autoscaler/vendor/gopkg.in/ini.v1/struct.go new file mode 100644 index 000000000000..a486b2fe0fdc --- /dev/null +++ b/cluster-autoscaler/vendor/gopkg.in/ini.v1/struct.go @@ -0,0 +1,747 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "bytes" + "errors" + "fmt" + "reflect" + "strings" + "time" + "unicode" +) + +// NameMapper represents a ini tag name mapper. +type NameMapper func(string) string + +// Built-in name getters. +var ( + // SnackCase converts to format SNACK_CASE. + SnackCase NameMapper = func(raw string) string { + newstr := make([]rune, 0, len(raw)) + for i, chr := range raw { + if isUpper := 'A' <= chr && chr <= 'Z'; isUpper { + if i > 0 { + newstr = append(newstr, '_') + } + } + newstr = append(newstr, unicode.ToUpper(chr)) + } + return string(newstr) + } + // TitleUnderscore converts to format title_underscore. 
+ TitleUnderscore NameMapper = func(raw string) string { + newstr := make([]rune, 0, len(raw)) + for i, chr := range raw { + if isUpper := 'A' <= chr && chr <= 'Z'; isUpper { + if i > 0 { + newstr = append(newstr, '_') + } + chr -= 'A' - 'a' + } + newstr = append(newstr, chr) + } + return string(newstr) + } +) + +func (s *Section) parseFieldName(raw, actual string) string { + if len(actual) > 0 { + return actual + } + if s.f.NameMapper != nil { + return s.f.NameMapper(raw) + } + return raw +} + +func parseDelim(actual string) string { + if len(actual) > 0 { + return actual + } + return "," +} + +var reflectTime = reflect.TypeOf(time.Now()).Kind() + +// setSliceWithProperType sets proper values to slice based on its type. +func setSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error { + var strs []string + if allowShadow { + strs = key.StringsWithShadows(delim) + } else { + strs = key.Strings(delim) + } + + numVals := len(strs) + if numVals == 0 { + return nil + } + + var vals interface{} + var err error + + sliceOf := field.Type().Elem().Kind() + switch sliceOf { + case reflect.String: + vals = strs + case reflect.Int: + vals, err = key.parseInts(strs, true, false) + case reflect.Int64: + vals, err = key.parseInt64s(strs, true, false) + case reflect.Uint: + vals, err = key.parseUints(strs, true, false) + case reflect.Uint64: + vals, err = key.parseUint64s(strs, true, false) + case reflect.Float64: + vals, err = key.parseFloat64s(strs, true, false) + case reflect.Bool: + vals, err = key.parseBools(strs, true, false) + case reflectTime: + vals, err = key.parseTimesFormat(time.RFC3339, strs, true, false) + default: + return fmt.Errorf("unsupported type '[]%s'", sliceOf) + } + if err != nil && isStrict { + return err + } + + slice := reflect.MakeSlice(field.Type(), numVals, numVals) + for i := 0; i < numVals; i++ { + switch sliceOf { + case reflect.String: + slice.Index(i).Set(reflect.ValueOf(vals.([]string)[i])) + case 
reflect.Int: + slice.Index(i).Set(reflect.ValueOf(vals.([]int)[i])) + case reflect.Int64: + slice.Index(i).Set(reflect.ValueOf(vals.([]int64)[i])) + case reflect.Uint: + slice.Index(i).Set(reflect.ValueOf(vals.([]uint)[i])) + case reflect.Uint64: + slice.Index(i).Set(reflect.ValueOf(vals.([]uint64)[i])) + case reflect.Float64: + slice.Index(i).Set(reflect.ValueOf(vals.([]float64)[i])) + case reflect.Bool: + slice.Index(i).Set(reflect.ValueOf(vals.([]bool)[i])) + case reflectTime: + slice.Index(i).Set(reflect.ValueOf(vals.([]time.Time)[i])) + } + } + field.Set(slice) + return nil +} + +func wrapStrictError(err error, isStrict bool) error { + if isStrict { + return err + } + return nil +} + +// setWithProperType sets proper value to field based on its type, +// but it does not return error for failing parsing, +// because we want to use default value that is already assigned to struct. +func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error { + vt := t + isPtr := t.Kind() == reflect.Ptr + if isPtr { + vt = t.Elem() + } + switch vt.Kind() { + case reflect.String: + stringVal := key.String() + if isPtr { + field.Set(reflect.ValueOf(&stringVal)) + } else if len(stringVal) > 0 { + field.SetString(key.String()) + } + case reflect.Bool: + boolVal, err := key.Bool() + if err != nil { + return wrapStrictError(err, isStrict) + } + if isPtr { + field.Set(reflect.ValueOf(&boolVal)) + } else { + field.SetBool(boolVal) + } + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + // ParseDuration will not return err for `0`, so check the type name + if vt.Name() == "Duration" { + durationVal, err := key.Duration() + if err != nil { + if intVal, err := key.Int64(); err == nil { + field.SetInt(intVal) + return nil + } + return wrapStrictError(err, isStrict) + } + if isPtr { + field.Set(reflect.ValueOf(&durationVal)) + } else if int64(durationVal) > 0 { + field.Set(reflect.ValueOf(durationVal)) + } 
+ return nil + } + + intVal, err := key.Int64() + if err != nil { + return wrapStrictError(err, isStrict) + } + if isPtr { + pv := reflect.New(t.Elem()) + pv.Elem().SetInt(intVal) + field.Set(pv) + } else { + field.SetInt(intVal) + } + // byte is an alias for uint8, so supporting uint8 breaks support for byte + case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: + durationVal, err := key.Duration() + // Skip zero value + if err == nil && uint64(durationVal) > 0 { + if isPtr { + field.Set(reflect.ValueOf(&durationVal)) + } else { + field.Set(reflect.ValueOf(durationVal)) + } + return nil + } + + uintVal, err := key.Uint64() + if err != nil { + return wrapStrictError(err, isStrict) + } + if isPtr { + pv := reflect.New(t.Elem()) + pv.Elem().SetUint(uintVal) + field.Set(pv) + } else { + field.SetUint(uintVal) + } + + case reflect.Float32, reflect.Float64: + floatVal, err := key.Float64() + if err != nil { + return wrapStrictError(err, isStrict) + } + if isPtr { + pv := reflect.New(t.Elem()) + pv.Elem().SetFloat(floatVal) + field.Set(pv) + } else { + field.SetFloat(floatVal) + } + case reflectTime: + timeVal, err := key.Time() + if err != nil { + return wrapStrictError(err, isStrict) + } + if isPtr { + field.Set(reflect.ValueOf(&timeVal)) + } else { + field.Set(reflect.ValueOf(timeVal)) + } + case reflect.Slice: + return setSliceWithProperType(key, field, delim, allowShadow, isStrict) + default: + return fmt.Errorf("unsupported type %q", t) + } + return nil +} + +func parseTagOptions(tag string) (rawName string, omitEmpty bool, allowShadow bool, allowNonUnique bool, extends bool) { + opts := strings.SplitN(tag, ",", 5) + rawName = opts[0] + for _, opt := range opts[1:] { + omitEmpty = omitEmpty || (opt == "omitempty") + allowShadow = allowShadow || (opt == "allowshadow") + allowNonUnique = allowNonUnique || (opt == "nonunique") + extends = extends || (opt == "extends") + } + return rawName, omitEmpty, allowShadow, allowNonUnique, extends +} + +// 
mapToField maps the given value to the matching field of the given section. +// The sectionIndex is the index (if non unique sections are enabled) to which the value should be added. +func (s *Section) mapToField(val reflect.Value, isStrict bool, sectionIndex int, sectionName string) error { + if val.Kind() == reflect.Ptr { + val = val.Elem() + } + typ := val.Type() + + for i := 0; i < typ.NumField(); i++ { + field := val.Field(i) + tpField := typ.Field(i) + + tag := tpField.Tag.Get("ini") + if tag == "-" { + continue + } + + rawName, _, allowShadow, allowNonUnique, extends := parseTagOptions(tag) + fieldName := s.parseFieldName(tpField.Name, rawName) + if len(fieldName) == 0 || !field.CanSet() { + continue + } + + isStruct := tpField.Type.Kind() == reflect.Struct + isStructPtr := tpField.Type.Kind() == reflect.Ptr && tpField.Type.Elem().Kind() == reflect.Struct + isAnonymousPtr := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous + if isAnonymousPtr { + field.Set(reflect.New(tpField.Type.Elem())) + } + + if extends && (isAnonymousPtr || (isStruct && tpField.Anonymous)) { + if isStructPtr && field.IsNil() { + field.Set(reflect.New(tpField.Type.Elem())) + } + fieldSection := s + if rawName != "" { + sectionName = s.name + s.f.options.ChildSectionDelimiter + rawName + if secs, err := s.f.SectionsByName(sectionName); err == nil && sectionIndex < len(secs) { + fieldSection = secs[sectionIndex] + } + } + if err := fieldSection.mapToField(field, isStrict, sectionIndex, sectionName); err != nil { + return fmt.Errorf("map to field %q: %v", fieldName, err) + } + } else if isAnonymousPtr || isStruct || isStructPtr { + if secs, err := s.f.SectionsByName(fieldName); err == nil { + if len(secs) <= sectionIndex { + return fmt.Errorf("there are not enough sections (%d <= %d) for the field %q", len(secs), sectionIndex, fieldName) + } + // Only set the field to non-nil struct value if we have a section for it. 
+ // Otherwise, we end up with a non-nil struct ptr even though there is no data. + if isStructPtr && field.IsNil() { + field.Set(reflect.New(tpField.Type.Elem())) + } + if err = secs[sectionIndex].mapToField(field, isStrict, sectionIndex, fieldName); err != nil { + return fmt.Errorf("map to field %q: %v", fieldName, err) + } + continue + } + } + + // Map non-unique sections + if allowNonUnique && tpField.Type.Kind() == reflect.Slice { + newField, err := s.mapToSlice(fieldName, field, isStrict) + if err != nil { + return fmt.Errorf("map to slice %q: %v", fieldName, err) + } + + field.Set(newField) + continue + } + + if key, err := s.GetKey(fieldName); err == nil { + delim := parseDelim(tpField.Tag.Get("delim")) + if err = setWithProperType(tpField.Type, key, field, delim, allowShadow, isStrict); err != nil { + return fmt.Errorf("set field %q: %v", fieldName, err) + } + } + } + return nil +} + +// mapToSlice maps all sections with the same name and returns the new value. +// The type of the Value must be a slice. +func (s *Section) mapToSlice(secName string, val reflect.Value, isStrict bool) (reflect.Value, error) { + secs, err := s.f.SectionsByName(secName) + if err != nil { + return reflect.Value{}, err + } + + typ := val.Type().Elem() + for i, sec := range secs { + elem := reflect.New(typ) + if err = sec.mapToField(elem, isStrict, i, sec.name); err != nil { + return reflect.Value{}, fmt.Errorf("map to field from section %q: %v", secName, err) + } + + val = reflect.Append(val, elem.Elem()) + } + return val, nil +} + +// mapTo maps a section to object v. 
+func (s *Section) mapTo(v interface{}, isStrict bool) error { + typ := reflect.TypeOf(v) + val := reflect.ValueOf(v) + if typ.Kind() == reflect.Ptr { + typ = typ.Elem() + val = val.Elem() + } else { + return errors.New("not a pointer to a struct") + } + + if typ.Kind() == reflect.Slice { + newField, err := s.mapToSlice(s.name, val, isStrict) + if err != nil { + return err + } + + val.Set(newField) + return nil + } + + return s.mapToField(val, isStrict, 0, s.name) +} + +// MapTo maps section to given struct. +func (s *Section) MapTo(v interface{}) error { + return s.mapTo(v, false) +} + +// StrictMapTo maps section to given struct in strict mode, +// which returns all possible error including value parsing error. +func (s *Section) StrictMapTo(v interface{}) error { + return s.mapTo(v, true) +} + +// MapTo maps file to given struct. +func (f *File) MapTo(v interface{}) error { + return f.Section("").MapTo(v) +} + +// StrictMapTo maps file to given struct in strict mode, +// which returns all possible error including value parsing error. +func (f *File) StrictMapTo(v interface{}) error { + return f.Section("").StrictMapTo(v) +} + +// MapToWithMapper maps data sources to given struct with name mapper. +func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error { + cfg, err := Load(source, others...) + if err != nil { + return err + } + cfg.NameMapper = mapper + return cfg.MapTo(v) +} + +// StrictMapToWithMapper maps data sources to given struct with name mapper in strict mode, +// which returns all possible error including value parsing error. +func StrictMapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error { + cfg, err := Load(source, others...) + if err != nil { + return err + } + cfg.NameMapper = mapper + return cfg.StrictMapTo(v) +} + +// MapTo maps data sources to given struct. 
+func MapTo(v, source interface{}, others ...interface{}) error { + return MapToWithMapper(v, nil, source, others...) +} + +// StrictMapTo maps data sources to given struct in strict mode, +// which returns all possible error including value parsing error. +func StrictMapTo(v, source interface{}, others ...interface{}) error { + return StrictMapToWithMapper(v, nil, source, others...) +} + +// reflectSliceWithProperType does the opposite thing as setSliceWithProperType. +func reflectSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow bool) error { + slice := field.Slice(0, field.Len()) + if field.Len() == 0 { + return nil + } + sliceOf := field.Type().Elem().Kind() + + if allowShadow { + var keyWithShadows *Key + for i := 0; i < field.Len(); i++ { + var val string + switch sliceOf { + case reflect.String: + val = slice.Index(i).String() + case reflect.Int, reflect.Int64: + val = fmt.Sprint(slice.Index(i).Int()) + case reflect.Uint, reflect.Uint64: + val = fmt.Sprint(slice.Index(i).Uint()) + case reflect.Float64: + val = fmt.Sprint(slice.Index(i).Float()) + case reflect.Bool: + val = fmt.Sprint(slice.Index(i).Bool()) + case reflectTime: + val = slice.Index(i).Interface().(time.Time).Format(time.RFC3339) + default: + return fmt.Errorf("unsupported type '[]%s'", sliceOf) + } + + if i == 0 { + keyWithShadows = newKey(key.s, key.name, val) + } else { + _ = keyWithShadows.AddShadow(val) + } + } + *key = *keyWithShadows + return nil + } + + var buf bytes.Buffer + for i := 0; i < field.Len(); i++ { + switch sliceOf { + case reflect.String: + buf.WriteString(slice.Index(i).String()) + case reflect.Int, reflect.Int64: + buf.WriteString(fmt.Sprint(slice.Index(i).Int())) + case reflect.Uint, reflect.Uint64: + buf.WriteString(fmt.Sprint(slice.Index(i).Uint())) + case reflect.Float64: + buf.WriteString(fmt.Sprint(slice.Index(i).Float())) + case reflect.Bool: + buf.WriteString(fmt.Sprint(slice.Index(i).Bool())) + case reflectTime: + 
buf.WriteString(slice.Index(i).Interface().(time.Time).Format(time.RFC3339)) + default: + return fmt.Errorf("unsupported type '[]%s'", sliceOf) + } + buf.WriteString(delim) + } + key.SetValue(buf.String()[:buf.Len()-len(delim)]) + return nil +} + +// reflectWithProperType does the opposite thing as setWithProperType. +func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow bool) error { + switch t.Kind() { + case reflect.String: + key.SetValue(field.String()) + case reflect.Bool: + key.SetValue(fmt.Sprint(field.Bool())) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + key.SetValue(fmt.Sprint(field.Int())) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + key.SetValue(fmt.Sprint(field.Uint())) + case reflect.Float32, reflect.Float64: + key.SetValue(fmt.Sprint(field.Float())) + case reflectTime: + key.SetValue(fmt.Sprint(field.Interface().(time.Time).Format(time.RFC3339))) + case reflect.Slice: + return reflectSliceWithProperType(key, field, delim, allowShadow) + case reflect.Ptr: + if !field.IsNil() { + return reflectWithProperType(t.Elem(), key, field.Elem(), delim, allowShadow) + } + default: + return fmt.Errorf("unsupported type %q", t) + } + return nil +} + +// CR: copied from encoding/json/encode.go with modifications of time.Time support. +// TODO: add more test coverage. 
+func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + case reflectTime: + t, ok := v.Interface().(time.Time) + return ok && t.IsZero() + } + return false +} + +// StructReflector is the interface implemented by struct types that can extract themselves into INI objects. +type StructReflector interface { + ReflectINIStruct(*File) error +} + +func (s *Section) reflectFrom(val reflect.Value) error { + if val.Kind() == reflect.Ptr { + val = val.Elem() + } + typ := val.Type() + + for i := 0; i < typ.NumField(); i++ { + if !val.Field(i).CanInterface() { + continue + } + + field := val.Field(i) + tpField := typ.Field(i) + + tag := tpField.Tag.Get("ini") + if tag == "-" { + continue + } + + rawName, omitEmpty, allowShadow, allowNonUnique, extends := parseTagOptions(tag) + if omitEmpty && isEmptyValue(field) { + continue + } + + if r, ok := field.Interface().(StructReflector); ok { + return r.ReflectINIStruct(s.f) + } + + fieldName := s.parseFieldName(tpField.Name, rawName) + if len(fieldName) == 0 || !field.CanSet() { + continue + } + + if extends && tpField.Anonymous && (tpField.Type.Kind() == reflect.Ptr || tpField.Type.Kind() == reflect.Struct) { + if err := s.reflectFrom(field); err != nil { + return fmt.Errorf("reflect from field %q: %v", fieldName, err) + } + continue + } + + if (tpField.Type.Kind() == reflect.Ptr && tpField.Type.Elem().Kind() == reflect.Struct) || + (tpField.Type.Kind() == reflect.Struct && tpField.Type.Name() != "Time") { + // Note: The only error here is section doesn't 
exist. + sec, err := s.f.GetSection(fieldName) + if err != nil { + // Note: fieldName can never be empty here, ignore error. + sec, _ = s.f.NewSection(fieldName) + } + + // Add comment from comment tag + if len(sec.Comment) == 0 { + sec.Comment = tpField.Tag.Get("comment") + } + + if err = sec.reflectFrom(field); err != nil { + return fmt.Errorf("reflect from field %q: %v", fieldName, err) + } + continue + } + + if allowNonUnique && tpField.Type.Kind() == reflect.Slice { + slice := field.Slice(0, field.Len()) + if field.Len() == 0 { + return nil + } + sliceOf := field.Type().Elem().Kind() + + for i := 0; i < field.Len(); i++ { + if sliceOf != reflect.Struct && sliceOf != reflect.Ptr { + return fmt.Errorf("field %q is not a slice of pointer or struct", fieldName) + } + + sec, err := s.f.NewSection(fieldName) + if err != nil { + return err + } + + // Add comment from comment tag + if len(sec.Comment) == 0 { + sec.Comment = tpField.Tag.Get("comment") + } + + if err := sec.reflectFrom(slice.Index(i)); err != nil { + return fmt.Errorf("reflect from field %q: %v", fieldName, err) + } + } + continue + } + + // Note: Same reason as section. + key, err := s.GetKey(fieldName) + if err != nil { + key, _ = s.NewKey(fieldName, "") + } + + // Add comment from comment tag + if len(key.Comment) == 0 { + key.Comment = tpField.Tag.Get("comment") + } + + delim := parseDelim(tpField.Tag.Get("delim")) + if err = reflectWithProperType(tpField.Type, key, field, delim, allowShadow); err != nil { + return fmt.Errorf("reflect field %q: %v", fieldName, err) + } + + } + return nil +} + +// ReflectFrom reflects section from given struct. It overwrites existing ones. 
+func (s *Section) ReflectFrom(v interface{}) error { + typ := reflect.TypeOf(v) + val := reflect.ValueOf(v) + + if s.name != DefaultSection && s.f.options.AllowNonUniqueSections && + (typ.Kind() == reflect.Slice || typ.Kind() == reflect.Ptr) { + // Clear sections to make sure none exists before adding the new ones + s.f.DeleteSection(s.name) + + if typ.Kind() == reflect.Ptr { + sec, err := s.f.NewSection(s.name) + if err != nil { + return err + } + return sec.reflectFrom(val.Elem()) + } + + slice := val.Slice(0, val.Len()) + sliceOf := val.Type().Elem().Kind() + if sliceOf != reflect.Ptr { + return fmt.Errorf("not a slice of pointers") + } + + for i := 0; i < slice.Len(); i++ { + sec, err := s.f.NewSection(s.name) + if err != nil { + return err + } + + err = sec.reflectFrom(slice.Index(i)) + if err != nil { + return fmt.Errorf("reflect from %dth field: %v", i, err) + } + } + + return nil + } + + if typ.Kind() == reflect.Ptr { + val = val.Elem() + } else { + return errors.New("not a pointer to a struct") + } + + return s.reflectFrom(val) +} + +// ReflectFrom reflects file from given struct. +func (f *File) ReflectFrom(v interface{}) error { + return f.Section("").ReflectFrom(v) +} + +// ReflectFromWithMapper reflects data sources from given struct with name mapper. +func ReflectFromWithMapper(cfg *File, v interface{}, mapper NameMapper) error { + cfg.NameMapper = mapper + return cfg.ReflectFrom(v) +} + +// ReflectFrom reflects data sources from given struct. 
+func ReflectFrom(cfg *File, v interface{}) error { + return ReflectFromWithMapper(cfg, v, nil) +} diff --git a/cluster-autoscaler/vendor/modules.txt b/cluster-autoscaler/vendor/modules.txt index eb6548afc4b0..42f51d6be071 100644 --- a/cluster-autoscaler/vendor/modules.txt +++ b/cluster-autoscaler/vendor/modules.txt @@ -565,14 +565,30 @@ github.com/spf13/cobra # github.com/spf13/pflag v1.0.5 ## explicit; go 1.12 github.com/spf13/pflag +# github.com/spotinst/spotinst-sdk-go v1.357.0 +## explicit; go 1.20 +github.com/spotinst/spotinst-sdk-go/service/elastigroup +github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/aws +github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/azure/v3 +github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/gcp +github.com/spotinst/spotinst-sdk-go/spotinst +github.com/spotinst/spotinst-sdk-go/spotinst/client +github.com/spotinst/spotinst-sdk-go/spotinst/credentials +github.com/spotinst/spotinst-sdk-go/spotinst/featureflag +github.com/spotinst/spotinst-sdk-go/spotinst/log +github.com/spotinst/spotinst-sdk-go/spotinst/session +github.com/spotinst/spotinst-sdk-go/spotinst/util/jsonutil +github.com/spotinst/spotinst-sdk-go/spotinst/util/stringutil +github.com/spotinst/spotinst-sdk-go/spotinst/util/uritemplates +github.com/spotinst/spotinst-sdk-go/spotinst/util/useragent # github.com/stoewer/go-strcase v1.3.0 ## explicit; go 1.11 github.com/stoewer/go-strcase -# github.com/stretchr/objx v0.5.0 -## explicit; go 1.12 -github.com/stretchr/objx -# github.com/stretchr/testify v1.8.4 +# github.com/stretchr/objx v0.5.2 ## explicit; go 1.20 +github.com/stretchr/objx +# github.com/stretchr/testify v1.9.0 +## explicit; go 1.17 github.com/stretchr/testify/assert github.com/stretchr/testify/mock github.com/stretchr/testify/require @@ -965,6 +981,9 @@ gopkg.in/gcfg.v1/types # gopkg.in/inf.v0 v0.9.1 ## explicit gopkg.in/inf.v0 +# gopkg.in/ini.v1 v1.67.0 +## explicit +gopkg.in/ini.v1 # gopkg.in/natefinch/lumberjack.v2 
v2.2.1 ## explicit; go 1.13 gopkg.in/natefinch/lumberjack.v2 diff --git a/cluster-autoscaler/visenze.md b/cluster-autoscaler/visenze.md new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/patch/1.27.patch b/patch/1.27.patch new file mode 100644 index 000000000000..58648641d9a2 --- /dev/null +++ b/patch/1.27.patch @@ -0,0 +1,6240 @@ +diff --git a/cluster-autoscaler/Dockerfile b/cluster-autoscaler/Dockerfile +new file mode 100644 +index 000000000..6b4ae3a2a +--- /dev/null ++++ b/cluster-autoscaler/Dockerfile +@@ -0,0 +1,21 @@ ++FROM golang:1.19.13 AS build ++ ++ARG TARGETARCH ++ENV GOPATH /gopath/ ++ENV PATH $GOPATH/bin:$PATH ++ENV GO111MODULE auto ++ENV GOARCH ${TARGETARCH} ++ ++RUN apt-get update && apt-get --yes install libseccomp-dev ++RUN go version ++RUN go get github.com/tools/godep ++RUN godep version ++ ++WORKDIR /gopath/src/k8s.io/autoscaler/cluster-autoscaler ++ADD . . ++RUN CGO_ENABLED=0 GOOS=linux go build -o cluster-autoscaler --ldflags "-s" ++ ++FROM alpine ++COPY --from=build /gopath/src/k8s.io/autoscaler/cluster-autoscaler/cluster-autoscaler / ++ ++CMD ["./cluster-autoscaler"] +\ No newline at end of file +diff --git a/cluster-autoscaler/cloudprovider/aws/aws_manager.go b/cluster-autoscaler/cloudprovider/aws/aws_manager.go +index f45716359..d489a11cc 100644 +--- a/cluster-autoscaler/cloudprovider/aws/aws_manager.go ++++ b/cluster-autoscaler/cloudprovider/aws/aws_manager.go +@@ -39,6 +39,9 @@ import ( + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/aws/aws-sdk-go/service/eks" + "k8s.io/autoscaler/cluster-autoscaler/config" + "k8s.io/autoscaler/cluster-autoscaler/utils/gpu" ++ "k8s.io/autoscaler/cluster-autoscaler/utils/gpumemory" ++ "k8s.io/autoscaler/cluster-autoscaler/utils/mpscontext" ++ klog "k8s.io/klog/v2" + ) + + const ( +@@ -270,6 +273,8 @@ func (m *AwsManager) buildNodeFromTemplate(asg *asg, template *asgTemplate) (*ap + node.Status.Capacity[apiv1.ResourceCPU] = *resource.NewQuantity(template.InstanceType.VCPU, 
resource.DecimalSI) + node.Status.Capacity[gpu.ResourceNvidiaGPU] = *resource.NewQuantity(template.InstanceType.GPU, resource.DecimalSI) + node.Status.Capacity[apiv1.ResourceMemory] = *resource.NewQuantity(template.InstanceType.MemoryMb*1024*1024, resource.DecimalSI) ++ node.Status.Capacity[gpumemory.ResourceVisenzeGPUMemory] = *resource.NewQuantity(template.InstanceType.GPUMemory, resource.DecimalSI) ++ node.Status.Capacity[mpscontext.ResourceVisenzeMPSContext] = *resource.NewQuantity(template.InstanceType.MPSContext, resource.DecimalSI) + + m.updateCapacityWithRequirementsOverrides(&node.Status.Capacity, asg.MixedInstancesPolicy) + +diff --git a/cluster-autoscaler/cloudprovider/builder/builder_all.go b/cluster-autoscaler/cloudprovider/builder/builder_all.go +index c8a2677ac..dddf6578d 100644 +--- a/cluster-autoscaler/cloudprovider/builder/builder_all.go ++++ b/cluster-autoscaler/cloudprovider/builder/builder_all.go +@@ -48,6 +48,7 @@ import ( + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/scaleway" + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/tencentcloud" + "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/vultr" ++ "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/spotinst" + "k8s.io/autoscaler/cluster-autoscaler/config" + ) + +@@ -80,6 +81,7 @@ var AvailableCloudProviders = []string{ + cloudprovider.CivoProviderName, + cloudprovider.ScalewayProviderName, + cloudprovider.RancherProviderName, ++ cloudprovider.SpotinstProviderName, + } + + // DefaultCloudProvider is GCE. 
+@@ -141,6 +143,8 @@ func buildCloudProvider(opts config.AutoscalingOptions, do cloudprovider.NodeGro + return scaleway.BuildScaleway(opts, do, rl) + case cloudprovider.RancherProviderName: + return rancher.BuildRancher(opts, do, rl) ++ case cloudprovider.SpotinstProviderName: ++ return spotinst.BuildSpotinst(opts, do, rl) + } + return nil + } +diff --git a/cluster-autoscaler/cloudprovider/cloud_provider.go b/cluster-autoscaler/cloudprovider/cloud_provider.go +index 5b482857c..34f96f871 100644 +--- a/cluster-autoscaler/cloudprovider/cloud_provider.go ++++ b/cluster-autoscaler/cloudprovider/cloud_provider.go +@@ -84,6 +84,8 @@ const ( + CivoProviderName = "civo" + // RancherProviderName gets the provider name of rancher + RancherProviderName = "rancher" ++ // SpotinstProviderName gets the provider name of aws ++ SpotinstProviderName = "spotinst" + ) + + // GpuConfig contains the label, type and the resource name for a GPU. +diff --git a/cluster-autoscaler/cloudprovider/spotinst/aws_ec2_instance_types.go b/cluster-autoscaler/cloudprovider/spotinst/aws_ec2_instance_types.go +new file mode 100644 +index 000000000..5cf2902af +--- /dev/null ++++ b/cluster-autoscaler/cloudprovider/spotinst/aws_ec2_instance_types.go +@@ -0,0 +1,4540 @@ ++/* ++Copyright The Kubernetes Authors. ++ ++Licensed under the Apache License, Version 2.0 (the "License"); ++you may not use this file except in compliance with the License. ++You may obtain a copy of the License at ++ ++ http://www.apache.org/licenses/LICENSE-2.0 ++ ++Unless required by applicable law or agreed to in writing, software ++distributed under the License is distributed on an "AS IS" BASIS, ++WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++See the License for the specific language governing permissions and ++limitations under the License. 
++*/ ++ ++// This file was generated by go generate; DO NOT EDIT ++ ++package aws ++ ++// InstanceType is spec of EC2 instance ++type InstanceType struct { ++ InstanceType string ++ VCPU int64 ++ MemoryMb int64 ++ GPU int64 ++ Architecture string ++ GPUMemory int64 ++ MPSContext int64 ++} ++ ++// StaticListLastUpdateTime is a string declaring the last time the static list was updated. ++var StaticListLastUpdateTime = "2023-02-14" ++ ++// InstanceTypes is a map of ec2 resources ++var InstanceTypes = map[string]*InstanceType{ ++ "a1.2xlarge": { ++ InstanceType: "a1.2xlarge", ++ VCPU: 8, ++ MemoryMb: 16384, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "a1.4xlarge": { ++ InstanceType: "a1.4xlarge", ++ VCPU: 16, ++ MemoryMb: 32768, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "a1.large": { ++ InstanceType: "a1.large", ++ VCPU: 2, ++ MemoryMb: 4096, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "a1.medium": { ++ InstanceType: "a1.medium", ++ VCPU: 1, ++ MemoryMb: 2048, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "a1.metal": { ++ InstanceType: "a1.metal", ++ VCPU: 16, ++ MemoryMb: 32768, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "a1.xlarge": { ++ InstanceType: "a1.xlarge", ++ VCPU: 4, ++ MemoryMb: 8192, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "c1.medium": { ++ InstanceType: "c1.medium", ++ VCPU: 2, ++ MemoryMb: 1740, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c1.xlarge": { ++ InstanceType: "c1.xlarge", ++ VCPU: 8, ++ MemoryMb: 7168, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c3.2xlarge": { ++ InstanceType: "c3.2xlarge", ++ VCPU: 8, ++ MemoryMb: 15360, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c3.4xlarge": { ++ InstanceType: "c3.4xlarge", ++ VCPU: 16, ++ MemoryMb: 30720, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c3.8xlarge": { ++ InstanceType: "c3.8xlarge", ++ VCPU: 32, ++ MemoryMb: 61440, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c3.large": { ++ InstanceType: "c3.large", ++ VCPU: 2, ++ MemoryMb: 3840, ++ GPU: 0, ++ Architecture: 
"amd64", ++ }, ++ "c3.xlarge": { ++ InstanceType: "c3.xlarge", ++ VCPU: 4, ++ MemoryMb: 7680, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c4.2xlarge": { ++ InstanceType: "c4.2xlarge", ++ VCPU: 8, ++ MemoryMb: 15360, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c4.4xlarge": { ++ InstanceType: "c4.4xlarge", ++ VCPU: 16, ++ MemoryMb: 30720, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c4.8xlarge": { ++ InstanceType: "c4.8xlarge", ++ VCPU: 36, ++ MemoryMb: 61440, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c4.large": { ++ InstanceType: "c4.large", ++ VCPU: 2, ++ MemoryMb: 3840, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c4.xlarge": { ++ InstanceType: "c4.xlarge", ++ VCPU: 4, ++ MemoryMb: 7680, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c5.12xlarge": { ++ InstanceType: "c5.12xlarge", ++ VCPU: 48, ++ MemoryMb: 98304, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c5.18xlarge": { ++ InstanceType: "c5.18xlarge", ++ VCPU: 72, ++ MemoryMb: 147456, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c5.24xlarge": { ++ InstanceType: "c5.24xlarge", ++ VCPU: 96, ++ MemoryMb: 196608, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c5.2xlarge": { ++ InstanceType: "c5.2xlarge", ++ VCPU: 8, ++ MemoryMb: 16384, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c5.4xlarge": { ++ InstanceType: "c5.4xlarge", ++ VCPU: 16, ++ MemoryMb: 32768, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c5.9xlarge": { ++ InstanceType: "c5.9xlarge", ++ VCPU: 36, ++ MemoryMb: 73728, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c5.large": { ++ InstanceType: "c5.large", ++ VCPU: 2, ++ MemoryMb: 4096, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c5.metal": { ++ InstanceType: "c5.metal", ++ VCPU: 96, ++ MemoryMb: 196608, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c5.xlarge": { ++ InstanceType: "c5.xlarge", ++ VCPU: 4, ++ MemoryMb: 8192, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c5a.12xlarge": { ++ InstanceType: "c5a.12xlarge", ++ VCPU: 48, ++ MemoryMb: 98304, ++ GPU: 0, ++ Architecture: 
"amd64", ++ }, ++ "c5a.16xlarge": { ++ InstanceType: "c5a.16xlarge", ++ VCPU: 64, ++ MemoryMb: 131072, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c5a.24xlarge": { ++ InstanceType: "c5a.24xlarge", ++ VCPU: 96, ++ MemoryMb: 196608, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c5a.2xlarge": { ++ InstanceType: "c5a.2xlarge", ++ VCPU: 8, ++ MemoryMb: 16384, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c5a.4xlarge": { ++ InstanceType: "c5a.4xlarge", ++ VCPU: 16, ++ MemoryMb: 32768, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c5a.8xlarge": { ++ InstanceType: "c5a.8xlarge", ++ VCPU: 32, ++ MemoryMb: 65536, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c5a.large": { ++ InstanceType: "c5a.large", ++ VCPU: 2, ++ MemoryMb: 4096, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c5a.xlarge": { ++ InstanceType: "c5a.xlarge", ++ VCPU: 4, ++ MemoryMb: 8192, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c5ad.12xlarge": { ++ InstanceType: "c5ad.12xlarge", ++ VCPU: 48, ++ MemoryMb: 98304, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c5ad.16xlarge": { ++ InstanceType: "c5ad.16xlarge", ++ VCPU: 64, ++ MemoryMb: 131072, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c5ad.24xlarge": { ++ InstanceType: "c5ad.24xlarge", ++ VCPU: 96, ++ MemoryMb: 196608, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c5ad.2xlarge": { ++ InstanceType: "c5ad.2xlarge", ++ VCPU: 8, ++ MemoryMb: 16384, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c5ad.4xlarge": { ++ InstanceType: "c5ad.4xlarge", ++ VCPU: 16, ++ MemoryMb: 32768, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c5ad.8xlarge": { ++ InstanceType: "c5ad.8xlarge", ++ VCPU: 32, ++ MemoryMb: 65536, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c5ad.large": { ++ InstanceType: "c5ad.large", ++ VCPU: 2, ++ MemoryMb: 4096, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c5ad.xlarge": { ++ InstanceType: "c5ad.xlarge", ++ VCPU: 4, ++ MemoryMb: 8192, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c5d.12xlarge": { ++ InstanceType: "c5d.12xlarge", ++ 
VCPU: 48, ++ MemoryMb: 98304, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c5d.18xlarge": { ++ InstanceType: "c5d.18xlarge", ++ VCPU: 72, ++ MemoryMb: 147456, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c5d.24xlarge": { ++ InstanceType: "c5d.24xlarge", ++ VCPU: 96, ++ MemoryMb: 196608, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c5d.2xlarge": { ++ InstanceType: "c5d.2xlarge", ++ VCPU: 8, ++ MemoryMb: 16384, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c5d.4xlarge": { ++ InstanceType: "c5d.4xlarge", ++ VCPU: 16, ++ MemoryMb: 32768, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c5d.9xlarge": { ++ InstanceType: "c5d.9xlarge", ++ VCPU: 36, ++ MemoryMb: 73728, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c5d.large": { ++ InstanceType: "c5d.large", ++ VCPU: 2, ++ MemoryMb: 4096, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c5d.metal": { ++ InstanceType: "c5d.metal", ++ VCPU: 96, ++ MemoryMb: 196608, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c5d.xlarge": { ++ InstanceType: "c5d.xlarge", ++ VCPU: 4, ++ MemoryMb: 8192, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c5n.18xlarge": { ++ InstanceType: "c5n.18xlarge", ++ VCPU: 72, ++ MemoryMb: 196608, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c5n.2xlarge": { ++ InstanceType: "c5n.2xlarge", ++ VCPU: 8, ++ MemoryMb: 21504, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c5n.4xlarge": { ++ InstanceType: "c5n.4xlarge", ++ VCPU: 16, ++ MemoryMb: 43008, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c5n.9xlarge": { ++ InstanceType: "c5n.9xlarge", ++ VCPU: 36, ++ MemoryMb: 98304, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c5n.large": { ++ InstanceType: "c5n.large", ++ VCPU: 2, ++ MemoryMb: 5376, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c5n.metal": { ++ InstanceType: "c5n.metal", ++ VCPU: 72, ++ MemoryMb: 196608, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c5n.xlarge": { ++ InstanceType: "c5n.xlarge", ++ VCPU: 4, ++ MemoryMb: 10752, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c6a.12xlarge": { ++ 
InstanceType: "c6a.12xlarge", ++ VCPU: 48, ++ MemoryMb: 98304, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c6a.16xlarge": { ++ InstanceType: "c6a.16xlarge", ++ VCPU: 64, ++ MemoryMb: 131072, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c6a.24xlarge": { ++ InstanceType: "c6a.24xlarge", ++ VCPU: 96, ++ MemoryMb: 196608, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c6a.2xlarge": { ++ InstanceType: "c6a.2xlarge", ++ VCPU: 8, ++ MemoryMb: 16384, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c6a.32xlarge": { ++ InstanceType: "c6a.32xlarge", ++ VCPU: 128, ++ MemoryMb: 262144, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c6a.48xlarge": { ++ InstanceType: "c6a.48xlarge", ++ VCPU: 192, ++ MemoryMb: 393216, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c6a.4xlarge": { ++ InstanceType: "c6a.4xlarge", ++ VCPU: 16, ++ MemoryMb: 32768, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c6a.8xlarge": { ++ InstanceType: "c6a.8xlarge", ++ VCPU: 32, ++ MemoryMb: 65536, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c6a.large": { ++ InstanceType: "c6a.large", ++ VCPU: 2, ++ MemoryMb: 4096, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c6a.metal": { ++ InstanceType: "c6a.metal", ++ VCPU: 192, ++ MemoryMb: 393216, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c6a.xlarge": { ++ InstanceType: "c6a.xlarge", ++ VCPU: 4, ++ MemoryMb: 8192, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c6g.12xlarge": { ++ InstanceType: "c6g.12xlarge", ++ VCPU: 48, ++ MemoryMb: 98304, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "c6g.16xlarge": { ++ InstanceType: "c6g.16xlarge", ++ VCPU: 64, ++ MemoryMb: 131072, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "c6g.2xlarge": { ++ InstanceType: "c6g.2xlarge", ++ VCPU: 8, ++ MemoryMb: 16384, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "c6g.4xlarge": { ++ InstanceType: "c6g.4xlarge", ++ VCPU: 16, ++ MemoryMb: 32768, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "c6g.8xlarge": { ++ InstanceType: "c6g.8xlarge", ++ VCPU: 32, ++ MemoryMb: 65536, ++ GPU: 0, 
++ Architecture: "arm64", ++ }, ++ "c6g.large": { ++ InstanceType: "c6g.large", ++ VCPU: 2, ++ MemoryMb: 4096, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "c6g.medium": { ++ InstanceType: "c6g.medium", ++ VCPU: 1, ++ MemoryMb: 2048, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "c6g.metal": { ++ InstanceType: "c6g.metal", ++ VCPU: 64, ++ MemoryMb: 131072, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "c6g.xlarge": { ++ InstanceType: "c6g.xlarge", ++ VCPU: 4, ++ MemoryMb: 8192, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "c6gd.12xlarge": { ++ InstanceType: "c6gd.12xlarge", ++ VCPU: 48, ++ MemoryMb: 98304, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "c6gd.16xlarge": { ++ InstanceType: "c6gd.16xlarge", ++ VCPU: 64, ++ MemoryMb: 131072, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "c6gd.2xlarge": { ++ InstanceType: "c6gd.2xlarge", ++ VCPU: 8, ++ MemoryMb: 16384, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "c6gd.4xlarge": { ++ InstanceType: "c6gd.4xlarge", ++ VCPU: 16, ++ MemoryMb: 32768, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "c6gd.8xlarge": { ++ InstanceType: "c6gd.8xlarge", ++ VCPU: 32, ++ MemoryMb: 65536, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "c6gd.large": { ++ InstanceType: "c6gd.large", ++ VCPU: 2, ++ MemoryMb: 4096, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "c6gd.medium": { ++ InstanceType: "c6gd.medium", ++ VCPU: 1, ++ MemoryMb: 2048, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "c6gd.metal": { ++ InstanceType: "c6gd.metal", ++ VCPU: 64, ++ MemoryMb: 131072, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "c6gd.xlarge": { ++ InstanceType: "c6gd.xlarge", ++ VCPU: 4, ++ MemoryMb: 8192, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "c6gn.12xlarge": { ++ InstanceType: "c6gn.12xlarge", ++ VCPU: 48, ++ MemoryMb: 98304, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "c6gn.16xlarge": { ++ InstanceType: "c6gn.16xlarge", ++ VCPU: 64, ++ MemoryMb: 131072, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "c6gn.2xlarge": { ++ InstanceType: 
"c6gn.2xlarge", ++ VCPU: 8, ++ MemoryMb: 16384, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "c6gn.4xlarge": { ++ InstanceType: "c6gn.4xlarge", ++ VCPU: 16, ++ MemoryMb: 32768, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "c6gn.8xlarge": { ++ InstanceType: "c6gn.8xlarge", ++ VCPU: 32, ++ MemoryMb: 65536, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "c6gn.large": { ++ InstanceType: "c6gn.large", ++ VCPU: 2, ++ MemoryMb: 4096, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "c6gn.medium": { ++ InstanceType: "c6gn.medium", ++ VCPU: 1, ++ MemoryMb: 2048, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "c6gn.xlarge": { ++ InstanceType: "c6gn.xlarge", ++ VCPU: 4, ++ MemoryMb: 8192, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "c6i.12xlarge": { ++ InstanceType: "c6i.12xlarge", ++ VCPU: 48, ++ MemoryMb: 98304, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c6i.16xlarge": { ++ InstanceType: "c6i.16xlarge", ++ VCPU: 64, ++ MemoryMb: 131072, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c6i.24xlarge": { ++ InstanceType: "c6i.24xlarge", ++ VCPU: 96, ++ MemoryMb: 196608, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c6i.2xlarge": { ++ InstanceType: "c6i.2xlarge", ++ VCPU: 8, ++ MemoryMb: 16384, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c6i.32xlarge": { ++ InstanceType: "c6i.32xlarge", ++ VCPU: 128, ++ MemoryMb: 262144, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c6i.4xlarge": { ++ InstanceType: "c6i.4xlarge", ++ VCPU: 16, ++ MemoryMb: 32768, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c6i.8xlarge": { ++ InstanceType: "c6i.8xlarge", ++ VCPU: 32, ++ MemoryMb: 65536, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c6i.large": { ++ InstanceType: "c6i.large", ++ VCPU: 2, ++ MemoryMb: 4096, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c6i.metal": { ++ InstanceType: "c6i.metal", ++ VCPU: 128, ++ MemoryMb: 262144, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c6i.xlarge": { ++ InstanceType: "c6i.xlarge", ++ VCPU: 4, ++ MemoryMb: 8192, ++ GPU: 0, ++ Architecture: "amd64", 
++ }, ++ "c6id.12xlarge": { ++ InstanceType: "c6id.12xlarge", ++ VCPU: 48, ++ MemoryMb: 98304, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c6id.16xlarge": { ++ InstanceType: "c6id.16xlarge", ++ VCPU: 64, ++ MemoryMb: 131072, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c6id.24xlarge": { ++ InstanceType: "c6id.24xlarge", ++ VCPU: 96, ++ MemoryMb: 196608, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c6id.2xlarge": { ++ InstanceType: "c6id.2xlarge", ++ VCPU: 8, ++ MemoryMb: 16384, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c6id.32xlarge": { ++ InstanceType: "c6id.32xlarge", ++ VCPU: 128, ++ MemoryMb: 262144, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c6id.4xlarge": { ++ InstanceType: "c6id.4xlarge", ++ VCPU: 16, ++ MemoryMb: 32768, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c6id.8xlarge": { ++ InstanceType: "c6id.8xlarge", ++ VCPU: 32, ++ MemoryMb: 65536, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c6id.large": { ++ InstanceType: "c6id.large", ++ VCPU: 2, ++ MemoryMb: 4096, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c6id.metal": { ++ InstanceType: "c6id.metal", ++ VCPU: 128, ++ MemoryMb: 262144, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c6id.xlarge": { ++ InstanceType: "c6id.xlarge", ++ VCPU: 4, ++ MemoryMb: 8192, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c6in.12xlarge": { ++ InstanceType: "c6in.12xlarge", ++ VCPU: 48, ++ MemoryMb: 98304, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c6in.16xlarge": { ++ InstanceType: "c6in.16xlarge", ++ VCPU: 64, ++ MemoryMb: 131072, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c6in.24xlarge": { ++ InstanceType: "c6in.24xlarge", ++ VCPU: 96, ++ MemoryMb: 196608, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c6in.2xlarge": { ++ InstanceType: "c6in.2xlarge", ++ VCPU: 8, ++ MemoryMb: 16384, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c6in.32xlarge": { ++ InstanceType: "c6in.32xlarge", ++ VCPU: 128, ++ MemoryMb: 262144, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c6in.4xlarge": { ++ 
InstanceType: "c6in.4xlarge", ++ VCPU: 16, ++ MemoryMb: 32768, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c6in.8xlarge": { ++ InstanceType: "c6in.8xlarge", ++ VCPU: 32, ++ MemoryMb: 65536, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c6in.large": { ++ InstanceType: "c6in.large", ++ VCPU: 2, ++ MemoryMb: 4096, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c6in.xlarge": { ++ InstanceType: "c6in.xlarge", ++ VCPU: 4, ++ MemoryMb: 8192, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "c7g.12xlarge": { ++ InstanceType: "c7g.12xlarge", ++ VCPU: 48, ++ MemoryMb: 98304, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "c7g.16xlarge": { ++ InstanceType: "c7g.16xlarge", ++ VCPU: 64, ++ MemoryMb: 131072, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "c7g.2xlarge": { ++ InstanceType: "c7g.2xlarge", ++ VCPU: 8, ++ MemoryMb: 16384, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "c7g.4xlarge": { ++ InstanceType: "c7g.4xlarge", ++ VCPU: 16, ++ MemoryMb: 32768, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "c7g.8xlarge": { ++ InstanceType: "c7g.8xlarge", ++ VCPU: 32, ++ MemoryMb: 65536, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "c7g.large": { ++ InstanceType: "c7g.large", ++ VCPU: 2, ++ MemoryMb: 4096, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "c7g.medium": { ++ InstanceType: "c7g.medium", ++ VCPU: 1, ++ MemoryMb: 2048, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "c7g.metal": { ++ InstanceType: "c7g.metal", ++ VCPU: 64, ++ MemoryMb: 131072, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "c7g.xlarge": { ++ InstanceType: "c7g.xlarge", ++ VCPU: 4, ++ MemoryMb: 8192, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "cc2.8xlarge": { ++ InstanceType: "cc2.8xlarge", ++ VCPU: 32, ++ MemoryMb: 61952, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "d2.2xlarge": { ++ InstanceType: "d2.2xlarge", ++ VCPU: 8, ++ MemoryMb: 62464, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "d2.4xlarge": { ++ InstanceType: "d2.4xlarge", ++ VCPU: 16, ++ MemoryMb: 124928, ++ GPU: 0, ++ Architecture: "amd64", 
++ }, ++ "d2.8xlarge": { ++ InstanceType: "d2.8xlarge", ++ VCPU: 36, ++ MemoryMb: 249856, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "d2.xlarge": { ++ InstanceType: "d2.xlarge", ++ VCPU: 4, ++ MemoryMb: 31232, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "d3.2xlarge": { ++ InstanceType: "d3.2xlarge", ++ VCPU: 8, ++ MemoryMb: 65536, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "d3.4xlarge": { ++ InstanceType: "d3.4xlarge", ++ VCPU: 16, ++ MemoryMb: 131072, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "d3.8xlarge": { ++ InstanceType: "d3.8xlarge", ++ VCPU: 32, ++ MemoryMb: 262144, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "d3.xlarge": { ++ InstanceType: "d3.xlarge", ++ VCPU: 4, ++ MemoryMb: 32768, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "d3en.12xlarge": { ++ InstanceType: "d3en.12xlarge", ++ VCPU: 48, ++ MemoryMb: 196608, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "d3en.2xlarge": { ++ InstanceType: "d3en.2xlarge", ++ VCPU: 8, ++ MemoryMb: 32768, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "d3en.4xlarge": { ++ InstanceType: "d3en.4xlarge", ++ VCPU: 16, ++ MemoryMb: 65536, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "d3en.6xlarge": { ++ InstanceType: "d3en.6xlarge", ++ VCPU: 24, ++ MemoryMb: 98304, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "d3en.8xlarge": { ++ InstanceType: "d3en.8xlarge", ++ VCPU: 32, ++ MemoryMb: 131072, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "d3en.xlarge": { ++ InstanceType: "d3en.xlarge", ++ VCPU: 4, ++ MemoryMb: 16384, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "dl1.24xlarge": { ++ InstanceType: "dl1.24xlarge", ++ VCPU: 96, ++ MemoryMb: 786432, ++ GPU: 8, ++ Architecture: "amd64", ++ GPUMemory: 15842934784, ++ MPSContext: 32, ++ }, ++ "f1.16xlarge": { ++ InstanceType: "f1.16xlarge", ++ VCPU: 64, ++ MemoryMb: 999424, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "f1.2xlarge": { ++ InstanceType: "f1.2xlarge", ++ VCPU: 8, ++ MemoryMb: 124928, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "f1.4xlarge": { ++ 
InstanceType: "f1.4xlarge", ++ VCPU: 16, ++ MemoryMb: 249856, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "g2.2xlarge": { ++ InstanceType: "g2.2xlarge", ++ VCPU: 8, ++ MemoryMb: 15360, ++ GPU: 1, ++ Architecture: "amd64", ++ GPUMemory: 15842934784, ++ MPSContext: 32, ++ }, ++ "g2.8xlarge": { ++ InstanceType: "g2.8xlarge", ++ VCPU: 32, ++ MemoryMb: 61440, ++ GPU: 4, ++ Architecture: "amd64", ++ GPUMemory: 15842934784, ++ MPSContext: 32, ++ }, ++ "g3.16xlarge": { ++ InstanceType: "g3.16xlarge", ++ VCPU: 64, ++ MemoryMb: 499712, ++ GPU: 4, ++ Architecture: "amd64", ++ GPUMemory: 15842934784, ++ MPSContext: 32, ++ }, ++ "g3.4xlarge": { ++ InstanceType: "g3.4xlarge", ++ VCPU: 16, ++ MemoryMb: 124928, ++ GPU: 1, ++ Architecture: "amd64", ++ GPUMemory: 15842934784, ++ MPSContext: 32, ++ }, ++ "g3.8xlarge": { ++ InstanceType: "g3.8xlarge", ++ VCPU: 32, ++ MemoryMb: 249856, ++ GPU: 2, ++ Architecture: "amd64", ++ GPUMemory: 15842934784, ++ MPSContext: 32, ++ }, ++ "g3s.xlarge": { ++ InstanceType: "g3s.xlarge", ++ VCPU: 4, ++ MemoryMb: 31232, ++ GPU: 1, ++ Architecture: "amd64", ++ GPUMemory: 15842934784, ++ MPSContext: 32, ++ }, ++ "g4ad.16xlarge": { ++ InstanceType: "g4ad.16xlarge", ++ VCPU: 64, ++ MemoryMb: 262144, ++ GPU: 4, ++ Architecture: "amd64", ++ GPUMemory: 15842934784, ++ MPSContext: 32, ++ }, ++ "g4ad.2xlarge": { ++ InstanceType: "g4ad.2xlarge", ++ VCPU: 8, ++ MemoryMb: 32768, ++ GPU: 1, ++ Architecture: "amd64", ++ GPUMemory: 15842934784, ++ MPSContext: 32, ++ }, ++ "g4ad.4xlarge": { ++ InstanceType: "g4ad.4xlarge", ++ VCPU: 16, ++ MemoryMb: 65536, ++ GPU: 1, ++ Architecture: "amd64", ++ GPUMemory: 15842934784, ++ MPSContext: 32, ++ }, ++ "g4ad.8xlarge": { ++ InstanceType: "g4ad.8xlarge", ++ VCPU: 32, ++ MemoryMb: 131072, ++ GPU: 2, ++ Architecture: "amd64", ++ GPUMemory: 15842934784, ++ MPSContext: 32, ++ }, ++ "g4ad.xlarge": { ++ InstanceType: "g4ad.xlarge", ++ VCPU: 4, ++ MemoryMb: 16384, ++ GPU: 1, ++ Architecture: "amd64", ++ GPUMemory: 15842934784, ++ 
MPSContext: 32, ++ }, ++ "g4dn.12xlarge": { ++ InstanceType: "g4dn.12xlarge", ++ VCPU: 48, ++ MemoryMb: 196608, ++ GPU: 4, ++ Architecture: "amd64", ++ GPUMemory: 15842934784, ++ MPSContext: 32, ++ }, ++ "g4dn.16xlarge": { ++ InstanceType: "g4dn.16xlarge", ++ VCPU: 64, ++ MemoryMb: 262144, ++ GPU: 1, ++ Architecture: "amd64", ++ GPUMemory: 15842934784, ++ MPSContext: 32, ++ }, ++ "g4dn.2xlarge": { ++ InstanceType: "g4dn.2xlarge", ++ VCPU: 8, ++ MemoryMb: 32768, ++ GPU: 1, ++ Architecture: "amd64", ++ GPUMemory: 15842934784, ++ MPSContext: 32, ++ }, ++ "g4dn.4xlarge": { ++ InstanceType: "g4dn.4xlarge", ++ VCPU: 16, ++ MemoryMb: 65536, ++ GPU: 1, ++ Architecture: "amd64", ++ GPUMemory: 15842934784, ++ MPSContext: 32, ++ }, ++ "g4dn.8xlarge": { ++ InstanceType: "g4dn.8xlarge", ++ VCPU: 32, ++ MemoryMb: 131072, ++ GPU: 1, ++ Architecture: "amd64", ++ GPUMemory: 15842934784, ++ MPSContext: 32, ++ }, ++ "g4dn.metal": { ++ InstanceType: "g4dn.metal", ++ VCPU: 96, ++ MemoryMb: 393216, ++ GPU: 8, ++ Architecture: "amd64", ++ GPUMemory: 15842934784, ++ MPSContext: 32, ++ }, ++ "g4dn.xlarge": { ++ InstanceType: "g4dn.xlarge", ++ VCPU: 4, ++ MemoryMb: 16384, ++ GPU: 1, ++ Architecture: "amd64", ++ GPUMemory: 15842934784, ++ MPSContext: 32, ++ }, ++ "g5.12xlarge": { ++ InstanceType: "g5.12xlarge", ++ VCPU: 48, ++ MemoryMb: 196608, ++ GPU: 4, ++ Architecture: "amd64", ++ GPUMemory: 15842934784, ++ MPSContext: 32, ++ }, ++ "g5.16xlarge": { ++ InstanceType: "g5.16xlarge", ++ VCPU: 64, ++ MemoryMb: 262144, ++ GPU: 1, ++ Architecture: "amd64", ++ GPUMemory: 15842934784, ++ MPSContext: 32, ++ }, ++ "g5.24xlarge": { ++ InstanceType: "g5.24xlarge", ++ VCPU: 96, ++ MemoryMb: 393216, ++ GPU: 4, ++ Architecture: "amd64", ++ GPUMemory: 15842934784, ++ MPSContext: 32, ++ }, ++ "g5.2xlarge": { ++ InstanceType: "g5.2xlarge", ++ VCPU: 8, ++ MemoryMb: 32768, ++ GPU: 1, ++ Architecture: "amd64", ++ GPUMemory: 15842934784, ++ MPSContext: 32, ++ }, ++ "g5.48xlarge": { ++ InstanceType: 
"g5.48xlarge", ++ VCPU: 192, ++ MemoryMb: 786432, ++ GPU: 8, ++ Architecture: "amd64", ++ GPUMemory: 15842934784, ++ MPSContext: 32, ++ }, ++ "g5.4xlarge": { ++ InstanceType: "g5.4xlarge", ++ VCPU: 16, ++ MemoryMb: 65536, ++ GPU: 1, ++ Architecture: "amd64", ++ GPUMemory: 15842934784, ++ MPSContext: 32, ++ }, ++ "g5.8xlarge": { ++ InstanceType: "g5.8xlarge", ++ VCPU: 32, ++ MemoryMb: 131072, ++ GPU: 1, ++ Architecture: "amd64", ++ GPUMemory: 15842934784, ++ MPSContext: 32, ++ }, ++ "g5.xlarge": { ++ InstanceType: "g5.xlarge", ++ VCPU: 4, ++ MemoryMb: 16384, ++ GPU: 1, ++ Architecture: "amd64", ++ GPUMemory: 15842934784, ++ MPSContext: 32, ++ }, ++ "g5g.16xlarge": { ++ InstanceType: "g5g.16xlarge", ++ VCPU: 64, ++ MemoryMb: 131072, ++ GPU: 2, ++ Architecture: "arm64", ++ GPUMemory: 15842934784, ++ MPSContext: 32, ++ }, ++ "g5g.2xlarge": { ++ InstanceType: "g5g.2xlarge", ++ VCPU: 8, ++ MemoryMb: 16384, ++ GPU: 1, ++ Architecture: "arm64", ++ GPUMemory: 15842934784, ++ MPSContext: 32, ++ }, ++ "g5g.4xlarge": { ++ InstanceType: "g5g.4xlarge", ++ VCPU: 16, ++ MemoryMb: 32768, ++ GPU: 1, ++ Architecture: "arm64", ++ GPUMemory: 15842934784, ++ MPSContext: 32, ++ }, ++ "g5g.8xlarge": { ++ InstanceType: "g5g.8xlarge", ++ VCPU: 32, ++ MemoryMb: 65536, ++ GPU: 1, ++ Architecture: "arm64", ++ GPUMemory: 15842934784, ++ MPSContext: 32, ++ }, ++ "g5g.metal": { ++ InstanceType: "g5g.metal", ++ VCPU: 64, ++ MemoryMb: 131072, ++ GPU: 2, ++ Architecture: "arm64", ++ GPUMemory: 15842934784, ++ MPSContext: 32, ++ }, ++ "g5g.xlarge": { ++ InstanceType: "g5g.xlarge", ++ VCPU: 4, ++ MemoryMb: 8192, ++ GPU: 1, ++ Architecture: "arm64", ++ GPUMemory: 15842934784, ++ MPSContext: 32, ++ }, ++ "h1.16xlarge": { ++ InstanceType: "h1.16xlarge", ++ VCPU: 64, ++ MemoryMb: 262144, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "h1.2xlarge": { ++ InstanceType: "h1.2xlarge", ++ VCPU: 8, ++ MemoryMb: 32768, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "h1.4xlarge": { ++ InstanceType: "h1.4xlarge", ++ 
VCPU: 16, ++ MemoryMb: 65536, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "h1.8xlarge": { ++ InstanceType: "h1.8xlarge", ++ VCPU: 32, ++ MemoryMb: 131072, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "i2.2xlarge": { ++ InstanceType: "i2.2xlarge", ++ VCPU: 8, ++ MemoryMb: 62464, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "i2.4xlarge": { ++ InstanceType: "i2.4xlarge", ++ VCPU: 16, ++ MemoryMb: 124928, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "i2.8xlarge": { ++ InstanceType: "i2.8xlarge", ++ VCPU: 32, ++ MemoryMb: 249856, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "i2.xlarge": { ++ InstanceType: "i2.xlarge", ++ VCPU: 4, ++ MemoryMb: 31232, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "i3.16xlarge": { ++ InstanceType: "i3.16xlarge", ++ VCPU: 64, ++ MemoryMb: 499712, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "i3.2xlarge": { ++ InstanceType: "i3.2xlarge", ++ VCPU: 8, ++ MemoryMb: 62464, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "i3.4xlarge": { ++ InstanceType: "i3.4xlarge", ++ VCPU: 16, ++ MemoryMb: 124928, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "i3.8xlarge": { ++ InstanceType: "i3.8xlarge", ++ VCPU: 32, ++ MemoryMb: 249856, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "i3.large": { ++ InstanceType: "i3.large", ++ VCPU: 2, ++ MemoryMb: 15616, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "i3.metal": { ++ InstanceType: "i3.metal", ++ VCPU: 72, ++ MemoryMb: 524288, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "i3.xlarge": { ++ InstanceType: "i3.xlarge", ++ VCPU: 4, ++ MemoryMb: 31232, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "i3en.12xlarge": { ++ InstanceType: "i3en.12xlarge", ++ VCPU: 48, ++ MemoryMb: 393216, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "i3en.24xlarge": { ++ InstanceType: "i3en.24xlarge", ++ VCPU: 96, ++ MemoryMb: 786432, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "i3en.2xlarge": { ++ InstanceType: "i3en.2xlarge", ++ VCPU: 8, ++ MemoryMb: 65536, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "i3en.3xlarge": { ++ 
InstanceType: "i3en.3xlarge", ++ VCPU: 12, ++ MemoryMb: 98304, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "i3en.6xlarge": { ++ InstanceType: "i3en.6xlarge", ++ VCPU: 24, ++ MemoryMb: 196608, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "i3en.large": { ++ InstanceType: "i3en.large", ++ VCPU: 2, ++ MemoryMb: 16384, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "i3en.metal": { ++ InstanceType: "i3en.metal", ++ VCPU: 96, ++ MemoryMb: 786432, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "i3en.xlarge": { ++ InstanceType: "i3en.xlarge", ++ VCPU: 4, ++ MemoryMb: 32768, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "i4i.16xlarge": { ++ InstanceType: "i4i.16xlarge", ++ VCPU: 64, ++ MemoryMb: 524288, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "i4i.2xlarge": { ++ InstanceType: "i4i.2xlarge", ++ VCPU: 8, ++ MemoryMb: 65536, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "i4i.32xlarge": { ++ InstanceType: "i4i.32xlarge", ++ VCPU: 128, ++ MemoryMb: 1048576, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "i4i.4xlarge": { ++ InstanceType: "i4i.4xlarge", ++ VCPU: 16, ++ MemoryMb: 131072, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "i4i.8xlarge": { ++ InstanceType: "i4i.8xlarge", ++ VCPU: 32, ++ MemoryMb: 262144, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "i4i.large": { ++ InstanceType: "i4i.large", ++ VCPU: 2, ++ MemoryMb: 16384, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "i4i.metal": { ++ InstanceType: "i4i.metal", ++ VCPU: 128, ++ MemoryMb: 1048576, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "i4i.xlarge": { ++ InstanceType: "i4i.xlarge", ++ VCPU: 4, ++ MemoryMb: 32768, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "im4gn.16xlarge": { ++ InstanceType: "im4gn.16xlarge", ++ VCPU: 64, ++ MemoryMb: 262144, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "im4gn.2xlarge": { ++ InstanceType: "im4gn.2xlarge", ++ VCPU: 8, ++ MemoryMb: 32768, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "im4gn.4xlarge": { ++ InstanceType: "im4gn.4xlarge", ++ VCPU: 16, ++ MemoryMb: 65536, ++ 
GPU: 0, ++ Architecture: "arm64", ++ }, ++ "im4gn.8xlarge": { ++ InstanceType: "im4gn.8xlarge", ++ VCPU: 32, ++ MemoryMb: 131072, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "im4gn.large": { ++ InstanceType: "im4gn.large", ++ VCPU: 2, ++ MemoryMb: 8192, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "im4gn.xlarge": { ++ InstanceType: "im4gn.xlarge", ++ VCPU: 4, ++ MemoryMb: 16384, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "inf1.24xlarge": { ++ InstanceType: "inf1.24xlarge", ++ VCPU: 96, ++ MemoryMb: 196608, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "inf1.2xlarge": { ++ InstanceType: "inf1.2xlarge", ++ VCPU: 8, ++ MemoryMb: 16384, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "inf1.6xlarge": { ++ InstanceType: "inf1.6xlarge", ++ VCPU: 24, ++ MemoryMb: 49152, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "inf1.xlarge": { ++ InstanceType: "inf1.xlarge", ++ VCPU: 4, ++ MemoryMb: 8192, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "inf2.48xlarge": { ++ InstanceType: "inf2.48xlarge", ++ VCPU: 192, ++ MemoryMb: 786432, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "inf2.24xlarge": { ++ InstanceType: "inf2.24xlarge", ++ VCPU: 96, ++ MemoryMb: 393216, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "inf2.8xlarge": { ++ InstanceType: "inf2.8xlarge", ++ VCPU: 32, ++ MemoryMb: 131072, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "inf2.xlarge": { ++ InstanceType: "inf2.xlarge", ++ VCPU: 4, ++ MemoryMb: 16384, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "is4gen.2xlarge": { ++ InstanceType: "is4gen.2xlarge", ++ VCPU: 8, ++ MemoryMb: 49152, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "is4gen.4xlarge": { ++ InstanceType: "is4gen.4xlarge", ++ VCPU: 16, ++ MemoryMb: 98304, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "is4gen.8xlarge": { ++ InstanceType: "is4gen.8xlarge", ++ VCPU: 32, ++ MemoryMb: 196608, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "is4gen.large": { ++ InstanceType: "is4gen.large", ++ VCPU: 2, ++ MemoryMb: 12288, ++ GPU: 0, ++ Architecture: "arm64", ++ 
}, ++ "is4gen.medium": { ++ InstanceType: "is4gen.medium", ++ VCPU: 1, ++ MemoryMb: 6144, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "is4gen.xlarge": { ++ InstanceType: "is4gen.xlarge", ++ VCPU: 4, ++ MemoryMb: 24576, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "m1.large": { ++ InstanceType: "m1.large", ++ VCPU: 2, ++ MemoryMb: 7680, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m1.medium": { ++ InstanceType: "m1.medium", ++ VCPU: 1, ++ MemoryMb: 3788, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m1.small": { ++ InstanceType: "m1.small", ++ VCPU: 1, ++ MemoryMb: 1740, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m1.xlarge": { ++ InstanceType: "m1.xlarge", ++ VCPU: 4, ++ MemoryMb: 15360, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m2.2xlarge": { ++ InstanceType: "m2.2xlarge", ++ VCPU: 4, ++ MemoryMb: 35020, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m2.4xlarge": { ++ InstanceType: "m2.4xlarge", ++ VCPU: 8, ++ MemoryMb: 70041, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m2.xlarge": { ++ InstanceType: "m2.xlarge", ++ VCPU: 2, ++ MemoryMb: 17510, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m3.2xlarge": { ++ InstanceType: "m3.2xlarge", ++ VCPU: 8, ++ MemoryMb: 30720, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m3.large": { ++ InstanceType: "m3.large", ++ VCPU: 2, ++ MemoryMb: 7680, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m3.medium": { ++ InstanceType: "m3.medium", ++ VCPU: 1, ++ MemoryMb: 3840, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m3.xlarge": { ++ InstanceType: "m3.xlarge", ++ VCPU: 4, ++ MemoryMb: 15360, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m4.10xlarge": { ++ InstanceType: "m4.10xlarge", ++ VCPU: 40, ++ MemoryMb: 163840, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m4.16xlarge": { ++ InstanceType: "m4.16xlarge", ++ VCPU: 64, ++ MemoryMb: 262144, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m4.2xlarge": { ++ InstanceType: "m4.2xlarge", ++ VCPU: 8, ++ MemoryMb: 32768, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ 
"m4.4xlarge": { ++ InstanceType: "m4.4xlarge", ++ VCPU: 16, ++ MemoryMb: 65536, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m4.large": { ++ InstanceType: "m4.large", ++ VCPU: 2, ++ MemoryMb: 8192, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m4.xlarge": { ++ InstanceType: "m4.xlarge", ++ VCPU: 4, ++ MemoryMb: 16384, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m5.12xlarge": { ++ InstanceType: "m5.12xlarge", ++ VCPU: 48, ++ MemoryMb: 196608, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m5.16xlarge": { ++ InstanceType: "m5.16xlarge", ++ VCPU: 64, ++ MemoryMb: 262144, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m5.24xlarge": { ++ InstanceType: "m5.24xlarge", ++ VCPU: 96, ++ MemoryMb: 393216, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m5.2xlarge": { ++ InstanceType: "m5.2xlarge", ++ VCPU: 8, ++ MemoryMb: 32768, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m5.4xlarge": { ++ InstanceType: "m5.4xlarge", ++ VCPU: 16, ++ MemoryMb: 65536, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m5.8xlarge": { ++ InstanceType: "m5.8xlarge", ++ VCPU: 32, ++ MemoryMb: 131072, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m5.large": { ++ InstanceType: "m5.large", ++ VCPU: 2, ++ MemoryMb: 8192, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m5.metal": { ++ InstanceType: "m5.metal", ++ VCPU: 96, ++ MemoryMb: 393216, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m5.xlarge": { ++ InstanceType: "m5.xlarge", ++ VCPU: 4, ++ MemoryMb: 16384, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m5a.12xlarge": { ++ InstanceType: "m5a.12xlarge", ++ VCPU: 48, ++ MemoryMb: 196608, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m5a.16xlarge": { ++ InstanceType: "m5a.16xlarge", ++ VCPU: 64, ++ MemoryMb: 262144, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m5a.24xlarge": { ++ InstanceType: "m5a.24xlarge", ++ VCPU: 96, ++ MemoryMb: 393216, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m5a.2xlarge": { ++ InstanceType: "m5a.2xlarge", ++ VCPU: 8, ++ MemoryMb: 32768, ++ GPU: 0, ++ 
Architecture: "amd64", ++ }, ++ "m5a.4xlarge": { ++ InstanceType: "m5a.4xlarge", ++ VCPU: 16, ++ MemoryMb: 65536, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m5a.8xlarge": { ++ InstanceType: "m5a.8xlarge", ++ VCPU: 32, ++ MemoryMb: 131072, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m5a.large": { ++ InstanceType: "m5a.large", ++ VCPU: 2, ++ MemoryMb: 8192, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m5a.xlarge": { ++ InstanceType: "m5a.xlarge", ++ VCPU: 4, ++ MemoryMb: 16384, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m5ad.12xlarge": { ++ InstanceType: "m5ad.12xlarge", ++ VCPU: 48, ++ MemoryMb: 196608, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m5ad.16xlarge": { ++ InstanceType: "m5ad.16xlarge", ++ VCPU: 64, ++ MemoryMb: 262144, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m5ad.24xlarge": { ++ InstanceType: "m5ad.24xlarge", ++ VCPU: 96, ++ MemoryMb: 393216, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m5ad.2xlarge": { ++ InstanceType: "m5ad.2xlarge", ++ VCPU: 8, ++ MemoryMb: 32768, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m5ad.4xlarge": { ++ InstanceType: "m5ad.4xlarge", ++ VCPU: 16, ++ MemoryMb: 65536, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m5ad.8xlarge": { ++ InstanceType: "m5ad.8xlarge", ++ VCPU: 32, ++ MemoryMb: 131072, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m5ad.large": { ++ InstanceType: "m5ad.large", ++ VCPU: 2, ++ MemoryMb: 8192, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m5ad.xlarge": { ++ InstanceType: "m5ad.xlarge", ++ VCPU: 4, ++ MemoryMb: 16384, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m5d.12xlarge": { ++ InstanceType: "m5d.12xlarge", ++ VCPU: 48, ++ MemoryMb: 196608, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m5d.16xlarge": { ++ InstanceType: "m5d.16xlarge", ++ VCPU: 64, ++ MemoryMb: 262144, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m5d.24xlarge": { ++ InstanceType: "m5d.24xlarge", ++ VCPU: 96, ++ MemoryMb: 393216, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m5d.2xlarge": { ++ 
InstanceType: "m5d.2xlarge", ++ VCPU: 8, ++ MemoryMb: 32768, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m5d.4xlarge": { ++ InstanceType: "m5d.4xlarge", ++ VCPU: 16, ++ MemoryMb: 65536, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m5d.8xlarge": { ++ InstanceType: "m5d.8xlarge", ++ VCPU: 32, ++ MemoryMb: 131072, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m5d.large": { ++ InstanceType: "m5d.large", ++ VCPU: 2, ++ MemoryMb: 8192, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m5d.metal": { ++ InstanceType: "m5d.metal", ++ VCPU: 96, ++ MemoryMb: 393216, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m5d.xlarge": { ++ InstanceType: "m5d.xlarge", ++ VCPU: 4, ++ MemoryMb: 16384, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m5dn.12xlarge": { ++ InstanceType: "m5dn.12xlarge", ++ VCPU: 48, ++ MemoryMb: 196608, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m5dn.16xlarge": { ++ InstanceType: "m5dn.16xlarge", ++ VCPU: 64, ++ MemoryMb: 262144, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m5dn.24xlarge": { ++ InstanceType: "m5dn.24xlarge", ++ VCPU: 96, ++ MemoryMb: 393216, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m5dn.2xlarge": { ++ InstanceType: "m5dn.2xlarge", ++ VCPU: 8, ++ MemoryMb: 32768, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m5dn.4xlarge": { ++ InstanceType: "m5dn.4xlarge", ++ VCPU: 16, ++ MemoryMb: 65536, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m5dn.8xlarge": { ++ InstanceType: "m5dn.8xlarge", ++ VCPU: 32, ++ MemoryMb: 131072, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m5dn.large": { ++ InstanceType: "m5dn.large", ++ VCPU: 2, ++ MemoryMb: 8192, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m5dn.metal": { ++ InstanceType: "m5dn.metal", ++ VCPU: 96, ++ MemoryMb: 393216, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m5dn.xlarge": { ++ InstanceType: "m5dn.xlarge", ++ VCPU: 4, ++ MemoryMb: 16384, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m5n.12xlarge": { ++ InstanceType: "m5n.12xlarge", ++ VCPU: 48, ++ MemoryMb: 196608, ++ GPU: 0, 
++ Architecture: "amd64", ++ }, ++ "m5n.16xlarge": { ++ InstanceType: "m5n.16xlarge", ++ VCPU: 64, ++ MemoryMb: 262144, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m5n.24xlarge": { ++ InstanceType: "m5n.24xlarge", ++ VCPU: 96, ++ MemoryMb: 393216, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m5n.2xlarge": { ++ InstanceType: "m5n.2xlarge", ++ VCPU: 8, ++ MemoryMb: 32768, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m5n.4xlarge": { ++ InstanceType: "m5n.4xlarge", ++ VCPU: 16, ++ MemoryMb: 65536, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m5n.8xlarge": { ++ InstanceType: "m5n.8xlarge", ++ VCPU: 32, ++ MemoryMb: 131072, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m5n.large": { ++ InstanceType: "m5n.large", ++ VCPU: 2, ++ MemoryMb: 8192, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m5n.metal": { ++ InstanceType: "m5n.metal", ++ VCPU: 96, ++ MemoryMb: 393216, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m5n.xlarge": { ++ InstanceType: "m5n.xlarge", ++ VCPU: 4, ++ MemoryMb: 16384, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m5zn.12xlarge": { ++ InstanceType: "m5zn.12xlarge", ++ VCPU: 48, ++ MemoryMb: 196608, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m5zn.2xlarge": { ++ InstanceType: "m5zn.2xlarge", ++ VCPU: 8, ++ MemoryMb: 32768, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m5zn.3xlarge": { ++ InstanceType: "m5zn.3xlarge", ++ VCPU: 12, ++ MemoryMb: 49152, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m5zn.6xlarge": { ++ InstanceType: "m5zn.6xlarge", ++ VCPU: 24, ++ MemoryMb: 98304, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m5zn.large": { ++ InstanceType: "m5zn.large", ++ VCPU: 2, ++ MemoryMb: 8192, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m5zn.metal": { ++ InstanceType: "m5zn.metal", ++ VCPU: 48, ++ MemoryMb: 196608, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m5zn.xlarge": { ++ InstanceType: "m5zn.xlarge", ++ VCPU: 4, ++ MemoryMb: 16384, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m6a.12xlarge": { ++ InstanceType: 
"m6a.12xlarge", ++ VCPU: 48, ++ MemoryMb: 196608, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m6a.16xlarge": { ++ InstanceType: "m6a.16xlarge", ++ VCPU: 64, ++ MemoryMb: 262144, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m6a.24xlarge": { ++ InstanceType: "m6a.24xlarge", ++ VCPU: 96, ++ MemoryMb: 393216, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m6a.2xlarge": { ++ InstanceType: "m6a.2xlarge", ++ VCPU: 8, ++ MemoryMb: 32768, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m6a.32xlarge": { ++ InstanceType: "m6a.32xlarge", ++ VCPU: 128, ++ MemoryMb: 524288, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m6a.48xlarge": { ++ InstanceType: "m6a.48xlarge", ++ VCPU: 192, ++ MemoryMb: 786432, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m6a.4xlarge": { ++ InstanceType: "m6a.4xlarge", ++ VCPU: 16, ++ MemoryMb: 65536, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m6a.8xlarge": { ++ InstanceType: "m6a.8xlarge", ++ VCPU: 32, ++ MemoryMb: 131072, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m6a.large": { ++ InstanceType: "m6a.large", ++ VCPU: 2, ++ MemoryMb: 8192, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m6a.metal": { ++ InstanceType: "m6a.metal", ++ VCPU: 192, ++ MemoryMb: 786432, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m6a.xlarge": { ++ InstanceType: "m6a.xlarge", ++ VCPU: 4, ++ MemoryMb: 16384, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m6g.12xlarge": { ++ InstanceType: "m6g.12xlarge", ++ VCPU: 48, ++ MemoryMb: 196608, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "m6g.16xlarge": { ++ InstanceType: "m6g.16xlarge", ++ VCPU: 64, ++ MemoryMb: 262144, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "m6g.2xlarge": { ++ InstanceType: "m6g.2xlarge", ++ VCPU: 8, ++ MemoryMb: 32768, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "m6g.4xlarge": { ++ InstanceType: "m6g.4xlarge", ++ VCPU: 16, ++ MemoryMb: 65536, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "m6g.8xlarge": { ++ InstanceType: "m6g.8xlarge", ++ VCPU: 32, ++ MemoryMb: 131072, ++ GPU: 0, ++ 
Architecture: "arm64", ++ }, ++ "m6g.large": { ++ InstanceType: "m6g.large", ++ VCPU: 2, ++ MemoryMb: 8192, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "m6g.medium": { ++ InstanceType: "m6g.medium", ++ VCPU: 1, ++ MemoryMb: 4096, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "m6g.metal": { ++ InstanceType: "m6g.metal", ++ VCPU: 64, ++ MemoryMb: 262144, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "m6g.xlarge": { ++ InstanceType: "m6g.xlarge", ++ VCPU: 4, ++ MemoryMb: 16384, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "m6gd.12xlarge": { ++ InstanceType: "m6gd.12xlarge", ++ VCPU: 48, ++ MemoryMb: 196608, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "m6gd.16xlarge": { ++ InstanceType: "m6gd.16xlarge", ++ VCPU: 64, ++ MemoryMb: 262144, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "m6gd.2xlarge": { ++ InstanceType: "m6gd.2xlarge", ++ VCPU: 8, ++ MemoryMb: 32768, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "m6gd.4xlarge": { ++ InstanceType: "m6gd.4xlarge", ++ VCPU: 16, ++ MemoryMb: 65536, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "m6gd.8xlarge": { ++ InstanceType: "m6gd.8xlarge", ++ VCPU: 32, ++ MemoryMb: 131072, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "m6gd.large": { ++ InstanceType: "m6gd.large", ++ VCPU: 2, ++ MemoryMb: 8192, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "m6gd.medium": { ++ InstanceType: "m6gd.medium", ++ VCPU: 1, ++ MemoryMb: 4096, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "m6gd.metal": { ++ InstanceType: "m6gd.metal", ++ VCPU: 64, ++ MemoryMb: 262144, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "m6gd.xlarge": { ++ InstanceType: "m6gd.xlarge", ++ VCPU: 4, ++ MemoryMb: 16384, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "m6i.12xlarge": { ++ InstanceType: "m6i.12xlarge", ++ VCPU: 48, ++ MemoryMb: 196608, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m6i.16xlarge": { ++ InstanceType: "m6i.16xlarge", ++ VCPU: 64, ++ MemoryMb: 262144, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m6i.24xlarge": { ++ InstanceType: 
"m6i.24xlarge", ++ VCPU: 96, ++ MemoryMb: 393216, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m6i.2xlarge": { ++ InstanceType: "m6i.2xlarge", ++ VCPU: 8, ++ MemoryMb: 32768, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m6i.32xlarge": { ++ InstanceType: "m6i.32xlarge", ++ VCPU: 128, ++ MemoryMb: 524288, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m6i.4xlarge": { ++ InstanceType: "m6i.4xlarge", ++ VCPU: 16, ++ MemoryMb: 65536, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m6i.8xlarge": { ++ InstanceType: "m6i.8xlarge", ++ VCPU: 32, ++ MemoryMb: 131072, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m6i.large": { ++ InstanceType: "m6i.large", ++ VCPU: 2, ++ MemoryMb: 8192, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m6i.metal": { ++ InstanceType: "m6i.metal", ++ VCPU: 128, ++ MemoryMb: 524288, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m6i.xlarge": { ++ InstanceType: "m6i.xlarge", ++ VCPU: 4, ++ MemoryMb: 16384, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m6id.12xlarge": { ++ InstanceType: "m6id.12xlarge", ++ VCPU: 48, ++ MemoryMb: 196608, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m6id.16xlarge": { ++ InstanceType: "m6id.16xlarge", ++ VCPU: 64, ++ MemoryMb: 262144, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m6id.24xlarge": { ++ InstanceType: "m6id.24xlarge", ++ VCPU: 96, ++ MemoryMb: 393216, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m6id.2xlarge": { ++ InstanceType: "m6id.2xlarge", ++ VCPU: 8, ++ MemoryMb: 32768, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m6id.32xlarge": { ++ InstanceType: "m6id.32xlarge", ++ VCPU: 128, ++ MemoryMb: 524288, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m6id.4xlarge": { ++ InstanceType: "m6id.4xlarge", ++ VCPU: 16, ++ MemoryMb: 65536, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m6id.8xlarge": { ++ InstanceType: "m6id.8xlarge", ++ VCPU: 32, ++ MemoryMb: 131072, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m6id.large": { ++ InstanceType: "m6id.large", ++ VCPU: 2, ++ MemoryMb: 8192, ++ GPU: 0, ++ 
Architecture: "amd64", ++ }, ++ "m6id.metal": { ++ InstanceType: "m6id.metal", ++ VCPU: 128, ++ MemoryMb: 524288, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m6id.xlarge": { ++ InstanceType: "m6id.xlarge", ++ VCPU: 4, ++ MemoryMb: 16384, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m6idn.12xlarge": { ++ InstanceType: "m6idn.12xlarge", ++ VCPU: 48, ++ MemoryMb: 196608, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m6idn.16xlarge": { ++ InstanceType: "m6idn.16xlarge", ++ VCPU: 64, ++ MemoryMb: 262144, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m6idn.24xlarge": { ++ InstanceType: "m6idn.24xlarge", ++ VCPU: 96, ++ MemoryMb: 393216, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m6idn.2xlarge": { ++ InstanceType: "m6idn.2xlarge", ++ VCPU: 8, ++ MemoryMb: 32768, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m6idn.32xlarge": { ++ InstanceType: "m6idn.32xlarge", ++ VCPU: 128, ++ MemoryMb: 524288, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m6idn.4xlarge": { ++ InstanceType: "m6idn.4xlarge", ++ VCPU: 16, ++ MemoryMb: 65536, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m6idn.8xlarge": { ++ InstanceType: "m6idn.8xlarge", ++ VCPU: 32, ++ MemoryMb: 131072, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m6idn.large": { ++ InstanceType: "m6idn.large", ++ VCPU: 2, ++ MemoryMb: 8192, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m6idn.xlarge": { ++ InstanceType: "m6idn.xlarge", ++ VCPU: 4, ++ MemoryMb: 16384, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m6in.12xlarge": { ++ InstanceType: "m6in.12xlarge", ++ VCPU: 48, ++ MemoryMb: 196608, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m6in.16xlarge": { ++ InstanceType: "m6in.16xlarge", ++ VCPU: 64, ++ MemoryMb: 262144, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m6in.24xlarge": { ++ InstanceType: "m6in.24xlarge", ++ VCPU: 96, ++ MemoryMb: 393216, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m6in.2xlarge": { ++ InstanceType: "m6in.2xlarge", ++ VCPU: 8, ++ MemoryMb: 32768, ++ GPU: 0, ++ Architecture: "amd64", 
++ }, ++ "m6in.32xlarge": { ++ InstanceType: "m6in.32xlarge", ++ VCPU: 128, ++ MemoryMb: 524288, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m6in.4xlarge": { ++ InstanceType: "m6in.4xlarge", ++ VCPU: 16, ++ MemoryMb: 65536, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m6in.8xlarge": { ++ InstanceType: "m6in.8xlarge", ++ VCPU: 32, ++ MemoryMb: 131072, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m6in.large": { ++ InstanceType: "m6in.large", ++ VCPU: 2, ++ MemoryMb: 8192, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m6in.xlarge": { ++ InstanceType: "m6in.xlarge", ++ VCPU: 4, ++ MemoryMb: 16384, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "m7g.12xlarge": { ++ InstanceType: "m7g.12xlarge", ++ VCPU: 48, ++ MemoryMb: 196608, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "m7g.16xlarge": { ++ InstanceType: "m7g.16xlarge", ++ VCPU: 64, ++ MemoryMb: 262144, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "m7g.2xlarge": { ++ InstanceType: "m7g.2xlarge", ++ VCPU: 8, ++ MemoryMb: 32768, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "m7g.4xlarge": { ++ InstanceType: "m7g.4xlarge", ++ VCPU: 16, ++ MemoryMb: 65536, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "m7g.8xlarge": { ++ InstanceType: "m7g.8xlarge", ++ VCPU: 32, ++ MemoryMb: 131072, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "m7g.large": { ++ InstanceType: "m7g.large", ++ VCPU: 2, ++ MemoryMb: 8192, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "m7g.medium": { ++ InstanceType: "m7g.medium", ++ VCPU: 1, ++ MemoryMb: 4096, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "m7g.metal": { ++ InstanceType: "m7g.metal", ++ VCPU: 64, ++ MemoryMb: 262144, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "m7g.xlarge": { ++ InstanceType: "m7g.xlarge", ++ VCPU: 4, ++ MemoryMb: 16384, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "mac1.metal": { ++ InstanceType: "mac1.metal", ++ VCPU: 12, ++ MemoryMb: 32768, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "mac2.metal": { ++ InstanceType: "mac2.metal", ++ VCPU: 8, ++ MemoryMb: 
16384, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "p2.16xlarge": { ++ InstanceType: "p2.16xlarge", ++ VCPU: 64, ++ MemoryMb: 749568, ++ GPU: 16, ++ Architecture: "amd64", ++ GPUMemory: 15842934784, ++ MPSContext: 32, ++ }, ++ "p2.8xlarge": { ++ InstanceType: "p2.8xlarge", ++ VCPU: 32, ++ MemoryMb: 499712, ++ GPU: 8, ++ Architecture: "amd64", ++ GPUMemory: 15842934784, ++ MPSContext: 32, ++ }, ++ "p2.xlarge": { ++ InstanceType: "p2.xlarge", ++ VCPU: 4, ++ MemoryMb: 62464, ++ GPU: 1, ++ Architecture: "amd64", ++ GPUMemory: 15842934784, ++ MPSContext: 32, ++ }, ++ "p3.16xlarge": { ++ InstanceType: "p3.16xlarge", ++ VCPU: 64, ++ MemoryMb: 499712, ++ GPU: 8, ++ Architecture: "amd64", ++ GPUMemory: 15842934784, ++ MPSContext: 32, ++ }, ++ "p3.2xlarge": { ++ InstanceType: "p3.2xlarge", ++ VCPU: 8, ++ MemoryMb: 62464, ++ GPU: 1, ++ Architecture: "amd64", ++ GPUMemory: 15842934784, ++ MPSContext: 32, ++ }, ++ "p3.8xlarge": { ++ InstanceType: "p3.8xlarge", ++ VCPU: 32, ++ MemoryMb: 249856, ++ GPU: 4, ++ Architecture: "amd64", ++ GPUMemory: 15842934784, ++ MPSContext: 32, ++ }, ++ "p3dn.24xlarge": { ++ InstanceType: "p3dn.24xlarge", ++ VCPU: 96, ++ MemoryMb: 786432, ++ GPU: 8, ++ Architecture: "amd64", ++ GPUMemory: 15842934784, ++ MPSContext: 32, ++ }, ++ "p4d.24xlarge": { ++ InstanceType: "p4d.24xlarge", ++ VCPU: 96, ++ MemoryMb: 1179648, ++ GPU: 8, ++ Architecture: "amd64", ++ GPUMemory: 15842934784, ++ MPSContext: 32, ++ }, ++ "p4de.24xlarge": { ++ InstanceType: "p4de.24xlarge", ++ VCPU: 96, ++ MemoryMb: 1179648, ++ GPU: 8, ++ Architecture: "amd64", ++ GPUMemory: 15842934784, ++ MPSContext: 32, ++ }, ++ "p5.48xlarge": { ++ InstanceType: "p5.48xlarge", ++ VCPU: 192, ++ MemoryMb: 2048000, ++ GPU: 8, ++ Architecture: "amd64", ++ GPUMemory: 15842934784, ++ MPSContext: 32, ++ }, ++ "r3.2xlarge": { ++ InstanceType: "r3.2xlarge", ++ VCPU: 8, ++ MemoryMb: 62464, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r3.4xlarge": { ++ InstanceType: "r3.4xlarge", ++ VCPU: 16, ++ 
MemoryMb: 124928, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r3.8xlarge": { ++ InstanceType: "r3.8xlarge", ++ VCPU: 32, ++ MemoryMb: 249856, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r3.large": { ++ InstanceType: "r3.large", ++ VCPU: 2, ++ MemoryMb: 15360, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r3.xlarge": { ++ InstanceType: "r3.xlarge", ++ VCPU: 4, ++ MemoryMb: 31232, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r4.16xlarge": { ++ InstanceType: "r4.16xlarge", ++ VCPU: 64, ++ MemoryMb: 499712, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r4.2xlarge": { ++ InstanceType: "r4.2xlarge", ++ VCPU: 8, ++ MemoryMb: 62464, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r4.4xlarge": { ++ InstanceType: "r4.4xlarge", ++ VCPU: 16, ++ MemoryMb: 124928, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r4.8xlarge": { ++ InstanceType: "r4.8xlarge", ++ VCPU: 32, ++ MemoryMb: 249856, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r4.large": { ++ InstanceType: "r4.large", ++ VCPU: 2, ++ MemoryMb: 15616, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r4.xlarge": { ++ InstanceType: "r4.xlarge", ++ VCPU: 4, ++ MemoryMb: 31232, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r5.12xlarge": { ++ InstanceType: "r5.12xlarge", ++ VCPU: 48, ++ MemoryMb: 393216, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r5.16xlarge": { ++ InstanceType: "r5.16xlarge", ++ VCPU: 64, ++ MemoryMb: 524288, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r5.24xlarge": { ++ InstanceType: "r5.24xlarge", ++ VCPU: 96, ++ MemoryMb: 786432, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r5.2xlarge": { ++ InstanceType: "r5.2xlarge", ++ VCPU: 8, ++ MemoryMb: 65536, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r5.4xlarge": { ++ InstanceType: "r5.4xlarge", ++ VCPU: 16, ++ MemoryMb: 131072, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r5.8xlarge": { ++ InstanceType: "r5.8xlarge", ++ VCPU: 32, ++ MemoryMb: 262144, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r5.large": { ++ InstanceType: "r5.large", ++ 
VCPU: 2, ++ MemoryMb: 16384, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r5.metal": { ++ InstanceType: "r5.metal", ++ VCPU: 96, ++ MemoryMb: 786432, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r5.xlarge": { ++ InstanceType: "r5.xlarge", ++ VCPU: 4, ++ MemoryMb: 32768, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r5a.12xlarge": { ++ InstanceType: "r5a.12xlarge", ++ VCPU: 48, ++ MemoryMb: 393216, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r5a.16xlarge": { ++ InstanceType: "r5a.16xlarge", ++ VCPU: 64, ++ MemoryMb: 524288, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r5a.24xlarge": { ++ InstanceType: "r5a.24xlarge", ++ VCPU: 96, ++ MemoryMb: 786432, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r5a.2xlarge": { ++ InstanceType: "r5a.2xlarge", ++ VCPU: 8, ++ MemoryMb: 65536, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r5a.4xlarge": { ++ InstanceType: "r5a.4xlarge", ++ VCPU: 16, ++ MemoryMb: 131072, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r5a.8xlarge": { ++ InstanceType: "r5a.8xlarge", ++ VCPU: 32, ++ MemoryMb: 262144, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r5a.large": { ++ InstanceType: "r5a.large", ++ VCPU: 2, ++ MemoryMb: 16384, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r5a.xlarge": { ++ InstanceType: "r5a.xlarge", ++ VCPU: 4, ++ MemoryMb: 32768, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r5ad.12xlarge": { ++ InstanceType: "r5ad.12xlarge", ++ VCPU: 48, ++ MemoryMb: 393216, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r5ad.16xlarge": { ++ InstanceType: "r5ad.16xlarge", ++ VCPU: 64, ++ MemoryMb: 524288, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r5ad.24xlarge": { ++ InstanceType: "r5ad.24xlarge", ++ VCPU: 96, ++ MemoryMb: 786432, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r5ad.2xlarge": { ++ InstanceType: "r5ad.2xlarge", ++ VCPU: 8, ++ MemoryMb: 65536, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r5ad.4xlarge": { ++ InstanceType: "r5ad.4xlarge", ++ VCPU: 16, ++ MemoryMb: 131072, ++ GPU: 0, ++ Architecture: "amd64", ++ 
}, ++ "r5ad.8xlarge": { ++ InstanceType: "r5ad.8xlarge", ++ VCPU: 32, ++ MemoryMb: 262144, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r5ad.large": { ++ InstanceType: "r5ad.large", ++ VCPU: 2, ++ MemoryMb: 16384, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r5ad.xlarge": { ++ InstanceType: "r5ad.xlarge", ++ VCPU: 4, ++ MemoryMb: 32768, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r5b.12xlarge": { ++ InstanceType: "r5b.12xlarge", ++ VCPU: 48, ++ MemoryMb: 393216, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r5b.16xlarge": { ++ InstanceType: "r5b.16xlarge", ++ VCPU: 64, ++ MemoryMb: 524288, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r5b.24xlarge": { ++ InstanceType: "r5b.24xlarge", ++ VCPU: 96, ++ MemoryMb: 786432, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r5b.2xlarge": { ++ InstanceType: "r5b.2xlarge", ++ VCPU: 8, ++ MemoryMb: 65536, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r5b.4xlarge": { ++ InstanceType: "r5b.4xlarge", ++ VCPU: 16, ++ MemoryMb: 131072, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r5b.8xlarge": { ++ InstanceType: "r5b.8xlarge", ++ VCPU: 32, ++ MemoryMb: 262144, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r5b.large": { ++ InstanceType: "r5b.large", ++ VCPU: 2, ++ MemoryMb: 16384, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r5b.metal": { ++ InstanceType: "r5b.metal", ++ VCPU: 96, ++ MemoryMb: 786432, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r5b.xlarge": { ++ InstanceType: "r5b.xlarge", ++ VCPU: 4, ++ MemoryMb: 32768, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r5d.12xlarge": { ++ InstanceType: "r5d.12xlarge", ++ VCPU: 48, ++ MemoryMb: 393216, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r5d.16xlarge": { ++ InstanceType: "r5d.16xlarge", ++ VCPU: 64, ++ MemoryMb: 524288, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r5d.24xlarge": { ++ InstanceType: "r5d.24xlarge", ++ VCPU: 96, ++ MemoryMb: 786432, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r5d.2xlarge": { ++ InstanceType: "r5d.2xlarge", ++ VCPU: 8, ++ 
MemoryMb: 65536, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r5d.4xlarge": { ++ InstanceType: "r5d.4xlarge", ++ VCPU: 16, ++ MemoryMb: 131072, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r5d.8xlarge": { ++ InstanceType: "r5d.8xlarge", ++ VCPU: 32, ++ MemoryMb: 262144, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r5d.large": { ++ InstanceType: "r5d.large", ++ VCPU: 2, ++ MemoryMb: 16384, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r5d.metal": { ++ InstanceType: "r5d.metal", ++ VCPU: 96, ++ MemoryMb: 786432, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r5d.xlarge": { ++ InstanceType: "r5d.xlarge", ++ VCPU: 4, ++ MemoryMb: 32768, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r5dn.12xlarge": { ++ InstanceType: "r5dn.12xlarge", ++ VCPU: 48, ++ MemoryMb: 393216, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r5dn.16xlarge": { ++ InstanceType: "r5dn.16xlarge", ++ VCPU: 64, ++ MemoryMb: 524288, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r5dn.24xlarge": { ++ InstanceType: "r5dn.24xlarge", ++ VCPU: 96, ++ MemoryMb: 786432, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r5dn.2xlarge": { ++ InstanceType: "r5dn.2xlarge", ++ VCPU: 8, ++ MemoryMb: 65536, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r5dn.4xlarge": { ++ InstanceType: "r5dn.4xlarge", ++ VCPU: 16, ++ MemoryMb: 131072, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r5dn.8xlarge": { ++ InstanceType: "r5dn.8xlarge", ++ VCPU: 32, ++ MemoryMb: 262144, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r5dn.large": { ++ InstanceType: "r5dn.large", ++ VCPU: 2, ++ MemoryMb: 16384, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r5dn.metal": { ++ InstanceType: "r5dn.metal", ++ VCPU: 96, ++ MemoryMb: 786432, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r5dn.xlarge": { ++ InstanceType: "r5dn.xlarge", ++ VCPU: 4, ++ MemoryMb: 32768, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r5n.12xlarge": { ++ InstanceType: "r5n.12xlarge", ++ VCPU: 48, ++ MemoryMb: 393216, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ 
"r5n.16xlarge": { ++ InstanceType: "r5n.16xlarge", ++ VCPU: 64, ++ MemoryMb: 524288, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r5n.24xlarge": { ++ InstanceType: "r5n.24xlarge", ++ VCPU: 96, ++ MemoryMb: 786432, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r5n.2xlarge": { ++ InstanceType: "r5n.2xlarge", ++ VCPU: 8, ++ MemoryMb: 65536, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r5n.4xlarge": { ++ InstanceType: "r5n.4xlarge", ++ VCPU: 16, ++ MemoryMb: 131072, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r5n.8xlarge": { ++ InstanceType: "r5n.8xlarge", ++ VCPU: 32, ++ MemoryMb: 262144, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r5n.large": { ++ InstanceType: "r5n.large", ++ VCPU: 2, ++ MemoryMb: 16384, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r5n.metal": { ++ InstanceType: "r5n.metal", ++ VCPU: 96, ++ MemoryMb: 786432, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r5n.xlarge": { ++ InstanceType: "r5n.xlarge", ++ VCPU: 4, ++ MemoryMb: 32768, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r6a.12xlarge": { ++ InstanceType: "r6a.12xlarge", ++ VCPU: 48, ++ MemoryMb: 393216, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r6a.16xlarge": { ++ InstanceType: "r6a.16xlarge", ++ VCPU: 64, ++ MemoryMb: 524288, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r6a.24xlarge": { ++ InstanceType: "r6a.24xlarge", ++ VCPU: 96, ++ MemoryMb: 786432, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r6a.2xlarge": { ++ InstanceType: "r6a.2xlarge", ++ VCPU: 8, ++ MemoryMb: 65536, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r6a.32xlarge": { ++ InstanceType: "r6a.32xlarge", ++ VCPU: 128, ++ MemoryMb: 1048576, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r6a.48xlarge": { ++ InstanceType: "r6a.48xlarge", ++ VCPU: 192, ++ MemoryMb: 1572864, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r6a.4xlarge": { ++ InstanceType: "r6a.4xlarge", ++ VCPU: 16, ++ MemoryMb: 131072, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r6a.8xlarge": { ++ InstanceType: "r6a.8xlarge", ++ VCPU: 32, ++ 
MemoryMb: 262144, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r6a.large": { ++ InstanceType: "r6a.large", ++ VCPU: 2, ++ MemoryMb: 16384, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r6a.metal": { ++ InstanceType: "r6a.metal", ++ VCPU: 192, ++ MemoryMb: 1572864, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r6a.xlarge": { ++ InstanceType: "r6a.xlarge", ++ VCPU: 4, ++ MemoryMb: 32768, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r6g.12xlarge": { ++ InstanceType: "r6g.12xlarge", ++ VCPU: 48, ++ MemoryMb: 393216, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "r6g.16xlarge": { ++ InstanceType: "r6g.16xlarge", ++ VCPU: 64, ++ MemoryMb: 524288, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "r6g.2xlarge": { ++ InstanceType: "r6g.2xlarge", ++ VCPU: 8, ++ MemoryMb: 65536, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "r6g.4xlarge": { ++ InstanceType: "r6g.4xlarge", ++ VCPU: 16, ++ MemoryMb: 131072, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "r6g.8xlarge": { ++ InstanceType: "r6g.8xlarge", ++ VCPU: 32, ++ MemoryMb: 262144, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "r6g.large": { ++ InstanceType: "r6g.large", ++ VCPU: 2, ++ MemoryMb: 16384, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "r6g.medium": { ++ InstanceType: "r6g.medium", ++ VCPU: 1, ++ MemoryMb: 8192, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "r6g.metal": { ++ InstanceType: "r6g.metal", ++ VCPU: 64, ++ MemoryMb: 524288, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "r6g.xlarge": { ++ InstanceType: "r6g.xlarge", ++ VCPU: 4, ++ MemoryMb: 32768, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "r6gd.12xlarge": { ++ InstanceType: "r6gd.12xlarge", ++ VCPU: 48, ++ MemoryMb: 393216, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "r6gd.16xlarge": { ++ InstanceType: "r6gd.16xlarge", ++ VCPU: 64, ++ MemoryMb: 524288, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "r6gd.2xlarge": { ++ InstanceType: "r6gd.2xlarge", ++ VCPU: 8, ++ MemoryMb: 65536, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "r6gd.4xlarge": { ++ 
InstanceType: "r6gd.4xlarge", ++ VCPU: 16, ++ MemoryMb: 131072, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "r6gd.8xlarge": { ++ InstanceType: "r6gd.8xlarge", ++ VCPU: 32, ++ MemoryMb: 262144, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "r6gd.large": { ++ InstanceType: "r6gd.large", ++ VCPU: 2, ++ MemoryMb: 16384, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "r6gd.medium": { ++ InstanceType: "r6gd.medium", ++ VCPU: 1, ++ MemoryMb: 8192, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "r6gd.metal": { ++ InstanceType: "r6gd.metal", ++ VCPU: 64, ++ MemoryMb: 524288, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "r6gd.xlarge": { ++ InstanceType: "r6gd.xlarge", ++ VCPU: 4, ++ MemoryMb: 32768, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "r6i.12xlarge": { ++ InstanceType: "r6i.12xlarge", ++ VCPU: 48, ++ MemoryMb: 393216, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r6i.16xlarge": { ++ InstanceType: "r6i.16xlarge", ++ VCPU: 64, ++ MemoryMb: 524288, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r6i.24xlarge": { ++ InstanceType: "r6i.24xlarge", ++ VCPU: 96, ++ MemoryMb: 786432, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r6i.2xlarge": { ++ InstanceType: "r6i.2xlarge", ++ VCPU: 8, ++ MemoryMb: 65536, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r6i.32xlarge": { ++ InstanceType: "r6i.32xlarge", ++ VCPU: 128, ++ MemoryMb: 1048576, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r6i.4xlarge": { ++ InstanceType: "r6i.4xlarge", ++ VCPU: 16, ++ MemoryMb: 131072, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r6i.8xlarge": { ++ InstanceType: "r6i.8xlarge", ++ VCPU: 32, ++ MemoryMb: 262144, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r6i.large": { ++ InstanceType: "r6i.large", ++ VCPU: 2, ++ MemoryMb: 16384, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r6i.metal": { ++ InstanceType: "r6i.metal", ++ VCPU: 128, ++ MemoryMb: 1048576, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r6i.xlarge": { ++ InstanceType: "r6i.xlarge", ++ VCPU: 4, ++ MemoryMb: 32768, ++ GPU: 0, ++ 
Architecture: "amd64", ++ }, ++ "r6id.12xlarge": { ++ InstanceType: "r6id.12xlarge", ++ VCPU: 48, ++ MemoryMb: 393216, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r6id.16xlarge": { ++ InstanceType: "r6id.16xlarge", ++ VCPU: 64, ++ MemoryMb: 524288, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r6id.24xlarge": { ++ InstanceType: "r6id.24xlarge", ++ VCPU: 96, ++ MemoryMb: 786432, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r6id.2xlarge": { ++ InstanceType: "r6id.2xlarge", ++ VCPU: 8, ++ MemoryMb: 65536, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r6id.32xlarge": { ++ InstanceType: "r6id.32xlarge", ++ VCPU: 128, ++ MemoryMb: 1048576, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r6id.4xlarge": { ++ InstanceType: "r6id.4xlarge", ++ VCPU: 16, ++ MemoryMb: 131072, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r6id.8xlarge": { ++ InstanceType: "r6id.8xlarge", ++ VCPU: 32, ++ MemoryMb: 262144, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r6id.large": { ++ InstanceType: "r6id.large", ++ VCPU: 2, ++ MemoryMb: 16384, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r6id.metal": { ++ InstanceType: "r6id.metal", ++ VCPU: 128, ++ MemoryMb: 1048576, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r6id.xlarge": { ++ InstanceType: "r6id.xlarge", ++ VCPU: 4, ++ MemoryMb: 32768, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r6idn.12xlarge": { ++ InstanceType: "r6idn.12xlarge", ++ VCPU: 48, ++ MemoryMb: 393216, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r6idn.16xlarge": { ++ InstanceType: "r6idn.16xlarge", ++ VCPU: 64, ++ MemoryMb: 524288, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r6idn.24xlarge": { ++ InstanceType: "r6idn.24xlarge", ++ VCPU: 96, ++ MemoryMb: 786432, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r6idn.2xlarge": { ++ InstanceType: "r6idn.2xlarge", ++ VCPU: 8, ++ MemoryMb: 65536, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r6idn.32xlarge": { ++ InstanceType: "r6idn.32xlarge", ++ VCPU: 128, ++ MemoryMb: 1048576, ++ GPU: 0, ++ Architecture: 
"amd64", ++ }, ++ "r6idn.4xlarge": { ++ InstanceType: "r6idn.4xlarge", ++ VCPU: 16, ++ MemoryMb: 131072, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r6idn.8xlarge": { ++ InstanceType: "r6idn.8xlarge", ++ VCPU: 32, ++ MemoryMb: 262144, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r6idn.large": { ++ InstanceType: "r6idn.large", ++ VCPU: 2, ++ MemoryMb: 16384, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r6idn.xlarge": { ++ InstanceType: "r6idn.xlarge", ++ VCPU: 4, ++ MemoryMb: 32768, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r6in.12xlarge": { ++ InstanceType: "r6in.12xlarge", ++ VCPU: 48, ++ MemoryMb: 393216, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r6in.16xlarge": { ++ InstanceType: "r6in.16xlarge", ++ VCPU: 64, ++ MemoryMb: 524288, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r6in.24xlarge": { ++ InstanceType: "r6in.24xlarge", ++ VCPU: 96, ++ MemoryMb: 786432, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r6in.2xlarge": { ++ InstanceType: "r6in.2xlarge", ++ VCPU: 8, ++ MemoryMb: 65536, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r6in.32xlarge": { ++ InstanceType: "r6in.32xlarge", ++ VCPU: 128, ++ MemoryMb: 1048576, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r6in.4xlarge": { ++ InstanceType: "r6in.4xlarge", ++ VCPU: 16, ++ MemoryMb: 131072, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r6in.8xlarge": { ++ InstanceType: "r6in.8xlarge", ++ VCPU: 32, ++ MemoryMb: 262144, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r6in.large": { ++ InstanceType: "r6in.large", ++ VCPU: 2, ++ MemoryMb: 16384, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r6in.xlarge": { ++ InstanceType: "r6in.xlarge", ++ VCPU: 4, ++ MemoryMb: 32768, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "r7g.12xlarge": { ++ InstanceType: "r7g.12xlarge", ++ VCPU: 48, ++ MemoryMb: 393216, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "r7g.16xlarge": { ++ InstanceType: "r7g.16xlarge", ++ VCPU: 64, ++ MemoryMb: 524288, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "r7g.2xlarge": { ++ 
InstanceType: "r7g.2xlarge", ++ VCPU: 8, ++ MemoryMb: 65536, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "r7g.4xlarge": { ++ InstanceType: "r7g.4xlarge", ++ VCPU: 16, ++ MemoryMb: 131072, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "r7g.8xlarge": { ++ InstanceType: "r7g.8xlarge", ++ VCPU: 32, ++ MemoryMb: 262144, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "r7g.large": { ++ InstanceType: "r7g.large", ++ VCPU: 2, ++ MemoryMb: 16384, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "r7g.medium": { ++ InstanceType: "r7g.medium", ++ VCPU: 1, ++ MemoryMb: 8192, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "r7g.metal": { ++ InstanceType: "r7g.metal", ++ VCPU: 64, ++ MemoryMb: 524288, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "r7g.xlarge": { ++ InstanceType: "r7g.xlarge", ++ VCPU: 4, ++ MemoryMb: 32768, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "t1.micro": { ++ InstanceType: "t1.micro", ++ VCPU: 1, ++ MemoryMb: 627, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "t2.2xlarge": { ++ InstanceType: "t2.2xlarge", ++ VCPU: 8, ++ MemoryMb: 32768, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "t2.large": { ++ InstanceType: "t2.large", ++ VCPU: 2, ++ MemoryMb: 8192, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "t2.medium": { ++ InstanceType: "t2.medium", ++ VCPU: 2, ++ MemoryMb: 4096, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "t2.micro": { ++ InstanceType: "t2.micro", ++ VCPU: 1, ++ MemoryMb: 1024, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "t2.nano": { ++ InstanceType: "t2.nano", ++ VCPU: 1, ++ MemoryMb: 512, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "t2.small": { ++ InstanceType: "t2.small", ++ VCPU: 1, ++ MemoryMb: 2048, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "t2.xlarge": { ++ InstanceType: "t2.xlarge", ++ VCPU: 4, ++ MemoryMb: 16384, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "t3.2xlarge": { ++ InstanceType: "t3.2xlarge", ++ VCPU: 8, ++ MemoryMb: 32768, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "t3.large": { ++ InstanceType: "t3.large", ++ 
VCPU: 2, ++ MemoryMb: 8192, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "t3.medium": { ++ InstanceType: "t3.medium", ++ VCPU: 2, ++ MemoryMb: 4096, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "t3.micro": { ++ InstanceType: "t3.micro", ++ VCPU: 2, ++ MemoryMb: 1024, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "t3.nano": { ++ InstanceType: "t3.nano", ++ VCPU: 2, ++ MemoryMb: 512, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "t3.small": { ++ InstanceType: "t3.small", ++ VCPU: 2, ++ MemoryMb: 2048, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "t3.xlarge": { ++ InstanceType: "t3.xlarge", ++ VCPU: 4, ++ MemoryMb: 16384, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "t3a.2xlarge": { ++ InstanceType: "t3a.2xlarge", ++ VCPU: 8, ++ MemoryMb: 32768, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "t3a.large": { ++ InstanceType: "t3a.large", ++ VCPU: 2, ++ MemoryMb: 8192, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "t3a.medium": { ++ InstanceType: "t3a.medium", ++ VCPU: 2, ++ MemoryMb: 4096, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "t3a.micro": { ++ InstanceType: "t3a.micro", ++ VCPU: 2, ++ MemoryMb: 1024, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "t3a.nano": { ++ InstanceType: "t3a.nano", ++ VCPU: 2, ++ MemoryMb: 512, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "t3a.small": { ++ InstanceType: "t3a.small", ++ VCPU: 2, ++ MemoryMb: 2048, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "t3a.xlarge": { ++ InstanceType: "t3a.xlarge", ++ VCPU: 4, ++ MemoryMb: 16384, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "t4g.2xlarge": { ++ InstanceType: "t4g.2xlarge", ++ VCPU: 8, ++ MemoryMb: 32768, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "t4g.large": { ++ InstanceType: "t4g.large", ++ VCPU: 2, ++ MemoryMb: 8192, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "t4g.medium": { ++ InstanceType: "t4g.medium", ++ VCPU: 2, ++ MemoryMb: 4096, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "t4g.micro": { ++ InstanceType: "t4g.micro", ++ VCPU: 2, ++ MemoryMb: 1024, ++ GPU: 0, ++ 
Architecture: "arm64", ++ }, ++ "t4g.nano": { ++ InstanceType: "t4g.nano", ++ VCPU: 2, ++ MemoryMb: 512, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "t4g.small": { ++ InstanceType: "t4g.small", ++ VCPU: 2, ++ MemoryMb: 2048, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "t4g.xlarge": { ++ InstanceType: "t4g.xlarge", ++ VCPU: 4, ++ MemoryMb: 16384, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "trn1.2xlarge": { ++ InstanceType: "trn1.2xlarge", ++ VCPU: 8, ++ MemoryMb: 32768, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "trn1.32xlarge": { ++ InstanceType: "trn1.32xlarge", ++ VCPU: 128, ++ MemoryMb: 524288, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "u-12tb1.112xlarge": { ++ InstanceType: "u-12tb1.112xlarge", ++ VCPU: 448, ++ MemoryMb: 12582912, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "u-18tb1.112xlarge": { ++ InstanceType: "u-18tb1.112xlarge", ++ VCPU: 448, ++ MemoryMb: 18874368, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "u-24tb1.112xlarge": { ++ InstanceType: "u-24tb1.112xlarge", ++ VCPU: 448, ++ MemoryMb: 25165824, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "u-3tb1.56xlarge": { ++ InstanceType: "u-3tb1.56xlarge", ++ VCPU: 224, ++ MemoryMb: 3145728, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "u-6tb1.112xlarge": { ++ InstanceType: "u-6tb1.112xlarge", ++ VCPU: 448, ++ MemoryMb: 6291456, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "u-6tb1.56xlarge": { ++ InstanceType: "u-6tb1.56xlarge", ++ VCPU: 224, ++ MemoryMb: 6291456, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "u-9tb1.112xlarge": { ++ InstanceType: "u-9tb1.112xlarge", ++ VCPU: 448, ++ MemoryMb: 9437184, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "vt1.24xlarge": { ++ InstanceType: "vt1.24xlarge", ++ VCPU: 96, ++ MemoryMb: 196608, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "vt1.3xlarge": { ++ InstanceType: "vt1.3xlarge", ++ VCPU: 12, ++ MemoryMb: 24576, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "vt1.6xlarge": { ++ InstanceType: "vt1.6xlarge", ++ VCPU: 24, ++ MemoryMb: 49152, ++ 
GPU: 0, ++ Architecture: "amd64", ++ }, ++ "x1.16xlarge": { ++ InstanceType: "x1.16xlarge", ++ VCPU: 64, ++ MemoryMb: 999424, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "x1.32xlarge": { ++ InstanceType: "x1.32xlarge", ++ VCPU: 128, ++ MemoryMb: 1998848, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "x1e.16xlarge": { ++ InstanceType: "x1e.16xlarge", ++ VCPU: 64, ++ MemoryMb: 1998848, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "x1e.2xlarge": { ++ InstanceType: "x1e.2xlarge", ++ VCPU: 8, ++ MemoryMb: 249856, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "x1e.32xlarge": { ++ InstanceType: "x1e.32xlarge", ++ VCPU: 128, ++ MemoryMb: 3997696, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "x1e.4xlarge": { ++ InstanceType: "x1e.4xlarge", ++ VCPU: 16, ++ MemoryMb: 499712, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "x1e.8xlarge": { ++ InstanceType: "x1e.8xlarge", ++ VCPU: 32, ++ MemoryMb: 999424, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "x1e.xlarge": { ++ InstanceType: "x1e.xlarge", ++ VCPU: 4, ++ MemoryMb: 124928, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "x2gd.12xlarge": { ++ InstanceType: "x2gd.12xlarge", ++ VCPU: 48, ++ MemoryMb: 786432, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "x2gd.16xlarge": { ++ InstanceType: "x2gd.16xlarge", ++ VCPU: 64, ++ MemoryMb: 1048576, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "x2gd.2xlarge": { ++ InstanceType: "x2gd.2xlarge", ++ VCPU: 8, ++ MemoryMb: 131072, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "x2gd.4xlarge": { ++ InstanceType: "x2gd.4xlarge", ++ VCPU: 16, ++ MemoryMb: 262144, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "x2gd.8xlarge": { ++ InstanceType: "x2gd.8xlarge", ++ VCPU: 32, ++ MemoryMb: 524288, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "x2gd.large": { ++ InstanceType: "x2gd.large", ++ VCPU: 2, ++ MemoryMb: 32768, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "x2gd.medium": { ++ InstanceType: "x2gd.medium", ++ VCPU: 1, ++ MemoryMb: 16384, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ 
"x2gd.metal": { ++ InstanceType: "x2gd.metal", ++ VCPU: 64, ++ MemoryMb: 1048576, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "x2gd.xlarge": { ++ InstanceType: "x2gd.xlarge", ++ VCPU: 4, ++ MemoryMb: 65536, ++ GPU: 0, ++ Architecture: "arm64", ++ }, ++ "x2idn.16xlarge": { ++ InstanceType: "x2idn.16xlarge", ++ VCPU: 64, ++ MemoryMb: 1048576, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "x2idn.24xlarge": { ++ InstanceType: "x2idn.24xlarge", ++ VCPU: 96, ++ MemoryMb: 1572864, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "x2idn.32xlarge": { ++ InstanceType: "x2idn.32xlarge", ++ VCPU: 128, ++ MemoryMb: 2097152, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "x2idn.metal": { ++ InstanceType: "x2idn.metal", ++ VCPU: 128, ++ MemoryMb: 2097152, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "x2iedn.16xlarge": { ++ InstanceType: "x2iedn.16xlarge", ++ VCPU: 64, ++ MemoryMb: 2097152, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "x2iedn.24xlarge": { ++ InstanceType: "x2iedn.24xlarge", ++ VCPU: 96, ++ MemoryMb: 3145728, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "x2iedn.2xlarge": { ++ InstanceType: "x2iedn.2xlarge", ++ VCPU: 8, ++ MemoryMb: 262144, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "x2iedn.32xlarge": { ++ InstanceType: "x2iedn.32xlarge", ++ VCPU: 128, ++ MemoryMb: 4194304, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "x2iedn.4xlarge": { ++ InstanceType: "x2iedn.4xlarge", ++ VCPU: 16, ++ MemoryMb: 524288, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "x2iedn.8xlarge": { ++ InstanceType: "x2iedn.8xlarge", ++ VCPU: 32, ++ MemoryMb: 1048576, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "x2iedn.metal": { ++ InstanceType: "x2iedn.metal", ++ VCPU: 128, ++ MemoryMb: 4194304, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "x2iedn.xlarge": { ++ InstanceType: "x2iedn.xlarge", ++ VCPU: 4, ++ MemoryMb: 131072, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "x2iezn.12xlarge": { ++ InstanceType: "x2iezn.12xlarge", ++ VCPU: 48, ++ MemoryMb: 1572864, ++ GPU: 0, ++ Architecture: 
"amd64", ++ }, ++ "x2iezn.2xlarge": { ++ InstanceType: "x2iezn.2xlarge", ++ VCPU: 8, ++ MemoryMb: 262144, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "x2iezn.4xlarge": { ++ InstanceType: "x2iezn.4xlarge", ++ VCPU: 16, ++ MemoryMb: 524288, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "x2iezn.6xlarge": { ++ InstanceType: "x2iezn.6xlarge", ++ VCPU: 24, ++ MemoryMb: 786432, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "x2iezn.8xlarge": { ++ InstanceType: "x2iezn.8xlarge", ++ VCPU: 32, ++ MemoryMb: 1048576, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "x2iezn.metal": { ++ InstanceType: "x2iezn.metal", ++ VCPU: 48, ++ MemoryMb: 1572864, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "z1d.12xlarge": { ++ InstanceType: "z1d.12xlarge", ++ VCPU: 48, ++ MemoryMb: 393216, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "z1d.2xlarge": { ++ InstanceType: "z1d.2xlarge", ++ VCPU: 8, ++ MemoryMb: 65536, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "z1d.3xlarge": { ++ InstanceType: "z1d.3xlarge", ++ VCPU: 12, ++ MemoryMb: 98304, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "z1d.6xlarge": { ++ InstanceType: "z1d.6xlarge", ++ VCPU: 24, ++ MemoryMb: 196608, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "z1d.large": { ++ InstanceType: "z1d.large", ++ VCPU: 2, ++ MemoryMb: 16384, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "z1d.metal": { ++ InstanceType: "z1d.metal", ++ VCPU: 48, ++ MemoryMb: 393216, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++ "z1d.xlarge": { ++ InstanceType: "z1d.xlarge", ++ VCPU: 4, ++ MemoryMb: 32768, ++ GPU: 0, ++ Architecture: "amd64", ++ }, ++} +diff --git a/cluster-autoscaler/cloudprovider/spotinst/spotinst_cloud_provider.go b/cluster-autoscaler/cloudprovider/spotinst/spotinst_cloud_provider.go +new file mode 100644 +index 000000000..1b9441dcd +--- /dev/null ++++ b/cluster-autoscaler/cloudprovider/spotinst/spotinst_cloud_provider.go +@@ -0,0 +1,159 @@ ++/* ++Copyright 2016 The Kubernetes Authors. 
++Licensed under the Apache License, Version 2.0 (the "License"); ++you may not use this file except in compliance with the License. ++You may obtain a copy of the License at ++ http://www.apache.org/licenses/LICENSE-2.0 ++Unless required by applicable law or agreed to in writing, software ++distributed under the License is distributed on an "AS IS" BASIS, ++WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++See the License for the specific language governing permissions and ++limitations under the License. ++*/ ++ ++package spotinst ++ ++import ( ++ "io" ++ apiv1 "k8s.io/api/core/v1" ++ "k8s.io/apimachinery/pkg/api/resource" ++ "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" ++ "k8s.io/autoscaler/cluster-autoscaler/config" ++ "k8s.io/autoscaler/cluster-autoscaler/utils/errors" ++ klog "k8s.io/klog/v2" ++ "os" ++) ++ ++const ( ++ // ProviderName is the provide name of spotinst ++ ProviderName = "spotinst" ++ ++ // GPULabel is the label added to nodes with GPU resource. ++ GPULabel = "k8s.amazonaws.com/accelerator" ++) ++ ++// CloudProvider implements CloudProvider interface. ++type CloudProvider struct { ++ manager *CloudManager ++ resourceLimiter *cloudprovider.ResourceLimiter ++} ++ ++var ( ++ availableGPUTypes = map[string]struct{}{ ++ "nvidia-tesla-k80": {}, ++ "nvidia-tesla-p100": {}, ++ "nvidia-tesla-v100": {}, ++ } ++) ++ ++// NewCloudProvider returns CloudProvider implementation for Spotinst. ++func NewCloudProvider(manager *CloudManager, resourceLimiter *cloudprovider.ResourceLimiter) (*CloudProvider, error) { ++ klog.Info("Building Spotinst cloud provider") ++ cloud := &CloudProvider{ ++ manager: manager, ++ resourceLimiter: resourceLimiter, ++ } ++ ++ return cloud, nil ++} ++ ++// Name returns name of the cloud c. ++func (c *CloudProvider) Name() string { ++ return ProviderName ++} ++ ++// NodeGroups returns all node groups configured for this cloud c. 
++func (c *CloudProvider) NodeGroups() []cloudprovider.NodeGroup { ++ out := make([]cloudprovider.NodeGroup, len(c.manager.groups)) ++ for i, group := range c.manager.groups { ++ out[i] = group ++ } ++ return out ++} ++ ++// NodeGroupForNode returns the node group for the given node. ++func (c *CloudProvider) NodeGroupForNode(node *apiv1.Node) (cloudprovider.NodeGroup, error) { ++ instanceID, err := extractInstanceId(node.Spec.ProviderID) ++ if err != nil { ++ return nil, err ++ } ++ return c.manager.GetGroupForInstance(instanceID) ++} ++ ++// Pricing returns pricing model for this cloud provider or error if not available. ++func (c *CloudProvider) Pricing() (cloudprovider.PricingModel, errors.AutoscalerError) { ++ return nil, cloudprovider.ErrNotImplemented ++} ++ ++// GetAvailableMachineTypes get all machine types that can be requested from the cloud provider. ++// Implementation optional. ++func (c *CloudProvider) GetAvailableMachineTypes() ([]string, error) { ++ return []string{}, nil ++} ++ ++// NewNodeGroup builds a theoretical node group based on the node definition provided. ++func (c *CloudProvider) NewNodeGroup(machineType string, labels map[string]string, systemLabels map[string]string, ++ taints []apiv1.Taint, extraResources map[string]resource.Quantity) (cloudprovider.NodeGroup, error) { ++ return nil, cloudprovider.ErrNotImplemented ++} ++ ++// GetResourceLimiter returns struct containing limits (max, min) for resources (cores, memory etc.). ++func (c *CloudProvider) GetResourceLimiter() (*cloudprovider.ResourceLimiter, error) { ++ return c.resourceLimiter, nil ++} ++ ++// Cleanup cleans up open resources before the cloud provider is destroyed, i.e. go routines etc. ++func (c *CloudProvider) Cleanup() error { ++ return c.manager.Cleanup() ++} ++ ++// Refresh is called before every main loop and can be used to dynamically update cloud provider state. 
++// In particular the list of node groups returned by NodeGroups can change as a result of CloudProvider.Refresh(). ++func (c *CloudProvider) Refresh() error { ++ return c.manager.Refresh() ++} ++ ++// GetInstanceID gets the instance ID for the specified node. ++func (c *CloudProvider) GetInstanceID(node *apiv1.Node) string { ++ return node.Spec.ProviderID ++} ++ ++// GPULabel returns the label added to nodes with GPU resource. ++func (c *CloudProvider) GPULabel() string { ++ return GPULabel ++} ++ ++// GetAvailableGPUTypes return all available GPU types cloud provider supports ++func (c *CloudProvider) GetAvailableGPUTypes() map[string]struct{} { ++ return availableGPUTypes ++} ++ ++// BuildSpotinst return the spotinst provider ++func BuildSpotinst(opts config.AutoscalingOptions, do cloudprovider.NodeGroupDiscoveryOptions, rl *cloudprovider.ResourceLimiter) cloudprovider.CloudProvider { ++ var config io.ReadCloser ++ if opts.CloudConfig != "" { ++ var err error ++ config, err = os.Open(opts.CloudConfig) ++ if err != nil { ++ klog.Fatalf("Couldn't open cloud provider configuration %s: %#v", opts.CloudConfig, err) ++ } ++ defer config.Close() ++ } ++ ++ manager, err := NewCloudManager(config, do) ++ if err != nil { ++ klog.Fatalf("Failed to create Spotinst manager: %v", err) ++ } ++ ++ provider, err := NewCloudProvider(manager, rl) ++ if err != nil { ++ klog.Fatalf("Failed to create Spotinst cloud provider: %v", err) ++ } ++ ++ return provider ++} ++ ++// HasInstance returns whether a given node has a corresponding instance in this cloud provider ++func (c *CloudProvider) HasInstance(node *apiv1.Node) (bool, error) { ++ return true, nil ++} +\ No newline at end of file +diff --git a/cluster-autoscaler/cloudprovider/spotinst/spotinst_cloud_provider_test.go b/cluster-autoscaler/cloudprovider/spotinst/spotinst_cloud_provider_test.go +new file mode 100644 +index 000000000..7b944bca9 +--- /dev/null ++++ 
b/cluster-autoscaler/cloudprovider/spotinst/spotinst_cloud_provider_test.go +@@ -0,0 +1,411 @@ ++/* ++Copyright 2016 The Kubernetes Authors. ++Licensed under the Apache License, Version 2.0 (the "License"); ++you may not use this file except in compliance with the License. ++You may obtain a copy of the License at ++ http://www.apache.org/licenses/LICENSE-2.0 ++Unless required by applicable law or agreed to in writing, software ++distributed under the License is distributed on an "AS IS" BASIS, ++WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++See the License for the specific language governing permissions and ++limitations under the License. ++*/ ++ ++package spotinst ++ ++import ( ++ "context" ++ "testing" ++ "time" ++ ++ "github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/aws" ++ azurev3 "github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/azure/v3" ++ "github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/gcp" ++ "github.com/spotinst/spotinst-sdk-go/spotinst" ++ "github.com/stretchr/testify/assert" ++ "github.com/stretchr/testify/mock" ++ apiv1 "k8s.io/api/core/v1" ++ "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" ++) ++ ++type groupServiceMock struct { ++ mock.Mock ++ providerAWS *awsServiceMock ++} ++ ++func (s *groupServiceMock) CloudProviderAWS() aws.Service { ++ return s.providerAWS ++} ++ ++func (s *groupServiceMock) CloudProviderGCP() gcp.Service { ++ return nil // not implemented ++} ++ ++func (s *groupServiceMock) CloudProviderAzureV3() azurev3.Service { ++ return nil // not implemented ++} ++ ++type awsServiceMock struct { ++ mock.Mock ++} ++ ++func (s *awsServiceMock) List(ctx context.Context, input *aws.ListGroupsInput) (*aws.ListGroupsOutput, error) { ++ return nil, nil ++} ++ ++func (s *awsServiceMock) Create(ctx context.Context, input *aws.CreateGroupInput) (*aws.CreateGroupOutput, error) { ++ return nil, nil ++} ++ ++func (s *awsServiceMock) 
DeploymentStatusECS(context.Context, *aws.DeploymentStatusInput) (*aws.RollGroupOutput, error) { ++ return nil, nil ++} ++ ++func (s *awsServiceMock) DeploymentStatus(context.Context, *aws.DeploymentStatusInput) (*aws.RollGroupOutput, error) { ++ return nil, nil ++} ++ ++func (s *awsServiceMock) RollECS(context.Context, *aws.RollECSGroupInput) (*aws.RollGroupOutput, error) { ++ return nil, nil ++} ++ ++func (s *awsServiceMock) Scale(context.Context, *aws.ScaleGroupInput) (*aws.ScaleGroupOutput, error) { ++ return nil, nil ++} ++ ++func (s *awsServiceMock) FinishBeanstalkMaintenance(context.Context, *aws.BeanstalkMaintenanceInput) (*aws.BeanstalkMaintenanceOutput, error) { ++ return nil, nil ++} ++ ++func (s *awsServiceMock) GetBeanstalkMaintenanceStatus(context.Context, *aws.BeanstalkMaintenanceInput) (*string, error) { ++ return nil, nil ++} ++ ++func (s *awsServiceMock) GetGroupEvents(context.Context, *aws.GetGroupEventsInput) (*aws.GetGroupEventsOutput, error) { ++ return nil, nil ++} ++ ++func (s *awsServiceMock) StartBeanstalkMaintenance(context.Context, *aws.BeanstalkMaintenanceInput) (*aws.BeanstalkMaintenanceOutput, error) { ++ return nil, nil ++} ++ ++func (s *awsServiceMock) Read(ctx context.Context, input *aws.ReadGroupInput) (*aws.ReadGroupOutput, error) { ++ out := &aws.ReadGroupOutput{ ++ Group: &aws.Group{ ++ Capacity: &aws.Capacity{ ++ Target: spotinst.Int(2), ++ }, ++ }, ++ } ++ return out, nil ++} ++ ++func (s *awsServiceMock) GetInstanceHealthiness(context.Context, *aws.GetInstanceHealthinessInput) (*aws.GetInstanceHealthinessOutput, error) { ++ return nil, nil ++} ++ ++func (s *awsServiceMock) ImportBeanstalkEnv(context.Context, *aws.ImportBeanstalkInput) (*aws.ImportBeanstalkOutput, error) { ++ return nil, nil ++} ++ ++func (s *awsServiceMock) StopDeployment(context.Context, *aws.StopDeploymentInput) (*aws.StopDeploymentOutput, error) { ++ return nil, nil ++} ++ ++func (s *awsServiceMock) Update(ctx context.Context, input *aws.UpdateGroupInput) 
(*aws.UpdateGroupOutput, error) { ++ args := s.Called(ctx, input) ++ return args.Get(0).(*aws.UpdateGroupOutput), nil ++} ++ ++func (s *awsServiceMock) Delete(ctx context.Context, input *aws.DeleteGroupInput) (*aws.DeleteGroupOutput, error) { ++ return nil, nil ++} ++ ++func (s *awsServiceMock) Status(ctx context.Context, input *aws.StatusGroupInput) (*aws.StatusGroupOutput, error) { ++ out := &aws.StatusGroupOutput{ ++ Instances: []*aws.Instance{ ++ { ++ ID: spotinst.String("test-instance-id"), ++ }, ++ { ++ ID: spotinst.String("second-test-instance-id"), ++ }, ++ }, ++ } ++ return out, nil ++} ++ ++func (s *awsServiceMock) Detach(ctx context.Context, input *aws.DetachGroupInput) (*aws.DetachGroupOutput, error) { ++ args := s.Called(ctx, input) ++ return args.Get(0).(*aws.DetachGroupOutput), nil ++} ++ ++func (s *awsServiceMock) Roll(ctx context.Context, input *aws.RollGroupInput) (*aws.RollGroupOutput, error) { ++ return nil, nil ++} ++ ++func (s *awsServiceMock) CreateSuspensions(ctx context.Context, input *aws.CreateSuspensionsInput) (*aws.CreateSuspensionsOutput, error) { ++ return nil, nil ++} ++ ++func (s *awsServiceMock) ListSuspensions(context.Context, *aws.ListSuspensionsInput) (*aws.ListSuspensionsOutput, error) { ++ return nil, nil ++} ++func (s *awsServiceMock) DeleteSuspensions(context.Context, *aws.DeleteSuspensionsInput) (*aws.DeleteSuspensionsOutput, error) { ++ return nil, nil ++} ++ ++func (s *awsServiceMock) ListStatefulInstances(context.Context, *aws.ListStatefulInstancesInput) (*aws.ListStatefulInstancesOutput, error) { ++ return nil, nil ++} ++func (s *awsServiceMock) PauseStatefulInstance(context.Context, *aws.PauseStatefulInstanceInput) (*aws.PauseStatefulInstanceOutput, error) { ++ return nil, nil ++} ++func (s *awsServiceMock) ResumeStatefulInstance(context.Context, *aws.ResumeStatefulInstanceInput) (*aws.ResumeStatefulInstanceOutput, error) { ++ return nil, nil ++} ++func (s *awsServiceMock) RecycleStatefulInstance(context.Context, 
*aws.RecycleStatefulInstanceInput) (*aws.RecycleStatefulInstanceOutput, error) { ++ return nil, nil ++} ++func (s *awsServiceMock) DeallocateStatefulInstance(context.Context, *aws.DeallocateStatefulInstanceInput) (*aws.DeallocateStatefulInstanceOutput, error) { ++ return nil, nil ++} ++ ++func testCloudManager(t *testing.T) *CloudManager { ++ return &CloudManager{ ++ groupService: &groupServiceMock{ ++ providerAWS: new(awsServiceMock), ++ }, ++ groups: make([]*Group, 0), ++ cache: make(map[string]*Group), ++ interruptCh: make(chan struct{}), ++ refreshInterval: time.Minute, ++ } ++} ++ ++func testCloudProvider(t *testing.T, m *CloudManager) *CloudProvider { ++ resourceLimiter := cloudprovider.NewResourceLimiter( ++ map[string]int64{cloudprovider.ResourceNameCores: 1, cloudprovider.ResourceNameMemory: 10000000}, ++ map[string]int64{cloudprovider.ResourceNameCores: 10, cloudprovider.ResourceNameMemory: 100000000}) ++ ++ cloud, err := NewCloudProvider(m, resourceLimiter) ++ assert.NoError(t, err) ++ return cloud ++} ++ ++func TestNewCloudProvider(t *testing.T) { ++ testCloudProvider(t, testCloudManager(t)) ++} ++ ++func TestAddNodeGroup(t *testing.T) { ++ provider := testCloudProvider(t, testCloudManager(t)) ++ err := provider.manager.addNodeGroup("bad spec") ++ assert.Error(t, err) ++ assert.Equal(t, len(provider.manager.groups), 0) ++ ++ err = provider.manager.addNodeGroup("1:5:sig-test") ++ assert.NoError(t, err) ++ assert.Equal(t, len(provider.manager.groups), 1) ++} ++ ++func TestName(t *testing.T) { ++ provider := testCloudProvider(t, testCloudManager(t)) ++ assert.Equal(t, provider.Name(), "spotinst") ++} ++ ++func TestNodeGroups(t *testing.T) { ++ provider := testCloudProvider(t, testCloudManager(t)) ++ assert.Equal(t, len(provider.NodeGroups()), 0) ++ err := provider.manager.addNodeGroup("1:5:sig-test") ++ assert.NoError(t, err) ++ assert.Equal(t, len(provider.NodeGroups()), 1) ++} ++ ++func TestNodeGroupForNode(t *testing.T) { ++ node := &apiv1.Node{ ++ 
Spec: apiv1.NodeSpec{ ++ ProviderID: "aws:///us-east-1a/test-instance-id", ++ }, ++ } ++ provider := testCloudProvider(t, testCloudManager(t)) ++ err := provider.manager.addNodeGroup("1:5:sig-test") ++ assert.NoError(t, err) ++ ++ provider.Refresh() ++ ++ group, err := provider.NodeGroupForNode(node) ++ ++ assert.NoError(t, err) ++ assert.Equal(t, group.Id(), "sig-test") ++ assert.Equal(t, group.MinSize(), 1) ++ assert.Equal(t, group.MaxSize(), 5) ++ ++ // test node in cluster that is not in a group managed by cluster autoscaler ++ nodeNotInGroup := &apiv1.Node{ ++ Spec: apiv1.NodeSpec{ ++ ProviderID: "aws:///us-east-1a/test-instance-id-not-in-group", ++ }, ++ } ++ ++ group, err = provider.NodeGroupForNode(nodeNotInGroup) ++ assert.NoError(t, err) ++ assert.Nil(t, group) ++} ++ ++func TestExtractInstanceId(t *testing.T) { ++ _, err := extractInstanceId("bad spec") ++ assert.Error(t, err) ++ ++ instanceID, err := extractInstanceId("aws:///us-east-1a/i-260942b3") ++ assert.NoError(t, err) ++ assert.Equal(t, instanceID, "i-260942b3") ++} ++ ++func TestMaxSize(t *testing.T) { ++ provider := testCloudProvider(t, testCloudManager(t)) ++ err := provider.manager.addNodeGroup("1:5:sig-test") ++ assert.NoError(t, err) ++ assert.Equal(t, len(provider.manager.groups), 1) ++ assert.Equal(t, provider.manager.groups[0].MaxSize(), 5) ++} ++ ++func TestMinSize(t *testing.T) { ++ provider := testCloudProvider(t, testCloudManager(t)) ++ err := provider.manager.addNodeGroup("1:5:sig-test") ++ assert.NoError(t, err) ++ assert.Equal(t, len(provider.manager.groups), 1) ++ assert.Equal(t, provider.manager.groups[0].MinSize(), 1) ++} ++ ++func TestTargetSize(t *testing.T) { ++ provider := testCloudProvider(t, testCloudManager(t)) ++ err := provider.manager.addNodeGroup("1:5:sig-test") ++ assert.NoError(t, err) ++ targetSize, err := provider.manager.groups[0].TargetSize() ++ assert.Equal(t, targetSize, 2) ++ assert.NoError(t, err) ++} ++ ++func TestIncreaseSize(t *testing.T) { ++ provider 
:= testCloudProvider(t, testCloudManager(t)) ++ err := provider.manager.addNodeGroup("1:5:sig-test") ++ assert.NoError(t, err) ++ assert.Equal(t, len(provider.manager.groups), 1) ++ ++ cloud := provider.manager.groupService.CloudProviderAWS().(*awsServiceMock) ++ cloud.On("Update", context.Background(), &aws.UpdateGroupInput{ ++ Group: &aws.Group{ ++ ID: spotinst.String(provider.manager.groups[0].Id()), ++ Capacity: &aws.Capacity{ ++ Target: spotinst.Int(3), ++ Minimum: spotinst.Int(provider.manager.groups[0].minSize), ++ Maximum: spotinst.Int(provider.manager.groups[0].maxSize), ++ }, ++ }, ++ }).Return(&aws.UpdateGroupOutput{}) ++ ++ err = provider.manager.groups[0].IncreaseSize(1) ++ assert.NoError(t, err) ++ cloud.AssertExpectations(t) ++} ++ ++func TestBelongs(t *testing.T) { ++ provider := testCloudProvider(t, testCloudManager(t)) ++ err := provider.manager.addNodeGroup("1:5:sig-test") ++ assert.NoError(t, err) ++ ++ provider.Refresh() ++ ++ invalidNode := &apiv1.Node{ ++ Spec: apiv1.NodeSpec{ ++ ProviderID: "aws:///us-east-1a/invalid-instance-id", ++ }, ++ } ++ _, err = provider.manager.groups[0].Belongs(invalidNode) ++ assert.Error(t, err) ++ ++ validNode := &apiv1.Node{ ++ Spec: apiv1.NodeSpec{ ++ ProviderID: "aws:///us-east-1a/test-instance-id", ++ }, ++ } ++ belongs, err := provider.manager.groups[0].Belongs(validNode) ++ assert.Equal(t, belongs, true) ++ assert.NoError(t, err) ++} ++ ++func TestDeleteNodes(t *testing.T) { ++ provider := testCloudProvider(t, testCloudManager(t)) ++ err := provider.manager.addNodeGroup("1:5:sig-test") ++ assert.NoError(t, err) ++ assert.Equal(t, len(provider.manager.groups), 1) ++ ++ provider.Refresh() ++ ++ cloud := provider.manager.groupService.CloudProviderAWS().(*awsServiceMock) ++ cloud.On("Detach", context.Background(), &aws.DetachGroupInput{ ++ GroupID: spotinst.String(provider.manager.groups[0].Id()), ++ InstanceIDs: []string{"test-instance-id"}, ++ ShouldDecrementTargetCapacity: spotinst.Bool(true), ++ 
ShouldTerminateInstances: spotinst.Bool(true), ++ }).Return(&aws.DetachGroupOutput{}) ++ ++ node := &apiv1.Node{ ++ Spec: apiv1.NodeSpec{ ++ ProviderID: "aws:///us-east-1a/test-instance-id", ++ }, ++ } ++ ++ err = provider.manager.groups[0].DeleteNodes([]*apiv1.Node{node}) ++ assert.NoError(t, err) ++ cloud.AssertExpectations(t) ++} ++ ++func TestId(t *testing.T) { ++ provider := testCloudProvider(t, testCloudManager(t)) ++ err := provider.manager.addNodeGroup("1:5:sig-test") ++ assert.NoError(t, err) ++ assert.Equal(t, len(provider.manager.groups), 1) ++ assert.Equal(t, provider.manager.groups[0].Id(), "sig-test") ++} ++ ++func TestDebug(t *testing.T) { ++ grp := Group{ ++ manager: testCloudManager(t), ++ minSize: 5, ++ maxSize: 55, ++ } ++ grp.groupID = "sig-test" ++ assert.Equal(t, grp.Debug(), "sig-test (5:55)") ++} ++ ++func TestBuildGroup(t *testing.T) { ++ provider := testCloudProvider(t, testCloudManager(t)) ++ ++ _, err := provider.manager.buildGroupFromSpec("a") ++ assert.Error(t, err) ++ _, err = provider.manager.buildGroupFromSpec("a:b:c") ++ assert.Error(t, err) ++ _, err = provider.manager.buildGroupFromSpec("1:") ++ assert.Error(t, err) ++ _, err = provider.manager.buildGroupFromSpec("1:2:") ++ assert.Error(t, err) ++ ++ grp, err := provider.manager.buildGroupFromSpec("111:222:sig-test") ++ assert.NoError(t, err) ++ assert.Equal(t, 111, grp.MinSize()) ++ assert.Equal(t, 222, grp.MaxSize()) ++ assert.Equal(t, "sig-test", grp.Id()) ++} ++ ++func TestGetResourceLimiter(t *testing.T) { ++ provider := testCloudProvider(t, testCloudManager(t)) ++ _, err := provider.GetResourceLimiter() ++ assert.NoError(t, err) ++} ++ ++func TestCleanup(t *testing.T) { ++ provider := testCloudProvider(t, testCloudManager(t)) ++ err := provider.Cleanup() ++ assert.NoError(t, err) ++} +\ No newline at end of file +diff --git a/cluster-autoscaler/cloudprovider/spotinst/spotinst_manager.go b/cluster-autoscaler/cloudprovider/spotinst/spotinst_manager.go +new file mode 100644 
+index 000000000..c5505f360 +--- /dev/null ++++ b/cluster-autoscaler/cloudprovider/spotinst/spotinst_manager.go +@@ -0,0 +1,503 @@ ++/* ++Copyright 2016 The Kubernetes Authors. ++Licensed under the Apache License, Version 2.0 (the "License"); ++you may not use this file except in compliance with the License. ++You may obtain a copy of the License at ++ http://www.apache.org/licenses/LICENSE-2.0 ++Unless required by applicable law or agreed to in writing, software ++distributed under the License is distributed on an "AS IS" BASIS, ++WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++See the License for the specific language governing permissions and ++limitations under the License. ++*/ ++ ++package spotinst ++ ++import ( ++ "context" ++ "errors" ++ "fmt" ++ "io" ++ "math/rand" ++ "strconv" ++ "strings" ++ "sync" ++ "time" ++ ++ "k8s.io/autoscaler/cluster-autoscaler/utils/gpu" ++ "k8s.io/klog/v2" ++ ++ "k8s.io/autoscaler/cluster-autoscaler/utils/mpscontext" ++ ++ "github.com/spotinst/spotinst-sdk-go/service/elastigroup" ++ "github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/aws" ++ "github.com/spotinst/spotinst-sdk-go/spotinst" ++ "github.com/spotinst/spotinst-sdk-go/spotinst/log" ++ "github.com/spotinst/spotinst-sdk-go/spotinst/session" ++ "github.com/spotinst/spotinst-sdk-go/spotinst/util/stringutil" ++ gcfg "gopkg.in/gcfg.v1" ++ apiv1 "k8s.io/api/core/v1" ++ "k8s.io/apimachinery/pkg/api/resource" ++ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ++ "k8s.io/apimachinery/pkg/util/wait" ++ "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" ++ "k8s.io/autoscaler/cluster-autoscaler/config/dynamic" ++ "k8s.io/autoscaler/cluster-autoscaler/utils/gpumemory" ++ kubeletapis "k8s.io/kubelet/pkg/apis" ++) ++ ++// CloudManager holds the config and client. 
++type CloudManager struct { ++ groupService elastigroup.Service ++ groups []*Group ++ refreshedAt time.Time ++ refreshInterval time.Duration ++ interruptCh chan struct{} ++ cacheMu sync.Mutex ++ cache map[string]*Group // k: InstanceID, v: Group ++} ++ ++// CloudConfig holds the configuration parsed from the --cloud-config flag. ++// All fields are required unless otherwise specified. ++type CloudConfig struct { ++ Global struct{} ++} ++ ++// NewCloudManager constructs manager object. ++func NewCloudManager(config io.Reader, discoveryOpts cloudprovider.NodeGroupDiscoveryOptions) (*CloudManager, error) { ++ klog.Info("Building Spotinst cloud manager") ++ ++ cfg, err := readCloudConfig(config) ++ if err != nil { ++ return nil, err ++ } ++ ++ svc, err := newService(cfg) ++ if err != nil { ++ return nil, err ++ } ++ ++ manager := &CloudManager{ ++ groupService: svc, ++ refreshInterval: time.Minute, ++ interruptCh: make(chan struct{}), ++ groups: make([]*Group, 0), ++ cache: make(map[string]*Group), ++ } ++ ++ if err := manager.addNodeGroups(discoveryOpts.NodeGroupSpecs); err != nil { ++ return nil, err ++ } ++ ++ go wait.Until(func() { ++ manager.cacheMu.Lock() ++ defer manager.cacheMu.Unlock() ++ ++ if err := manager.forceRefresh(); err != nil { ++ klog.Errorf("Error while refreshing cache: %v", err) ++ } ++ }, time.Hour, manager.interruptCh) ++ ++ return manager, nil ++} ++ ++// newService returns a new instance of Spotinst Service. ++func newService(cloudConfig *CloudConfig) (elastigroup.Service, error) { ++ // Create a new config. ++ config := spotinst.DefaultConfig() ++ config.WithLogger(newStdLogger()) ++ config.WithUserAgent("Kubernetes-ClusterAutoscaler") ++ ++ // Create a new session. ++ sess := session.New(config) ++ ++ // Create a new service. ++ svc := elastigroup.New(sess) ++ ++ return svc, nil ++} ++ ++func newStdLogger() log.Logger { ++ return log.LoggerFunc(func(format string, args ...interface{}) { ++ klog.V(4).Infof(format, args...) 
++ }) ++} ++ ++// readCloudConfig reads an instance of Config from config reader. ++func readCloudConfig(config io.Reader) (*CloudConfig, error) { ++ var cfg CloudConfig ++ ++ if config != nil { ++ if err := gcfg.ReadInto(&cfg, config); err != nil { ++ return nil, fmt.Errorf("couldn't read Spotinst config: %v", err) ++ } ++ } ++ ++ return &cfg, nil ++} ++ ++func (mgr *CloudManager) addNodeGroups(specs []string) error { ++ klog.Info("Attempting to add node groups") ++ ++ for _, spec := range specs { ++ if err := mgr.addNodeGroup(spec); err != nil { ++ return fmt.Errorf("could not register group with spec %s: %v", spec, err) ++ } ++ } ++ ++ return nil ++} ++ ++func (mgr *CloudManager) addNodeGroup(spec string) error { ++ klog.Infof("Attempting to add node group: %s", spec) ++ ++ group, err := mgr.buildGroupFromSpec(spec) ++ if err != nil { ++ return fmt.Errorf("could not parse spec for node group: %v", err) ++ } ++ err = mgr.RegisterGroup(group) ++ if err != nil { ++ return fmt.Errorf("could not register the group(%s): %v", group.Id(), err) ++ } ++ ++ klog.Infof("Node group added: %s", group.groupID) ++ return nil ++} ++ ++func (mgr *CloudManager) buildGroupFromSpec(value string) (*Group, error) { ++ spec, err := dynamic.SpecFromString(value, true) ++ if err != nil { ++ return nil, fmt.Errorf("failed to parse node group spec: %v", err) ++ } ++ group := &Group{ ++ manager: mgr, ++ groupID: spec.Name, ++ minSize: spec.MinSize, ++ maxSize: spec.MaxSize, ++ } ++ return group, nil ++} ++ ++// RegisterGroup registers a resource group in Spotinst Manager. ++func (mgr *CloudManager) RegisterGroup(grp *Group) error { ++ mgr.cacheMu.Lock() ++ defer mgr.cacheMu.Unlock() ++ ++ group, err := mgr.getResourceForGroup(grp.Id()) ++ if err != nil { ++ return err ++ } ++ grp.group = group ++ ++ mgr.groups = append(mgr.groups, grp) ++ return nil ++} ++ ++// GetGroupSize gets the current size of the group. 
++func (mgr *CloudManager) GetGroupSize(grp *Group) (int64, error) { ++ group, err := mgr.getResourceForGroup(grp.Id()) ++ if err != nil { ++ return -1, err ++ } ++ size := spotinst.IntValue(group.Capacity.Target) ++ return int64(size), nil ++} ++ ++// SetGroupSize sets the instances count in a Group by updating a ++// predefined Spotinst stack parameter (specified by the user). ++func (mgr *CloudManager) SetGroupSize(grp *Group, size int64) error { ++ in := &aws.UpdateGroupInput{ ++ Group: &aws.Group{ ++ ID: spotinst.String(grp.Id()), ++ Capacity: &aws.Capacity{ ++ Target: spotinst.Int(int(size)), ++ Minimum: spotinst.Int(grp.minSize), ++ Maximum: spotinst.Int(grp.maxSize), ++ }, ++ }, ++ } ++ _, err := mgr.groupService.CloudProviderAWS().Update(context.Background(), in) ++ if err != nil { ++ return err ++ } ++ return nil ++} ++ ++// GetGroupForInstance retrieves the resource group that contains ++// a given instance. ++func (mgr *CloudManager) GetGroupForInstance(instanceID string) (*Group, error) { ++ mgr.cacheMu.Lock() ++ defer mgr.cacheMu.Unlock() ++ ++ if group, ok := mgr.cache[instanceID]; ok { ++ return group, nil ++ } ++ ++ klog.V(8).Infof("Instance `%s` does not belong to any managed group", instanceID) ++ return nil, nil ++} ++ ++// DeleteInstances detaches the specified instances from the ++// Spotinst Elastigroup ++func (mgr *CloudManager) DeleteInstances(instanceIDs []string) error { ++ if len(instanceIDs) == 0 { ++ return nil ++ } ++ commonGroup, err := mgr.GetGroupForInstance(instanceIDs[0]) ++ if err != nil { ++ return err ++ } ++ for _, instanceID := range instanceIDs { ++ instanceGroup, err := mgr.GetGroupForInstance(instanceID) ++ if err != nil { ++ return err ++ } ++ if instanceGroup.groupID != commonGroup.groupID { ++ return errors.New("cannot delete instances which don't belong to the same group") ++ } ++ } ++ in := &aws.DetachGroupInput{ ++ GroupID: spotinst.String(commonGroup.groupID), ++ InstanceIDs: instanceIDs, ++
ShouldDecrementTargetCapacity: spotinst.Bool(true), ++ ShouldTerminateInstances: spotinst.Bool(true), ++ } ++ if _, err := mgr.groupService.CloudProviderAWS().Detach(context.Background(), in); err != nil { ++ return fmt.Errorf("failed to detach instances from group %s: %v", commonGroup.groupID, err) ++ } ++ return nil ++} ++ ++func (mgr *CloudManager) getResourceForGroup(groupID string) (*aws.Group, error) { ++ in := &aws.ReadGroupInput{ ++ GroupID: spotinst.String(groupID), ++ } ++ out, err := mgr.groupService.CloudProviderAWS().Read(context.Background(), in) ++ if err != nil { ++ return nil, err ++ } ++ if out.Group == nil { ++ return nil, fmt.Errorf("failed to get group %s", groupID) ++ } ++ return out.Group, nil ++} ++ ++// Cleanup cleans up open resources before the cloud provider is destroyed, i.e. go routines etc. ++func (mgr *CloudManager) Cleanup() error { ++ close(mgr.interruptCh) ++ return nil ++} ++ ++// Refresh is called before every main loop and can be used to dynamically update cloud provider state. ++// In particular the list of node groups returned by NodeGroups can change as a result of CloudProvider.Refresh(). 
++func (mgr *CloudManager) Refresh() error { ++ mgr.cacheMu.Lock() ++ defer mgr.cacheMu.Unlock() ++ ++ if mgr.refreshedAt.Add(mgr.refreshInterval).After(time.Now()) { ++ return nil ++ } ++ return mgr.forceRefresh() ++} ++ ++func (mgr *CloudManager) forceRefresh() error { ++ mgr.regenerateCache() ++ mgr.refreshedAt = time.Now() ++ klog.V(2).Infof("Refreshed, next refresh after %v", mgr.refreshedAt.Add(mgr.refreshInterval)) ++ return nil ++} ++ ++func (mgr *CloudManager) regenerateCache() { ++ mgr.cache = make(map[string]*Group) ++ for _, group := range mgr.groups { ++ klog.V(4).Infof("Regenerating resource group information for %s", group.groupID) ++ if err := mgr.refreshGroupNodes(group); err != nil { ++ klog.Warningf("Could not retrieve nodes for group %s: %v", group.groupID, err) ++ } ++ } ++} ++ ++func (mgr *CloudManager) refreshGroupNodes(grp *Group) error { ++ in := &aws.StatusGroupInput{ ++ GroupID: spotinst.String(grp.Id()), ++ } ++ status, err := mgr.groupService.CloudProviderAWS().Status(context.Background(), in) ++ if err != nil { ++ return err ++ } ++ for _, instance := range status.Instances { ++ if instance.ID != nil { ++ instanceID := spotinst.StringValue(instance.ID) ++ klog.V(8).Infof("Managing AWS instance with ID %s in group %s", instanceID, grp.Id()) ++ mgr.cache[instanceID] = grp ++ } ++ } ++ return nil ++} ++ ++type groupTemplate struct { ++ InstanceType *instanceType ++ Region string ++ Zone string ++ Tags []*aws.Tag ++} ++ ++func (mgr *CloudManager) inferInstanceType(instanceTypeName string) *instanceType { ++ ret := &instanceType{ ++ InstanceType: instanceTypeName, ++ VCPU: 1, ++ MemoryMb: 1024, // 1GB ++ GPU: 0, ++ } ++ size := 1 ++ if strings.HasSuffix(instanceTypeName, ".medium") || strings.HasSuffix(instanceTypeName, ".large") { ++ size = 1 ++ } else if strings.HasSuffix(instanceTypeName, ".xlarge") { ++ size = 2 ++ } else { ++ elems := strings.Split(instanceTypeName, ".") ++ if len(elems) > 1 { ++ nums := strings.Split(elems[1], 
"xlarge") ++ if len(nums) > 0 { ++ if num, err := strconv.Atoi(nums[0]); err == nil { ++ size = num * 2 ++ } ++ } ++ } ++ } ++ ret.VCPU = 2 * int64(size) ++ ret.MemoryMb = 1024 * 2 * ret.VCPU ++ if strings.HasPrefix(instanceTypeName, "g") || strings.HasPrefix(instanceTypeName, "p") { ++ ret.GPU = int64(size / 4) ++ if ret.GPU <= 0 { ++ ret.GPU = 1 ++ } ++ } ++ return ret ++} ++ ++func (mgr *CloudManager) buildGroupTemplate(groupID string) (*groupTemplate, error) { ++ klog.Infof("Building template for group %s", groupID) ++ ++ group, err := mgr.getResourceForGroup(groupID) ++ if err != nil { ++ return nil, err ++ } ++ ++ if len(group.Compute.AvailabilityZones) < 1 { ++ return nil, fmt.Errorf("unable to get first AvailabilityZone for %s", groupID) ++ } ++ ++ zone := spotinst.StringValue(group.Compute.AvailabilityZones[0].Name) ++ region := zone[0 : len(zone)-1] ++ ++ if len(group.Compute.AvailabilityZones) > 1 { ++ klog.Warningf("Found multiple availability zones, using %s", zone) ++ } ++ ++ instanceTypeName := spotinst.StringValue(group.Compute.InstanceTypes.OnDemand) ++ foundInstanceType := InstanceTypes[instanceTypeName] ++ if foundInstanceType == nil { ++ klog.Warningf("Unable to get node template info for instance type %s", instanceTypeName) ++ foundInstanceType = mgr.inferInstanceType(instanceTypeName) ++ } ++ ++ tmpl := &groupTemplate{ ++ InstanceType: foundInstanceType, ++ Region: region, ++ Zone: zone, ++ Tags: group.Compute.LaunchSpecification.Tags, ++ } ++ ++ return tmpl, nil ++} ++ ++func (mgr *CloudManager) buildNodeFromTemplate(group *Group, template *groupTemplate) (*apiv1.Node, error) { ++ klog.Infof("Building node from template of group %s", group.Id()) ++ ++ node := apiv1.Node{} ++ nodeName := fmt.Sprintf("%s-group-%d", group.groupID, rand.Int63()) ++ ++ node.ObjectMeta = metav1.ObjectMeta{ ++ Name: nodeName, ++ SelfLink: fmt.Sprintf("/api/v1/nodes/%s", nodeName), ++ Labels: map[string]string{}, ++ } ++ ++ node.Status = apiv1.NodeStatus{ ++ 
Capacity: apiv1.ResourceList{}, ++ } ++ ++ node.Status.Capacity[apiv1.ResourcePods] = *resource.NewQuantity(110, resource.DecimalSI) ++ node.Status.Capacity[apiv1.ResourceCPU] = *resource.NewQuantity(template.InstanceType.VCPU, resource.DecimalSI) ++ node.Status.Capacity[apiv1.ResourceMemory] = *resource.NewQuantity(template.InstanceType.MemoryMb*1024*1024, resource.DecimalSI) ++ node.Status.Capacity[gpu.ResourceNvidiaGPU] = *resource.NewQuantity(template.InstanceType.GPU, resource.DecimalSI) ++ node.Status.Capacity[gpumemory.ResourceVisenzeGPUMemory] = *resource.NewQuantity(template.InstanceType.GPUMemory, resource.DecimalSI) ++ node.Status.Capacity[mpscontext.ResourceVisenzeMPSContext] = *resource.NewQuantity(template.InstanceType.MPSContext, resource.DecimalSI) ++ node.Status.Allocatable = node.Status.Capacity ++ ++ // NodeLabels ++ node.Labels = cloudprovider.JoinStringMaps(node.Labels, extractLabelsFromGroup(template.Tags)) ++ ++ // GenericLabels ++ node.Labels = cloudprovider.JoinStringMaps(node.Labels, buildGenericLabels(template, nodeName)) ++ ++ node.Spec.Taints = extractTaintsFromGroup(template.Tags) ++ node.Status.Conditions = cloudprovider.BuildReadyConditions() ++ ++ klog.V(4).Infof("Node `%s` labels: %s", nodeName, stringutil.Stringify(node.Labels)) ++ klog.V(4).Infof("Node `%s` taints: %s", nodeName, stringutil.Stringify(node.Spec.Taints)) ++ ++ return &node, nil ++} ++ ++func buildGenericLabels(template *groupTemplate, nodeName string) map[string]string { ++ result := make(map[string]string) ++ ++ result[kubeletapis.LabelArch] = cloudprovider.DefaultArch ++ result[kubeletapis.LabelOS] = cloudprovider.DefaultOS ++ result[apiv1.LabelInstanceType] = template.InstanceType.InstanceType ++ result[apiv1.LabelZoneRegion] = template.Region ++ result[apiv1.LabelZoneFailureDomain] = template.Zone ++ result[apiv1.LabelHostname] = nodeName ++ ++ return result ++} ++ ++func extractLabelsFromGroup(tags []*aws.Tag) map[string]string { ++ result := 
make(map[string]string) ++ ++ for _, tag := range tags { ++ k := *tag.Key ++ v := *tag.Value ++ splits := strings.Split(k, "k8s.io/cluster-autoscaler/node-template/label/") ++ if len(splits) > 1 { ++ label := splits[1] ++ if label != "" { ++ result[label] = v ++ } ++ } ++ } ++ ++ return result ++} ++ ++func extractTaintsFromGroup(tags []*aws.Tag) []apiv1.Taint { ++ taints := make([]apiv1.Taint, 0) ++ ++ for _, tag := range tags { ++ k := *tag.Key ++ v := *tag.Value ++ splits := strings.Split(k, "k8s.io/cluster-autoscaler/node-template/taint/") ++ if len(splits) > 1 { ++ values := strings.SplitN(v, ":", 2) ++ taints = append(taints, apiv1.Taint{ ++ Key: splits[1], ++ Value: values[0], ++ Effect: apiv1.TaintEffect(values[1]), ++ }) ++ } ++ } ++ ++ return taints ++} +\ No newline at end of file +diff --git a/cluster-autoscaler/cloudprovider/spotinst/spotinst_node_group.go b/cluster-autoscaler/cloudprovider/spotinst/spotinst_node_group.go +new file mode 100644 +index 000000000..8eb1efd0f +--- /dev/null ++++ b/cluster-autoscaler/cloudprovider/spotinst/spotinst_node_group.go +@@ -0,0 +1,232 @@ ++/* ++Copyright 2016 The Kubernetes Authors. ++Licensed under the Apache License, Version 2.0 (the "License"); ++you may not use this file except in compliance with the License. ++You may obtain a copy of the License at ++ http://www.apache.org/licenses/LICENSE-2.0 ++Unless required by applicable law or agreed to in writing, software ++distributed under the License is distributed on an "AS IS" BASIS, ++WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++See the License for the specific language governing permissions and ++limitations under the License. 
++*/ ++ ++package spotinst ++ ++import ( ++ "context" ++ "errors" ++ "fmt" ++ "regexp" ++ "strings" ++ ++ "k8s.io/klog/v2" ++ ++ "github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/aws" ++ "github.com/spotinst/spotinst-sdk-go/spotinst" ++ apiv1 "k8s.io/api/core/v1" ++ "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" ++ "k8s.io/autoscaler/cluster-autoscaler/config" ++ schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" ++) ++ ++// Group declaration of spotinst nodegroup ++type Group struct { ++ manager *CloudManager ++ group *aws.Group ++ groupID string ++ minSize int ++ maxSize int ++} ++ ++// MaxSize returns maximum size of the node group. ++func (grp *Group) MaxSize() int { ++ return grp.maxSize ++} ++ ++// MinSize returns minimum size of the node group. ++func (grp *Group) MinSize() int { ++ return grp.minSize ++} ++ ++// TargetSize returns the current target size of the node group. ++func (grp *Group) TargetSize() (int, error) { ++ size, err := grp.manager.GetGroupSize(grp) ++ return int(size), err ++} ++ ++// IncreaseSize increases the size of the node group. ++func (grp *Group) IncreaseSize(delta int) error { ++ if delta <= 0 { ++ return errors.New("size increase must be positive") ++ } ++ size, err := grp.manager.GetGroupSize(grp) ++ if err != nil { ++ return err ++ } ++ if int(size)+delta > grp.MaxSize() { ++ return fmt.Errorf("size increase too large - desired:%d max:%d", int(size)+delta, grp.MaxSize()) ++ } ++ return grp.manager.SetGroupSize(grp, size+int64(delta)) ++} ++ ++// DeleteNodes deletes nodes from this node group. 
++func (grp *Group) DeleteNodes(nodes []*apiv1.Node) error { ++ size, err := grp.manager.GetGroupSize(grp) ++ if err != nil { ++ return fmt.Errorf("error when deleting nodes, retrieving size of group %s failed: %v", grp.Id(), err) ++ } ++ if int(size) <= grp.MinSize() { ++ return errors.New("min size reached, nodes will not be deleted") ++ } ++ toBeDeleted := make([]string, 0) ++ for _, node := range nodes { ++ belongs, err := grp.Belongs(node) ++ if err != nil { ++ return fmt.Errorf("failed to check membership of node %s in group %s: %v", node.Name, grp.Id(), err) ++ } ++ if !belongs { ++ return fmt.Errorf("%s belongs to a different group than %s", node.Name, grp.Id()) ++ } ++ instanceID, err := extractInstanceId(node.Spec.ProviderID) ++ if err != nil { ++ return fmt.Errorf("node %s's cloud provider ID is malformed: %v", node.Name, err) ++ } ++ toBeDeleted = append(toBeDeleted, instanceID) ++ } ++ return grp.manager.DeleteInstances(toBeDeleted) ++} ++ ++// DecreaseTargetSize decreases the target size of the node group. ++func (grp *Group) DecreaseTargetSize(delta int) error { ++ if delta >= 0 { ++ return errors.New("size decrease must be negative") ++ } ++ size, err := grp.manager.GetGroupSize(grp) ++ if err != nil { ++ return err ++ } ++ nodes, err := grp.Nodes() ++ if err != nil { ++ return err ++ } ++ if int(size)+delta < len(nodes) { ++ return fmt.Errorf("size decrease too large - desired:%d existing:%d", int(size)+delta, len(nodes)) ++ } ++ return grp.manager.SetGroupSize(grp, size+int64(delta)) ++} ++ ++// Id returns a unique identifier of the node group. ++func (grp *Group) Id() string { ++ return grp.groupID ++} ++ ++// Debug returns a string containing all information regarding this node group. ++func (grp *Group) Debug() string { ++ return fmt.Sprintf("%s (%d:%d)", grp.Id(), grp.MinSize(), grp.MaxSize()) ++} ++ ++// Nodes returns a list of all nodes that belong to this node group.
++func (grp *Group) Nodes() ([]cloudprovider.Instance, error) { ++ in := &aws.StatusGroupInput{ ++ GroupID: spotinst.String(grp.Id()), ++ } ++ status, err := grp.manager.groupService.CloudProviderAWS().Status(context.Background(), in) ++ if err != nil { ++ return []cloudprovider.Instance{}, err ++ } ++ out := make([]cloudprovider.Instance, 0) ++ for _, instance := range status.Instances { ++ if instance.ID != nil && instance.AvailabilityZone != nil { ++ out = append(out, cloudprovider.Instance{Id: fmt.Sprintf("aws:///%s/%s", *instance.AvailabilityZone, *instance.ID)}) ++ } ++ } ++ return out, nil ++} ++ ++// TemplateNodeInfo returns a node template for this node group. ++func (grp *Group) TemplateNodeInfo() (*schedulerframework.NodeInfo, error) { ++ klog.Infof("No working nodes in node group %s, trying to generate from template", grp.Id()) ++ ++ template, err := grp.manager.buildGroupTemplate(grp.Id()) ++ if err != nil { ++ return nil, err ++ } ++ ++ node, err := grp.manager.buildNodeFromTemplate(grp, template) ++ if err != nil { ++ return nil, err ++ } ++ ++ nodeInfo := schedulerframework.NewNodeInfo(cloudprovider.BuildKubeProxy(grp.Id())) ++ nodeInfo.SetNode(node) ++ return nodeInfo, nil ++} ++ ++// Belongs returns true if the given node belongs to the NodeGroup. ++func (grp *Group) Belongs(node *apiv1.Node) (bool, error) { ++ instanceID, err := extractInstanceId(node.Spec.ProviderID) ++ if err != nil { ++ return false, err ++ } ++ group, err := grp.manager.GetGroupForInstance(instanceID) ++ if err != nil { ++ return false, err ++ } ++ if group == nil { ++ return false, fmt.Errorf("%s does not belong to a known group", node.Name) ++ } ++ return true, nil ++} ++ ++// Exist checks if the node group really exists on the cloud provider side. ++func (grp *Group) Exist() bool { ++ return true ++} ++ ++// Create creates the node group on the cloud provider side. 
++func (grp *Group) Create() (cloudprovider.NodeGroup, error) { ++ return nil, cloudprovider.ErrAlreadyExist ++} ++ ++// Delete deletes the node group on the cloud provider side. ++func (grp *Group) Delete() error { ++ return cloudprovider.ErrNotImplemented ++} ++ ++// Autoprovisioned returns true if the node group is autoprovisioned. ++func (grp *Group) Autoprovisioned() bool { ++ return false ++} ++ ++func (grp *Group) GetOptions(defaults config.NodeGroupAutoscalingOptions) (*config.NodeGroupAutoscalingOptions, error) { ++ return nil, cloudprovider.ErrNotImplemented ++} ++ ++var ( ++ spotinstProviderRE = regexp.MustCompile(`^spotinst\:\/\/\/[-0-9a-z]*\/[-0-9a-z]*$`) ++ awsProviderRE = regexp.MustCompile(`^aws\:\/\/\/[-0-9a-z]*\/[-0-9a-z]*$`) ++) ++ ++func extractInstanceId(providerID string) (string, error) { ++ var prefix string ++ ++ if spotinstProviderRE.FindStringSubmatch(providerID) != nil { ++ prefix = "spotinst:///" ++ } ++ ++ if awsProviderRE.FindStringSubmatch(providerID) != nil { ++ prefix = "aws:///" ++ } ++ ++ if prefix == "" { ++ return "", fmt.Errorf("expected node provider ID to be one of the "+ ++ "forms `spotinst:////` or `aws:////`, got `%s`", providerID) ++ } ++ ++ parts := strings.Split(providerID[len(prefix):], "/") ++ instanceID := parts[1] ++ ++ klog.V(8).Infof("Instance ID `%s` extracted from provider `%s`", instanceID, providerID) ++ return instanceID, nil ++} +\ No newline at end of file +diff --git a/cluster-autoscaler/scripts/gpu-deploy-tmpl.yaml b/cluster-autoscaler/scripts/gpu-deploy-tmpl.yaml +new file mode 100644 +index 000000000..1055cc07d +--- /dev/null ++++ b/cluster-autoscaler/scripts/gpu-deploy-tmpl.yaml +@@ -0,0 +1,26 @@ ++apiVersion: apps/v1 ++kind: Deployment ++metadata: ++ name: gpu-pod ++ labels: ++ app: gpu-pod ++spec: ++ replicas: 0 ++ selector: ++ matchLabels: ++ app: gpu-pod ++ template: ++ metadata: ++ labels: ++ app: gpu-pod ++ spec: ++ nodeSelector: ++ visenze.component: search ++ containers: ++ - name: 
digits-container ++ image: nvcr.io/nvidia/digits:20.12-tensorflow-py3 ++ command: ++ - sleep ++ - infinity ++ resources: ++ limits: +\ No newline at end of file +diff --git a/cluster-autoscaler/scripts/test-ca.sh b/cluster-autoscaler/scripts/test-ca.sh +new file mode 100644 +index 000000000..6eb3df670 +--- /dev/null ++++ b/cluster-autoscaler/scripts/test-ca.sh +@@ -0,0 +1,74 @@ ++DEPLOY_NAME="gpu-pod" ++ ++function check() { ++ local target=$1 ++ local is_match="false" ++ ++ for i in $(seq 500) ++ do ++ AVAILABLE_REPLICA=$(kubectl get deploy ${DEPLOY_NAME} | awk '{print $4}' | tail -n 1) ++ NODES=$(kubectl get nodes -l visenze.component=search,visenze.gpu=true -o json | jq '.items | length') ++ if [ ${AVAILABLE_REPLICA} -eq ${target} ] && [ ${NODES} -eq ${target} ] ++ then ++ is_match="true" ++ break ++ fi ++ sleep 5 ++ done ++ echo ${is_match} ++} ++ ++function scale() { ++ kubectl scale --replicas=$1 deployment/${DEPLOY_NAME} ++} ++ ++function scale_and_check() { ++ local target=$1 ++ echo "Scaling to ${target}, checking..." 
++ scale ${target} ++ if [ $(check ${target}) = "false" ] ++ then ++ echo "Scaling to ${target} doesn't work" ++ exit 1 ++ else ++ echo "Scaling to ${target} succeed" ++ fi ++} ++ ++function update_resource_limit() { ++ cp gpu-deploy-tmpl.yaml gpu-deploy-tmp.yaml ++ local append_txt="" ++ case $1 in ++ gpu_num) ++ append_txt=" nvidia.com/gpu: 1" ++ ;; ++ ++ gpu_memory) ++ append_txt=" visenze.com/nvidia-gpu-memory: 8988051968" ++ ;; ++ ++ mps_context) ++ append_txt=" visenze.com/nvidia-mps-context: 18" ++ ;; ++ ++ # *) ++ # STATEMENTS ++ # ;; ++ esac ++ echo "$append_txt" >> gpu-deploy-tmp.yaml ++ kubectl apply -f gpu-deploy-tmp.yaml ++} ++ ++function test_with_resource() { ++ echo "check the resource $1" ++ update_resource_limit $1 ++ scale_and_check 1 ++ scale_and_check 2 ++ scale_and_check 1 ++ scale_and_check 0 ++} ++ ++ ++test_with_resource "gpu_num" ++test_with_resource "gpu_memory" ++test_with_resource "mps_context" +\ No newline at end of file +diff --git a/cluster-autoscaler/utils/gpumemory/gpumemory.go b/cluster-autoscaler/utils/gpumemory/gpumemory.go +new file mode 100644 +index 000000000..7d357c10c +--- /dev/null ++++ b/cluster-autoscaler/utils/gpumemory/gpumemory.go +@@ -0,0 +1,41 @@ ++package gpumemory ++ ++import ( ++ apiv1 "k8s.io/api/core/v1" ++ "k8s.io/apimachinery/pkg/api/resource" ++) ++ ++const ( ++ // ResourceVisenzeGPUMemory is the name of the GPU Memory resource ++ ResourceVisenzeGPUMemory = "visenze.com/nvidia-gpu-memory" ++ // GPULabel is the label added to nodes with GPU resource by Visenze. ++ // If you're not scaling - this is probably the problem! 
++ GPULabel = "accelerator" ++) ++ ++// NodeHasGpuMemory returns true if a given node has the GPU memory resource ++func NodeHasGpuMemory(node *apiv1.Node) bool { ++ _, hasGpuLabel := node.Labels[GPULabel] ++ gpuAllocatable, hasGpuAllocatable := node.Status.Allocatable[ResourceVisenzeGPUMemory] ++ return hasGpuLabel || (hasGpuAllocatable && !gpuAllocatable.IsZero()) ++} ++ ++// PodRequestsGpuMemory returns true if a given pod has GPU Memory request ++func PodRequestsGpuMemory(pod *apiv1.Pod) bool { ++ for _, container := range pod.Spec.Containers { ++ if container.Resources.Requests != nil { ++ _, gpuMemoryFound := container.Resources.Requests[ResourceVisenzeGPUMemory] ++ if gpuMemoryFound { ++ return true ++ } ++ } ++ } ++ return false ++} ++ ++// RequestInfo gives some information about how much GPU memory is needed ++type RequestInfo struct { ++ MaximumMemory resource.Quantity ++ TotalMemory resource.Quantity ++ Pods []*apiv1.Pod ++} +\ No newline at end of file +diff --git a/cluster-autoscaler/utils/gpumemory/gpumemory_test.go b/cluster-autoscaler/utils/gpumemory/gpumemory_test.go +new file mode 100644 +index 000000000..14507cf51 +--- /dev/null ++++ b/cluster-autoscaler/utils/gpumemory/gpumemory_test.go +@@ -0,0 +1,83 @@ ++package gpumemory ++ ++import ( ++ "testing" ++ ++ "github.com/stretchr/testify/assert" ++ apiv1 "k8s.io/api/core/v1" ++ "k8s.io/apimachinery/pkg/api/resource" ++ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ++) ++ ++func TestNodeHasGpuMemory(t *testing.T) { ++ gpuLabels := map[string]string{ ++ GPULabel: "nvidia-tesla-k80", ++ } ++ nodeGpuReady := &apiv1.Node{ ++ ObjectMeta: metav1.ObjectMeta{ ++ Name: "nodeGpuReady", ++ Labels: gpuLabels, ++ }, ++ Status: apiv1.NodeStatus{ ++ Capacity: apiv1.ResourceList{}, ++ Allocatable: apiv1.ResourceList{}, ++ }, ++ } ++ nodeGpuReady.Status.Allocatable[ResourceVisenzeGPUMemory] = *resource.NewQuantity(8e9, resource.DecimalSI) ++ nodeGpuReady.Status.Capacity[ResourceVisenzeGPUMemory] = *resource.NewQuantity(8e9,
resource.DecimalSI) ++ assert.True(t, NodeHasGpuMemory(nodeGpuReady)) ++ ++ nodeGpuUnready := &apiv1.Node{ ++ ObjectMeta: metav1.ObjectMeta{ ++ Name: "nodeGpuUnready", ++ Labels: gpuLabels, ++ }, ++ Status: apiv1.NodeStatus{ ++ Capacity: apiv1.ResourceList{}, ++ Allocatable: apiv1.ResourceList{}, ++ }, ++ } ++ assert.True(t, NodeHasGpuMemory(nodeGpuUnready)) ++ ++ nodeNoGpu := &apiv1.Node{ ++ ObjectMeta: metav1.ObjectMeta{ ++ Name: "nodeNoGpu", ++ Labels: map[string]string{}, ++ }, ++ Status: apiv1.NodeStatus{ ++ Capacity: apiv1.ResourceList{}, ++ Allocatable: apiv1.ResourceList{}, ++ }, ++ } ++ assert.False(t, NodeHasGpuMemory(nodeNoGpu)) ++} ++ ++func TestPodRequestsGpuMemory(t *testing.T) { ++ podNoGpu := &apiv1.Pod{ ++ Spec: apiv1.PodSpec{ ++ Containers: []apiv1.Container{ ++ apiv1.Container{ ++ Resources: apiv1.ResourceRequirements{ ++ Requests: apiv1.ResourceList{ ++ apiv1.ResourceCPU: *resource.NewQuantity(1, resource.DecimalSI), ++ }, ++ }, ++ }, ++ }, ++ }, ++ } ++ podWithGpu := &apiv1.Pod{Spec: apiv1.PodSpec{Containers: []apiv1.Container{ ++ apiv1.Container{ ++ Resources: apiv1.ResourceRequirements{ ++ Requests: apiv1.ResourceList{ ++ apiv1.ResourceCPU: *resource.NewQuantity(1, resource.DecimalSI), ++ ResourceVisenzeGPUMemory: *resource.NewQuantity(1, resource.DecimalSI), ++ }, ++ }, ++ }, ++ }}} ++ podWithGpu.Spec.Containers[0].Resources.Requests[ResourceVisenzeGPUMemory] = *resource.NewQuantity(1, resource.DecimalSI) ++ ++ assert.False(t, PodRequestsGpuMemory(podNoGpu)) ++ assert.True(t, PodRequestsGpuMemory(podWithGpu)) ++} +\ No newline at end of file +diff --git a/cluster-autoscaler/utils/mpscontext/mpscontext.go b/cluster-autoscaler/utils/mpscontext/mpscontext.go +new file mode 100644 +index 000000000..22d9a6668 +--- /dev/null ++++ b/cluster-autoscaler/utils/mpscontext/mpscontext.go +@@ -0,0 +1,6 @@ ++package mpscontext ++ ++// Custom resource for NVIDIA MPS context ++const ( ++ ResourceVisenzeMPSContext = "visenze.com/nvidia-mps-context" ++) +\ No 
newline at end of file +diff --git a/cluster-autoscaler/visenze.md b/cluster-autoscaler/visenze.md +new file mode 100644 +index 000000000..e69de29bb