diff --git a/cluster-autoscaler/go.mod b/cluster-autoscaler/go.mod
index 5a7de87f9cae..94f7ba769faa 100644
--- a/cluster-autoscaler/go.mod
+++ b/cluster-autoscaler/go.mod
@@ -25,6 +25,7 @@ require (
github.com/prometheus/client_golang v1.12.1
github.com/satori/go.uuid v1.2.0
github.com/spf13/pflag v1.0.5
+ github.com/spotinst/spotinst-sdk-go v1.181.0
github.com/stretchr/testify v1.8.4
golang.org/x/crypto v0.11.0
golang.org/x/net v0.10.0
@@ -172,6 +173,7 @@ require (
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
+ gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
gopkg.in/warnings.v0 v0.1.1 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
diff --git a/cluster-autoscaler/go.sum b/cluster-autoscaler/go.sum
index 9101107f8cc9..1bc0cc959530 100644
--- a/cluster-autoscaler/go.sum
+++ b/cluster-autoscaler/go.sum
@@ -563,6 +563,8 @@ github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
+github.com/spotinst/spotinst-sdk-go v1.181.0 h1:y7PnxLCJ5nluVt3MSk4oAleShX6VZ61//z34refefC0=
+github.com/spotinst/spotinst-sdk-go v1.181.0/go.mod h1:XBphO/VlaxENgGAyx3x+oE/3c9Jq7/WKBynN3Xy3Qrg=
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
@@ -1109,6 +1111,8 @@ gopkg.in/gcfg.v1 v1.2.0 h1:0HIbH907iBTAntm+88IJV2qmJALDAh8sPekI9Vc1fm0=
gopkg.in/gcfg.v1 v1.2.0/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
+gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
+gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
diff --git a/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/LICENSE b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/LICENSE
new file mode 100644
index 000000000000..261eeb9e9f8b
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/NOTICE.md b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/NOTICE.md
new file mode 100644
index 000000000000..918a63fbf6b2
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/NOTICE.md
@@ -0,0 +1,14 @@
+
diff --git a/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/elastigroup.go b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/elastigroup.go
new file mode 100644
index 000000000000..d653e33c0cc9
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/elastigroup.go
@@ -0,0 +1,61 @@
+package elastigroup
+
+import (
+ "github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/aws"
+ "github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/azure"
+ azurev3 "github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/azure/v3"
+ "github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/gcp"
+ "github.com/spotinst/spotinst-sdk-go/spotinst"
+ "github.com/spotinst/spotinst-sdk-go/spotinst/client"
+ "github.com/spotinst/spotinst-sdk-go/spotinst/session"
+)
+
+// Service provides the API operation methods for making requests to endpoints
+// of the Spotinst API. See this package's package overview docs for details on
+// the service.
+type Service interface {
+ CloudProviderAWS() aws.Service
+ CloudProviderAzure() azure.Service
+ CloudProviderAzureV3() azurev3.Service
+ CloudProviderGCP() gcp.Service
+}
+
+type ServiceOp struct {
+ Client *client.Client
+}
+
+var _ Service = &ServiceOp{}
+
+func New(sess *session.Session, cfgs ...*spotinst.Config) *ServiceOp {
+ cfg := &spotinst.Config{}
+ cfg.Merge(sess.Config)
+ cfg.Merge(cfgs...)
+
+ return &ServiceOp{
+ Client: client.New(cfg),
+ }
+}
+
+func (s *ServiceOp) CloudProviderAWS() aws.Service {
+ return &aws.ServiceOp{
+ Client: s.Client,
+ }
+}
+
+func (s *ServiceOp) CloudProviderAzure() azure.Service {
+ return &azure.ServiceOp{
+ Client: s.Client,
+ }
+}
+
+func (s *ServiceOp) CloudProviderAzureV3() azurev3.Service {
+ return &azurev3.ServiceOp{
+ Client: s.Client,
+ }
+}
+
+func (s *ServiceOp) CloudProviderGCP() gcp.Service {
+ return &gcp.ServiceOp{
+ Client: s.Client,
+ }
+}
diff --git a/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/aws/aws.go b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/aws/aws.go
new file mode 100644
index 000000000000..b813c65be26f
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/aws/aws.go
@@ -0,0 +1,5504 @@
+package aws
+
+import (
+ "context"
+ "encoding/json"
+ "io/ioutil"
+ "net/http"
+ "strconv"
+ "time"
+
+ "github.com/spotinst/spotinst-sdk-go/spotinst"
+ "github.com/spotinst/spotinst-sdk-go/spotinst/client"
+ "github.com/spotinst/spotinst-sdk-go/spotinst/util/jsonutil"
+ "github.com/spotinst/spotinst-sdk-go/spotinst/util/uritemplates"
+)
+
+// A Product represents the type of an operating system.
+type Product int
+
+const (
+ // ProductWindows represents the Windows product.
+ ProductWindows Product = iota
+
+ // ProductWindowsVPC represents the Windows (Amazon VPC) product.
+ ProductWindowsVPC
+
+ // ProductLinuxUnix represents the Linux/Unix product.
+ ProductLinuxUnix
+
+ // ProductLinuxUnixVPC represents the Linux/Unix (Amazon VPC) product.
+ ProductLinuxUnixVPC
+
+ // ProductSUSELinux represents the SUSE Linux product.
+ ProductSUSELinux
+
+ // ProductSUSELinuxVPC represents the SUSE Linux (Amazon VPC) product.
+ ProductSUSELinuxVPC
+)
+
+var ProductName = map[Product]string{
+ ProductWindows: "Windows",
+ ProductWindowsVPC: "Windows (Amazon VPC)",
+ ProductLinuxUnix: "Linux/UNIX",
+ ProductLinuxUnixVPC: "Linux/UNIX (Amazon VPC)",
+ ProductSUSELinux: "SUSE Linux",
+ ProductSUSELinuxVPC: "SUSE Linux (Amazon VPC)",
+}
+
+var ProductValue = map[string]Product{
+ "Windows": ProductWindows,
+ "Windows (Amazon VPC)": ProductWindowsVPC,
+ "Linux/UNIX": ProductLinuxUnix,
+ "Linux/UNIX (Amazon VPC)": ProductLinuxUnixVPC,
+ "SUSE Linux": ProductSUSELinux,
+ "SUSE Linux (Amazon VPC)": ProductSUSELinuxVPC,
+}
+
+func (p Product) String() string {
+ return ProductName[p]
+}
+
+type Group struct {
+ ID *string `json:"id,omitempty"`
+ Name *string `json:"name,omitempty"`
+ Description *string `json:"description,omitempty"`
+ Region *string `json:"region,omitempty"`
+ Capacity *Capacity `json:"capacity,omitempty"`
+ Compute *Compute `json:"compute,omitempty"`
+ Strategy *Strategy `json:"strategy,omitempty"`
+ Scaling *Scaling `json:"scaling,omitempty"`
+ Scheduling *Scheduling `json:"scheduling,omitempty"`
+ Integration *Integration `json:"thirdPartiesIntegration,omitempty"`
+
+ // Read-only fields.
+ CreatedAt *time.Time `json:"createdAt,omitempty"`
+ UpdatedAt *time.Time `json:"updatedAt,omitempty"`
+
+ // forceSendFields is a list of field names (e.g. "Keys") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ forceSendFields []string
+
+ // nullFields is a list of field names (e.g. "Keys") to include in API
+ // requests with the JSON null value. By default, fields with empty
+ // values are omitted from API requests. However, any field with an
+ // empty value appearing in NullFields will be sent to the server as
+ // null. It is an error if a field in this list has a non-empty value.
+ // This may be used to include null fields in Patch requests.
+ nullFields []string
+}
+
+type Integration struct {
+ EC2ContainerService *EC2ContainerServiceIntegration `json:"ecs,omitempty"`
+ ElasticBeanstalk *ElasticBeanstalkIntegration `json:"elasticBeanstalk,omitempty"`
+ CodeDeploy *CodeDeployIntegration `json:"codeDeploy,omitempty"`
+ OpsWorks *OpsWorksIntegration `json:"opsWorks,omitempty"`
+ Rancher *RancherIntegration `json:"rancher,omitempty"`
+ Kubernetes *KubernetesIntegration `json:"kubernetes,omitempty"`
+ Mesosphere *MesosphereIntegration `json:"mesosphere,omitempty"`
+ Multai *MultaiIntegration `json:"mlbRuntime,omitempty"`
+ Nomad *NomadIntegration `json:"nomad,omitempty"`
+ Chef *ChefIntegration `json:"chef,omitempty"`
+ Gitlab *GitlabIntegration `json:"gitlab,omitempty"`
+ Route53 *Route53Integration `json:"route53,omitempty"`
+ DockerSwarm *DockerSwarmIntegration `json:"dockerSwarm,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type InstanceHealth struct {
+ InstanceID *string `json:"instanceId,omitempty"`
+ SpotRequestID *string `json:"spotRequestId,omitempty"`
+ GroupID *string `json:"groupId,omitempty"`
+ AvailabilityZone *string `json:"availabilityZone,omitempty"`
+ LifeCycle *string `json:"lifeCycle,omitempty"`
+ HealthStatus *string `json:"healthStatus,omitempty"`
+}
+
+type AutoScale struct {
+ IsEnabled *bool `json:"isEnabled,omitempty"`
+ IsAutoConfig *bool `json:"isAutoConfig,omitempty"`
+ Cooldown *int `json:"cooldown,omitempty"`
+ Headroom *AutoScaleHeadroom `json:"headroom,omitempty"`
+ Down *AutoScaleDown `json:"down,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type AutoScaleECS struct {
+ AutoScale
+ Attributes []*AutoScaleAttributes `json:"attributes,omitempty"`
+ ShouldScaleDownNonServiceTasks *bool `json:"shouldScaleDownNonServiceTasks,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type AutoScaleKubernetes struct {
+ AutoScale
+ Labels []*AutoScaleLabel `json:"labels,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type AutoScaleNomad struct {
+ AutoScale
+ Constraints []*AutoScaleConstraint `json:"constraints,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type AutoScaleDockerSwarm struct {
+ AutoScale
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type AutoScaleHeadroom struct {
+ CPUPerUnit *int `json:"cpuPerUnit,omitempty"`
+ GPUPerUnit *int `json:"gpuPerUnit,omitempty"`
+ MemoryPerUnit *int `json:"memoryPerUnit,omitempty"`
+ NumOfUnits *int `json:"numOfUnits,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type AutoScaleDown struct {
+ EvaluationPeriods *int `json:"evaluationPeriods,omitempty"`
+ MaxScaleDownPercentage *float64 `json:"maxScaleDownPercentage,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type AutoScaleConstraint struct {
+ Key *string `json:"key,omitempty"`
+ Value *string `json:"value,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type AutoScaleLabel struct {
+ Key *string `json:"key,omitempty"`
+ Value *string `json:"value,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type AutoScaleAttributes struct {
+ Key *string `json:"key,omitempty"`
+ Value *string `json:"value,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type ElasticBeanstalkIntegration struct {
+ EnvironmentID *string `json:"environmentId,omitempty"`
+ ManagedActions *BeanstalkManagedActions `json:"managedActions,omitempty"`
+ DeploymentPreferences *BeanstalkDeploymentPreferences `json:"deploymentPreferences,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type BeanstalkManagedActions struct {
+ PlatformUpdate *BeanstalkPlatformUpdate `json:"platformUpdate,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type BeanstalkPlatformUpdate struct {
+ PerformAt *string `json:"performAt,omitempty"`
+ TimeWindow *string `json:"timeWindow,omitempty"`
+ UpdateLevel *string `json:"updateLevel,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type BeanstalkDeploymentPreferences struct {
+ AutomaticRoll *bool `json:"automaticRoll,omitempty"`
+ BatchSizePercentage *int `json:"batchSizePercentage,omitempty"`
+ GracePeriod *int `json:"gracePeriod,omitempty"`
+ Strategy *BeanstalkDeploymentStrategy `json:"strategy,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type BeanstalkDeploymentStrategy struct {
+ Action *string `json:"action,omitempty"`
+ ShouldDrainInstances *bool `json:"shouldDrainInstances,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type CodeDeployIntegration struct {
+ DeploymentGroups []*DeploymentGroup `json:"deploymentGroups,omitempty"`
+ CleanUpOnFailure *bool `json:"cleanUpOnFailure,omitempty"`
+ TerminateInstanceOnFailure *bool `json:"terminateInstanceOnFailure,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type DeploymentGroup struct {
+ ApplicationName *string `json:"applicationName,omitempty"`
+ DeploymentGroupName *string `json:"deploymentGroupName,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type OpsWorksIntegration struct {
+ LayerID *string `json:"layerId,omitempty"`
+ StackType *string `json:"stackType,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type RancherIntegration struct {
+ MasterHost *string `json:"masterHost,omitempty"`
+ AccessKey *string `json:"accessKey,omitempty"`
+ SecretKey *string `json:"secretKey,omitempty"`
+ Version *string `json:"version,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type EC2ContainerServiceIntegration struct {
+ ClusterName *string `json:"clusterName,omitempty"`
+ AutoScale *AutoScaleECS `json:"autoScale,omitempty"`
+ Batch *Batch `json:"batch,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type Batch struct {
+ JobQueueNames []string `json:"jobQueueNames,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type KubernetesIntegration struct {
+ IntegrationMode *string `json:"integrationMode,omitempty"`
+ ClusterIdentifier *string `json:"clusterIdentifier,omitempty"`
+ Server *string `json:"apiServer,omitempty"`
+ Token *string `json:"token,omitempty"`
+ AutoScale *AutoScaleKubernetes `json:"autoScale,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type MesosphereIntegration struct {
+ Server *string `json:"apiServer,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type MultaiIntegration struct {
+ DeploymentID *string `json:"deploymentId,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type NomadIntegration struct {
+ MasterHost *string `json:"masterHost,omitempty"`
+ MasterPort *int `json:"masterPort,omitempty"`
+ ACLToken *string `json:"aclToken,omitempty"`
+ AutoScale *AutoScaleNomad `json:"autoScale,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type ChefIntegration struct {
+ Server *string `json:"chefServer,omitempty"`
+ Organization *string `json:"organization,omitempty"`
+ User *string `json:"user,omitempty"`
+ PEMKey *string `json:"pemKey,omitempty"`
+ Version *string `json:"chefVersion,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type DockerSwarmIntegration struct {
+ MasterHost *string `json:"masterHost,omitempty"`
+ MasterPort *int `json:"masterPort,omitempty"`
+ AutoScale *AutoScaleDockerSwarm `json:"autoScale,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type Route53Integration struct {
+ Domains []*Domain `json:"domains,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type Domain struct {
+ HostedZoneID *string `json:"hostedZoneId,omitempty"`
+ SpotinstAccountID *string `json:"spotinstAccountId,omitempty"`
+ RecordSetType *string `json:"recordSetType,omitempty"`
+ RecordSets []*RecordSet `json:"recordSets,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type RecordSet struct {
+ Name *string `json:"name,omitempty"`
+ UsePublicIP *bool `json:"usePublicIp,omitempty"`
+ UsePublicDNS *bool `json:"usePublicDns,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type GitlabIntegration struct {
+ Runner *GitlabRunner `json:"runner,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type GitlabRunner struct {
+ IsEnabled *bool `json:"isEnabled,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type Scheduling struct {
+ Tasks []*Task `json:"tasks,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type Task struct {
+ IsEnabled *bool `json:"isEnabled,omitempty"`
+ Type *string `json:"taskType,omitempty"`
+ Frequency *string `json:"frequency,omitempty"`
+ CronExpression *string `json:"cronExpression,omitempty"`
+ StartTime *string `json:"startTime,omitempty"`
+ ScaleTargetCapacity *int `json:"scaleTargetCapacity,omitempty"`
+ ScaleMinCapacity *int `json:"scaleMinCapacity,omitempty"`
+ ScaleMaxCapacity *int `json:"scaleMaxCapacity,omitempty"`
+ BatchSizePercentage *int `json:"batchSizePercentage,omitempty"`
+ GracePeriod *int `json:"gracePeriod,omitempty"`
+ TargetCapacity *int `json:"targetCapacity,omitempty"`
+ MinCapacity *int `json:"minCapacity,omitempty"`
+ MaxCapacity *int `json:"maxCapacity,omitempty"`
+ Adjustment *int `json:"adjustment,omitempty"`
+ AdjustmentPercentage *int `json:"adjustmentPercentage,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type Scaling struct {
+ Up []*ScalingPolicy `json:"up,omitempty"`
+ Down []*ScalingPolicy `json:"down,omitempty"`
+ Target []*ScalingPolicy `json:"target,omitempty"`
+ MultipleMetrics *MultipleMetrics `json:"multipleMetrics,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type MultipleMetrics struct {
+ Metrics []*Metrics `json:"metrics,omitempty"`
+ Expressions []*Expressions `json:"expressions,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type ScalingPolicy struct {
+ PolicyName *string `json:"policyName,omitempty"`
+ MetricName *string `json:"metricName,omitempty"`
+ Namespace *string `json:"namespace,omitempty"`
+ Source *string `json:"source,omitempty"`
+ Statistic *string `json:"statistic,omitempty"`
+ Unit *string `json:"unit,omitempty"`
+ Threshold *float64 `json:"threshold,omitempty"`
+ Adjustment *int `json:"adjustment,omitempty"`
+ MinTargetCapacity *int `json:"minTargetCapacity,omitempty"`
+ MaxTargetCapacity *int `json:"maxTargetCapacity,omitempty"`
+ EvaluationPeriods *int `json:"evaluationPeriods,omitempty"`
+ Period *int `json:"period,omitempty"`
+ Cooldown *int `json:"cooldown,omitempty"`
+ Operator *string `json:"operator,omitempty"`
+ Dimensions []*Dimension `json:"dimensions,omitempty"`
+ Action *Action `json:"action,omitempty"`
+ Target *float64 `json:"target,omitempty"`
+ IsEnabled *bool `json:"isEnabled,omitempty"`
+ MaxCapacityPerScale *string `json:"maxCapacityPerScale,omitempty"`
+ Predictive *Predictive `json:"predictive,omitempty"`
+ StepAdjustments []*StepAdjustment `json:"stepAdjustments,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type Metrics struct {
+ Name *string `json:"name,omitempty"`
+ MetricName *string `json:"metricName,omitempty"`
+ Namespace *string `json:"namespace,omitempty"`
+ Dimensions []*Dimension `json:"dimensions,omitempty"`
+ ExtendedStatistic *string `json:"extendedStatistic,omitempty"`
+ Statistic *string `json:"statistic,omitempty"`
+ Unit *string `json:"unit,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type Expressions struct {
+ Expression *string `json:"expression,omitempty"`
+ Name *string `json:"name,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type Action struct {
+ Type *string `json:"type,omitempty"`
+ Adjustment *string `json:"adjustment,omitempty"`
+ MinTargetCapacity *string `json:"minTargetCapacity,omitempty"`
+ MaxTargetCapacity *string `json:"maxTargetCapacity,omitempty"`
+ Maximum *string `json:"maximum,omitempty"`
+ Minimum *string `json:"minimum,omitempty"`
+ Target *string `json:"target,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type Dimension struct {
+ Name *string `json:"name,omitempty"`
+ Value *string `json:"value,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type Predictive struct {
+ Mode *string `json:"mode,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type StepAdjustment struct {
+ Action *Action `json:"action,omitempty"`
+ Threshold *int `json:"threshold,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+// Strategy configures how the group provisions capacity: spot/on-demand
+// mix, draining behavior, fallback, persistence and revert-to-spot rules.
+type Strategy struct {
+ Risk *float64 `json:"risk,omitempty"`
+ OnDemandCount *int `json:"onDemandCount,omitempty"`
+ DrainingTimeout *int `json:"drainingTimeout,omitempty"`
+ AvailabilityVsCost *string `json:"availabilityVsCost,omitempty"`
+ LifetimePeriod *string `json:"lifetimePeriod,omitempty"`
+ UtilizeReservedInstances *bool `json:"utilizeReservedInstances,omitempty"`
+ // Wire name is "fallbackToOd", not "fallbackToOnDemand".
+ FallbackToOnDemand *bool `json:"fallbackToOd,omitempty"`
+ SpinUpTime *int `json:"spinUpTime,omitempty"`
+ Signals []*Signal `json:"signals,omitempty"`
+ Persistence *Persistence `json:"persistence,omitempty"`
+ RevertToSpot *RevertToSpot `json:"revertToSpot,omitempty"`
+ ScalingStrategy *ScalingStrategy `json:"scalingStrategy,omitempty"`
+ UtilizeCommitments *bool `json:"utilizeCommitments,omitempty"`
+ MinimumInstanceLifetime *int `json:"minimumInstanceLifetime,omitempty"`
+ ConsiderODPricing *bool `json:"considerODPricing,omitempty"`
+ ImmediateODRecoverThreshold *int `json:"immediateODRecoverThreshold,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+// Persistence controls which instance resources (private IP, block/root
+// devices) survive instance replacement.
+type Persistence struct {
+ ShouldPersistPrivateIP *bool `json:"shouldPersistPrivateIp,omitempty"`
+ ShouldPersistBlockDevices *bool `json:"shouldPersistBlockDevices,omitempty"`
+ ShouldPersistRootDevice *bool `json:"shouldPersistRootDevice,omitempty"`
+ BlockDevicesMode *string `json:"blockDevicesMode,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+// RevertToSpot describes when fallback on-demand capacity is reverted
+// back to spot.
+type RevertToSpot struct {
+ PerformAt *string `json:"performAt,omitempty"`
+ TimeWindows []string `json:"timeWindows,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+// ScalingStrategy tunes scale-down behavior (termination policy/timing).
+type ScalingStrategy struct {
+ TerminateAtEndOfBillingHour *bool `json:"terminateAtEndOfBillingHour,omitempty"`
+ TerminationPolicy *string `json:"terminationPolicy,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+// Signal is a named readiness/health signal with a timeout in seconds
+// (units presumed from the API — confirm against Spot.io docs).
+type Signal struct {
+ Name *string `json:"name,omitempty"`
+ Timeout *int `json:"timeout,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+// Capacity bounds the group size: minimum/maximum/target in the given unit.
+type Capacity struct {
+ Minimum *int `json:"minimum,omitempty"`
+ Maximum *int `json:"maximum,omitempty"`
+ Target *int `json:"target,omitempty"`
+ Unit *string `json:"unit,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+// Compute aggregates everything about what the group launches and where:
+// product, instance types, launch specification, zones and attached
+// networking/storage resources.
+type Compute struct {
+ Product *string `json:"product,omitempty"`
+ InstanceTypes *InstanceTypes `json:"instanceTypes,omitempty"`
+ LaunchSpecification *LaunchSpecification `json:"launchSpecification,omitempty"`
+ AvailabilityZones []*AvailabilityZone `json:"availabilityZones,omitempty"`
+ PreferredAvailabilityZones []string `json:"preferredAvailabilityZones,omitempty"`
+ ElasticIPs []string `json:"elasticIps,omitempty"`
+ EBSVolumePool []*EBSVolume `json:"ebsVolumePool,omitempty"`
+ PrivateIPs []string `json:"privateIps,omitempty"`
+ SubnetIDs []string `json:"subnetIds,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+// EBSVolume maps a device name to a pool of reusable EBS volume IDs.
+type EBSVolume struct {
+ DeviceName *string `json:"deviceName,omitempty"`
+ VolumeIDs []string `json:"volumeIds,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+// InstanceTypes lists the instance types the group may launch, either as
+// explicit spot/on-demand lists (optionally weighted) or as attribute-based
+// resource requirements.
+type InstanceTypes struct {
+ // Wire name is lowercase "ondemand".
+ OnDemand *string `json:"ondemand,omitempty"`
+ Spot []string `json:"spot,omitempty"`
+ PreferredSpot []string `json:"preferredSpot,omitempty"`
+ Weights []*InstanceTypeWeight `json:"weights,omitempty"`
+ OnDemandTypes []string `json:"onDemandTypes,omitempty"`
+ ResourceRequirements *ResourceRequirements `json:"resourceRequirements,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+// InstanceTypeWeight assigns a capacity weight to one instance type
+// (wire name "weightedCapacity").
+type InstanceTypeWeight struct {
+ InstanceType *string `json:"instanceType,omitempty"`
+ Weight *int `json:"weightedCapacity,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+// ResourceRequirements selects instance types by attributes (vCPU, memory,
+// GPU ranges) with optional family/generation/type exclusions.
+type ResourceRequirements struct {
+ ExcludedInstanceFamilies []string `json:"excludedInstanceFamilies,omitempty"`
+ ExcludedInstanceGenerations []string `json:"excludedInstanceGenerations,omitempty"`
+ ExcludedInstanceTypes []string `json:"excludedInstanceTypes,omitempty"`
+ RequiredGpu *RequiredGpu `json:"requiredGpu,omitempty"`
+ RequiredMemory *RequiredMemory `json:"requiredMemory,omitempty"`
+ RequiredVCpu *RequiredVCpu `json:"requiredVCpu,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+// RequiredGpu is an inclusive [Minimum, Maximum] GPU-count range.
+type RequiredGpu struct {
+ Maximum *int `json:"maximum,omitempty"`
+ Minimum *int `json:"minimum,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+// RequiredMemory is an inclusive [Minimum, Maximum] memory range.
+type RequiredMemory struct {
+ Maximum *int `json:"maximum,omitempty"`
+ Minimum *int `json:"minimum,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+// RequiredVCpu is an inclusive [Minimum, Maximum] vCPU-count range.
+type RequiredVCpu struct {
+ Maximum *int `json:"maximum,omitempty"`
+ Minimum *int `json:"minimum,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+// AvailabilityZone binds a zone name to a subnet and optional placement group.
+type AvailabilityZone struct {
+ Name *string `json:"name,omitempty"`
+ SubnetID *string `json:"subnetId,omitempty"`
+ PlacementGroupName *string `json:"placementGroupName,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+// LaunchSpecification mirrors an EC2 launch template: AMI, key pair,
+// user data, networking, storage, IAM role, tags and health checks.
+type LaunchSpecification struct {
+ LoadBalancerNames []string `json:"loadBalancerNames,omitempty"`
+ LoadBalancersConfig *LoadBalancersConfig `json:"loadBalancersConfig,omitempty"`
+ SecurityGroupIDs []string `json:"securityGroupIds,omitempty"`
+ HealthCheckType *string `json:"healthCheckType,omitempty"`
+ HealthCheckGracePeriod *int `json:"healthCheckGracePeriod,omitempty"`
+ HealthCheckUnhealthyDurationBeforeReplacement *int `json:"healthCheckUnhealthyDurationBeforeReplacement,omitempty"`
+ Images []*Image `json:"images,omitempty"`
+ ImageID *string `json:"imageId,omitempty"`
+ KeyPair *string `json:"keyPair,omitempty"`
+ UserData *string `json:"userData,omitempty"`
+ ShutdownScript *string `json:"shutdownScript,omitempty"`
+ Tenancy *string `json:"tenancy,omitempty"`
+ Monitoring *bool `json:"monitoring,omitempty"`
+ EBSOptimized *bool `json:"ebsOptimized,omitempty"`
+ // Wire name is "iamRole" although the Go field says instance profile.
+ IAMInstanceProfile *IAMInstanceProfile `json:"iamRole,omitempty"`
+ CreditSpecification *CreditSpecification `json:"creditSpecification,omitempty"`
+ BlockDeviceMappings []*BlockDeviceMapping `json:"blockDeviceMappings,omitempty"`
+ NetworkInterfaces []*NetworkInterface `json:"networkInterfaces,omitempty"`
+ Tags []*Tag `json:"tags,omitempty"`
+ MetadataOptions *MetadataOptions `json:"metadataOptions,omitempty"`
+ CPUOptions *CPUOptions `json:"cpuOptions,omitempty"`
+ ResourceTagSpecification *ResourceTagSpecification `json:"resourceTagSpecification,omitempty"`
+ ITF *ITF `json:"itf,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+// ITF (instance traffic framework) configures ALB/target-group based
+// traffic management for the group.
+type ITF struct {
+ LoadBalancers []*ITFLoadBalancer `json:"loadBalancers,omitempty"`
+ MigrationHealthinessThreshold *int `json:"migrationHealthinessThreshold,omitempty"`
+ FixedTargetGroups *bool `json:"fixedTargetGroups,omitempty"`
+ WeightStrategy *string `json:"weightStrategy,omitempty"`
+ TargetGroupConfig *TargetGroupConfig `json:"targetGroupConfig,omitempty"`
+ DefaultStaticTargetGroups []*StaticTargetGroup `json:"defaultStaticTargetGroups,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+// ITFLoadBalancer references one load balancer (by ARN) and the listener
+// rules managed for it.
+type ITFLoadBalancer struct {
+ ListenerRules []*ListenerRule `json:"listenerRules,omitempty"`
+ LoadBalancerARN *string `json:"loadBalancerArn,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+// ListenerRule maps a listener rule ARN to its static target groups.
+type ListenerRule struct {
+ RuleARN *string `json:"ruleArn,omitempty"`
+ StaticTargetGroups []*StaticTargetGroup `json:"staticTargetGroups,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+// StaticTargetGroup is a target group ARN plus its traffic percentage.
+type StaticTargetGroup struct {
+ StaticTargetGroupARN *string `json:"arn,omitempty"`
+ Percentage *float64 `json:"percentage,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+// TargetGroupConfig describes how target groups are created: listener
+// protocol/port and the full set of health-check parameters.
+type TargetGroupConfig struct {
+ VPCID *string `json:"vpcId,omitempty"`
+ HealthCheckIntervalSeconds *int `json:"healthCheckIntervalSeconds,omitempty"`
+ HealthCheckPath *string `json:"healthCheckPath,omitempty"`
+ HealthCheckPort *string `json:"healthCheckPort,omitempty"`
+ HealthCheckProtocol *string `json:"healthCheckProtocol,omitempty"`
+ HealthCheckTimeoutSeconds *int `json:"healthCheckTimeoutSeconds,omitempty"`
+ HealthyThresholdCount *int `json:"healthyThresholdCount,omitempty"`
+ UnhealthyThresholdCount *int `json:"unhealthyThresholdCount,omitempty"`
+ Port *int `json:"port,omitempty"`
+ Protocol *string `json:"protocol,omitempty"`
+ ProtocolVersion *string `json:"protocolVersion,omitempty"`
+ Matcher *Matcher `json:"matcher,omitempty"`
+ Tags []*Tag `json:"tags,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+// Matcher holds the HTTP/gRPC response codes counted as healthy.
+type Matcher struct {
+ HTTPCode *string `json:"httpCode,omitempty"`
+ GRPCCode *string `json:"grpcCode,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+// MetadataOptions configures the EC2 instance metadata service (IMDS):
+// token requirement, hop limit, and metadata tags.
+type MetadataOptions struct {
+ HTTPTokens *string `json:"httpTokens,omitempty"`
+ HTTPPutResponseHopLimit *int `json:"httpPutResponseHopLimit,omitempty"`
+ InstanceMetadataTags *string `json:"instanceMetadataTags,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+// CPUOptions controls CPU topology (threads per core).
+type CPUOptions struct {
+ ThreadsPerCore *int `json:"threadsPerCore,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+// ResourceTagSpecification selects which auxiliary resources (volumes,
+// snapshots, ENIs, AMIs) inherit the group's tags.
+type ResourceTagSpecification struct {
+ Volumes *Volumes `json:"volumes,omitempty"`
+ Snapshots *Snapshots `json:"snapshots,omitempty"`
+ ENIs *ENIs `json:"enis,omitempty"`
+ AMIs *AMIs `json:"amis,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+// Volumes toggles tag propagation to EBS volumes.
+type Volumes struct {
+ ShouldTag *bool `json:"shouldTag,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+// Snapshots toggles tag propagation to EBS snapshots.
+type Snapshots struct {
+ ShouldTag *bool `json:"shouldTag,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+// ENIs toggles tag propagation to elastic network interfaces.
+type ENIs struct {
+ ShouldTag *bool `json:"shouldTag,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+// AMIs toggles tag propagation to machine images.
+type AMIs struct {
+ ShouldTag *bool `json:"shouldTag,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+// Image references an AMI by ID.
+type Image struct {
+ Id *string `json:"id,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+// LoadBalancersConfig wraps the list of load balancers attached to the group.
+type LoadBalancersConfig struct {
+ LoadBalancers []*LoadBalancer `json:"loadBalancers,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+// LoadBalancer identifies one attached balancer, by name or ARN for AWS
+// balancers, or by balancer/target-set ID for multai-style balancers.
+type LoadBalancer struct {
+ Name *string `json:"name,omitempty"`
+ Arn *string `json:"arn,omitempty"`
+ Type *string `json:"type,omitempty"`
+ BalancerID *string `json:"balancerId,omitempty"`
+ TargetSetID *string `json:"targetSetId,omitempty"`
+ // Wire name is "azAwareness".
+ ZoneAwareness *bool `json:"azAwareness,omitempty"`
+ AutoWeight *bool `json:"autoWeight,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+// NetworkInterface mirrors an EC2 network-interface specification
+// (device index, IP assignment, security groups, subnet).
+type NetworkInterface struct {
+ ID *string `json:"networkInterfaceId,omitempty"`
+ Description *string `json:"description,omitempty"`
+ DeviceIndex *int `json:"deviceIndex,omitempty"`
+ SecondaryPrivateIPAddressCount *int `json:"secondaryPrivateIpAddressCount,omitempty"`
+ AssociatePublicIPAddress *bool `json:"associatePublicIpAddress,omitempty"`
+ AssociateIPV6Address *bool `json:"associateIpv6Address,omitempty"`
+ DeleteOnTermination *bool `json:"deleteOnTermination,omitempty"`
+ // Wire name is "groups" (EC2's term for security group IDs here).
+ SecurityGroupsIDs []string `json:"groups,omitempty"`
+ PrivateIPAddress *string `json:"privateIpAddress,omitempty"`
+ SubnetID *string `json:"subnetId,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+// BlockDeviceMapping maps a device name to an EBS or virtual (ephemeral)
+// device, mirroring the EC2 block-device-mapping structure.
+type BlockDeviceMapping struct {
+ DeviceName *string `json:"deviceName,omitempty"`
+ VirtualName *string `json:"virtualName,omitempty"`
+ EBS *EBS `json:"ebs,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+// EBS describes one EBS volume: type, size, IOPS/throughput, encryption
+// and lifecycle settings.
+type EBS struct {
+ DeleteOnTermination *bool `json:"deleteOnTermination,omitempty"`
+ Encrypted *bool `json:"encrypted,omitempty"`
+ KmsKeyId *string `json:"kmsKeyId,omitempty"`
+ SnapshotID *string `json:"snapshotId,omitempty"`
+ VolumeType *string `json:"volumeType,omitempty"`
+ VolumeSize *int `json:"volumeSize,omitempty"`
+ IOPS *int `json:"iops,omitempty"`
+ Throughput *int `json:"throughput,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+// IAMInstanceProfile identifies an instance profile by name or ARN.
+type IAMInstanceProfile struct {
+ Name *string `json:"name,omitempty"`
+ Arn *string `json:"arn,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+// CreditSpecification sets the CPU-credit option for burstable instances.
+type CreditSpecification struct {
+ CPUCredits *string `json:"cpuCredits,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+// Instance is a read-only view of one group member as returned by the
+// group status endpoint. It has no forceSendFields/nullFields because it
+// is never sent back to the API.
+type Instance struct {
+ ID *string `json:"instanceId,omitempty"`
+ SpotRequestID *string `json:"spotInstanceRequestId,omitempty"`
+ InstanceType *string `json:"instanceType,omitempty"`
+ Status *string `json:"status,omitempty"`
+ Product *string `json:"product,omitempty"`
+ AvailabilityZone *string `json:"availabilityZone,omitempty"`
+ PrivateIP *string `json:"privateIp,omitempty"`
+ PublicIP *string `json:"publicIp,omitempty"`
+ CreatedAt *time.Time `json:"createdAt,omitempty"`
+ IPv6Address *string `json:"ipv6Address,omitempty"`
+}
+
+// RollStrategy tunes a deployment roll: draining, batch health floor and
+// the failure policy.
+type RollStrategy struct {
+ Action *string `json:"action,omitempty"`
+ ShouldDrainInstances *bool `json:"shouldDrainInstances,omitempty"`
+ BatchMinHealthyPercentage *int `json:"batchMinHealthyPercentage,omitempty"`
+ OnFailure *OnFailure `json:"onFailure,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+// OnFailure describes what a roll does when a batch fails.
+type OnFailure struct {
+ ActionType *string `json:"actionType,omitempty"`
+ ShouldHandleAllBatches *bool `json:"shouldHandleAllBatches,omitempty"`
+ BatchNum *int `json:"batchNum,omitempty"`
+ DrainingTimeout *int `json:"drainingTimeout,omitempty"`
+ ShouldDecrementTargetCapacity *bool `json:"shouldDecrementTargetCapacity,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+// StatefulDeallocation selects which stateful resources are deleted when
+// a group is removed (see ServiceOp.Delete).
+type StatefulDeallocation struct {
+ ShouldDeleteImages *bool `json:"shouldDeleteImages,omitempty"`
+ ShouldDeleteNetworkInterfaces *bool `json:"shouldDeleteNetworkInterfaces,omitempty"`
+ ShouldDeleteVolumes *bool `json:"shouldDeleteVolumes,omitempty"`
+ ShouldDeleteSnapshots *bool `json:"shouldDeleteSnapshots,omitempty"`
+}
+
+// GetInstanceHealthinessInput identifies the group to query.
+type GetInstanceHealthinessInput struct {
+ GroupID *string `json:"groupId,omitempty"`
+}
+
+// GetInstanceHealthinessOutput carries per-instance health records.
+type GetInstanceHealthinessOutput struct {
+ Instances []*InstanceHealth `json:"instances,omitempty"`
+}
+
+// GetGroupEventsInput identifies the group and the start date for the
+// event query.
+type GetGroupEventsInput struct {
+ GroupID *string `json:"groupId,omitempty"`
+ FromDate *string `json:"fromDate,omitempty"`
+}
+
+// GetGroupEventsOutput carries the returned event log entries.
+type GetGroupEventsOutput struct {
+ GroupEvents []*GroupEvent `json:"groupEvents,omitempty"`
+}
+
+// GroupEvent is one event-log entry with its typed sub-events.
+type GroupEvent struct {
+ GroupID *string `json:"groupId,omitempty"`
+ EventType *string `json:"eventType,omitempty"`
+ CreatedAt *string `json:"createdAt,omitempty"`
+ SubEvents []*SubEvent `json:"subEvents,omitempty"`
+}
+
+// SubEvent is a union-style record: Type selects which of the field
+// groups below is populated. The comments mirror the API's event types.
+type SubEvent struct {
+ // common fields
+ Type *string `json:"type,omitempty"`
+
+ // type scaleUp
+ NewSpots []*Spot `json:"newSpots,omitempty"`
+ NewInstances []*NewInstance `json:"newInstances,omitempty"`
+
+ // type scaleDown
+ TerminatedSpots []*Spot `json:"terminatedSpots,omitempty"`
+ TerminatedInstances []*TerminatedInstance `json:"terminatedInstances,omitempty"`
+
+ // type scaleReason
+ ScalingPolicyName *string `json:"scalingPolicyName,omitempty"`
+ Value *int `json:"value,omitempty"`
+ Unit *string `json:"unit,omitempty"`
+ Threshold *int `json:"threshold,omitempty"`
+
+ // type detachedInstance
+ InstanceID *string `json:"instanceId,omitempty"`
+
+ // type unhealthyInstances
+ InstanceIDs []*string `json:"instanceIds,omitempty"`
+
+ // type rollInfo
+ ID *string `json:"id,omitempty"`
+ GroupID *string `json:"groupId,omitempty"`
+ CurrentBatch *int `json:"currentBatch,omitempty"`
+ Status *string `json:"status,omitempty"`
+ CreatedAt *string `json:"createdAt,omitempty"`
+ NumberOfBatches *int `json:"numOfBatches,omitempty"`
+ GracePeriod *int `json:"gracePeriod,omitempty"`
+
+ // type recoverInstances
+ OldSpotRequestIDs []*string `json:"oldSpotRequestIDs,omitempty"`
+ NewSpotRequestIDs []*string `json:"newSpotRequestIDs,omitempty"`
+ OldInstanceIDs []*string `json:"oldInstanceIDs,omitempty"`
+ NewInstanceIDs []*string `json:"newInstanceIDs,omitempty"`
+}
+
+// Spot identifies a spot instance request.
+type Spot struct {
+ SpotInstanceRequestID *string `json:"spotInstanceRequestId,omitempty"`
+}
+
+// NewInstance is an intentionally empty placeholder for scale-up events.
+type NewInstance struct {
+}
+
+// TerminatedInstance is an intentionally empty placeholder for
+// scale-down events.
+type TerminatedInstance struct {
+}
+
+// StatefulInstance is one stateful-instance record: its stable stateful
+// ID, the current backing EC2 instance, and preserved devices.
+type StatefulInstance struct {
+ StatefulInstanceID *string `json:"id,omitempty"`
+ InstanceID *string `json:"instanceId,omitempty"`
+ State *string `json:"state,omitempty"`
+ PrivateIP *string `json:"privateIp,omitempty"`
+ ImageID *string `json:"imageId,omitempty"`
+ Devices []*Device `json:"devices,omitempty"`
+ CreatedAt *string `json:"createdAt,omitempty"`
+ LaunchedAt *string `json:"launchedAt,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+// Device ties a device name to its preserved volume/snapshot IDs.
+type Device struct {
+ DeviceName *string `json:"deviceName,omitempty"`
+ VolumeID *string `json:"volumeId,omitempty"`
+ SnapshotID *string `json:"snapshotId,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+// The types below are the request/response envelopes for the ServiceOp
+// methods further down; each pairs an *Input with an *Output.
+
+// ListGroupsInput has no parameters; List returns all groups.
+type ListGroupsInput struct{}
+
+// ListGroupsOutput carries the groups returned by List.
+type ListGroupsOutput struct {
+ Groups []*Group `json:"groups,omitempty"`
+}
+
+// CreateGroupInput wraps the group definition to create.
+type CreateGroupInput struct {
+ Group *Group `json:"group,omitempty"`
+}
+
+// CreateGroupOutput carries the created group as echoed by the API.
+type CreateGroupOutput struct {
+ Group *Group `json:"group,omitempty"`
+}
+
+// ReadGroupInput identifies the group to fetch.
+type ReadGroupInput struct {
+ GroupID *string `json:"groupId,omitempty"`
+}
+
+// ReadGroupOutput carries the fetched group.
+type ReadGroupOutput struct {
+ Group *Group `json:"group,omitempty"`
+}
+
+// UpdateGroupInput wraps the group to update. The two flags are sent as
+// query parameters, not in the JSON body (json:"-").
+type UpdateGroupInput struct {
+ Group *Group `json:"group,omitempty"`
+ ShouldResumeStateful *bool `json:"-"`
+ AutoApplyTags *bool `json:"-"`
+}
+
+// UpdateGroupOutput carries the updated group.
+type UpdateGroupOutput struct {
+ Group *Group `json:"group,omitempty"`
+}
+
+// DeleteGroupInput identifies the group to delete, with optional
+// stateful-resource cleanup instructions.
+type DeleteGroupInput struct {
+ GroupID *string `json:"groupId,omitempty"`
+ StatefulDeallocation *StatefulDeallocation `json:"statefulDeallocation,omitempty"`
+}
+
+// DeleteGroupOutput is empty; Delete reports success via its error.
+type DeleteGroupOutput struct{}
+
+// StatusGroupInput identifies the group whose instances to list.
+type StatusGroupInput struct {
+ GroupID *string `json:"groupId,omitempty"`
+}
+
+// StatusGroupOutput carries the group's current instances.
+type StatusGroupOutput struct {
+ Instances []*Instance `json:"instances,omitempty"`
+}
+
+// DetachGroupInput selects instances to detach and how to handle
+// capacity/termination while doing so.
+type DetachGroupInput struct {
+ GroupID *string `json:"groupId,omitempty"`
+ InstanceIDs []string `json:"instancesToDetach,omitempty"`
+ ShouldDecrementTargetCapacity *bool `json:"shouldDecrementTargetCapacity,omitempty"`
+ ShouldTerminateInstances *bool `json:"shouldTerminateInstances,omitempty"`
+ DrainingTimeout *int `json:"drainingTimeout,omitempty"`
+}
+
+// DetachGroupOutput is empty; Detach reports success via its error.
+type DetachGroupOutput struct{}
+
+// DeploymentStatusInput identifies one roll (deployment) of a group.
+type DeploymentStatusInput struct {
+ GroupID *string `json:"groupId,omitempty"`
+ RollID *string `json:"id,omitempty"`
+}
+
+// Roll carries a roll status value (e.g. set to "STOPPED" to halt one).
+type Roll struct {
+ Status *string `json:"status,omitempty"`
+}
+
+// RollGroupInput starts a deployment roll with batch size, grace period
+// and an optional strategy.
+type RollGroupInput struct {
+ GroupID *string `json:"groupId,omitempty"`
+ BatchSizePercentage *int `json:"batchSizePercentage,omitempty"`
+ GracePeriod *int `json:"gracePeriod,omitempty"`
+ HealthCheckType *string `json:"healthCheckType,omitempty"`
+ Strategy *RollStrategy `json:"strategy,omitempty"`
+}
+
+// RollECSGroupInput starts a cluster roll for an ECS-backed group.
+type RollECSGroupInput struct {
+ GroupID *string `json:"groupId,omitempty"`
+ Roll *RollECSWrapper `json:"roll,omitempty"`
+}
+
+// RollECSWrapper holds the ECS roll parameters sent under "roll".
+type RollECSWrapper struct {
+ BatchSizePercentage *int `json:"batchSizePercentage,omitempty"`
+ Comment *string `json:"comment,omitempty"`
+}
+
+// RollGroupOutput carries one or more deployment status records.
+type RollGroupOutput struct {
+ RollGroupStatus []*RollGroupStatus `json:"groupDeploymentStatus,omitempty"`
+}
+
+// RollGroupStatus is the status of one roll, including its progress.
+type RollGroupStatus struct {
+ RollID *string `json:"id,omitempty"`
+ RollStatus *string `json:"status,omitempty"`
+ Progress *Progress `json:"progress,omitempty"`
+ CreatedAt *string `json:"createdAt,omitempty"`
+ UpdatedAt *string `json:"updatedAt,omitempty"`
+}
+
+// Progress is a roll's completion value in the given unit.
+type Progress struct {
+ Unit *string `json:"unit,omitempty"`
+ Value *float64 `json:"value,omitempty"`
+}
+
+// StopDeploymentInput identifies the roll to stop; StopDeployment fills
+// in Roll with status "STOPPED" itself.
+type StopDeploymentInput struct {
+ GroupID *string `json:"groupId,omitempty"`
+ RollID *string `json:"id,omitempty"`
+ Roll *Roll `json:"roll,omitempty"`
+}
+
+// StopDeploymentOutput is empty; success is reported via the error.
+type StopDeploymentOutput struct{}
+
+// ListStatefulInstancesInput identifies the group to query.
+type ListStatefulInstancesInput struct {
+ GroupID *string `json:"groupId,omitempty"`
+}
+
+// ListStatefulInstancesOutput carries the group's stateful instances.
+type ListStatefulInstancesOutput struct {
+ StatefulInstances []*StatefulInstance `json:"statefulInstances,omitempty"`
+}
+
+// PauseStatefulInstanceInput identifies one stateful instance to pause.
+type PauseStatefulInstanceInput struct {
+ GroupID *string `json:"groupId,omitempty"`
+ StatefulInstanceID *string `json:"statefulInstanceId,omitempty"`
+}
+
+// PauseStatefulInstanceOutput is empty; success is reported via the error.
+type PauseStatefulInstanceOutput struct{}
+
+// ResumeStatefulInstanceInput identifies one stateful instance to resume.
+type ResumeStatefulInstanceInput struct {
+ GroupID *string `json:"groupId,omitempty"`
+ StatefulInstanceID *string `json:"statefulInstanceId,omitempty"`
+}
+
+// ResumeStatefulInstanceOutput is empty; success is reported via the error.
+type ResumeStatefulInstanceOutput struct{}
+
+// RecycleStatefulInstanceInput identifies one stateful instance to recycle.
+type RecycleStatefulInstanceInput struct {
+ GroupID *string `json:"groupId,omitempty"`
+ StatefulInstanceID *string `json:"statefulInstanceId,omitempty"`
+}
+
+// RecycleStatefulInstanceOutput is empty; success is reported via the error.
+type RecycleStatefulInstanceOutput struct{}
+
+// DeallocateStatefulInstanceInput identifies one stateful instance to
+// deallocate.
+type DeallocateStatefulInstanceInput struct {
+ GroupID *string `json:"groupId,omitempty"`
+ StatefulInstanceID *string `json:"statefulInstanceId,omitempty"`
+}
+
+// DeallocateStatefulInstanceOutput is empty; success is reported via the
+// error.
+type DeallocateStatefulInstanceOutput struct{}
+
+// The helpers below all follow one template: *FromJSON decodes a single
+// record; the plural form decodes the SDK's wrapped envelope
+// (response.items, each item raw JSON) via client.Response; and the
+// *FromHttpResponse form reads the body and delegates.
+//
+// NOTE(review): ioutil.ReadAll has been deprecated since Go 1.16 in
+// favor of io.ReadAll; left as-is because this is vendored SDK code.
+
+// deploymentStatusFromJSON decodes one RollGroupStatus record.
+func deploymentStatusFromJSON(in []byte) (*RollGroupStatus, error) {
+ b := new(RollGroupStatus)
+ if err := json.Unmarshal(in, b); err != nil {
+ return nil, err
+ }
+ return b, nil
+}
+
+// deploymentStatusesFromJSON decodes the wrapped response into a slice
+// of RollGroupStatus records.
+func deploymentStatusesFromJSON(in []byte) ([]*RollGroupStatus, error) {
+ var rw client.Response
+ if err := json.Unmarshal(in, &rw); err != nil {
+ return nil, err
+ }
+ out := make([]*RollGroupStatus, len(rw.Response.Items))
+ // NOTE(review): redundant — the loop below is a no-op for zero items.
+ if len(out) == 0 {
+ return out, nil
+ }
+ for i, rb := range rw.Response.Items {
+ b, err := deploymentStatusFromJSON(rb)
+ if err != nil {
+ return nil, err
+ }
+ out[i] = b
+ }
+ return out, nil
+}
+
+// deploymentStatusFromHttpResponse reads the body and decodes the
+// wrapped deployment statuses. The caller closes resp.Body.
+func deploymentStatusFromHttpResponse(resp *http.Response) ([]*RollGroupStatus, error) {
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, err
+ }
+ return deploymentStatusesFromJSON(body)
+}
+
+// groupFromJSON decodes one Group record.
+func groupFromJSON(in []byte) (*Group, error) {
+ b := new(Group)
+ if err := json.Unmarshal(in, b); err != nil {
+ return nil, err
+ }
+ return b, nil
+}
+
+// groupsFromJSON decodes the wrapped response into a slice of Groups.
+func groupsFromJSON(in []byte) ([]*Group, error) {
+ var rw client.Response
+ if err := json.Unmarshal(in, &rw); err != nil {
+ return nil, err
+ }
+ out := make([]*Group, len(rw.Response.Items))
+ if len(out) == 0 {
+ return out, nil
+ }
+ for i, rb := range rw.Response.Items {
+ b, err := groupFromJSON(rb)
+ if err != nil {
+ return nil, err
+ }
+ out[i] = b
+ }
+ return out, nil
+}
+
+// groupsFromHttpResponse reads the body and decodes the wrapped groups.
+// The caller closes resp.Body.
+func groupsFromHttpResponse(resp *http.Response) ([]*Group, error) {
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, err
+ }
+ return groupsFromJSON(body)
+}
+
+// instanceFromJSON decodes one Instance record.
+func instanceFromJSON(in []byte) (*Instance, error) {
+ b := new(Instance)
+ if err := json.Unmarshal(in, b); err != nil {
+ return nil, err
+ }
+ return b, nil
+}
+
+// instancesFromJSON decodes the wrapped response into a slice of Instances.
+func instancesFromJSON(in []byte) ([]*Instance, error) {
+ var rw client.Response
+ if err := json.Unmarshal(in, &rw); err != nil {
+ return nil, err
+ }
+ out := make([]*Instance, len(rw.Response.Items))
+ if len(out) == 0 {
+ return out, nil
+ }
+ for i, rb := range rw.Response.Items {
+ b, err := instanceFromJSON(rb)
+ if err != nil {
+ return nil, err
+ }
+ out[i] = b
+ }
+ return out, nil
+}
+
+// instancesFromHttpResponse reads the body and decodes the wrapped
+// instances. The caller closes resp.Body.
+func instancesFromHttpResponse(resp *http.Response) ([]*Instance, error) {
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, err
+ }
+ return instancesFromJSON(body)
+}
+
+// instanceHealthFromJSON decodes one InstanceHealth record.
+func instanceHealthFromJSON(in []byte) (*InstanceHealth, error) {
+ b := new(InstanceHealth)
+ if err := json.Unmarshal(in, b); err != nil {
+ return nil, err
+ }
+ return b, nil
+}
+
+// listOfInstanceHealthFromJSON decodes the wrapped response into a slice
+// of InstanceHealth records.
+func listOfInstanceHealthFromJSON(in []byte) ([]*InstanceHealth, error) {
+ var rw client.Response
+ if err := json.Unmarshal(in, &rw); err != nil {
+ return nil, err
+ }
+ out := make([]*InstanceHealth, len(rw.Response.Items))
+ if len(out) == 0 {
+ return out, nil
+ }
+ for i, rb := range rw.Response.Items {
+ b, err := instanceHealthFromJSON(rb)
+ if err != nil {
+ return nil, err
+ }
+ out[i] = b
+ }
+ return out, nil
+}
+
+// listOfInstanceHealthFromHttp reads the body and decodes the wrapped
+// health records. The caller closes resp.Body.
+func listOfInstanceHealthFromHttp(resp *http.Response) ([]*InstanceHealth, error) {
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, err
+ }
+ return listOfInstanceHealthFromJSON(body)
+}
+
+// groupEventFromJSON decodes one GroupEvent record.
+func groupEventFromJSON(in []byte) (*GroupEvent, error) {
+ b := new(GroupEvent)
+ if err := json.Unmarshal(in, b); err != nil {
+ return nil, err
+ }
+ return b, nil
+}
+
+// groupEventsFromJSON decodes the wrapped response into a slice of
+// GroupEvents.
+func groupEventsFromJSON(in []byte) ([]*GroupEvent, error) {
+ var rw client.Response
+ if err := json.Unmarshal(in, &rw); err != nil {
+ return nil, err
+ }
+ out := make([]*GroupEvent, len(rw.Response.Items))
+ if len(out) == 0 {
+ return out, nil
+ }
+ for i, rb := range rw.Response.Items {
+ b, err := groupEventFromJSON(rb)
+ if err != nil {
+ return nil, err
+ }
+ out[i] = b
+ }
+ return out, nil
+}
+
+// groupEventsFromHttpResponse reads the body and decodes the wrapped
+// events. The caller closes resp.Body.
+func groupEventsFromHttpResponse(resp *http.Response) ([]*GroupEvent, error) {
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, err
+ }
+ return groupEventsFromJSON(body)
+}
+
+// StatefulInstanceFromJSON decodes one StatefulInstance record.
+// NOTE(review): exported while every sibling decoder is unexported —
+// likely an upstream inconsistency, but renaming would break API.
+func StatefulInstanceFromJSON(in []byte) (*StatefulInstance, error) {
+ b := new(StatefulInstance)
+ if err := json.Unmarshal(in, b); err != nil {
+ return nil, err
+ }
+ return b, nil
+}
+
+// statefulInstancesFromJSON decodes the wrapped response into a slice of
+// StatefulInstances.
+func statefulInstancesFromJSON(in []byte) ([]*StatefulInstance, error) {
+ var rw client.Response
+ if err := json.Unmarshal(in, &rw); err != nil {
+ return nil, err
+ }
+ out := make([]*StatefulInstance, len(rw.Response.Items))
+ if len(out) == 0 {
+ return out, nil
+ }
+ for i, rb := range rw.Response.Items {
+ b, err := StatefulInstanceFromJSON(rb)
+ if err != nil {
+ return nil, err
+ }
+ out[i] = b
+ }
+ return out, nil
+}
+
+// statefulInstancesFromHttpResponse reads the body and decodes the
+// wrapped stateful instances. The caller closes resp.Body.
+func statefulInstancesFromHttpResponse(resp *http.Response) ([]*StatefulInstance, error) {
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, err
+ }
+ return statefulInstancesFromJSON(body)
+}
+
+// List fetches every Elastigroup via GET /aws/ec2/group.
+func (s *ServiceOp) List(ctx context.Context, input *ListGroupsInput) (*ListGroupsOutput, error) {
+ r := client.NewRequest(http.MethodGet, "/aws/ec2/group")
+ resp, err := client.RequireOK(s.Client.Do(ctx, r))
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ gs, err := groupsFromHttpResponse(resp)
+ if err != nil {
+ return nil, err
+ }
+
+ return &ListGroupsOutput{Groups: gs}, nil
+}
+
+// Create creates a group via POST /aws/ec2/group and returns the first
+// group echoed back by the API (nil if the response was empty).
+func (s *ServiceOp) Create(ctx context.Context, input *CreateGroupInput) (*CreateGroupOutput, error) {
+ r := client.NewRequest(http.MethodPost, "/aws/ec2/group")
+ r.Obj = input
+
+ resp, err := client.RequireOK(s.Client.Do(ctx, r))
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ gs, err := groupsFromHttpResponse(resp)
+ if err != nil {
+ return nil, err
+ }
+
+ output := new(CreateGroupOutput)
+ if len(gs) > 0 {
+ output.Group = gs[0]
+ }
+
+ return output, nil
+}
+
+// Read fetches one group via GET /aws/ec2/group/{groupId}; Output.Group
+// is nil when the response carried no items.
+func (s *ServiceOp) Read(ctx context.Context, input *ReadGroupInput) (*ReadGroupOutput, error) {
+ path, err := uritemplates.Expand("/aws/ec2/group/{groupId}", uritemplates.Values{
+ "groupId": spotinst.StringValue(input.GroupID),
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ r := client.NewRequest(http.MethodGet, path)
+ resp, err := client.RequireOK(s.Client.Do(ctx, r))
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ gs, err := groupsFromHttpResponse(resp)
+ if err != nil {
+ return nil, err
+ }
+
+ output := new(ReadGroupOutput)
+ if len(gs) > 0 {
+ output.Group = gs[0]
+ }
+
+ return output, nil
+}
+
+// Update updates a group via PUT /aws/ec2/group/{groupId}. The group ID
+// travels only in the URL; the two optional flags become query params.
+//
+// NOTE(review): this mutates the caller's input (clears input.Group.ID)
+// — callers must not rely on the ID after calling Update.
+func (s *ServiceOp) Update(ctx context.Context, input *UpdateGroupInput) (*UpdateGroupOutput, error) {
+ path, err := uritemplates.Expand("/aws/ec2/group/{groupId}", uritemplates.Values{
+ "groupId": spotinst.StringValue(input.Group.ID),
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // We do NOT need the ID anymore, so let's drop it.
+ input.Group.ID = nil
+
+ r := client.NewRequest(http.MethodPut, path)
+ r.Obj = input
+
+ if input.ShouldResumeStateful != nil {
+ r.Params.Set("shouldResumeStateful",
+ strconv.FormatBool(spotinst.BoolValue(input.ShouldResumeStateful)))
+ }
+
+ if input.AutoApplyTags != nil {
+ r.Params.Set("autoApplyTags",
+ strconv.FormatBool(spotinst.BoolValue(input.AutoApplyTags)))
+ }
+
+ resp, err := client.RequireOK(s.Client.Do(ctx, r))
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ gs, err := groupsFromHttpResponse(resp)
+ if err != nil {
+ return nil, err
+ }
+
+ output := new(UpdateGroupOutput)
+ if len(gs) > 0 {
+ output.Group = gs[0]
+ }
+
+ return output, nil
+}
+
+// Delete removes a group via DELETE /aws/ec2/group/{groupId}. When
+// stateful deallocation is requested, only that field is sent in the
+// body (GroupID stays URL-only).
+func (s *ServiceOp) Delete(ctx context.Context, input *DeleteGroupInput) (*DeleteGroupOutput, error) {
+ path, err := uritemplates.Expand("/aws/ec2/group/{groupId}", uritemplates.Values{
+ "groupId": spotinst.StringValue(input.GroupID),
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ r := client.NewRequest(http.MethodDelete, path)
+
+ if input.StatefulDeallocation != nil {
+ r.Obj = &DeleteGroupInput{
+ StatefulDeallocation: input.StatefulDeallocation,
+ }
+ }
+
+ resp, err := client.RequireOK(s.Client.Do(ctx, r))
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ return &DeleteGroupOutput{}, nil
+}
+
+// Status lists a group's instances via GET /aws/ec2/group/{groupId}/status.
+func (s *ServiceOp) Status(ctx context.Context, input *StatusGroupInput) (*StatusGroupOutput, error) {
+ path, err := uritemplates.Expand("/aws/ec2/group/{groupId}/status", uritemplates.Values{
+ "groupId": spotinst.StringValue(input.GroupID),
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ r := client.NewRequest(http.MethodGet, path)
+ resp, err := client.RequireOK(s.Client.Do(ctx, r))
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ is, err := instancesFromHttpResponse(resp)
+ if err != nil {
+ return nil, err
+ }
+
+ return &StatusGroupOutput{Instances: is}, nil
+}
+
+// DeploymentStatus fetches one roll's status via
+// GET /aws/ec2/group/{groupId}/roll/{rollId}.
+func (s *ServiceOp) DeploymentStatus(ctx context.Context, input *DeploymentStatusInput) (*RollGroupOutput, error) {
+ path, err := uritemplates.Expand("/aws/ec2/group/{groupId}/roll/{rollId}", uritemplates.Values{
+ "groupId": spotinst.StringValue(input.GroupID),
+ "rollId": spotinst.StringValue(input.RollID),
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ r := client.NewRequest(http.MethodGet, path)
+
+ resp, err := client.RequireOK(s.Client.Do(ctx, r))
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ deployments, err := deploymentStatusFromHttpResponse(resp)
+ if err != nil {
+ return nil, err
+ }
+
+ return &RollGroupOutput{deployments}, nil
+}
+
+// DeploymentStatusECS fetches an ECS cluster roll's status via
+// GET /aws/ec2/group/{groupId}/clusterRoll/{rollId}.
+func (s *ServiceOp) DeploymentStatusECS(ctx context.Context, input *DeploymentStatusInput) (*RollGroupOutput, error) {
+ path, err := uritemplates.Expand("/aws/ec2/group/{groupId}/clusterRoll/{rollId}", uritemplates.Values{
+ "groupId": spotinst.StringValue(input.GroupID),
+ "rollId": spotinst.StringValue(input.RollID),
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ r := client.NewRequest(http.MethodGet, path)
+
+ resp, err := client.RequireOK(s.Client.Do(ctx, r))
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ deployments, err := deploymentStatusFromHttpResponse(resp)
+ if err != nil {
+ return nil, err
+ }
+
+ return &RollGroupOutput{deployments}, nil
+}
+
+// StopDeployment halts a roll by PUTting {"roll":{"status":"STOPPED"}}
+// to /aws/ec2/group/{groupId}/roll/{rollId}.
+//
+// NOTE(review): mutates the caller's input — clears GroupID/RollID (they
+// travel in the URL, not the body) and overwrites input.Roll.
+func (s *ServiceOp) StopDeployment(ctx context.Context, input *StopDeploymentInput) (*StopDeploymentOutput, error) {
+ path, err := uritemplates.Expand("/aws/ec2/group/{groupId}/roll/{rollId}", uritemplates.Values{
+ "groupId": spotinst.StringValue(input.GroupID),
+ "rollId": spotinst.StringValue(input.RollID),
+ })
+
+ if err != nil {
+ return nil, err
+ }
+
+ input.GroupID = nil
+ input.RollID = nil
+
+ r := client.NewRequest(http.MethodPut, path)
+ input.Roll = &Roll{
+ Status: spotinst.String("STOPPED"),
+ }
+ r.Obj = input
+
+ resp, err := client.RequireOK(s.Client.Do(ctx, r))
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ return &StopDeploymentOutput{}, nil
+}
+
+func (s *ServiceOp) Detach(ctx context.Context, input *DetachGroupInput) (*DetachGroupOutput, error) {
+ path, err := uritemplates.Expand("/aws/ec2/group/{groupId}/detachInstances", uritemplates.Values{
+ "groupId": spotinst.StringValue(input.GroupID),
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // We do not need the ID anymore so let's drop it.
+ input.GroupID = nil
+
+ r := client.NewRequest(http.MethodPut, path)
+ r.Obj = input
+
+ resp, err := client.RequireOK(s.Client.Do(ctx, r))
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ return &DetachGroupOutput{}, nil
+}
+
+// Roll starts a blue/green roll of the group via
+// PUT /aws/ec2/group/{groupId}/roll and returns the resulting deployments.
+func (s *ServiceOp) Roll(ctx context.Context, input *RollGroupInput) (*RollGroupOutput, error) {
+	path, err := uritemplates.Expand("/aws/ec2/group/{groupId}/roll", uritemplates.Values{
+		"groupId": spotinst.StringValue(input.GroupID),
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	// We do not need the ID anymore so let's drop it.
+	input.GroupID = nil
+
+	r := client.NewRequest(http.MethodPut, path)
+	r.Obj = input
+
+	resp, err := client.RequireOK(s.Client.Do(ctx, r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	deployments, err := deploymentStatusFromHttpResponse(resp)
+	if err != nil {
+		return nil, err
+	}
+
+	return &RollGroupOutput{deployments}, nil
+}
+
+// RollECS starts an ECS cluster roll via
+// POST /aws/ec2/group/{groupId}/clusterRoll (note: POST, unlike Roll's PUT)
+// and returns the resulting deployments.
+func (s *ServiceOp) RollECS(ctx context.Context, input *RollECSGroupInput) (*RollGroupOutput, error) {
+	path, err := uritemplates.Expand("/aws/ec2/group/{groupId}/clusterRoll", uritemplates.Values{
+		"groupId": spotinst.StringValue(input.GroupID),
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	// We do not need the ID anymore so let's drop it.
+	input.GroupID = nil
+
+	r := client.NewRequest(http.MethodPost, path)
+	r.Obj = input
+
+	resp, err := client.RequireOK(s.Client.Do(ctx, r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	deployments, err := deploymentStatusFromHttpResponse(resp)
+	if err != nil {
+		return nil, err
+	}
+
+	return &RollGroupOutput{deployments}, nil
+}
+
+// GetInstanceHealthiness lists per-instance health for the group via
+// GET /aws/ec2/group/{groupId}/instanceHealthiness.
+func (s *ServiceOp) GetInstanceHealthiness(ctx context.Context, input *GetInstanceHealthinessInput) (*GetInstanceHealthinessOutput, error) {
+	path, err := uritemplates.Expand("/aws/ec2/group/{groupId}/instanceHealthiness", uritemplates.Values{
+		"groupId": spotinst.StringValue(input.GroupID),
+	})
+
+	if err != nil {
+		return nil, err
+	}
+
+	r := client.NewRequest(http.MethodGet, path)
+	resp, err := client.RequireOK(s.Client.Do(ctx, r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	instances, err := listOfInstanceHealthFromHttp(resp)
+	if err != nil {
+		return nil, err
+	}
+
+	return &GetInstanceHealthinessOutput{Instances: instances}, nil
+}
+
+// GetGroupEvents lists group events via GET /aws/ec2/group/{groupId}/events,
+// optionally filtered by the "fromDate" query parameter.
+func (s *ServiceOp) GetGroupEvents(ctx context.Context, input *GetGroupEventsInput) (*GetGroupEventsOutput, error) {
+	path, err := uritemplates.Expand("/aws/ec2/group/{groupId}/events", uritemplates.Values{
+		"groupId": spotinst.StringValue(input.GroupID),
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	r := client.NewRequest(http.MethodGet, path)
+	if input.FromDate != nil {
+		r.Params.Set("fromDate", *input.FromDate)
+	}
+	// NOTE(review): input is also attached as a request body on this GET;
+	// presumably harmless, but confirm the API/client expects it.
+	r.Obj = input
+
+	resp, err := client.RequireOK(s.Client.Do(ctx, r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	events, err := groupEventsFromHttpResponse(resp)
+	if err != nil {
+		return nil, err
+	}
+	return &GetGroupEventsOutput{GroupEvents: events}, nil
+}
+
+// ListStatefulInstances lists the group's stateful instances via
+// GET /aws/ec2/group/{groupId}/statefulInstance.
+func (s *ServiceOp) ListStatefulInstances(ctx context.Context, input *ListStatefulInstancesInput) (*ListStatefulInstancesOutput, error) {
+	path, err := uritemplates.Expand("/aws/ec2/group/{groupId}/statefulInstance", uritemplates.Values{
+		"groupId": spotinst.StringValue(input.GroupID),
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	// We do not need the group ID anymore so let's drop it.
+	input.GroupID = nil
+
+	r := client.NewRequest(http.MethodGet, path)
+	resp, err := client.RequireOK(s.Client.Do(ctx, r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	statefulInstances, err := statefulInstancesFromHttpResponse(resp)
+	if err != nil {
+		return nil, err
+	}
+
+	return &ListStatefulInstancesOutput{StatefulInstances: statefulInstances}, nil
+}
+
+// PauseStatefulInstance pauses a stateful instance via
+// PUT /aws/ec2/group/{groupId}/statefulInstance/{statefulInstanceId}/pause.
+// No request body is sent; success is implied by a 2xx response.
+func (s *ServiceOp) PauseStatefulInstance(ctx context.Context, input *PauseStatefulInstanceInput) (*PauseStatefulInstanceOutput, error) {
+	path, err := uritemplates.Expand("/aws/ec2/group/{groupId}/statefulInstance/{statefulInstanceId}/pause", uritemplates.Values{
+		"groupId":            spotinst.StringValue(input.GroupID),
+		"statefulInstanceId": spotinst.StringValue(input.StatefulInstanceID),
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	r := client.NewRequest(http.MethodPut, path)
+	resp, err := client.RequireOK(s.Client.Do(ctx, r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	return &PauseStatefulInstanceOutput{}, nil
+}
+
+// ResumeStatefulInstance resumes a paused stateful instance via
+// PUT /aws/ec2/group/{groupId}/statefulInstance/{statefulInstanceId}/resume.
+func (s *ServiceOp) ResumeStatefulInstance(ctx context.Context, input *ResumeStatefulInstanceInput) (*ResumeStatefulInstanceOutput, error) {
+	path, err := uritemplates.Expand("/aws/ec2/group/{groupId}/statefulInstance/{statefulInstanceId}/resume", uritemplates.Values{
+		"groupId":            spotinst.StringValue(input.GroupID),
+		"statefulInstanceId": spotinst.StringValue(input.StatefulInstanceID),
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	r := client.NewRequest(http.MethodPut, path)
+	resp, err := client.RequireOK(s.Client.Do(ctx, r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	return &ResumeStatefulInstanceOutput{}, nil
+}
+
+// RecycleStatefulInstance recycles a stateful instance via
+// PUT /aws/ec2/group/{groupId}/statefulInstance/{statefulInstanceId}/recycle.
+func (s *ServiceOp) RecycleStatefulInstance(ctx context.Context, input *RecycleStatefulInstanceInput) (*RecycleStatefulInstanceOutput, error) {
+	path, err := uritemplates.Expand("/aws/ec2/group/{groupId}/statefulInstance/{statefulInstanceId}/recycle", uritemplates.Values{
+		"groupId":            spotinst.StringValue(input.GroupID),
+		"statefulInstanceId": spotinst.StringValue(input.StatefulInstanceID),
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	r := client.NewRequest(http.MethodPut, path)
+	resp, err := client.RequireOK(s.Client.Do(ctx, r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	return &RecycleStatefulInstanceOutput{}, nil
+}
+
+// DeallocateStatefulInstance deallocates a stateful instance via
+// PUT /aws/ec2/group/{groupId}/statefulInstance/{statefulInstanceId}/deallocate.
+func (s *ServiceOp) DeallocateStatefulInstance(ctx context.Context, input *DeallocateStatefulInstanceInput) (*DeallocateStatefulInstanceOutput, error) {
+	path, err := uritemplates.Expand("/aws/ec2/group/{groupId}/statefulInstance/{statefulInstanceId}/deallocate", uritemplates.Values{
+		"groupId":            spotinst.StringValue(input.GroupID),
+		"statefulInstanceId": spotinst.StringValue(input.StatefulInstanceID),
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	r := client.NewRequest(http.MethodPut, path)
+	resp, err := client.RequireOK(s.Client.Do(ctx, r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	return &DeallocateStatefulInstanceOutput{}, nil
+}
+
+// region Elastic Beanstalk
+
+// ImportBeanstalkInput identifies an Elastic Beanstalk environment to import,
+// by ID or (as a fallback) by name, within a region.
+type ImportBeanstalkInput struct {
+	EnvironmentId   *string `json:"environmentId,omitempty"`
+	EnvironmentName *string `json:"environmentName,omitempty"`
+	Region          *string `json:"region,omitempty"`
+}
+
+// ImportBeanstalkOutput carries the group produced by a Beanstalk import.
+type ImportBeanstalkOutput struct {
+	Group *Group `json:"group,omitempty"`
+}
+
+// BeanstalkMaintenanceInput identifies the group whose Beanstalk maintenance
+// state is being started, queried, or finished.
+type BeanstalkMaintenanceInput struct {
+	GroupID *string `json:"groupId,omitempty"`
+}
+
+// BeanstalkMaintenanceItem is a single status entry in a maintenance response.
+type BeanstalkMaintenanceItem struct {
+	Status *string `json:"status,omitempty"`
+}
+
+// BeanstalkMaintenanceOutput aggregates maintenance status items; Status
+// mirrors the last decoded item's status (see beanstalkMaintResponseFromJSON).
+type BeanstalkMaintenanceOutput struct {
+	Items  []*BeanstalkMaintenanceItem `json:"items,omitempty"`
+	Status *string                     `json:"status,omitempty"`
+}
+
+// beanstalkMaintResponseFromJSON decodes a wrapped API response into a
+// BeanstalkMaintenanceOutput. Note that Status is reassigned on every
+// iteration, so it ends up holding the status of the LAST item.
+func beanstalkMaintResponseFromJSON(in []byte) (*BeanstalkMaintenanceOutput, error) {
+	var rw client.Response
+	if err := json.Unmarshal(in, &rw); err != nil {
+		return nil, err
+	}
+
+	var retVal BeanstalkMaintenanceOutput
+	retVal.Items = make([]*BeanstalkMaintenanceItem, len(rw.Response.Items))
+	for i, rb := range rw.Response.Items {
+		b, err := beanstalkMaintItemFromJSON(rb)
+		if err != nil {
+			return nil, err
+		}
+		retVal.Items[i] = b
+		retVal.Status = b.Status
+	}
+	return &retVal, nil
+}
+
+// beanstalkMaintItemFromJSON decodes a single raw JSON item into a
+// BeanstalkMaintenanceItem.
+func beanstalkMaintItemFromJSON(in []byte) (*BeanstalkMaintenanceItem, error) {
+	var rw *BeanstalkMaintenanceItem
+	if err := json.Unmarshal(in, &rw); err != nil {
+		return nil, err
+	}
+	return rw, nil
+}
+
+// beanstalkMaintFromHttpResponse reads the full response body and decodes it
+// into a BeanstalkMaintenanceOutput.
+// NOTE(review): ioutil.ReadAll is deprecated since Go 1.16 in favor of
+// io.ReadAll; kept as-is to match upstream SDK code.
+func beanstalkMaintFromHttpResponse(resp *http.Response) (*BeanstalkMaintenanceOutput, error) {
+	body, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return nil, err
+	}
+	return beanstalkMaintResponseFromJSON(body)
+}
+
+// ImportBeanstalkEnv imports an Elastic Beanstalk environment as a group via
+// GET /aws/ec2/group/beanstalk/import. EnvironmentId takes precedence over
+// EnvironmentName when both are set; Region is always sent.
+func (s *ServiceOp) ImportBeanstalkEnv(ctx context.Context, input *ImportBeanstalkInput) (*ImportBeanstalkOutput, error) {
+	path := "/aws/ec2/group/beanstalk/import"
+	r := client.NewRequest(http.MethodGet, path)
+
+	if input.EnvironmentId != nil {
+		r.Params["environmentId"] = []string{spotinst.StringValue(input.EnvironmentId)}
+	} else if input.EnvironmentName != nil {
+		r.Params["environmentName"] = []string{spotinst.StringValue(input.EnvironmentName)}
+	}
+
+	r.Params["region"] = []string{spotinst.StringValue(input.Region)}
+
+	resp, err := client.RequireOK(s.Client.Do(ctx, r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	gs, err := groupsFromHttpResponse(resp)
+	if err != nil {
+		return nil, err
+	}
+
+	// The API returns a list; only the first group (if any) is surfaced.
+	output := new(ImportBeanstalkOutput)
+	if len(gs) > 0 {
+		output.Group = gs[0]
+	}
+
+	return output, nil
+}
+
+// StartBeanstalkMaintenance puts the group into Beanstalk maintenance mode via
+// PUT /aws/ec2/group/{groupID}/beanstalk/maintenance/start. The response body
+// is discarded; an empty output signals success.
+func (s *ServiceOp) StartBeanstalkMaintenance(ctx context.Context, input *BeanstalkMaintenanceInput) (*BeanstalkMaintenanceOutput, error) {
+	path, err := uritemplates.Expand("/aws/ec2/group/{groupID}/beanstalk/maintenance/start", uritemplates.Values{
+		"groupID": spotinst.StringValue(input.GroupID),
+	})
+
+	if err != nil {
+		return nil, err
+	}
+
+	r := client.NewRequest(http.MethodPut, path)
+	resp, err := client.RequireOK(s.Client.Do(ctx, r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	return &BeanstalkMaintenanceOutput{}, nil
+}
+
+// GetBeanstalkMaintenanceStatus reports the group's maintenance status via
+// GET /aws/ec2/group/{groupID}/beanstalk/maintenance/status, returning the
+// decoded output's Status field (the last item's status; may be nil).
+func (s *ServiceOp) GetBeanstalkMaintenanceStatus(ctx context.Context, input *BeanstalkMaintenanceInput) (*string, error) {
+	path, err := uritemplates.Expand("/aws/ec2/group/{groupID}/beanstalk/maintenance/status", uritemplates.Values{
+		"groupID": spotinst.StringValue(input.GroupID),
+	})
+
+	if err != nil {
+		return nil, err
+	}
+
+	r := client.NewRequest(http.MethodGet, path)
+	resp, err := client.RequireOK(s.Client.Do(ctx, r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	output, err := beanstalkMaintFromHttpResponse(resp)
+	if err != nil {
+		return nil, err
+	}
+
+	return output.Status, nil
+}
+
+// FinishBeanstalkMaintenance ends the group's Beanstalk maintenance mode via
+// PUT /aws/ec2/group/{groupID}/beanstalk/maintenance/finish. The response body
+// is discarded; an empty output signals success.
+func (s *ServiceOp) FinishBeanstalkMaintenance(ctx context.Context, input *BeanstalkMaintenanceInput) (*BeanstalkMaintenanceOutput, error) {
+	path, err := uritemplates.Expand("/aws/ec2/group/{groupID}/beanstalk/maintenance/finish", uritemplates.Values{
+		"groupID": spotinst.StringValue(input.GroupID),
+	})
+
+	if err != nil {
+		return nil, err
+	}
+
+	r := client.NewRequest(http.MethodPut, path)
+	resp, err := client.RequireOK(s.Client.Do(ctx, r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	return &BeanstalkMaintenanceOutput{}, nil
+}
+
+// endregion
+
+// region Group
+
+// MarshalJSON serializes Group through jsonutil, honoring forceSendFields and
+// nullFields so fields explicitly set to nil are emitted as JSON null.
+func (o Group) MarshalJSON() ([]byte, error) {
+	type noMethod Group
+	raw := noMethod(o)
+	return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+// The fluent setters below assign the field and, when the value is nil,
+// record the field name in nullFields so MarshalJSON emits an explicit null.
+
+func (o *Group) SetId(v *string) *Group {
+	if o.ID = v; o.ID == nil {
+		o.nullFields = append(o.nullFields, "ID")
+	}
+	return o
+}
+
+func (o *Group) SetName(v *string) *Group {
+	if o.Name = v; o.Name == nil {
+		o.nullFields = append(o.nullFields, "Name")
+	}
+	return o
+}
+
+func (o *Group) SetDescription(v *string) *Group {
+	if o.Description = v; o.Description == nil {
+		o.nullFields = append(o.nullFields, "Description")
+	}
+	return o
+}
+
+func (o *Group) SetCapacity(v *Capacity) *Group {
+	if o.Capacity = v; o.Capacity == nil {
+		o.nullFields = append(o.nullFields, "Capacity")
+	}
+	return o
+}
+
+func (o *Group) SetCompute(v *Compute) *Group {
+	if o.Compute = v; o.Compute == nil {
+		o.nullFields = append(o.nullFields, "Compute")
+	}
+	return o
+}
+
+func (o *Group) SetStrategy(v *Strategy) *Group {
+	if o.Strategy = v; o.Strategy == nil {
+		o.nullFields = append(o.nullFields, "Strategy")
+	}
+	return o
+}
+
+func (o *Group) SetScaling(v *Scaling) *Group {
+	if o.Scaling = v; o.Scaling == nil {
+		o.nullFields = append(o.nullFields, "Scaling")
+	}
+	return o
+}
+
+func (o *Group) SetScheduling(v *Scheduling) *Group {
+	if o.Scheduling = v; o.Scheduling == nil {
+		o.nullFields = append(o.nullFields, "Scheduling")
+	}
+	return o
+}
+
+func (o *Group) SetIntegration(v *Integration) *Group {
+	if o.Integration = v; o.Integration == nil {
+		o.nullFields = append(o.nullFields, "Integration")
+	}
+	return o
+}
+
+func (o *Group) SetRegion(v *string) *Group {
+	if o.Region = v; o.Region == nil {
+		o.nullFields = append(o.nullFields, "Region")
+	}
+	return o
+}
+
+// endregion
+
+// region Integration
+
+// MarshalJSON serializes Integration through jsonutil, honoring
+// forceSendFields and nullFields for explicit-null handling.
+func (o Integration) MarshalJSON() ([]byte, error) {
+	type noMethod Integration
+	raw := noMethod(o)
+	return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+// Fluent setters: assign the field and record its name in nullFields when the
+// value is nil, so the field is serialized as an explicit JSON null.
+
+func (o *Integration) SetRoute53(v *Route53Integration) *Integration {
+	if o.Route53 = v; o.Route53 == nil {
+		o.nullFields = append(o.nullFields, "Route53")
+	}
+	return o
+}
+
+func (o *Integration) SetDockerSwarm(v *DockerSwarmIntegration) *Integration {
+	if o.DockerSwarm = v; o.DockerSwarm == nil {
+		o.nullFields = append(o.nullFields, "DockerSwarm")
+	}
+	return o
+}
+
+func (o *Integration) SetEC2ContainerService(v *EC2ContainerServiceIntegration) *Integration {
+	if o.EC2ContainerService = v; o.EC2ContainerService == nil {
+		o.nullFields = append(o.nullFields, "EC2ContainerService")
+	}
+	return o
+}
+
+func (o *Integration) SetElasticBeanstalk(v *ElasticBeanstalkIntegration) *Integration {
+	if o.ElasticBeanstalk = v; o.ElasticBeanstalk == nil {
+		o.nullFields = append(o.nullFields, "ElasticBeanstalk")
+	}
+	return o
+}
+
+func (o *Integration) SetCodeDeploy(v *CodeDeployIntegration) *Integration {
+	if o.CodeDeploy = v; o.CodeDeploy == nil {
+		o.nullFields = append(o.nullFields, "CodeDeploy")
+	}
+	return o
+}
+
+func (o *Integration) SetOpsWorks(v *OpsWorksIntegration) *Integration {
+	if o.OpsWorks = v; o.OpsWorks == nil {
+		o.nullFields = append(o.nullFields, "OpsWorks")
+	}
+	return o
+}
+
+func (o *Integration) SetRancher(v *RancherIntegration) *Integration {
+	if o.Rancher = v; o.Rancher == nil {
+		o.nullFields = append(o.nullFields, "Rancher")
+	}
+	return o
+}
+
+func (o *Integration) SetKubernetes(v *KubernetesIntegration) *Integration {
+	if o.Kubernetes = v; o.Kubernetes == nil {
+		o.nullFields = append(o.nullFields, "Kubernetes")
+	}
+	return o
+}
+
+func (o *Integration) SetMesosphere(v *MesosphereIntegration) *Integration {
+	if o.Mesosphere = v; o.Mesosphere == nil {
+		o.nullFields = append(o.nullFields, "Mesosphere")
+	}
+	return o
+}
+
+func (o *Integration) SetMultai(v *MultaiIntegration) *Integration {
+	if o.Multai = v; o.Multai == nil {
+		o.nullFields = append(o.nullFields, "Multai")
+	}
+	return o
+}
+
+func (o *Integration) SetNomad(v *NomadIntegration) *Integration {
+	if o.Nomad = v; o.Nomad == nil {
+		o.nullFields = append(o.nullFields, "Nomad")
+	}
+	return o
+}
+
+func (o *Integration) SetChef(v *ChefIntegration) *Integration {
+	if o.Chef = v; o.Chef == nil {
+		o.nullFields = append(o.nullFields, "Chef")
+	}
+	return o
+}
+
+func (o *Integration) SetGitlab(v *GitlabIntegration) *Integration {
+	if o.Gitlab = v; o.Gitlab == nil {
+		o.nullFields = append(o.nullFields, "Gitlab")
+	}
+	return o
+}
+
+// endregion
+
+// region RancherIntegration
+
+// MarshalJSON serializes RancherIntegration through jsonutil; setters below
+// record nil assignments in nullFields for explicit JSON nulls.
+func (o RancherIntegration) MarshalJSON() ([]byte, error) {
+	type noMethod RancherIntegration
+	raw := noMethod(o)
+	return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *RancherIntegration) SetMasterHost(v *string) *RancherIntegration {
+	if o.MasterHost = v; o.MasterHost == nil {
+		o.nullFields = append(o.nullFields, "MasterHost")
+	}
+	return o
+}
+
+func (o *RancherIntegration) SetAccessKey(v *string) *RancherIntegration {
+	if o.AccessKey = v; o.AccessKey == nil {
+		o.nullFields = append(o.nullFields, "AccessKey")
+	}
+	return o
+}
+
+func (o *RancherIntegration) SetSecretKey(v *string) *RancherIntegration {
+	if o.SecretKey = v; o.SecretKey == nil {
+		o.nullFields = append(o.nullFields, "SecretKey")
+	}
+	return o
+}
+
+func (o *RancherIntegration) SetVersion(v *string) *RancherIntegration {
+	if o.Version = v; o.Version == nil {
+		o.nullFields = append(o.nullFields, "Version")
+	}
+	return o
+}
+
+// endregion
+
+// region ElasticBeanstalkIntegration
+
+// MarshalJSON serializes ElasticBeanstalkIntegration through jsonutil; the
+// setters record nil assignments in nullFields for explicit JSON nulls.
+func (o ElasticBeanstalkIntegration) MarshalJSON() ([]byte, error) {
+	type noMethod ElasticBeanstalkIntegration
+	raw := noMethod(o)
+	return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *ElasticBeanstalkIntegration) SetEnvironmentID(v *string) *ElasticBeanstalkIntegration {
+	if o.EnvironmentID = v; o.EnvironmentID == nil {
+		o.nullFields = append(o.nullFields, "EnvironmentID")
+	}
+	return o
+}
+
+func (o *ElasticBeanstalkIntegration) SetManagedActions(v *BeanstalkManagedActions) *ElasticBeanstalkIntegration {
+	if o.ManagedActions = v; o.ManagedActions == nil {
+		o.nullFields = append(o.nullFields, "ManagedActions")
+	}
+	return o
+}
+
+func (o *ElasticBeanstalkIntegration) SetDeploymentPreferences(v *BeanstalkDeploymentPreferences) *ElasticBeanstalkIntegration {
+	if o.DeploymentPreferences = v; o.DeploymentPreferences == nil {
+		o.nullFields = append(o.nullFields, "DeploymentPreferences")
+	}
+	return o
+}
+
+// endregion
+
+// region BeanstalkManagedActions
+
+// MarshalJSON serializes BeanstalkManagedActions through jsonutil with
+// forceSendFields/nullFields support.
+func (o BeanstalkManagedActions) MarshalJSON() ([]byte, error) {
+	type noMethod BeanstalkManagedActions
+	raw := noMethod(o)
+	return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *BeanstalkManagedActions) SetPlatformUpdate(v *BeanstalkPlatformUpdate) *BeanstalkManagedActions {
+	if o.PlatformUpdate = v; o.PlatformUpdate == nil {
+		o.nullFields = append(o.nullFields, "PlatformUpdate")
+	}
+	return o
+}
+
+// endregion
+
+// region BeanstalkPlatformUpdate
+
+// MarshalJSON serializes BeanstalkPlatformUpdate through jsonutil; setters
+// record nil assignments in nullFields for explicit JSON nulls.
+func (o BeanstalkPlatformUpdate) MarshalJSON() ([]byte, error) {
+	type noMethod BeanstalkPlatformUpdate
+	raw := noMethod(o)
+	return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *BeanstalkPlatformUpdate) SetPerformAt(v *string) *BeanstalkPlatformUpdate {
+	if o.PerformAt = v; o.PerformAt == nil {
+		o.nullFields = append(o.nullFields, "PerformAt")
+	}
+	return o
+}
+
+func (o *BeanstalkPlatformUpdate) SetTimeWindow(v *string) *BeanstalkPlatformUpdate {
+	if o.TimeWindow = v; o.TimeWindow == nil {
+		o.nullFields = append(o.nullFields, "TimeWindow")
+	}
+	return o
+}
+
+func (o *BeanstalkPlatformUpdate) SetUpdateLevel(v *string) *BeanstalkPlatformUpdate {
+	if o.UpdateLevel = v; o.UpdateLevel == nil {
+		o.nullFields = append(o.nullFields, "UpdateLevel")
+	}
+	return o
+}
+
+// endregion
+
+// region BeanstalkDeploymentPreferences
+
+// MarshalJSON serializes BeanstalkDeploymentPreferences through jsonutil;
+// setters record nil assignments in nullFields for explicit JSON nulls.
+func (o BeanstalkDeploymentPreferences) MarshalJSON() ([]byte, error) {
+	type noMethod BeanstalkDeploymentPreferences
+	raw := noMethod(o)
+	return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *BeanstalkDeploymentPreferences) SetAutomaticRoll(v *bool) *BeanstalkDeploymentPreferences {
+	if o.AutomaticRoll = v; o.AutomaticRoll == nil {
+		o.nullFields = append(o.nullFields, "AutomaticRoll")
+	}
+	return o
+}
+
+func (o *BeanstalkDeploymentPreferences) SetBatchSizePercentage(v *int) *BeanstalkDeploymentPreferences {
+	if o.BatchSizePercentage = v; o.BatchSizePercentage == nil {
+		o.nullFields = append(o.nullFields, "BatchSizePercentage")
+	}
+	return o
+}
+
+func (o *BeanstalkDeploymentPreferences) SetGracePeriod(v *int) *BeanstalkDeploymentPreferences {
+	if o.GracePeriod = v; o.GracePeriod == nil {
+		o.nullFields = append(o.nullFields, "GracePeriod")
+	}
+	return o
+}
+
+func (o *BeanstalkDeploymentPreferences) SetStrategy(v *BeanstalkDeploymentStrategy) *BeanstalkDeploymentPreferences {
+	if o.Strategy = v; o.Strategy == nil {
+		o.nullFields = append(o.nullFields, "Strategy")
+	}
+	return o
+}
+
+// endregion
+
+// region BeanstalkDeploymentStrategy
+
+// MarshalJSON serializes BeanstalkDeploymentStrategy through jsonutil;
+// setters record nil assignments in nullFields for explicit JSON nulls.
+func (o BeanstalkDeploymentStrategy) MarshalJSON() ([]byte, error) {
+	type noMethod BeanstalkDeploymentStrategy
+	raw := noMethod(o)
+	return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *BeanstalkDeploymentStrategy) SetAction(v *string) *BeanstalkDeploymentStrategy {
+	if o.Action = v; o.Action == nil {
+		o.nullFields = append(o.nullFields, "Action")
+	}
+	return o
+}
+
+func (o *BeanstalkDeploymentStrategy) SetShouldDrainInstances(v *bool) *BeanstalkDeploymentStrategy {
+	if o.ShouldDrainInstances = v; o.ShouldDrainInstances == nil {
+		o.nullFields = append(o.nullFields, "ShouldDrainInstances")
+	}
+	return o
+}
+
+// endregion
+
+// region EC2ContainerServiceIntegration
+
+// MarshalJSON serializes EC2ContainerServiceIntegration through jsonutil;
+// setters record nil assignments in nullFields for explicit JSON nulls.
+func (o EC2ContainerServiceIntegration) MarshalJSON() ([]byte, error) {
+	type noMethod EC2ContainerServiceIntegration
+	raw := noMethod(o)
+	return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *EC2ContainerServiceIntegration) SetClusterName(v *string) *EC2ContainerServiceIntegration {
+	if o.ClusterName = v; o.ClusterName == nil {
+		o.nullFields = append(o.nullFields, "ClusterName")
+	}
+	return o
+}
+
+func (o *EC2ContainerServiceIntegration) SetAutoScale(v *AutoScaleECS) *EC2ContainerServiceIntegration {
+	if o.AutoScale = v; o.AutoScale == nil {
+		o.nullFields = append(o.nullFields, "AutoScale")
+	}
+	return o
+}
+
+func (o *EC2ContainerServiceIntegration) SetBatch(v *Batch) *EC2ContainerServiceIntegration {
+	if o.Batch = v; o.Batch == nil {
+		o.nullFields = append(o.nullFields, "Batch")
+	}
+	return o
+}
+
+// endregion
+
+// region AutoScaleECS
+
+// MarshalJSON serializes AutoScaleECS through jsonutil; setters record nil
+// assignments in nullFields for explicit JSON nulls.
+func (o AutoScaleECS) MarshalJSON() ([]byte, error) {
+	type noMethod AutoScaleECS
+	raw := noMethod(o)
+	return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *AutoScaleECS) SetAttributes(v []*AutoScaleAttributes) *AutoScaleECS {
+	if o.Attributes = v; o.Attributes == nil {
+		o.nullFields = append(o.nullFields, "Attributes")
+	}
+	return o
+}
+
+func (o *AutoScaleECS) SetShouldScaleDownNonServiceTasks(v *bool) *AutoScaleECS {
+	if o.ShouldScaleDownNonServiceTasks = v; o.ShouldScaleDownNonServiceTasks == nil {
+		o.nullFields = append(o.nullFields, "ShouldScaleDownNonServiceTasks")
+	}
+	return o
+}
+
+// endregion
+
+// region Batch
+
+// MarshalJSON serializes Batch through jsonutil; the setter records a nil
+// assignment in nullFields for an explicit JSON null.
+func (o Batch) MarshalJSON() ([]byte, error) {
+	type noMethod Batch
+	raw := noMethod(o)
+	return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *Batch) SetJobQueueNames(v []string) *Batch {
+	if o.JobQueueNames = v; o.JobQueueNames == nil {
+		o.nullFields = append(o.nullFields, "JobQueueNames")
+	}
+	return o
+}
+
+// endregion
+
+// region DockerSwarmIntegration
+
+// MarshalJSON serializes DockerSwarmIntegration through jsonutil; setters
+// record nil assignments in nullFields for explicit JSON nulls.
+func (o DockerSwarmIntegration) MarshalJSON() ([]byte, error) {
+	type noMethod DockerSwarmIntegration
+	raw := noMethod(o)
+	return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *DockerSwarmIntegration) SetMasterHost(v *string) *DockerSwarmIntegration {
+	if o.MasterHost = v; o.MasterHost == nil {
+		o.nullFields = append(o.nullFields, "MasterHost")
+	}
+	return o
+}
+
+func (o *DockerSwarmIntegration) SetMasterPort(v *int) *DockerSwarmIntegration {
+	if o.MasterPort = v; o.MasterPort == nil {
+		o.nullFields = append(o.nullFields, "MasterPort")
+	}
+	return o
+}
+
+func (o *DockerSwarmIntegration) SetAutoScale(v *AutoScaleDockerSwarm) *DockerSwarmIntegration {
+	if o.AutoScale = v; o.AutoScale == nil {
+		o.nullFields = append(o.nullFields, "AutoScale")
+	}
+	return o
+}
+
+// MarshalJSON serializes AutoScaleDockerSwarm through jsonutil.
+func (o AutoScaleDockerSwarm) MarshalJSON() ([]byte, error) {
+	type noMethod AutoScaleDockerSwarm
+	raw := noMethod(o)
+	return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+// endregion
+
+// region Route53
+
+// MarshalJSON serializes Route53Integration through jsonutil; the setter
+// records a nil assignment in nullFields for an explicit JSON null.
+func (o Route53Integration) MarshalJSON() ([]byte, error) {
+	type noMethod Route53Integration
+	raw := noMethod(o)
+	return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *Route53Integration) SetDomains(v []*Domain) *Route53Integration {
+	if o.Domains = v; o.Domains == nil {
+		o.nullFields = append(o.nullFields, "Domains")
+	}
+	return o
+}
+
+// endregion
+
+// region Domain
+
+// MarshalJSON serializes Domain through jsonutil; setters record nil
+// assignments in nullFields for explicit JSON nulls.
+func (o Domain) MarshalJSON() ([]byte, error) {
+	type noMethod Domain
+	raw := noMethod(o)
+	return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *Domain) SetHostedZoneID(v *string) *Domain {
+	if o.HostedZoneID = v; o.HostedZoneID == nil {
+		o.nullFields = append(o.nullFields, "HostedZoneID")
+	}
+	return o
+}
+
+func (o *Domain) SetSpotinstAccountID(v *string) *Domain {
+	if o.SpotinstAccountID = v; o.SpotinstAccountID == nil {
+		o.nullFields = append(o.nullFields, "SpotinstAccountID")
+	}
+	return o
+}
+
+func (o *Domain) SetRecordSetType(v *string) *Domain {
+	if o.RecordSetType = v; o.RecordSetType == nil {
+		o.nullFields = append(o.nullFields, "RecordSetType")
+	}
+	return o
+}
+
+func (o *Domain) SetRecordSets(v []*RecordSet) *Domain {
+	if o.RecordSets = v; o.RecordSets == nil {
+		o.nullFields = append(o.nullFields, "RecordSets")
+	}
+	return o
+}
+
+// endregion
+
+// region RecordSets
+
+// MarshalJSON serializes RecordSet through jsonutil; setters record nil
+// assignments in nullFields for explicit JSON nulls.
+func (o RecordSet) MarshalJSON() ([]byte, error) {
+	type noMethod RecordSet
+	raw := noMethod(o)
+	return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *RecordSet) SetName(v *string) *RecordSet {
+	if o.Name = v; o.Name == nil {
+		o.nullFields = append(o.nullFields, "Name")
+	}
+	return o
+}
+
+func (o *RecordSet) SetUsePublicIP(v *bool) *RecordSet {
+	if o.UsePublicIP = v; o.UsePublicIP == nil {
+		o.nullFields = append(o.nullFields, "UsePublicIP")
+	}
+	return o
+}
+
+func (o *RecordSet) SetUsePublicDNS(v *bool) *RecordSet {
+	if o.UsePublicDNS = v; o.UsePublicDNS == nil {
+		o.nullFields = append(o.nullFields, "UsePublicDNS")
+	}
+	return o
+}
+
+// endregion
+
+// region AutoScale
+
+// MarshalJSON serializes AutoScale through jsonutil; setters record nil
+// assignments in nullFields for explicit JSON nulls.
+func (o AutoScale) MarshalJSON() ([]byte, error) {
+	type noMethod AutoScale
+	raw := noMethod(o)
+	return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *AutoScale) SetIsEnabled(v *bool) *AutoScale {
+	if o.IsEnabled = v; o.IsEnabled == nil {
+		o.nullFields = append(o.nullFields, "IsEnabled")
+	}
+	return o
+}
+
+func (o *AutoScale) SetIsAutoConfig(v *bool) *AutoScale {
+	if o.IsAutoConfig = v; o.IsAutoConfig == nil {
+		o.nullFields = append(o.nullFields, "IsAutoConfig")
+	}
+	return o
+}
+
+func (o *AutoScale) SetCooldown(v *int) *AutoScale {
+	if o.Cooldown = v; o.Cooldown == nil {
+		o.nullFields = append(o.nullFields, "Cooldown")
+	}
+	return o
+}
+
+func (o *AutoScale) SetHeadroom(v *AutoScaleHeadroom) *AutoScale {
+	if o.Headroom = v; o.Headroom == nil {
+		o.nullFields = append(o.nullFields, "Headroom")
+	}
+	return o
+}
+
+func (o *AutoScale) SetDown(v *AutoScaleDown) *AutoScale {
+	if o.Down = v; o.Down == nil {
+		o.nullFields = append(o.nullFields, "Down")
+	}
+	return o
+}
+
+// endregion
+
+// region AutoScaleHeadroom
+
+// MarshalJSON serializes AutoScaleHeadroom through jsonutil; setters record
+// nil assignments in nullFields for explicit JSON nulls.
+func (o AutoScaleHeadroom) MarshalJSON() ([]byte, error) {
+	type noMethod AutoScaleHeadroom
+	raw := noMethod(o)
+	return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *AutoScaleHeadroom) SetCPUPerUnit(v *int) *AutoScaleHeadroom {
+	if o.CPUPerUnit = v; o.CPUPerUnit == nil {
+		o.nullFields = append(o.nullFields, "CPUPerUnit")
+	}
+	return o
+}
+
+func (o *AutoScaleHeadroom) SetGPUPerUnit(v *int) *AutoScaleHeadroom {
+	if o.GPUPerUnit = v; o.GPUPerUnit == nil {
+		o.nullFields = append(o.nullFields, "GPUPerUnit")
+	}
+	return o
+}
+
+func (o *AutoScaleHeadroom) SetMemoryPerUnit(v *int) *AutoScaleHeadroom {
+	if o.MemoryPerUnit = v; o.MemoryPerUnit == nil {
+		o.nullFields = append(o.nullFields, "MemoryPerUnit")
+	}
+	return o
+}
+
+func (o *AutoScaleHeadroom) SetNumOfUnits(v *int) *AutoScaleHeadroom {
+	if o.NumOfUnits = v; o.NumOfUnits == nil {
+		o.nullFields = append(o.nullFields, "NumOfUnits")
+	}
+	return o
+}
+
+// endregion
+
+// region AutoScaleDown
+
+// MarshalJSON serializes AutoScaleDown through jsonutil; setters record nil
+// assignments in nullFields for explicit JSON nulls.
+func (o AutoScaleDown) MarshalJSON() ([]byte, error) {
+	type noMethod AutoScaleDown
+	raw := noMethod(o)
+	return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *AutoScaleDown) SetEvaluationPeriods(v *int) *AutoScaleDown {
+	if o.EvaluationPeriods = v; o.EvaluationPeriods == nil {
+		o.nullFields = append(o.nullFields, "EvaluationPeriods")
+	}
+	return o
+}
+
+func (o *AutoScaleDown) SetMaxScaleDownPercentage(v *float64) *AutoScaleDown {
+	if o.MaxScaleDownPercentage = v; o.MaxScaleDownPercentage == nil {
+		o.nullFields = append(o.nullFields, "MaxScaleDownPercentage")
+	}
+	return o
+}
+
+// endregion
+
+// region AutoScaleConstraint
+
+// MarshalJSON serializes AutoScaleConstraint through jsonutil; setters record
+// nil assignments in nullFields for explicit JSON nulls.
+func (o AutoScaleConstraint) MarshalJSON() ([]byte, error) {
+	type noMethod AutoScaleConstraint
+	raw := noMethod(o)
+	return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *AutoScaleConstraint) SetKey(v *string) *AutoScaleConstraint {
+	if o.Key = v; o.Key == nil {
+		o.nullFields = append(o.nullFields, "Key")
+	}
+	return o
+}
+
+func (o *AutoScaleConstraint) SetValue(v *string) *AutoScaleConstraint {
+	if o.Value = v; o.Value == nil {
+		o.nullFields = append(o.nullFields, "Value")
+	}
+	return o
+}
+
+// endregion
+
+// region AutoScaleLabel
+
+// MarshalJSON serializes AutoScaleLabel through jsonutil; setters record nil
+// assignments in nullFields for explicit JSON nulls.
+func (o AutoScaleLabel) MarshalJSON() ([]byte, error) {
+	type noMethod AutoScaleLabel
+	raw := noMethod(o)
+	return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *AutoScaleLabel) SetKey(v *string) *AutoScaleLabel {
+	if o.Key = v; o.Key == nil {
+		o.nullFields = append(o.nullFields, "Key")
+	}
+	return o
+}
+
+func (o *AutoScaleLabel) SetValue(v *string) *AutoScaleLabel {
+	if o.Value = v; o.Value == nil {
+		o.nullFields = append(o.nullFields, "Value")
+	}
+	return o
+}
+
+// endregion
+
+// region KubernetesIntegration
+
+// MarshalJSON serializes KubernetesIntegration through jsonutil; setters
+// record nil assignments in nullFields for explicit JSON nulls.
+func (o KubernetesIntegration) MarshalJSON() ([]byte, error) {
+	type noMethod KubernetesIntegration
+	raw := noMethod(o)
+	return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *KubernetesIntegration) SetIntegrationMode(v *string) *KubernetesIntegration {
+	if o.IntegrationMode = v; o.IntegrationMode == nil {
+		o.nullFields = append(o.nullFields, "IntegrationMode")
+	}
+	return o
+}
+
+func (o *KubernetesIntegration) SetClusterIdentifier(v *string) *KubernetesIntegration {
+	if o.ClusterIdentifier = v; o.ClusterIdentifier == nil {
+		o.nullFields = append(o.nullFields, "ClusterIdentifier")
+	}
+	return o
+}
+
+func (o *KubernetesIntegration) SetServer(v *string) *KubernetesIntegration {
+	if o.Server = v; o.Server == nil {
+		o.nullFields = append(o.nullFields, "Server")
+	}
+	return o
+}
+
+func (o *KubernetesIntegration) SetToken(v *string) *KubernetesIntegration {
+	if o.Token = v; o.Token == nil {
+		o.nullFields = append(o.nullFields, "Token")
+	}
+	return o
+}
+
+func (o *KubernetesIntegration) SetAutoScale(v *AutoScaleKubernetes) *KubernetesIntegration {
+	if o.AutoScale = v; o.AutoScale == nil {
+		o.nullFields = append(o.nullFields, "AutoScale")
+	}
+	return o
+}
+
+// MarshalJSON serializes AutoScaleKubernetes through jsonutil.
+func (o AutoScaleKubernetes) MarshalJSON() ([]byte, error) {
+	type noMethod AutoScaleKubernetes
+	raw := noMethod(o)
+	return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *AutoScaleKubernetes) SetLabels(v []*AutoScaleLabel) *AutoScaleKubernetes {
+	if o.Labels = v; o.Labels == nil {
+		o.nullFields = append(o.nullFields, "Labels")
+	}
+	return o
+}
+
+// endregion
+
+// region MesosphereIntegration
+
+// MarshalJSON serializes MesosphereIntegration through jsonutil; the setter
+// records a nil assignment in nullFields for an explicit JSON null.
+func (o MesosphereIntegration) MarshalJSON() ([]byte, error) {
+	type noMethod MesosphereIntegration
+	raw := noMethod(o)
+	return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *MesosphereIntegration) SetServer(v *string) *MesosphereIntegration {
+	if o.Server = v; o.Server == nil {
+		o.nullFields = append(o.nullFields, "Server")
+	}
+	return o
+}
+
+// endregion
+
+// region MultaiIntegration
+
+// MarshalJSON serializes MultaiIntegration through jsonutil; the setter
+// records a nil assignment in nullFields for an explicit JSON null.
+func (o MultaiIntegration) MarshalJSON() ([]byte, error) {
+	type noMethod MultaiIntegration
+	raw := noMethod(o)
+	return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *MultaiIntegration) SetDeploymentId(v *string) *MultaiIntegration {
+	if o.DeploymentID = v; o.DeploymentID == nil {
+		o.nullFields = append(o.nullFields, "DeploymentID")
+	}
+	return o
+}
+
+// endregion
+
+// region NomadIntegration
+
+// MarshalJSON serializes NomadIntegration through jsonutil; setters record
+// nil assignments in nullFields for explicit JSON nulls.
+func (o NomadIntegration) MarshalJSON() ([]byte, error) {
+	type noMethod NomadIntegration
+	raw := noMethod(o)
+	return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *NomadIntegration) SetMasterHost(v *string) *NomadIntegration {
+	if o.MasterHost = v; o.MasterHost == nil {
+		o.nullFields = append(o.nullFields, "MasterHost")
+	}
+	return o
+}
+
+func (o *NomadIntegration) SetMasterPort(v *int) *NomadIntegration {
+	if o.MasterPort = v; o.MasterPort == nil {
+		o.nullFields = append(o.nullFields, "MasterPort")
+	}
+	return o
+}
+
+func (o *NomadIntegration) SetAclToken(v *string) *NomadIntegration {
+	if o.ACLToken = v; o.ACLToken == nil {
+		o.nullFields = append(o.nullFields, "ACLToken")
+	}
+	return o
+}
+
+func (o *NomadIntegration) SetAutoScale(v *AutoScaleNomad) *NomadIntegration {
+	if o.AutoScale = v; o.AutoScale == nil {
+		o.nullFields = append(o.nullFields, "AutoScale")
+	}
+	return o
+}
+
+// MarshalJSON serializes AutoScaleNomad through jsonutil.
+func (o AutoScaleNomad) MarshalJSON() ([]byte, error) {
+	type noMethod AutoScaleNomad
+	raw := noMethod(o)
+	return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *AutoScaleNomad) SetConstraints(v []*AutoScaleConstraint) *AutoScaleNomad {
+	if o.Constraints = v; o.Constraints == nil {
+		o.nullFields = append(o.nullFields, "Constraints")
+	}
+	return o
+}
+
+// endregion
+
+// region ChefIntegration
+
+func (o ChefIntegration) MarshalJSON() ([]byte, error) {
+ type noMethod ChefIntegration
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *ChefIntegration) SetServer(v *string) *ChefIntegration {
+ if o.Server = v; o.Server == nil {
+ o.nullFields = append(o.nullFields, "Server")
+ }
+ return o
+}
+
+func (o *ChefIntegration) SetOrganization(v *string) *ChefIntegration {
+ if o.Organization = v; o.Organization == nil {
+ o.nullFields = append(o.nullFields, "Organization")
+ }
+ return o
+}
+
+func (o *ChefIntegration) SetUser(v *string) *ChefIntegration {
+ if o.User = v; o.User == nil {
+ o.nullFields = append(o.nullFields, "User")
+ }
+ return o
+}
+
+func (o *ChefIntegration) SetPEMKey(v *string) *ChefIntegration {
+ if o.PEMKey = v; o.PEMKey == nil {
+ o.nullFields = append(o.nullFields, "PEMKey")
+ }
+ return o
+}
+
+func (o *ChefIntegration) SetVersion(v *string) *ChefIntegration {
+ if o.Version = v; o.Version == nil {
+ o.nullFields = append(o.nullFields, "Version")
+ }
+ return o
+}
+
+// endregion
+
+// region Gitlab
+
+func (o GitlabIntegration) MarshalJSON() ([]byte, error) {
+ type noMethod GitlabIntegration
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *GitlabIntegration) SetRunner(v *GitlabRunner) *GitlabIntegration {
+ if o.Runner = v; o.Runner == nil {
+ o.nullFields = append(o.nullFields, "Runner")
+ }
+ return o
+}
+
+func (o GitlabRunner) MarshalJSON() ([]byte, error) {
+ type noMethod GitlabRunner
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *GitlabRunner) SetIsEnabled(v *bool) *GitlabRunner {
+ if o.IsEnabled = v; o.IsEnabled == nil {
+ o.nullFields = append(o.nullFields, "IsEnabled")
+ }
+ return o
+}
+
+// endregion
+
+// region Scheduling
+
+func (o Scheduling) MarshalJSON() ([]byte, error) {
+ type noMethod Scheduling
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *Scheduling) SetTasks(v []*Task) *Scheduling {
+ if o.Tasks = v; o.Tasks == nil {
+ o.nullFields = append(o.nullFields, "Tasks")
+ }
+ return o
+}
+
+// endregion
+
+// region Task
+
+func (o Task) MarshalJSON() ([]byte, error) {
+ type noMethod Task
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *Task) SetIsEnabled(v *bool) *Task {
+ if o.IsEnabled = v; o.IsEnabled == nil {
+ o.nullFields = append(o.nullFields, "IsEnabled")
+ }
+ return o
+}
+
+func (o *Task) SetType(v *string) *Task {
+ if o.Type = v; o.Type == nil {
+ o.nullFields = append(o.nullFields, "Type")
+ }
+ return o
+}
+
+func (o *Task) SetFrequency(v *string) *Task {
+ if o.Frequency = v; o.Frequency == nil {
+ o.nullFields = append(o.nullFields, "Frequency")
+ }
+ return o
+}
+
+func (o *Task) SetCronExpression(v *string) *Task {
+ if o.CronExpression = v; o.CronExpression == nil {
+ o.nullFields = append(o.nullFields, "CronExpression")
+ }
+ return o
+}
+
+func (o *Task) SetStartTime(v *string) *Task {
+ if o.StartTime = v; o.StartTime == nil {
+ o.nullFields = append(o.nullFields, "StartTime")
+ }
+ return o
+}
+
+func (o *Task) SetScaleTargetCapacity(v *int) *Task {
+ if o.ScaleTargetCapacity = v; o.ScaleTargetCapacity == nil {
+ o.nullFields = append(o.nullFields, "ScaleTargetCapacity")
+ }
+ return o
+}
+
+func (o *Task) SetScaleMinCapacity(v *int) *Task {
+ if o.ScaleMinCapacity = v; o.ScaleMinCapacity == nil {
+ o.nullFields = append(o.nullFields, "ScaleMinCapacity")
+ }
+ return o
+}
+
+func (o *Task) SetScaleMaxCapacity(v *int) *Task {
+ if o.ScaleMaxCapacity = v; o.ScaleMaxCapacity == nil {
+ o.nullFields = append(o.nullFields, "ScaleMaxCapacity")
+ }
+ return o
+}
+
+func (o *Task) SetBatchSizePercentage(v *int) *Task {
+ if o.BatchSizePercentage = v; o.BatchSizePercentage == nil {
+ o.nullFields = append(o.nullFields, "BatchSizePercentage")
+ }
+ return o
+}
+
+func (o *Task) SetGracePeriod(v *int) *Task {
+ if o.GracePeriod = v; o.GracePeriod == nil {
+ o.nullFields = append(o.nullFields, "GracePeriod")
+ }
+ return o
+}
+
+func (o *Task) SetTargetCapacity(v *int) *Task {
+ if o.TargetCapacity = v; o.TargetCapacity == nil {
+ o.nullFields = append(o.nullFields, "TargetCapacity")
+ }
+ return o
+}
+
+func (o *Task) SetMinCapacity(v *int) *Task {
+ if o.MinCapacity = v; o.MinCapacity == nil {
+ o.nullFields = append(o.nullFields, "MinCapacity")
+ }
+ return o
+}
+
+func (o *Task) SetMaxCapacity(v *int) *Task {
+ if o.MaxCapacity = v; o.MaxCapacity == nil {
+ o.nullFields = append(o.nullFields, "MaxCapacity")
+ }
+ return o
+}
+
+func (o *Task) SetAdjustment(v *int) *Task {
+ if o.Adjustment = v; o.Adjustment == nil {
+ o.nullFields = append(o.nullFields, "Adjustment")
+ }
+ return o
+}
+
+func (o *Task) SetAdjustmentPercentage(v *int) *Task {
+ if o.AdjustmentPercentage = v; o.AdjustmentPercentage == nil {
+ o.nullFields = append(o.nullFields, "AdjustmentPercentage")
+ }
+ return o
+}
+
+// endregion
+
+// region Scaling
+
+func (o Scaling) MarshalJSON() ([]byte, error) {
+ type noMethod Scaling
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *Scaling) SetUp(v []*ScalingPolicy) *Scaling {
+ if o.Up = v; o.Up == nil {
+ o.nullFields = append(o.nullFields, "Up")
+ }
+ return o
+}
+
+func (o *Scaling) SetDown(v []*ScalingPolicy) *Scaling {
+ if o.Down = v; o.Down == nil {
+ o.nullFields = append(o.nullFields, "Down")
+ }
+ return o
+}
+
+func (o *Scaling) SetTarget(v []*ScalingPolicy) *Scaling {
+ if o.Target = v; o.Target == nil {
+ o.nullFields = append(o.nullFields, "Target")
+ }
+ return o
+}
+
+func (o *Scaling) SetMultipleMetrics(v *MultipleMetrics) *Scaling {
+ if o.MultipleMetrics = v; o.MultipleMetrics == nil {
+ o.nullFields = append(o.nullFields, "MultipleMetrics")
+ }
+ return o
+}
+
+// endregion
+
+// region ScalingPolicy
+
+func (o ScalingPolicy) MarshalJSON() ([]byte, error) {
+ type noMethod ScalingPolicy
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *ScalingPolicy) SetPolicyName(v *string) *ScalingPolicy {
+ if o.PolicyName = v; o.PolicyName == nil {
+ o.nullFields = append(o.nullFields, "PolicyName")
+ }
+ return o
+}
+
+func (o *ScalingPolicy) SetMetricName(v *string) *ScalingPolicy {
+ if o.MetricName = v; o.MetricName == nil {
+ o.nullFields = append(o.nullFields, "MetricName")
+ }
+ return o
+}
+
+func (o *ScalingPolicy) SetNamespace(v *string) *ScalingPolicy {
+ if o.Namespace = v; o.Namespace == nil {
+ o.nullFields = append(o.nullFields, "Namespace")
+ }
+ return o
+}
+
+func (o *ScalingPolicy) SetSource(v *string) *ScalingPolicy {
+ if o.Source = v; o.Source == nil {
+ o.nullFields = append(o.nullFields, "Source")
+ }
+ return o
+}
+
+func (o *ScalingPolicy) SetStatistic(v *string) *ScalingPolicy {
+ if o.Statistic = v; o.Statistic == nil {
+ o.nullFields = append(o.nullFields, "Statistic")
+ }
+ return o
+}
+
+func (o *ScalingPolicy) SetUnit(v *string) *ScalingPolicy {
+ if o.Unit = v; o.Unit == nil {
+ o.nullFields = append(o.nullFields, "Unit")
+ }
+ return o
+}
+
+func (o *ScalingPolicy) SetThreshold(v *float64) *ScalingPolicy {
+ if o.Threshold = v; o.Threshold == nil {
+ o.nullFields = append(o.nullFields, "Threshold")
+ }
+ return o
+}
+
+func (o *ScalingPolicy) SetAdjustment(v *int) *ScalingPolicy {
+ if o.Adjustment = v; o.Adjustment == nil {
+ o.nullFields = append(o.nullFields, "Adjustment")
+ }
+ return o
+}
+
+func (o *ScalingPolicy) SetMinTargetCapacity(v *int) *ScalingPolicy {
+ if o.MinTargetCapacity = v; o.MinTargetCapacity == nil {
+ o.nullFields = append(o.nullFields, "MinTargetCapacity")
+ }
+ return o
+}
+
+func (o *ScalingPolicy) SetMaxTargetCapacity(v *int) *ScalingPolicy {
+ if o.MaxTargetCapacity = v; o.MaxTargetCapacity == nil {
+ o.nullFields = append(o.nullFields, "MaxTargetCapacity")
+ }
+ return o
+}
+
+func (o *ScalingPolicy) SetEvaluationPeriods(v *int) *ScalingPolicy {
+ if o.EvaluationPeriods = v; o.EvaluationPeriods == nil {
+ o.nullFields = append(o.nullFields, "EvaluationPeriods")
+ }
+ return o
+}
+
+func (o *ScalingPolicy) SetPeriod(v *int) *ScalingPolicy {
+ if o.Period = v; o.Period == nil {
+ o.nullFields = append(o.nullFields, "Period")
+ }
+ return o
+}
+
+func (o *ScalingPolicy) SetCooldown(v *int) *ScalingPolicy {
+ if o.Cooldown = v; o.Cooldown == nil {
+ o.nullFields = append(o.nullFields, "Cooldown")
+ }
+ return o
+}
+
+func (o *ScalingPolicy) SetOperator(v *string) *ScalingPolicy {
+ if o.Operator = v; o.Operator == nil {
+ o.nullFields = append(o.nullFields, "Operator")
+ }
+ return o
+}
+
+func (o *ScalingPolicy) SetDimensions(v []*Dimension) *ScalingPolicy {
+ if o.Dimensions = v; o.Dimensions == nil {
+ o.nullFields = append(o.nullFields, "Dimensions")
+ }
+ return o
+}
+
+func (o *ScalingPolicy) SetPredictive(v *Predictive) *ScalingPolicy {
+ if o.Predictive = v; o.Predictive == nil {
+ o.nullFields = append(o.nullFields, "Predictive")
+ }
+ return o
+}
+
+func (o *ScalingPolicy) SetAction(v *Action) *ScalingPolicy {
+ if o.Action = v; o.Action == nil {
+ o.nullFields = append(o.nullFields, "Action")
+ }
+ return o
+}
+
+func (o *ScalingPolicy) SetTarget(v *float64) *ScalingPolicy {
+ if o.Target = v; o.Target == nil {
+ o.nullFields = append(o.nullFields, "Target")
+ }
+ return o
+}
+
+func (o *ScalingPolicy) SetIsEnabled(v *bool) *ScalingPolicy {
+ if o.IsEnabled = v; o.IsEnabled == nil {
+ o.nullFields = append(o.nullFields, "IsEnabled")
+ }
+ return o
+}
+
+func (o *ScalingPolicy) SetMaxCapacityPerScale(v *string) *ScalingPolicy {
+ if o.MaxCapacityPerScale = v; o.MaxCapacityPerScale == nil {
+ o.nullFields = append(o.nullFields, "MaxCapacityPerScale")
+ }
+ return o
+}
+
+func (o *ScalingPolicy) SetStepAdjustments(v []*StepAdjustment) *ScalingPolicy {
+ if o.StepAdjustments = v; o.StepAdjustments == nil {
+ o.nullFields = append(o.nullFields, "StepAdjustments")
+ }
+ return o
+}
+
+// endregion
+
+// region MultipleMetrics
+
+func (o MultipleMetrics) MarshalJSON() ([]byte, error) {
+ type noMethod MultipleMetrics
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *MultipleMetrics) SetExpressions(v []*Expressions) *MultipleMetrics {
+ if o.Expressions = v; o.Expressions == nil {
+ o.nullFields = append(o.nullFields, "Expressions")
+ }
+ return o
+}
+
+func (o *MultipleMetrics) SetMetrics(v []*Metrics) *MultipleMetrics {
+ if o.Metrics = v; o.Metrics == nil {
+ o.nullFields = append(o.nullFields, "Metrics")
+ }
+ return o
+}
+
+// endregion
+
+// region Metrics
+
+func (o Metrics) MarshalJSON() ([]byte, error) {
+ type noMethod Metrics
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *Metrics) SetMetricName(v *string) *Metrics {
+ if o.MetricName = v; o.MetricName == nil {
+ o.nullFields = append(o.nullFields, "MetricName")
+ }
+ return o
+}
+
+func (o *Metrics) SetNamespace(v *string) *Metrics {
+ if o.Namespace = v; o.Namespace == nil {
+ o.nullFields = append(o.nullFields, "Namespace")
+ }
+ return o
+}
+
+func (o *Metrics) SetDimensions(v []*Dimension) *Metrics {
+ if o.Dimensions = v; o.Dimensions == nil {
+ o.nullFields = append(o.nullFields, "Dimensions")
+ }
+ return o
+}
+
+func (o *Metrics) SetName(v *string) *Metrics {
+ if o.Name = v; o.Name == nil {
+ o.nullFields = append(o.nullFields, "Name")
+ }
+ return o
+}
+
+func (o *Metrics) SetExtendedStatistic(v *string) *Metrics {
+ if o.ExtendedStatistic = v; o.ExtendedStatistic == nil {
+ o.nullFields = append(o.nullFields, "ExtendedStatistic")
+ }
+ return o
+}
+
+func (o *Metrics) SetStatistic(v *string) *Metrics {
+ if o.Statistic = v; o.Statistic == nil {
+ o.nullFields = append(o.nullFields, "Statistic")
+ }
+ return o
+}
+
+func (o *Metrics) SetUnit(v *string) *Metrics {
+ if o.Unit = v; o.Unit == nil {
+ o.nullFields = append(o.nullFields, "Unit")
+ }
+ return o
+}
+
+// endregion
+
+// region Expression
+
+func (o Expressions) MarshalJSON() ([]byte, error) {
+ type noMethod Expressions
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *Expressions) SetExpression(v *string) *Expressions {
+ if o.Expression = v; o.Expression == nil {
+ o.nullFields = append(o.nullFields, "Expression")
+ }
+ return o
+}
+
+func (o *Expressions) SetName(v *string) *Expressions {
+ if o.Name = v; o.Name == nil {
+ o.nullFields = append(o.nullFields, "Name")
+ }
+ return o
+}
+
+// endregion
+
+// region Action
+
+func (o Action) MarshalJSON() ([]byte, error) {
+ type noMethod Action
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *Action) SetType(v *string) *Action {
+ if o.Type = v; o.Type == nil {
+ o.nullFields = append(o.nullFields, "Type")
+ }
+ return o
+}
+
+func (o *Action) SetAdjustment(v *string) *Action {
+ if o.Adjustment = v; o.Adjustment == nil {
+ o.nullFields = append(o.nullFields, "Adjustment")
+ }
+ return o
+}
+
+func (o *Action) SetMinTargetCapacity(v *string) *Action {
+ if o.MinTargetCapacity = v; o.MinTargetCapacity == nil {
+ o.nullFields = append(o.nullFields, "MinTargetCapacity")
+ }
+ return o
+}
+
+func (o *Action) SetMaxTargetCapacity(v *string) *Action {
+ if o.MaxTargetCapacity = v; o.MaxTargetCapacity == nil {
+ o.nullFields = append(o.nullFields, "MaxTargetCapacity")
+ }
+ return o
+}
+
+func (o *Action) SetMaximum(v *string) *Action {
+ if o.Maximum = v; o.Maximum == nil {
+ o.nullFields = append(o.nullFields, "Maximum")
+ }
+ return o
+}
+
+func (o *Action) SetMinimum(v *string) *Action {
+ if o.Minimum = v; o.Minimum == nil {
+ o.nullFields = append(o.nullFields, "Minimum")
+ }
+ return o
+}
+
+func (o *Action) SetTarget(v *string) *Action {
+ if o.Target = v; o.Target == nil {
+ o.nullFields = append(o.nullFields, "Target")
+ }
+ return o
+}
+
+// endregion
+
+// region Dimension
+
+func (o Dimension) MarshalJSON() ([]byte, error) {
+ type noMethod Dimension
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *Dimension) SetName(v *string) *Dimension {
+ if o.Name = v; o.Name == nil {
+ o.nullFields = append(o.nullFields, "Name")
+ }
+ return o
+}
+
+func (o *Dimension) SetValue(v *string) *Dimension {
+ if o.Value = v; o.Value == nil {
+ o.nullFields = append(o.nullFields, "Value")
+ }
+ return o
+}
+
+// endregion
+
+// region Predictive
+
+func (o *Predictive) MarshalJSON() ([]byte, error) {
+ type noMethod Predictive
+ raw := noMethod(*o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *Predictive) SetMode(v *string) *Predictive {
+ if o.Mode = v; o.Mode == nil {
+ o.nullFields = append(o.nullFields, "Mode")
+ }
+ return o
+}
+
+// endregion
+
+// region StepAdjustments
+
+func (o StepAdjustment) MarshalJSON() ([]byte, error) {
+ type noMethod StepAdjustment
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *StepAdjustment) SetAction(v *Action) *StepAdjustment {
+ if o.Action = v; o.Action == nil {
+ o.nullFields = append(o.nullFields, "Action")
+ }
+ return o
+}
+
+func (o *StepAdjustment) SetThreshold(v *int) *StepAdjustment {
+ if o.Threshold = v; o.Threshold == nil {
+ o.nullFields = append(o.nullFields, "Threshold")
+ }
+ return o
+}
+
+// endregion
+
+// region Strategy
+
+func (o Strategy) MarshalJSON() ([]byte, error) {
+ type noMethod Strategy
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *Strategy) SetRisk(v *float64) *Strategy {
+ if o.Risk = v; o.Risk == nil {
+ o.nullFields = append(o.nullFields, "Risk")
+ }
+ return o
+}
+
+func (o *Strategy) SetOnDemandCount(v *int) *Strategy {
+ if o.OnDemandCount = v; o.OnDemandCount == nil {
+ o.nullFields = append(o.nullFields, "OnDemandCount")
+ }
+ return o
+}
+
+func (o *Strategy) SetImmediateODRecoverThreshold(v *int) *Strategy {
+ if o.ImmediateODRecoverThreshold = v; o.ImmediateODRecoverThreshold == nil {
+ o.nullFields = append(o.nullFields, "ImmediateODRecoverThreshold")
+ }
+ return o
+}
+
+func (o *Strategy) SetDrainingTimeout(v *int) *Strategy {
+ if o.DrainingTimeout = v; o.DrainingTimeout == nil {
+ o.nullFields = append(o.nullFields, "DrainingTimeout")
+ }
+ return o
+}
+
+func (o *Strategy) SetAvailabilityVsCost(v *string) *Strategy {
+ if o.AvailabilityVsCost = v; o.AvailabilityVsCost == nil {
+ o.nullFields = append(o.nullFields, "AvailabilityVsCost")
+ }
+ return o
+}
+
+func (o *Strategy) SetLifetimePeriod(v *string) *Strategy {
+ if o.LifetimePeriod = v; o.LifetimePeriod == nil {
+ o.nullFields = append(o.nullFields, "LifetimePeriod")
+ }
+ return o
+}
+
+func (o *Strategy) SetUtilizeReservedInstances(v *bool) *Strategy {
+ if o.UtilizeReservedInstances = v; o.UtilizeReservedInstances == nil {
+ o.nullFields = append(o.nullFields, "UtilizeReservedInstances")
+ }
+ return o
+}
+
+func (o *Strategy) SetFallbackToOnDemand(v *bool) *Strategy {
+ if o.FallbackToOnDemand = v; o.FallbackToOnDemand == nil {
+ o.nullFields = append(o.nullFields, "FallbackToOnDemand")
+ }
+ return o
+}
+
+func (o *Strategy) SetSpinUpTime(v *int) *Strategy {
+ if o.SpinUpTime = v; o.SpinUpTime == nil {
+ o.nullFields = append(o.nullFields, "SpinUpTime")
+ }
+ return o
+}
+
+func (o *Strategy) SetSignals(v []*Signal) *Strategy {
+ if o.Signals = v; o.Signals == nil {
+ o.nullFields = append(o.nullFields, "Signals")
+ }
+ return o
+}
+
+func (o *Strategy) SetPersistence(v *Persistence) *Strategy {
+ if o.Persistence = v; o.Persistence == nil {
+ o.nullFields = append(o.nullFields, "Persistence")
+ }
+ return o
+}
+
+func (o *Strategy) SetRevertToSpot(v *RevertToSpot) *Strategy {
+ if o.RevertToSpot = v; o.RevertToSpot == nil {
+ o.nullFields = append(o.nullFields, "RevertToSpot")
+ }
+ return o
+}
+
+func (o *Strategy) SetScalingStrategy(v *ScalingStrategy) *Strategy {
+ if o.ScalingStrategy = v; o.ScalingStrategy == nil {
+ o.nullFields = append(o.nullFields, "ScalingStrategy")
+ }
+ return o
+}
+
+func (o *Strategy) SetUtilizeCommitments(v *bool) *Strategy {
+ if o.UtilizeCommitments = v; o.UtilizeCommitments == nil {
+ o.nullFields = append(o.nullFields, "UtilizeCommitments")
+ }
+ return o
+}
+
+func (o *Strategy) SetMinimumInstanceLifetime(v *int) *Strategy {
+ if o.MinimumInstanceLifetime = v; o.MinimumInstanceLifetime == nil {
+ o.nullFields = append(o.nullFields, "MinimumInstanceLifetime")
+ }
+ return o
+}
+func (o *Strategy) SetConsiderODPricing(v *bool) *Strategy {
+ if o.ConsiderODPricing = v; o.ConsiderODPricing == nil {
+ o.nullFields = append(o.nullFields, "ConsiderODPricing")
+ }
+ return o
+}
+
+// endregion
+
+// region ScalingStrategy
+
+func (o ScalingStrategy) MarshalJSON() ([]byte, error) {
+ type noMethod ScalingStrategy
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *ScalingStrategy) SetTerminationPolicy(v *string) *ScalingStrategy {
+ if o.TerminationPolicy = v; o.TerminationPolicy == nil {
+ o.nullFields = append(o.nullFields, "TerminationPolicy")
+ }
+ return o
+}
+
+func (o *ScalingStrategy) SetTerminateAtEndOfBillingHour(v *bool) *ScalingStrategy {
+ if o.TerminateAtEndOfBillingHour = v; o.TerminateAtEndOfBillingHour == nil {
+ o.nullFields = append(o.nullFields, "TerminateAtEndOfBillingHour")
+ }
+ return o
+}
+
+// endregion
+
+// region Persistence
+
+func (o Persistence) MarshalJSON() ([]byte, error) {
+ type noMethod Persistence
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *Persistence) SetShouldPersistPrivateIP(v *bool) *Persistence {
+ if o.ShouldPersistPrivateIP = v; o.ShouldPersistPrivateIP == nil {
+ o.nullFields = append(o.nullFields, "ShouldPersistPrivateIP")
+ }
+ return o
+}
+
+func (o *Persistence) SetShouldPersistBlockDevices(v *bool) *Persistence {
+ if o.ShouldPersistBlockDevices = v; o.ShouldPersistBlockDevices == nil {
+ o.nullFields = append(o.nullFields, "ShouldPersistBlockDevices")
+ }
+ return o
+}
+
+func (o *Persistence) SetShouldPersistRootDevice(v *bool) *Persistence {
+ if o.ShouldPersistRootDevice = v; o.ShouldPersistRootDevice == nil {
+ o.nullFields = append(o.nullFields, "ShouldPersistRootDevice")
+ }
+ return o
+}
+
+func (o *Persistence) SetBlockDevicesMode(v *string) *Persistence {
+ if o.BlockDevicesMode = v; o.BlockDevicesMode == nil {
+ o.nullFields = append(o.nullFields, "BlockDevicesMode")
+ }
+ return o
+}
+
+// endregion
+
+// region RevertToSpot
+
+func (o RevertToSpot) MarshalJSON() ([]byte, error) {
+ type noMethod RevertToSpot
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *RevertToSpot) SetPerformAt(v *string) *RevertToSpot {
+ if o.PerformAt = v; o.PerformAt == nil {
+ o.nullFields = append(o.nullFields, "PerformAt")
+ }
+ return o
+}
+
+func (o *RevertToSpot) SetTimeWindows(v []string) *RevertToSpot {
+ if o.TimeWindows = v; o.TimeWindows == nil {
+ o.nullFields = append(o.nullFields, "TimeWindows")
+ }
+ return o
+}
+
+// endregion
+
+// region Signal
+
+func (o Signal) MarshalJSON() ([]byte, error) {
+ type noMethod Signal
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *Signal) SetName(v *string) *Signal {
+ if o.Name = v; o.Name == nil {
+ o.nullFields = append(o.nullFields, "Name")
+ }
+ return o
+}
+
+func (o *Signal) SetTimeout(v *int) *Signal {
+ if o.Timeout = v; o.Timeout == nil {
+ o.nullFields = append(o.nullFields, "Timeout")
+ }
+ return o
+}
+
+// endregion
+
+// region Capacity
+
+func (o Capacity) MarshalJSON() ([]byte, error) {
+ type noMethod Capacity
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *Capacity) SetMinimum(v *int) *Capacity {
+ if o.Minimum = v; o.Minimum == nil {
+ o.nullFields = append(o.nullFields, "Minimum")
+ }
+ return o
+}
+
+func (o *Capacity) SetMaximum(v *int) *Capacity {
+ if o.Maximum = v; o.Maximum == nil {
+ o.nullFields = append(o.nullFields, "Maximum")
+ }
+ return o
+}
+
+func (o *Capacity) SetTarget(v *int) *Capacity {
+ if o.Target = v; o.Target == nil {
+ o.nullFields = append(o.nullFields, "Target")
+ }
+ return o
+}
+
+func (o *Capacity) SetUnit(v *string) *Capacity {
+ if o.Unit = v; o.Unit == nil {
+ o.nullFields = append(o.nullFields, "Unit")
+ }
+ return o
+}
+
+// endregion
+
+// region Compute
+
+func (o Compute) MarshalJSON() ([]byte, error) {
+ type noMethod Compute
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *Compute) SetProduct(v *string) *Compute {
+ if o.Product = v; o.Product == nil {
+ o.nullFields = append(o.nullFields, "Product")
+ }
+
+ return o
+}
+
+func (o *Compute) SetPrivateIPs(v []string) *Compute {
+ if o.PrivateIPs = v; o.PrivateIPs == nil {
+ o.nullFields = append(o.nullFields, "PrivateIPs")
+ }
+
+ return o
+}
+
+func (o *Compute) SetInstanceTypes(v *InstanceTypes) *Compute {
+ if o.InstanceTypes = v; o.InstanceTypes == nil {
+ o.nullFields = append(o.nullFields, "InstanceTypes")
+ }
+ return o
+}
+
+func (o *Compute) SetLaunchSpecification(v *LaunchSpecification) *Compute {
+ if o.LaunchSpecification = v; o.LaunchSpecification == nil {
+ o.nullFields = append(o.nullFields, "LaunchSpecification")
+ }
+ return o
+}
+
+func (o *Compute) SetAvailabilityZones(v []*AvailabilityZone) *Compute {
+ if o.AvailabilityZones = v; o.AvailabilityZones == nil {
+ o.nullFields = append(o.nullFields, "AvailabilityZones")
+ }
+ return o
+}
+
+func (o *Compute) SetPreferredAvailabilityZones(v []string) *Compute {
+ if o.PreferredAvailabilityZones = v; o.PreferredAvailabilityZones == nil {
+ o.nullFields = append(o.nullFields, "PreferredAvailabilityZones")
+ }
+ return o
+}
+
+func (o *Compute) SetElasticIPs(v []string) *Compute {
+ if o.ElasticIPs = v; o.ElasticIPs == nil {
+ o.nullFields = append(o.nullFields, "ElasticIPs")
+ }
+ return o
+}
+
+func (o *Compute) SetEBSVolumePool(v []*EBSVolume) *Compute {
+ if o.EBSVolumePool = v; o.EBSVolumePool == nil {
+ o.nullFields = append(o.nullFields, "EBSVolumePool")
+ }
+ return o
+}
+
+func (o *Compute) SetSubnetIDs(v []string) *Compute {
+ if o.SubnetIDs = v; o.SubnetIDs == nil {
+ o.nullFields = append(o.nullFields, "SubnetIDs")
+ }
+ return o
+}
+
+// endregion
+
+// region EBSVolume
+
+func (o EBSVolume) MarshalJSON() ([]byte, error) {
+ type noMethod EBSVolume
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *EBSVolume) SetDeviceName(v *string) *EBSVolume {
+ if o.DeviceName = v; o.DeviceName == nil {
+ o.nullFields = append(o.nullFields, "DeviceName")
+ }
+ return o
+}
+
+func (o *EBSVolume) SetVolumeIDs(v []string) *EBSVolume {
+ if o.VolumeIDs = v; o.VolumeIDs == nil {
+ o.nullFields = append(o.nullFields, "VolumeIDs")
+ }
+ return o
+}
+
+// endregion
+
+// region InstanceTypes
+
+func (o InstanceTypes) MarshalJSON() ([]byte, error) {
+ type noMethod InstanceTypes
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *InstanceTypes) SetOnDemand(v *string) *InstanceTypes {
+ if o.OnDemand = v; o.OnDemand == nil {
+ o.nullFields = append(o.nullFields, "OnDemand")
+ }
+ return o
+}
+
+func (o *InstanceTypes) SetSpot(v []string) *InstanceTypes {
+ if o.Spot = v; o.Spot == nil {
+ o.nullFields = append(o.nullFields, "Spot")
+ }
+ return o
+}
+
+func (o *InstanceTypes) SetPreferredSpot(v []string) *InstanceTypes {
+ if o.PreferredSpot = v; o.PreferredSpot == nil {
+ o.nullFields = append(o.nullFields, "PreferredSpot")
+ }
+ return o
+}
+
+func (o *InstanceTypes) SetWeights(v []*InstanceTypeWeight) *InstanceTypes {
+ if o.Weights = v; o.Weights == nil {
+ o.nullFields = append(o.nullFields, "Weights")
+ }
+ return o
+}
+
+func (o *InstanceTypes) SetOnDemandTypes(v []string) *InstanceTypes {
+ if o.OnDemandTypes = v; o.OnDemandTypes == nil {
+ o.nullFields = append(o.nullFields, "OnDemandTypes")
+ }
+ return o
+}
+
+func (o *InstanceTypes) SetResourceRequirements(v *ResourceRequirements) *InstanceTypes {
+ if o.ResourceRequirements = v; o.ResourceRequirements == nil {
+ o.nullFields = append(o.nullFields, "ResourceRequirements")
+ }
+ return o
+}
+
+// endregion
+
+// region InstanceTypeWeight
+
+func (o InstanceTypeWeight) MarshalJSON() ([]byte, error) {
+ type noMethod InstanceTypeWeight
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *InstanceTypeWeight) SetInstanceType(v *string) *InstanceTypeWeight {
+ if o.InstanceType = v; o.InstanceType == nil {
+ o.nullFields = append(o.nullFields, "InstanceType")
+ }
+ return o
+}
+
+func (o *InstanceTypeWeight) SetWeight(v *int) *InstanceTypeWeight {
+ if o.Weight = v; o.Weight == nil {
+ o.nullFields = append(o.nullFields, "Weight")
+ }
+ return o
+}
+
+// endregion
+
+func (o ResourceRequirements) MarshalJSON() ([]byte, error) {
+ type noMethod ResourceRequirements
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *ResourceRequirements) SetExcludedInstanceFamilies(v []string) *ResourceRequirements {
+ if o.ExcludedInstanceFamilies = v; o.ExcludedInstanceFamilies == nil {
+ o.nullFields = append(o.nullFields, "ExcludedInstanceFamilies")
+ }
+ return o
+}
+
+func (o *ResourceRequirements) SetExcludedInstanceGenerations(v []string) *ResourceRequirements {
+ if o.ExcludedInstanceGenerations = v; o.ExcludedInstanceGenerations == nil {
+ o.nullFields = append(o.nullFields, "ExcludedInstanceGenerations")
+ }
+ return o
+}
+
+func (o *ResourceRequirements) SetExcludedInstanceTypes(v []string) *ResourceRequirements {
+ if o.ExcludedInstanceTypes = v; o.ExcludedInstanceTypes == nil {
+ o.nullFields = append(o.nullFields, "ExcludedInstanceTypes")
+ }
+ return o
+}
+
+func (o *ResourceRequirements) SetRequiredGpu(v *RequiredGpu) *ResourceRequirements {
+ if o.RequiredGpu = v; o.RequiredGpu == nil {
+ o.nullFields = append(o.nullFields, "RequiredGpu")
+ }
+ return o
+}
+
+func (o *ResourceRequirements) SetRequiredVCpu(v *RequiredVCpu) *ResourceRequirements {
+ if o.RequiredVCpu = v; o.RequiredVCpu == nil {
+ o.nullFields = append(o.nullFields, "RequiredVCpu")
+ }
+ return o
+}
+
+func (o *ResourceRequirements) SetRequiredMemory(v *RequiredMemory) *ResourceRequirements {
+ if o.RequiredMemory = v; o.RequiredMemory == nil {
+ o.nullFields = append(o.nullFields, "RequiredMemory")
+ }
+ return o
+}
+
+func (o RequiredGpu) MarshalJSON() ([]byte, error) {
+ type noMethod RequiredGpu
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *RequiredGpu) SetMaximum(v *int) *RequiredGpu {
+ if o.Maximum = v; o.Maximum == nil {
+ o.nullFields = append(o.nullFields, "Maximum")
+ }
+ return o
+}
+
+func (o *RequiredGpu) SetMinimum(v *int) *RequiredGpu {
+ if o.Minimum = v; o.Minimum == nil {
+ o.nullFields = append(o.nullFields, "Minimum")
+ }
+ return o
+}
+
+func (o RequiredMemory) MarshalJSON() ([]byte, error) {
+ type noMethod RequiredMemory
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *RequiredMemory) SetMaximum(v *int) *RequiredMemory {
+ if o.Maximum = v; o.Maximum == nil {
+ o.nullFields = append(o.nullFields, "Maximum")
+ }
+ return o
+}
+
+func (o *RequiredMemory) SetMinimum(v *int) *RequiredMemory {
+ if o.Minimum = v; o.Minimum == nil {
+ o.nullFields = append(o.nullFields, "Minimum")
+ }
+ return o
+}
+
+func (o RequiredVCpu) MarshalJSON() ([]byte, error) {
+ type noMethod RequiredVCpu
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *RequiredVCpu) SetMaximum(v *int) *RequiredVCpu {
+ if o.Maximum = v; o.Maximum == nil {
+ o.nullFields = append(o.nullFields, "Maximum")
+ }
+ return o
+}
+
+func (o *RequiredVCpu) SetMinimum(v *int) *RequiredVCpu {
+ if o.Minimum = v; o.Minimum == nil {
+ o.nullFields = append(o.nullFields, "Minimum")
+ }
+ return o
+}
+
+// region AvailabilityZone
+
+func (o AvailabilityZone) MarshalJSON() ([]byte, error) {
+ type noMethod AvailabilityZone
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *AvailabilityZone) SetName(v *string) *AvailabilityZone {
+ if o.Name = v; o.Name == nil {
+ o.nullFields = append(o.nullFields, "Name")
+ }
+ return o
+}
+
+func (o *AvailabilityZone) SetSubnetId(v *string) *AvailabilityZone {
+ if o.SubnetID = v; o.SubnetID == nil {
+ o.nullFields = append(o.nullFields, "SubnetID")
+ }
+ return o
+}
+
+func (o *AvailabilityZone) SetPlacementGroupName(v *string) *AvailabilityZone {
+ if o.PlacementGroupName = v; o.PlacementGroupName == nil {
+ o.nullFields = append(o.nullFields, "PlacementGroupName")
+ }
+ return o
+}
+
+// endregion
+
+// region LaunchSpecification
+
+func (o LaunchSpecification) MarshalJSON() ([]byte, error) {
+ type noMethod LaunchSpecification
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *LaunchSpecification) SetLoadBalancerNames(v []string) *LaunchSpecification {
+ if o.LoadBalancerNames = v; o.LoadBalancerNames == nil {
+ o.nullFields = append(o.nullFields, "LoadBalancerNames")
+ }
+ return o
+}
+
+func (o *LaunchSpecification) SetLoadBalancersConfig(v *LoadBalancersConfig) *LaunchSpecification {
+ if o.LoadBalancersConfig = v; o.LoadBalancersConfig == nil {
+ o.nullFields = append(o.nullFields, "LoadBalancersConfig")
+ }
+ return o
+}
+
+func (o *LaunchSpecification) SetSecurityGroupIDs(v []string) *LaunchSpecification {
+ if o.SecurityGroupIDs = v; o.SecurityGroupIDs == nil {
+ o.nullFields = append(o.nullFields, "SecurityGroupIDs")
+ }
+ return o
+}
+
+func (o *LaunchSpecification) SetHealthCheckType(v *string) *LaunchSpecification {
+ if o.HealthCheckType = v; o.HealthCheckType == nil {
+ o.nullFields = append(o.nullFields, "HealthCheckType")
+ }
+ return o
+}
+
+func (o *LaunchSpecification) SetHealthCheckGracePeriod(v *int) *LaunchSpecification {
+ if o.HealthCheckGracePeriod = v; o.HealthCheckGracePeriod == nil {
+ o.nullFields = append(o.nullFields, "HealthCheckGracePeriod")
+ }
+ return o
+}
+
+func (o *LaunchSpecification) SetHealthCheckUnhealthyDurationBeforeReplacement(v *int) *LaunchSpecification {
+ if o.HealthCheckUnhealthyDurationBeforeReplacement = v; o.HealthCheckUnhealthyDurationBeforeReplacement == nil {
+ o.nullFields = append(o.nullFields, "HealthCheckUnhealthyDurationBeforeReplacement")
+ }
+ return o
+}
+
+func (o *LaunchSpecification) SetImages(v []*Image) *LaunchSpecification {
+ if o.Images = v; o.Images == nil {
+ o.nullFields = append(o.nullFields, "Images")
+ }
+ return o
+}
+
+func (o *LaunchSpecification) SetImageId(v *string) *LaunchSpecification {
+ if o.ImageID = v; o.ImageID == nil {
+ o.nullFields = append(o.nullFields, "ImageID")
+
+ }
+ return o
+}
+
+func (o *LaunchSpecification) SetKeyPair(v *string) *LaunchSpecification {
+ if o.KeyPair = v; o.KeyPair == nil {
+ o.nullFields = append(o.nullFields, "KeyPair")
+ }
+ return o
+}
+
+func (o *LaunchSpecification) SetUserData(v *string) *LaunchSpecification {
+ if o.UserData = v; o.UserData == nil {
+ o.nullFields = append(o.nullFields, "UserData")
+ }
+ return o
+}
+
+func (o *LaunchSpecification) SetShutdownScript(v *string) *LaunchSpecification {
+ if o.ShutdownScript = v; o.ShutdownScript == nil {
+ o.nullFields = append(o.nullFields, "ShutdownScript")
+ }
+ return o
+}
+
+func (o *LaunchSpecification) SetTenancy(v *string) *LaunchSpecification {
+ if o.Tenancy = v; o.Tenancy == nil {
+ o.nullFields = append(o.nullFields, "Tenancy")
+ }
+ return o
+}
+
+func (o *LaunchSpecification) SetMonitoring(v *bool) *LaunchSpecification {
+ if o.Monitoring = v; o.Monitoring == nil {
+ o.nullFields = append(o.nullFields, "Monitoring")
+ }
+ return o
+}
+
+func (o *LaunchSpecification) SetEBSOptimized(v *bool) *LaunchSpecification {
+ if o.EBSOptimized = v; o.EBSOptimized == nil {
+ o.nullFields = append(o.nullFields, "EBSOptimized")
+ }
+ return o
+}
+
+func (o *LaunchSpecification) SetIAMInstanceProfile(v *IAMInstanceProfile) *LaunchSpecification {
+ if o.IAMInstanceProfile = v; o.IAMInstanceProfile == nil {
+ o.nullFields = append(o.nullFields, "IAMInstanceProfile")
+ }
+ return o
+}
+
+func (o *LaunchSpecification) SetCreditSpecification(v *CreditSpecification) *LaunchSpecification {
+ if o.CreditSpecification = v; o.CreditSpecification == nil {
+ o.nullFields = append(o.nullFields, "CreditSpecification")
+ }
+ return o
+}
+
+func (o *LaunchSpecification) SetBlockDeviceMappings(v []*BlockDeviceMapping) *LaunchSpecification {
+ if o.BlockDeviceMappings = v; o.BlockDeviceMappings == nil {
+ o.nullFields = append(o.nullFields, "BlockDeviceMappings")
+ }
+ return o
+}
+
+func (o *LaunchSpecification) SetNetworkInterfaces(v []*NetworkInterface) *LaunchSpecification {
+ if o.NetworkInterfaces = v; o.NetworkInterfaces == nil {
+ o.nullFields = append(o.nullFields, "NetworkInterfaces")
+ }
+ return o
+}
+
+func (o *LaunchSpecification) SetTags(v []*Tag) *LaunchSpecification {
+ if o.Tags = v; o.Tags == nil {
+ o.nullFields = append(o.nullFields, "Tags")
+ }
+ return o
+}
+
+func (o *LaunchSpecification) SetMetadataOptions(v *MetadataOptions) *LaunchSpecification {
+ if o.MetadataOptions = v; o.MetadataOptions == nil {
+ o.nullFields = append(o.nullFields, "MetadataOptions")
+ }
+ return o
+}
+
+func (o *LaunchSpecification) SetCPUOptions(v *CPUOptions) *LaunchSpecification {
+ if o.CPUOptions = v; o.CPUOptions == nil {
+ o.nullFields = append(o.nullFields, "CPUOptions")
+ }
+ return o
+}
+
+func (o *LaunchSpecification) SetResourceTagSpecification(v *ResourceTagSpecification) *LaunchSpecification {
+ if o.ResourceTagSpecification = v; o.ResourceTagSpecification == nil {
+ o.nullFields = append(o.nullFields, "ResourceTagSpecification")
+ }
+ return o
+}
+
+func (o *LaunchSpecification) SetITF(v *ITF) *LaunchSpecification {
+ if o.ITF = v; o.ITF == nil {
+ o.nullFields = append(o.nullFields, "ITF")
+ }
+ return o
+}
+
+// endregion
+
+// region Matcher
+
+func (o Matcher) MarshalJSON() ([]byte, error) {
+ type noMethod Matcher
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *Matcher) SetHTTPCode(v *string) *Matcher {
+ if o.HTTPCode = v; o.HTTPCode == nil {
+ o.nullFields = append(o.nullFields, "HTTPCode")
+ }
+ return o
+}
+
+func (o *Matcher) SetGRPCCode(v *string) *Matcher {
+ if o.GRPCCode = v; o.GRPCCode == nil {
+ o.nullFields = append(o.nullFields, "GRPCCode")
+ }
+ return o
+}
+
+// endregion
+
+// region ITF
+
+func (o ITF) MarshalJSON() ([]byte, error) {
+ type noMethod ITF
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *ITF) SetLoadBalancers(v []*ITFLoadBalancer) *ITF {
+ if o.LoadBalancers = v; o.LoadBalancers == nil {
+ o.nullFields = append(o.nullFields, "LoadBalancers")
+ }
+ return o
+}
+
+func (o *ITF) SetMigrationHealthinessThreshold(v *int) *ITF {
+ if o.MigrationHealthinessThreshold = v; o.MigrationHealthinessThreshold == nil {
+ o.nullFields = append(o.nullFields, "MigrationHealthinessThreshold")
+ }
+ return o
+}
+
+func (o *ITF) SetFixedTargetGroups(v *bool) *ITF {
+ if o.FixedTargetGroups = v; o.FixedTargetGroups == nil {
+ o.nullFields = append(o.nullFields, "FixedTargetGroups")
+ }
+ return o
+}
+
+func (o *ITF) SetWeightStrategy(v *string) *ITF {
+ if o.WeightStrategy = v; o.WeightStrategy == nil {
+ o.nullFields = append(o.nullFields, "WeightStrategy")
+ }
+ return o
+}
+
+func (o *ITF) SetTargetGroupConfig(v *TargetGroupConfig) *ITF {
+ if o.TargetGroupConfig = v; o.TargetGroupConfig == nil {
+ o.nullFields = append(o.nullFields, "TargetGroupConfig")
+ }
+ return o
+}
+
+func (o *ITF) SetDefaultStaticTargetGroups(v []*StaticTargetGroup) *ITF {
+ if o.DefaultStaticTargetGroups = v; o.DefaultStaticTargetGroups == nil {
+ o.nullFields = append(o.nullFields, "DefaultStaticTargetGroups")
+ }
+ return o
+}
+
+// endregion
+
+// region ITFLoadBalancer
+
+func (o ITFLoadBalancer) MarshalJSON() ([]byte, error) {
+ type noMethod ITFLoadBalancer
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *ITFLoadBalancer) SetListenerRules(v []*ListenerRule) *ITFLoadBalancer {
+ if o.ListenerRules = v; o.ListenerRules == nil {
+ o.nullFields = append(o.nullFields, "ListenerRules")
+ }
+ return o
+}
+
+func (o *ITFLoadBalancer) SetLoadBalancerARN(v *string) *ITFLoadBalancer {
+ if o.LoadBalancerARN = v; o.LoadBalancerARN == nil {
+ o.nullFields = append(o.nullFields, "LoadBalancerARN")
+ }
+ return o
+}
+
+// endregion
+
+// region ListenerRule
+
+func (o ListenerRule) MarshalJSON() ([]byte, error) {
+ type noMethod ListenerRule
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *ListenerRule) SetRuleARN(v *string) *ListenerRule {
+ if o.RuleARN = v; o.RuleARN == nil {
+ o.nullFields = append(o.nullFields, "RuleARN")
+ }
+ return o
+}
+
+func (o *ListenerRule) SetStaticTargetGroups(v []*StaticTargetGroup) *ListenerRule {
+ if o.StaticTargetGroups = v; o.StaticTargetGroups == nil {
+ o.nullFields = append(o.nullFields, "StaticTargetGroups")
+ }
+ return o
+}
+
+// endregion
+
+// region StaticTargetGroup
+
+func (o StaticTargetGroup) MarshalJSON() ([]byte, error) {
+ type noMethod StaticTargetGroup
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *StaticTargetGroup) SetStaticTargetGroupARN(v *string) *StaticTargetGroup {
+ if o.StaticTargetGroupARN = v; o.StaticTargetGroupARN == nil {
+ o.nullFields = append(o.nullFields, "StaticTargetGroupARN")
+ }
+ return o
+}
+
+func (o *StaticTargetGroup) SetPercentage(v *float64) *StaticTargetGroup {
+ if o.Percentage = v; o.Percentage == nil {
+ o.nullFields = append(o.nullFields, "Percentage")
+ }
+ return o
+}
+// endregion
+// region TargetGroupConfig
+
+func (o TargetGroupConfig) MarshalJSON() ([]byte, error) {
+ type noMethod TargetGroupConfig
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *TargetGroupConfig) SetVPCId(v *string) *TargetGroupConfig {
+ if o.VPCID = v; o.VPCID == nil {
+ o.nullFields = append(o.nullFields, "VPCID")
+ }
+ return o
+}
+
+func (o *TargetGroupConfig) SetHealthCheckIntervalSeconds(v *int) *TargetGroupConfig {
+ if o.HealthCheckIntervalSeconds = v; o.HealthCheckIntervalSeconds == nil {
+ o.nullFields = append(o.nullFields, "HealthCheckIntervalSeconds")
+ }
+ return o
+}
+
+func (o *TargetGroupConfig) SetHealthCheckPath(v *string) *TargetGroupConfig {
+ if o.HealthCheckPath = v; o.HealthCheckPath == nil {
+ o.nullFields = append(o.nullFields, "HealthCheckPath")
+ }
+ return o
+}
+
+func (o *TargetGroupConfig) SetHealthCheckPort(v *string) *TargetGroupConfig {
+ if o.HealthCheckPort = v; o.HealthCheckPort == nil {
+ o.nullFields = append(o.nullFields, "HealthCheckPort")
+ }
+ return o
+}
+
+func (o *TargetGroupConfig) SetHealthCheckProtocol(v *string) *TargetGroupConfig {
+ if o.HealthCheckProtocol = v; o.HealthCheckProtocol == nil {
+ o.nullFields = append(o.nullFields, "HealthCheckProtocol")
+ }
+ return o
+}
+
+func (o *TargetGroupConfig) SetHealthyThresholdCount(v *int) *TargetGroupConfig {
+ if o.HealthyThresholdCount = v; o.HealthyThresholdCount == nil {
+ o.nullFields = append(o.nullFields, "HealthyThresholdCount")
+ }
+ return o
+}
+
+func (o *TargetGroupConfig) SetUnhealthyThresholdCount(v *int) *TargetGroupConfig {
+ if o.UnhealthyThresholdCount = v; o.UnhealthyThresholdCount == nil {
+ o.nullFields = append(o.nullFields, "UnhealthyThresholdCount")
+ }
+ return o
+}
+
+func (o *TargetGroupConfig) SetHealthCheckTimeoutSeconds(v *int) *TargetGroupConfig {
+ if o.HealthCheckTimeoutSeconds = v; o.HealthCheckTimeoutSeconds == nil {
+ o.nullFields = append(o.nullFields, "HealthCheckTimeoutSeconds")
+ }
+ return o
+}
+
+func (o *TargetGroupConfig) SetPort(v *int) *TargetGroupConfig {
+ if o.Port = v; o.Port == nil {
+ o.nullFields = append(o.nullFields, "Port")
+ }
+ return o
+}
+
+func (o *TargetGroupConfig) SetProtocol(v *string) *TargetGroupConfig {
+ if o.Protocol = v; o.Protocol == nil {
+ o.nullFields = append(o.nullFields, "Protocol")
+ }
+ return o
+}
+
+func (o *TargetGroupConfig) SetProtocolVersion(v *string) *TargetGroupConfig {
+ if o.ProtocolVersion = v; o.ProtocolVersion == nil {
+ o.nullFields = append(o.nullFields, "ProtocolVersion")
+ }
+ return o
+}
+
+func (o *TargetGroupConfig) SetMatcher(v *Matcher) *TargetGroupConfig {
+ if o.Matcher = v; o.Matcher == nil {
+ o.nullFields = append(o.nullFields, "Matcher")
+ }
+ return o
+}
+
+func (o *TargetGroupConfig) SetTags(v []*Tag) *TargetGroupConfig {
+ if o.Tags = v; o.Tags == nil {
+ o.nullFields = append(o.nullFields, "Tags")
+ }
+ return o
+}
+
+// endregion
+
+// region Image
+
+func (o Image) MarshalJSON() ([]byte, error) {
+ type noMethod Image
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *Image) SetId(v *string) *Image {
+ if o.Id = v; o.Id == nil {
+ o.nullFields = append(o.nullFields, "Id")
+ }
+ return o
+}
+
+// endregion
+
+// region LoadBalancersConfig
+
+func (o LoadBalancersConfig) MarshalJSON() ([]byte, error) {
+ type noMethod LoadBalancersConfig
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *LoadBalancersConfig) SetLoadBalancers(v []*LoadBalancer) *LoadBalancersConfig {
+ if o.LoadBalancers = v; o.LoadBalancers == nil {
+ o.nullFields = append(o.nullFields, "LoadBalancers")
+ }
+ return o
+}
+
+// endregion
+
+// region LoadBalancer
+
+func (o LoadBalancer) MarshalJSON() ([]byte, error) {
+ type noMethod LoadBalancer
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *LoadBalancer) SetName(v *string) *LoadBalancer {
+ if o.Name = v; o.Name == nil {
+ o.nullFields = append(o.nullFields, "Name")
+ }
+ return o
+}
+
+func (o *LoadBalancer) SetArn(v *string) *LoadBalancer {
+ if o.Arn = v; o.Arn == nil {
+ o.nullFields = append(o.nullFields, "Arn")
+ }
+ return o
+}
+
+func (o *LoadBalancer) SetType(v *string) *LoadBalancer {
+ if o.Type = v; o.Type == nil {
+ o.nullFields = append(o.nullFields, "Type")
+ }
+ return o
+}
+
+func (o *LoadBalancer) SetBalancerId(v *string) *LoadBalancer {
+ if o.BalancerID = v; o.BalancerID == nil {
+ o.nullFields = append(o.nullFields, "BalancerID")
+ }
+ return o
+}
+
+func (o *LoadBalancer) SetTargetSetId(v *string) *LoadBalancer {
+ if o.TargetSetID = v; o.TargetSetID == nil {
+ o.nullFields = append(o.nullFields, "TargetSetID")
+ }
+ return o
+}
+
+func (o *LoadBalancer) SetZoneAwareness(v *bool) *LoadBalancer {
+ if o.ZoneAwareness = v; o.ZoneAwareness == nil {
+ o.nullFields = append(o.nullFields, "ZoneAwareness")
+ }
+ return o
+}
+
+func (o *LoadBalancer) SetAutoWeight(v *bool) *LoadBalancer {
+ if o.AutoWeight = v; o.AutoWeight == nil {
+ o.nullFields = append(o.nullFields, "AutoWeight")
+ }
+ return o
+}
+
+// endregion
+
+// region NetworkInterface
+
+func (o NetworkInterface) MarshalJSON() ([]byte, error) {
+ type noMethod NetworkInterface
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *NetworkInterface) SetId(v *string) *NetworkInterface {
+ if o.ID = v; o.ID == nil {
+ o.nullFields = append(o.nullFields, "ID")
+ }
+ return o
+}
+
+func (o *NetworkInterface) SetDescription(v *string) *NetworkInterface {
+ if o.Description = v; o.Description == nil {
+ o.nullFields = append(o.nullFields, "Description")
+ }
+ return o
+}
+
+func (o *NetworkInterface) SetDeviceIndex(v *int) *NetworkInterface {
+ if o.DeviceIndex = v; o.DeviceIndex == nil {
+ o.nullFields = append(o.nullFields, "DeviceIndex")
+ }
+ return o
+}
+
+func (o *NetworkInterface) SetSecondaryPrivateIPAddressCount(v *int) *NetworkInterface {
+ if o.SecondaryPrivateIPAddressCount = v; o.SecondaryPrivateIPAddressCount == nil {
+ o.nullFields = append(o.nullFields, "SecondaryPrivateIPAddressCount")
+ }
+ return o
+}
+
+func (o *NetworkInterface) SetAssociatePublicIPAddress(v *bool) *NetworkInterface {
+ if o.AssociatePublicIPAddress = v; o.AssociatePublicIPAddress == nil {
+ o.nullFields = append(o.nullFields, "AssociatePublicIPAddress")
+ }
+ return o
+}
+
+func (o *NetworkInterface) SetAssociateIPV6Address(v *bool) *NetworkInterface {
+ if o.AssociateIPV6Address = v; o.AssociateIPV6Address == nil {
+ o.nullFields = append(o.nullFields, "AssociateIPV6Address")
+ }
+ return o
+}
+
+func (o *NetworkInterface) SetDeleteOnTermination(v *bool) *NetworkInterface {
+ if o.DeleteOnTermination = v; o.DeleteOnTermination == nil {
+ o.nullFields = append(o.nullFields, "DeleteOnTermination")
+ }
+ return o
+}
+
+func (o *NetworkInterface) SetSecurityGroupsIDs(v []string) *NetworkInterface {
+ if o.SecurityGroupsIDs = v; o.SecurityGroupsIDs == nil {
+ o.nullFields = append(o.nullFields, "SecurityGroupsIDs")
+ }
+ return o
+}
+
+func (o *NetworkInterface) SetPrivateIPAddress(v *string) *NetworkInterface {
+ if o.PrivateIPAddress = v; o.PrivateIPAddress == nil {
+ o.nullFields = append(o.nullFields, "PrivateIPAddress")
+ }
+ return o
+}
+
+func (o *NetworkInterface) SetSubnetId(v *string) *NetworkInterface {
+ if o.SubnetID = v; o.SubnetID == nil {
+ o.nullFields = append(o.nullFields, "SubnetID")
+ }
+ return o
+}
+
+// endregion
+
+// region BlockDeviceMapping
+
+func (o BlockDeviceMapping) MarshalJSON() ([]byte, error) {
+ type noMethod BlockDeviceMapping
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *BlockDeviceMapping) SetDeviceName(v *string) *BlockDeviceMapping {
+ if o.DeviceName = v; o.DeviceName == nil {
+ o.nullFields = append(o.nullFields, "DeviceName")
+ }
+ return o
+}
+
+func (o *BlockDeviceMapping) SetVirtualName(v *string) *BlockDeviceMapping {
+ if o.VirtualName = v; o.VirtualName == nil {
+ o.nullFields = append(o.nullFields, "VirtualName")
+ }
+ return o
+}
+
+func (o *BlockDeviceMapping) SetEBS(v *EBS) *BlockDeviceMapping {
+ if o.EBS = v; o.EBS == nil {
+ o.nullFields = append(o.nullFields, "EBS")
+ }
+ return o
+}
+
+// endregion
+
+// region EBS
+
+func (o EBS) MarshalJSON() ([]byte, error) {
+ type noMethod EBS
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *EBS) SetDeleteOnTermination(v *bool) *EBS {
+ if o.DeleteOnTermination = v; o.DeleteOnTermination == nil {
+ o.nullFields = append(o.nullFields, "DeleteOnTermination")
+ }
+ return o
+}
+
+func (o *EBS) SetEncrypted(v *bool) *EBS {
+ if o.Encrypted = v; o.Encrypted == nil {
+ o.nullFields = append(o.nullFields, "Encrypted")
+ }
+ return o
+}
+
+func (o *EBS) SetKmsKeyId(v *string) *EBS {
+ if o.KmsKeyId = v; o.KmsKeyId == nil {
+ o.nullFields = append(o.nullFields, "KmsKeyId")
+ }
+ return o
+}
+
+func (o *EBS) SetSnapshotId(v *string) *EBS {
+ if o.SnapshotID = v; o.SnapshotID == nil {
+ o.nullFields = append(o.nullFields, "SnapshotID")
+ }
+ return o
+}
+
+func (o *EBS) SetVolumeType(v *string) *EBS {
+ if o.VolumeType = v; o.VolumeType == nil {
+ o.nullFields = append(o.nullFields, "VolumeType")
+ }
+ return o
+}
+
+func (o *EBS) SetVolumeSize(v *int) *EBS {
+ if o.VolumeSize = v; o.VolumeSize == nil {
+ o.nullFields = append(o.nullFields, "VolumeSize")
+ }
+ return o
+}
+
+func (o *EBS) SetIOPS(v *int) *EBS {
+ if o.IOPS = v; o.IOPS == nil {
+ o.nullFields = append(o.nullFields, "IOPS")
+ }
+ return o
+}
+
+func (o *EBS) SetThroughput(v *int) *EBS {
+ if o.Throughput = v; o.Throughput == nil {
+ o.nullFields = append(o.nullFields, "Throughput")
+ }
+ return o
+}
+
+// endregion
+
+// region IAMInstanceProfile
+
+func (o IAMInstanceProfile) MarshalJSON() ([]byte, error) {
+ type noMethod IAMInstanceProfile
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *IAMInstanceProfile) SetName(v *string) *IAMInstanceProfile {
+ if o.Name = v; o.Name == nil {
+ o.nullFields = append(o.nullFields, "Name")
+ }
+ return o
+}
+
+func (o *IAMInstanceProfile) SetArn(v *string) *IAMInstanceProfile {
+ if o.Arn = v; o.Arn == nil {
+ o.nullFields = append(o.nullFields, "Arn")
+ }
+ return o
+}
+
+// endregion
+
+// region CreditSpecification
+
+func (o CreditSpecification) MarshalJSON() ([]byte, error) {
+ type noMethod CreditSpecification
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *CreditSpecification) SetCPUCredits(v *string) *CreditSpecification {
+ if o.CPUCredits = v; o.CPUCredits == nil {
+ o.nullFields = append(o.nullFields, "CPUCredits")
+ }
+ return o
+}
+
+// endregion
+
+// region RollStrategy
+
+func (o RollStrategy) MarshalJSON() ([]byte, error) {
+ type noMethod RollStrategy
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *RollStrategy) SetAction(v *string) *RollStrategy {
+ if o.Action = v; o.Action == nil {
+ o.nullFields = append(o.nullFields, "Action")
+ }
+ return o
+}
+
+func (o *RollStrategy) SetShouldDrainInstances(v *bool) *RollStrategy {
+ if o.ShouldDrainInstances = v; o.ShouldDrainInstances == nil {
+ o.nullFields = append(o.nullFields, "ShouldDrainInstances")
+ }
+ return o
+}
+
+func (o *RollStrategy) SetOnFailure(v *OnFailure) *RollStrategy {
+ if o.OnFailure = v; o.OnFailure == nil {
+ o.nullFields = append(o.nullFields, "OnFailure")
+ }
+ return o
+}
+
+// endregion
+
+// region OnFailure
+
+func (o OnFailure) MarshalJSON() ([]byte, error) {
+ type noMethod OnFailure
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *OnFailure) SetActionType(v *string) *OnFailure {
+ if o.ActionType = v; o.ActionType == nil {
+ o.nullFields = append(o.nullFields, "ActionType")
+ }
+ return o
+}
+
+func (o *OnFailure) SetShouldHandleAllBatches(v *bool) *OnFailure {
+ if o.ShouldHandleAllBatches = v; o.ShouldHandleAllBatches == nil {
+ o.nullFields = append(o.nullFields, "ShouldHandleAllBatches")
+ }
+ return o
+}
+
+func (o *OnFailure) SetBatchNum(v *int) *OnFailure {
+ if o.BatchNum = v; o.BatchNum == nil {
+ o.nullFields = append(o.nullFields, "BatchNum")
+ }
+ return o
+}
+
+func (o *OnFailure) SetDrainingTimeout(v *int) *OnFailure {
+ if o.DrainingTimeout = v; o.DrainingTimeout == nil {
+ o.nullFields = append(o.nullFields, "DrainingTimeout")
+ }
+ return o
+}
+
+func (o *OnFailure) SetShouldDecrementTargetCapacity(v *bool) *OnFailure {
+ if o.ShouldDecrementTargetCapacity = v; o.ShouldDecrementTargetCapacity == nil {
+ o.nullFields = append(o.nullFields, "ShouldDecrementTargetCapacity")
+ }
+ return o
+}
+
+// endregion
+
+// region CodeDeployIntegration
+
+func (o CodeDeployIntegration) MarshalJSON() ([]byte, error) {
+ type noMethod CodeDeployIntegration
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *CodeDeployIntegration) SetDeploymentGroups(v []*DeploymentGroup) *CodeDeployIntegration {
+ if o.DeploymentGroups = v; o.DeploymentGroups == nil {
+ o.nullFields = append(o.nullFields, "DeploymentGroups")
+ }
+ return o
+}
+
+func (o *CodeDeployIntegration) SetCleanUpOnFailure(v *bool) *CodeDeployIntegration {
+ if o.CleanUpOnFailure = v; o.CleanUpOnFailure == nil {
+ o.nullFields = append(o.nullFields, "CleanUpOnFailure")
+ }
+ return o
+}
+
+func (o *CodeDeployIntegration) SetTerminateInstanceOnFailure(v *bool) *CodeDeployIntegration {
+ if o.TerminateInstanceOnFailure = v; o.TerminateInstanceOnFailure == nil {
+ o.nullFields = append(o.nullFields, "TerminateInstanceOnFailure")
+ }
+ return o
+}
+
+// endregion
+
+// region DeploymentGroup
+
+func (o DeploymentGroup) MarshalJSON() ([]byte, error) {
+ type noMethod DeploymentGroup
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *DeploymentGroup) SetApplicationName(v *string) *DeploymentGroup {
+ if o.ApplicationName = v; o.ApplicationName == nil {
+ o.nullFields = append(o.nullFields, "ApplicationName")
+ }
+ return o
+}
+
+func (o *DeploymentGroup) SetDeploymentGroupName(v *string) *DeploymentGroup {
+ if o.DeploymentGroupName = v; o.DeploymentGroupName == nil {
+ o.nullFields = append(o.nullFields, "DeploymentGroupName")
+ }
+ return o
+}
+
+// endregion
+
+// region OpsWorksIntegration
+
+func (o OpsWorksIntegration) MarshalJSON() ([]byte, error) {
+ type noMethod OpsWorksIntegration
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *OpsWorksIntegration) SetLayerId(v *string) *OpsWorksIntegration {
+ if o.LayerID = v; o.LayerID == nil {
+ o.nullFields = append(o.nullFields, "LayerID")
+ }
+ return o
+}
+
+func (o *OpsWorksIntegration) SetStackType(v *string) *OpsWorksIntegration {
+ if o.StackType = v; o.StackType == nil {
+ o.nullFields = append(o.nullFields, "StackType")
+ }
+ return o
+}
+
+// endregion
+
+// region Scale Request
+
+type ScaleUpSpotItem struct {
+ SpotInstanceRequestID *string `json:"spotInstanceRequestId,omitempty"`
+ AvailabilityZone *string `json:"availabilityZone,omitempty"`
+ InstanceType *string `json:"instanceType,omitempty"`
+}
+
+type ScaleUpOnDemandItem struct {
+ InstanceID *string `json:"instanceId,omitempty"`
+ AvailabilityZone *string `json:"availabilityZone,omitempty"`
+ InstanceType *string `json:"instanceType,omitempty"`
+}
+
+type ScaleDownSpotItem struct {
+ SpotInstanceRequestID *string `json:"spotInstanceRequestId,omitempty"`
+}
+
+type ScaleDownOnDemandItem struct {
+ InstanceID *string `json:"instanceId,omitempty"`
+}
+
+type ScaleItem struct {
+ NewSpotRequests []*ScaleUpSpotItem `json:"newSpotRequests,omitempty"`
+ NewInstances []*ScaleUpOnDemandItem `json:"newInstances,omitempty"`
+ VictimSpotRequests []*ScaleDownSpotItem `json:"victimSpotRequests,omitempty"`
+ VictimInstances []*ScaleDownOnDemandItem `json:"victimInstances,omitempty"`
+}
+
+type ScaleGroupInput struct {
+ GroupID *string `json:"groupId,omitempty"`
+ ScaleType *string `json:"type,omitempty"`
+ Adjustment *int `json:"adjustment,omitempty"`
+}
+
+type ScaleGroupOutput struct {
+ Items []*ScaleItem `json:"items"`
+}
+
+func scaleUpResponseFromJSON(in []byte) (*ScaleGroupOutput, error) {
+ var rw client.Response
+ if err := json.Unmarshal(in, &rw); err != nil {
+ return nil, err
+ }
+
+ var retVal ScaleGroupOutput
+ retVal.Items = make([]*ScaleItem, len(rw.Response.Items))
+ for i, rb := range rw.Response.Items {
+ b, err := scaleUpItemFromJSON(rb)
+ if err != nil {
+ return nil, err
+ }
+ retVal.Items[i] = b
+ }
+
+ return &retVal, nil
+}
+
+func scaleUpItemFromJSON(in []byte) (*ScaleItem, error) {
+ var rw *ScaleItem
+ if err := json.Unmarshal(in, &rw); err != nil {
+ return nil, err
+ }
+ return rw, nil
+}
+
+func scaleFromHttpResponse(resp *http.Response) (*ScaleGroupOutput, error) {
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, err
+ }
+ return scaleUpResponseFromJSON(body)
+}
+
+func (s *ServiceOp) Scale(ctx context.Context, input *ScaleGroupInput) (*ScaleGroupOutput, error) {
+ path, err := uritemplates.Expand("/aws/ec2/group/{groupId}/scale/{type}", uritemplates.Values{
+ "groupId": spotinst.StringValue(input.GroupID),
+ "type": spotinst.StringValue(input.ScaleType),
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // We do not need the ID anymore so let's drop it.
+ input.GroupID = nil
+
+ r := client.NewRequest(http.MethodPut, path)
+
+ if input.Adjustment != nil {
+ r.Params.Set("adjustment", strconv.Itoa(*input.Adjustment))
+ }
+ r.Obj = input
+
+ resp, err := client.RequireOK(s.Client.Do(ctx, r))
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ output, err := scaleFromHttpResponse(resp)
+ if err != nil {
+ return nil, err
+ }
+
+ return output, err
+}
+
+// endregion
+
+// region SuspendProcesses
+
+type SuspendProcesses struct {
+ Suspensions []*Suspension `json:"suspensions,omitempty"`
+ Processes []string `json:"processes,omitempty"`
+}
+
+type Suspension struct {
+ Name *string `json:"name,omitempty"`
+ TTLInMinutes *int `json:"ttlInMinutes,omitempty"`
+
+ // Read-only fields.
+ ExpiresAt *time.Time `json:"expiresAt,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type CreateSuspensionsInput struct {
+ GroupID *string `json:"groupId,omitempty"`
+ Suspensions []*Suspension `json:"suspensions,omitempty"`
+}
+
+type CreateSuspensionsOutput struct {
+ SuspendProcesses *SuspendProcesses `json:"suspendProcesses,omitempty"`
+}
+
+type ListSuspensionsInput struct {
+ GroupID *string `json:"groupId,omitempty"`
+}
+
+type ListSuspensionsOutput struct {
+ SuspendProcesses *SuspendProcesses `json:"suspendProcesses,omitempty"`
+}
+
+type DeleteSuspensionsInput struct {
+ GroupID *string `json:"groupId,omitempty"`
+ Processes []string `json:"processes,omitempty"`
+}
+
+type DeleteSuspensionsOutput struct{}
+
+func suspendProcessesFromHttpResponse(resp *http.Response) ([]*SuspendProcesses, error) {
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, err
+ }
+ return suspendProcessesFromJSON(body)
+}
+
+func suspendProcessesObjFromJSON(in []byte) (*SuspendProcesses, error) {
+ v := new(SuspendProcesses)
+ if err := json.Unmarshal(in, v); err != nil {
+ return nil, err
+ }
+ return v, nil
+}
+
+func suspendProcessesFromJSON(in []byte) ([]*SuspendProcesses, error) {
+ var rw client.Response
+ if err := json.Unmarshal(in, &rw); err != nil {
+ return nil, err
+ }
+ out := make([]*SuspendProcesses, len(rw.Response.Items))
+ if len(out) == 0 {
+ return out, nil
+ }
+ for i, rb := range rw.Response.Items {
+ v, err := suspendProcessesObjFromJSON(rb)
+ if err != nil {
+ return nil, err
+ }
+ out[i] = v
+ }
+ return out, nil
+}
+
+func (s *ServiceOp) CreateSuspensions(ctx context.Context, input *CreateSuspensionsInput) (*CreateSuspensionsOutput, error) {
+ path, err := uritemplates.Expand("/aws/ec2/group/{groupId}/suspension", uritemplates.Values{
+ "groupId": spotinst.StringValue(input.GroupID),
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // We do not need the ID anymore so let's drop it.
+ input.GroupID = nil
+
+ r := client.NewRequest(http.MethodPost, path)
+ r.Obj = input
+
+ resp, err := client.RequireOK(s.Client.Do(ctx, r))
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ suspendProcesses, err := suspendProcessesFromHttpResponse(resp)
+ if err != nil {
+ return nil, err
+ }
+
+ output := new(CreateSuspensionsOutput)
+ if len(suspendProcesses) > 0 {
+ output.SuspendProcesses = suspendProcesses[0]
+ }
+
+ return output, nil
+}
+
+func (s *ServiceOp) ListSuspensions(ctx context.Context, input *ListSuspensionsInput) (*ListSuspensionsOutput, error) {
+ path, err := uritemplates.Expand("/aws/ec2/group/{groupId}/suspension", uritemplates.Values{
+ "groupId": spotinst.StringValue(input.GroupID),
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ r := client.NewRequest(http.MethodGet, path)
+ resp, err := client.RequireOK(s.Client.Do(ctx, r))
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ suspendProcesses, err := suspendProcessesFromHttpResponse(resp)
+ if err != nil {
+ return nil, err
+ }
+
+ output := new(ListSuspensionsOutput)
+ if len(suspendProcesses) > 0 {
+ output.SuspendProcesses = suspendProcesses[0]
+ }
+
+ return output, nil
+}
+
+func (s *ServiceOp) DeleteSuspensions(ctx context.Context, input *DeleteSuspensionsInput) (*DeleteSuspensionsOutput, error) {
+ path, err := uritemplates.Expand("/aws/ec2/group/{groupId}/suspension", uritemplates.Values{
+ "groupId": spotinst.StringValue(input.GroupID),
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // We do not need the ID anymore so let's drop it.
+ input.GroupID = nil
+
+ r := client.NewRequest(http.MethodDelete, path)
+ r.Obj = input
+
+ resp, err := client.RequireOK(s.Client.Do(ctx, r))
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ return &DeleteSuspensionsOutput{}, nil
+}
+
+func (o Suspension) MarshalJSON() ([]byte, error) {
+ type noMethod Suspension
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *Suspension) SetName(v *string) *Suspension {
+ if o.Name = v; o.Name == nil {
+ o.nullFields = append(o.nullFields, "Name")
+ }
+ return o
+}
+
+func (o *Suspension) SetTTLInMinutes(v *int) *Suspension {
+ if o.TTLInMinutes = v; o.TTLInMinutes == nil {
+ o.nullFields = append(o.nullFields, "TTLInMinutes")
+ }
+ return o
+}
+
+// endregion
+
+// region MetadataOptions
+
+func (o MetadataOptions) MarshalJSON() ([]byte, error) {
+ type noMethod MetadataOptions
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *MetadataOptions) SetHTTPTokens(v *string) *MetadataOptions {
+ if o.HTTPTokens = v; o.HTTPTokens == nil {
+ o.nullFields = append(o.nullFields, "HTTPTokens")
+ }
+ return o
+}
+
+func (o *MetadataOptions) SetHTTPPutResponseHopLimit(v *int) *MetadataOptions {
+ if o.HTTPPutResponseHopLimit = v; o.HTTPPutResponseHopLimit == nil {
+ o.nullFields = append(o.nullFields, "HTTPPutResponseHopLimit")
+ }
+ return o
+}
+
+func (o *MetadataOptions) SetInstanceMetadataTags(v *string) *MetadataOptions {
+ if o.InstanceMetadataTags = v; o.InstanceMetadataTags == nil {
+ o.nullFields = append(o.nullFields, "InstanceMetadataTags")
+ }
+ return o
+}
+
+// endregion
+
+// region CPUOptions
+
+func (o CPUOptions) MarshalJSON() ([]byte, error) {
+ type noMethod CPUOptions
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+func (o *CPUOptions) SetThreadsPerCore(v *int) *CPUOptions {
+ if o.ThreadsPerCore = v; o.ThreadsPerCore == nil {
+ o.nullFields = append(o.nullFields, "ThreadsPerCore")
+ }
+ return o
+}
+
+// endregion
+
+// region StatefulInstance
+
+func (o StatefulInstance) MarshalJSON() ([]byte, error) {
+ type noMethod StatefulInstance
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *StatefulInstance) SetStatefulInstanceID(v *string) *StatefulInstance {
+ if o.StatefulInstanceID = v; o.StatefulInstanceID == nil {
+ o.nullFields = append(o.nullFields, "StatefulInstanceID")
+ }
+ return o
+}
+
+func (o *StatefulInstance) SetInstanceID(v *string) *StatefulInstance {
+ if o.InstanceID = v; o.InstanceID == nil {
+ o.nullFields = append(o.nullFields, "InstanceID")
+ }
+ return o
+}
+
+func (o *StatefulInstance) SetState(v *string) *StatefulInstance {
+ if o.State = v; o.State == nil {
+ o.nullFields = append(o.nullFields, "State")
+ }
+ return o
+}
+
+func (o *StatefulInstance) SetPrivateIP(v *string) *StatefulInstance {
+ if o.PrivateIP = v; o.PrivateIP == nil {
+ o.nullFields = append(o.nullFields, "PrivateIP")
+ }
+ return o
+}
+
+func (o *StatefulInstance) SetImageID(v *string) *StatefulInstance {
+ if o.ImageID = v; o.ImageID == nil {
+ o.nullFields = append(o.nullFields, "ImageID")
+ }
+ return o
+}
+
+func (o *StatefulInstance) SetDevices(v []*Device) *StatefulInstance {
+ if o.Devices = v; o.Devices == nil {
+ o.nullFields = append(o.nullFields, "Devices")
+ }
+ return o
+}
+
+// endregion
+
+// region Device
+
+func (o Device) MarshalJSON() ([]byte, error) {
+ type noMethod Device
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *Device) SetDeviceName(v *string) *Device {
+ if o.DeviceName = v; o.DeviceName == nil {
+ o.nullFields = append(o.nullFields, "DeviceName")
+ }
+ return o
+}
+
+func (o *Device) SetVolumeID(v *string) *Device {
+ if o.VolumeID = v; o.VolumeID == nil {
+ o.nullFields = append(o.nullFields, "VolumeID")
+ }
+ return o
+}
+
+func (o *Device) SetSnapshotID(v *string) *Device {
+ if o.SnapshotID = v; o.SnapshotID == nil {
+ o.nullFields = append(o.nullFields, "SnapshotID")
+ }
+ return o
+}
+
+// endregion
+
+// region ResourceTagSpecification
+
+func (o ResourceTagSpecification) MarshalJSON() ([]byte, error) {
+ type noMethod ResourceTagSpecification
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *ResourceTagSpecification) SetVolumes(v *Volumes) *ResourceTagSpecification {
+ if o.Volumes = v; o.Volumes == nil {
+ o.nullFields = append(o.nullFields, "Volumes")
+ }
+ return o
+}
+
+func (o *ResourceTagSpecification) SetSnapshots(v *Snapshots) *ResourceTagSpecification {
+ if o.Snapshots = v; o.Snapshots == nil {
+ o.nullFields = append(o.nullFields, "Snapshots")
+ }
+ return o
+}
+
+func (o *ResourceTagSpecification) SetENIs(v *ENIs) *ResourceTagSpecification {
+ if o.ENIs = v; o.ENIs == nil {
+ o.nullFields = append(o.nullFields, "ENIs")
+ }
+ return o
+}
+
+func (o *ResourceTagSpecification) SetAMIs(v *AMIs) *ResourceTagSpecification {
+ if o.AMIs = v; o.AMIs == nil {
+ o.nullFields = append(o.nullFields, "AMIs")
+ }
+ return o
+}
+
+// endregion
+
+// region Volumes
+
+func (o Volumes) MarshalJSON() ([]byte, error) {
+ type noMethod Volumes
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *Volumes) SetShouldTag(v *bool) *Volumes {
+ if o.ShouldTag = v; o.ShouldTag == nil {
+ o.nullFields = append(o.nullFields, "ShouldTag")
+ }
+ return o
+}
+
+// endregion
+
+// region Snapshots
+
+func (o Snapshots) MarshalJSON() ([]byte, error) {
+ type noMethod Snapshots
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *Snapshots) SetShouldTag(v *bool) *Snapshots {
+ if o.ShouldTag = v; o.ShouldTag == nil {
+ o.nullFields = append(o.nullFields, "ShouldTag")
+ }
+ return o
+}
+
+// endregion
+
+// region ENIs
+
+func (o ENIs) MarshalJSON() ([]byte, error) {
+ type noMethod ENIs
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *ENIs) SetShouldTag(v *bool) *ENIs {
+ if o.ShouldTag = v; o.ShouldTag == nil {
+ o.nullFields = append(o.nullFields, "ShouldTag")
+ }
+ return o
+}
+
+// endregion
+
+// region AMIs
+
+func (o AMIs) MarshalJSON() ([]byte, error) {
+ type noMethod AMIs
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *AMIs) SetShouldTag(v *bool) *AMIs {
+ if o.ShouldTag = v; o.ShouldTag == nil {
+ o.nullFields = append(o.nullFields, "ShouldTag")
+ }
+ return o
+}
+
+// endregion
diff --git a/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/aws/service.go b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/aws/service.go
new file mode 100644
index 000000000000..944f3e6805b2
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/aws/service.go
@@ -0,0 +1,64 @@
+package aws
+
+import (
+ "context"
+
+ "github.com/spotinst/spotinst-sdk-go/spotinst"
+ "github.com/spotinst/spotinst-sdk-go/spotinst/client"
+ "github.com/spotinst/spotinst-sdk-go/spotinst/session"
+)
+
+// Service provides the API operation methods for making requests to endpoints
+// of the Spotinst API. See this package's package overview docs for details on
+// the service.
+type Service interface { // Service enumerates the Elastigroup AWS API operations exposed by this package.
+	List(context.Context, *ListGroupsInput) (*ListGroupsOutput, error)
+	Create(context.Context, *CreateGroupInput) (*CreateGroupOutput, error)
+	Read(context.Context, *ReadGroupInput) (*ReadGroupOutput, error)
+	Update(context.Context, *UpdateGroupInput) (*UpdateGroupOutput, error)
+	Delete(context.Context, *DeleteGroupInput) (*DeleteGroupOutput, error)
+	Status(context.Context, *StatusGroupInput) (*StatusGroupOutput, error)
+	Scale(context.Context, *ScaleGroupInput) (*ScaleGroupOutput, error)
+	Detach(context.Context, *DetachGroupInput) (*DetachGroupOutput, error)
+
+	DeploymentStatus(context.Context, *DeploymentStatusInput) (*RollGroupOutput, error) // deployment tracking
+	DeploymentStatusECS(context.Context, *DeploymentStatusInput) (*RollGroupOutput, error)
+	StopDeployment(context.Context, *StopDeploymentInput) (*StopDeploymentOutput, error)
+
+	Roll(context.Context, *RollGroupInput) (*RollGroupOutput, error) // rolling updates
+	RollECS(context.Context, *RollECSGroupInput) (*RollGroupOutput, error)
+
+	GetInstanceHealthiness(context.Context, *GetInstanceHealthinessInput) (*GetInstanceHealthinessOutput, error)
+	GetGroupEvents(context.Context, *GetGroupEventsInput) (*GetGroupEventsOutput, error)
+
+	ImportBeanstalkEnv(context.Context, *ImportBeanstalkInput) (*ImportBeanstalkOutput, error) // Elastic Beanstalk helpers
+	StartBeanstalkMaintenance(context.Context, *BeanstalkMaintenanceInput) (*BeanstalkMaintenanceOutput, error)
+	FinishBeanstalkMaintenance(context.Context, *BeanstalkMaintenanceInput) (*BeanstalkMaintenanceOutput, error)
+	GetBeanstalkMaintenanceStatus(context.Context, *BeanstalkMaintenanceInput) (*string, error)
+
+	CreateSuspensions(context.Context, *CreateSuspensionsInput) (*CreateSuspensionsOutput, error) // process suspension management
+	ListSuspensions(context.Context, *ListSuspensionsInput) (*ListSuspensionsOutput, error)
+	DeleteSuspensions(context.Context, *DeleteSuspensionsInput) (*DeleteSuspensionsOutput, error)
+
+	ListStatefulInstances(context.Context, *ListStatefulInstancesInput) (*ListStatefulInstancesOutput, error) // stateful instance lifecycle
+	PauseStatefulInstance(context.Context, *PauseStatefulInstanceInput) (*PauseStatefulInstanceOutput, error)
+	ResumeStatefulInstance(context.Context, *ResumeStatefulInstanceInput) (*ResumeStatefulInstanceOutput, error)
+	RecycleStatefulInstance(context.Context, *RecycleStatefulInstanceInput) (*RecycleStatefulInstanceOutput, error)
+	DeallocateStatefulInstance(context.Context, *DeallocateStatefulInstanceInput) (*DeallocateStatefulInstanceOutput, error)
+}
+
+type ServiceOp struct { // ServiceOp is the concrete, HTTP-backed implementation of Service.
+	Client *client.Client // shared Spotinst API client used for every request
+}
+
+var _ Service = &ServiceOp{} // compile-time interface-satisfaction check
+
+func New(sess *session.Session, cfgs ...*spotinst.Config) *ServiceOp { // New builds a ServiceOp whose client uses the session config merged with any per-call overrides.
+	cfg := &spotinst.Config{}
+	cfg.Merge(sess.Config)
+	cfg.Merge(cfgs...)
+
+	return &ServiceOp{
+		Client: client.New(cfg), // bug fix: pass the merged cfg; client.New(sess.Config) silently discarded the cfgs... overrides
+	}
+}
diff --git a/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/aws/tag.go b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/aws/tag.go
new file mode 100644
index 000000000000..c552efcb993f
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/aws/tag.go
@@ -0,0 +1,31 @@
+package aws
+
+import "github.com/spotinst/spotinst-sdk-go/spotinst/util/jsonutil"
+
+type Tag struct { // Tag is a single key/value tag attached to an Elastigroup resource.
+	Key *string `json:"tagKey,omitempty"`
+	Value *string `json:"tagValue,omitempty"`
+
+	forceSendFields []string // field names serialized even when empty
+	nullFields []string // field names serialized as explicit JSON null
+}
+
+func (o Tag) MarshalJSON() ([]byte, error) { // MarshalJSON honors forceSendFields/nullFields via jsonutil.
+	type noMethod Tag // alias drops this method to avoid infinite marshal recursion
+	raw := noMethod(o)
+	return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *Tag) SetKey(v *string) *Tag { // SetKey sets Key; a nil v marks the field to be sent as JSON null.
+	if o.Key = v; o.Key == nil {
+		o.nullFields = append(o.nullFields, "Key")
+	}
+	return o
+}
+
+func (o *Tag) SetValue(v *string) *Tag { // SetValue sets Value; a nil v marks the field to be sent as JSON null.
+	if o.Value = v; o.Value == nil {
+		o.nullFields = append(o.nullFields, "Value")
+	}
+	return o
+}
diff --git a/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/azure/azure.go b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/azure/azure.go
new file mode 100644
index 000000000000..5b65689ef4d2
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/azure/azure.go
@@ -0,0 +1,2494 @@
+package azure
+
+import (
+ "context"
+ "encoding/json"
+ "io/ioutil"
+ "net/http"
+ "strconv"
+ "time"
+
+ "github.com/spotinst/spotinst-sdk-go/spotinst"
+ "github.com/spotinst/spotinst-sdk-go/spotinst/client"
+ "github.com/spotinst/spotinst-sdk-go/spotinst/util/jsonutil"
+ "github.com/spotinst/spotinst-sdk-go/spotinst/util/uritemplates"
+)
+
+type Group struct {
+ ID *string `json:"id,omitempty"`
+ Name *string `json:"name,omitempty"`
+ ResourceGroupName *string `json:"resourceGroupName,omitempty"`
+ Description *string `json:"description,omitempty"`
+ Region *string `json:"region,omitempty"`
+ Capacity *Capacity `json:"capacity,omitempty"`
+ Compute *Compute `json:"compute,omitempty"`
+ Strategy *Strategy `json:"strategy,omitempty"`
+ Scaling *Scaling `json:"scaling,omitempty"`
+ Scheduling *Scheduling `json:"scheduling,omitempty"`
+ Integration *Integration `json:"thirdPartiesIntegration,omitempty"`
+
+ // Read-only fields.
+ CreatedAt *time.Time `json:"createdAt,omitempty"`
+ UpdatedAt *time.Time `json:"updatedAt,omitempty"`
+
+ // forceSendFields is a list of field names (e.g. "Keys") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ forceSendFields []string
+
+ // nullFields is a list of field names (e.g. "Keys") to include in API
+ // requests with the JSON null value. By default, fields with empty
+ // values are omitted from API requests. However, any field with an
+ // empty value appearing in NullFields will be sent to the server as
+ // null. It is an error if a field in this list has a non-empty value.
+ // This may be used to include null fields in Patch requests.
+ nullFields []string
+}
+
+type Scheduling struct {
+ Tasks []*ScheduledTask `json:"tasks,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type Integration struct {
+ Rancher *RancherIntegration `json:"rancher,omitempty"`
+ Kubernetes *KubernetesIntegration `json:"kubernetes,omitempty"`
+ Multai *MultaiIntegration `json:"mlbRuntime,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type KubernetesIntegration struct {
+ ClusterIdentifier *string `json:"clusterIdentifier,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type MultaiIntegration struct {
+ DeploymentID *string `json:"deploymentId,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type RancherIntegration struct {
+ MasterHost *string `json:"masterHost,omitempty"`
+ AccessKey *string `json:"accessKey,omitempty"`
+ SecretKey *string `json:"secretKey,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type ScheduledTask struct {
+ IsEnabled *bool `json:"isEnabled,omitempty"`
+ Frequency *string `json:"frequency,omitempty"`
+ CronExpression *string `json:"cronExpression,omitempty"`
+ TaskType *string `json:"taskType,omitempty"`
+ ScaleTargetCapacity *int `json:"scaleTargetCapacity,omitempty"`
+ ScaleMinCapacity *int `json:"scaleMinCapacity,omitempty"`
+ ScaleMaxCapacity *int `json:"scaleMaxCapacity,omitempty"`
+ BatchSizePercentage *int `json:"batchSizePercentage,omitempty"`
+ GracePeriod *int `json:"gracePeriod,omitempty"`
+ Adjustment *int `json:"adjustment,omitempty"`
+ AdjustmentPercentage *int `json:"adjustmentPercentage,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type Scaling struct {
+ Up []*ScalingPolicy `json:"up,omitempty"`
+ Down []*ScalingPolicy `json:"down,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type ScalingPolicy struct {
+ PolicyName *string `json:"policyName,omitempty"`
+ MetricName *string `json:"metricName,omitempty"`
+ Statistic *string `json:"statistic,omitempty"`
+ Unit *string `json:"unit,omitempty"`
+ Threshold *float64 `json:"threshold,omitempty"`
+ Adjustment *int `json:"adjustment,omitempty"`
+ MinTargetCapacity *int `json:"minTargetCapacity,omitempty"`
+ MaxTargetCapacity *int `json:"maxTargetCapacity,omitempty"`
+ Namespace *string `json:"namespace,omitempty"`
+ EvaluationPeriods *int `json:"evaluationPeriods,omitempty"`
+ Period *int `json:"period,omitempty"`
+ Cooldown *int `json:"cooldown,omitempty"`
+ Operator *string `json:"operator,omitempty"`
+ Dimensions []*Dimension `json:"dimensions,omitempty"`
+ Action *Action `json:"action,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type Action struct {
+ Type *string `json:"type,omitempty"`
+ Adjustment *string `json:"adjustment,omitempty"`
+ MinTargetCapacity *string `json:"minTargetCapacity,omitempty"`
+ MaxTargetCapacity *string `json:"maxTargetCapacity,omitempty"`
+ Maximum *string `json:"maximum,omitempty"`
+ Minimum *string `json:"minimum,omitempty"`
+ Target *string `json:"target,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type Dimension struct {
+ Name *string `json:"name,omitempty"`
+ Value *string `json:"value,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type Strategy struct {
+ LowPriorityPercentage *int `json:"lowPriorityPercentage,omitempty"`
+ OnDemandCount *int `json:"onDemandCount,omitempty"`
+ DrainingTimeout *int `json:"drainingTimeout,omitempty"`
+ Signals []*Signal `json:"signals,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type Signal struct {
+ Name *string `json:"name,omitempty"`
+ Timeout *int `json:"timeout,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type Capacity struct {
+ Minimum *int `json:"minimum,omitempty"`
+ Maximum *int `json:"maximum,omitempty"`
+ Target *int `json:"target,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type Compute struct {
+ Region *string `json:"region,omitempty"`
+ Product *string `json:"product,omitempty"`
+ ResourceGroupName *string `json:"resourceGroupName,omitempty"`
+ VMSizes *VMSizes `json:"vmSizes,omitempty"`
+ LaunchSpecification *LaunchSpecification `json:"launchSpecification,omitempty"`
+ Health *Health `json:"health,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type VMSizes struct {
+ OnDemand []string `json:"odSizes,omitempty"`
+ LowPriority []string `json:"lowPrioritySizes,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type LaunchSpecification struct {
+ LoadBalancersConfig *LoadBalancersConfig `json:"loadBalancersConfig,omitempty"`
+ Image *Image `json:"image,omitempty"`
+ UserData *string `json:"userData,omitempty"`
+ ShutdownScript *string `json:"shutdownScript,omitempty"`
+ Storage *Storage `json:"storage,omitempty"`
+ Network *Network `json:"network,omitempty"`
+ Login *Login `json:"login,omitempty"`
+ CustomData *string `json:"customData,omitempty"`
+ ManagedServiceIdentities []*ManagedServiceIdentity `json:"managedServiceIdentities,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type LoadBalancersConfig struct {
+ LoadBalancers []*LoadBalancer `json:"loadBalancers,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type LoadBalancer struct {
+ Type *string `json:"type,omitempty"`
+ BalancerID *string `json:"balancerId,omitempty"`
+ TargetSetID *string `json:"targetSetId,omitempty"`
+ AutoWeight *bool `json:"autoWeight,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type ManagedServiceIdentity struct {
+ ResourceGroupName *string `json:"resourceGroupName,omitempty"`
+ Name *string `json:"name,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type Image struct {
+ MarketPlace *MarketPlaceImage `json:"marketplace,omitempty"`
+ Custom *CustomImage `json:"custom,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type MarketPlaceImage struct {
+ Publisher *string `json:"publisher,omitempty"`
+ Offer *string `json:"offer,omitempty"`
+ SKU *string `json:"sku,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type CustomImage struct {
+ ResourceGroupName *string `json:"resourceGroupName,omitempty"`
+ ImageName *string `json:"imageName,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type ResourceFile struct {
+ URL *string `json:"resourceFileUrl,omitempty"`
+ TargetPath *string `json:"resourceFileTargetPath,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type Storage struct {
+ AccountName *string `json:"storageAccountName,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type Network struct {
+ VirtualNetworkName *string `json:"virtualNetworkName,omitempty"`
+ SubnetName *string `json:"subnetName,omitempty"`
+ ResourceGroupName *string `json:"resourceGroupName,omitempty"`
+ AssignPublicIP *bool `json:"assignPublicIp,omitempty"`
+ AdditionalIPConfigs []*AdditionalIPConfigs `json:"additionalIpConfigurations,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type AdditionalIPConfigs struct {
+ Name *string `json:"name,omitempty"`
+ PrivateIPAddressVersion *string `json:"privateIpAddressVersion,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type Login struct {
+ UserName *string `json:"userName,omitempty"`
+ SSHPublicKey *string `json:"sshPublicKey,omitempty"`
+ Password *string `json:"password,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type Health struct {
+ HealthCheckType *string `json:"healthCheckType,omitempty"`
+ AutoHealing *bool `json:"autoHealing,omitempty"`
+ GracePeriod *int `json:"gracePeriod,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type Node struct {
+ ID *string `json:"id,omitempty"`
+ VMSize *string `json:"vmSize,omitempty"`
+ State *string `json:"state,omitempty"`
+ LifeCycle *string `json:"lifeCycle,omitempty"`
+ Region *string `json:"region,omitempty"`
+ IPAddress *string `json:"ipAddress,omitempty"`
+ CreatedAt *time.Time `json:"createdAt,omitempty"`
+}
+
+type RollStrategy struct {
+ Action *string `json:"action,omitempty"`
+ ShouldDrainInstances *bool `json:"shouldDrainInstances,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type ListGroupsInput struct{}
+
+type ListGroupsOutput struct {
+ Groups []*Group `json:"groups,omitempty"`
+}
+
+type CreateGroupInput struct {
+ Group *Group `json:"group,omitempty"`
+}
+
+type CreateGroupOutput struct {
+ Group *Group `json:"group,omitempty"`
+}
+
+type ReadGroupInput struct {
+ GroupID *string `json:"groupId,omitempty"`
+}
+
+type ReadGroupOutput struct {
+ Group *Group `json:"group,omitempty"`
+}
+
+type UpdateGroupInput struct {
+ Group *Group `json:"group,omitempty"`
+}
+
+type UpdateGroupOutput struct {
+ Group *Group `json:"group,omitempty"`
+}
+
+type DeleteGroupInput struct {
+ GroupID *string `json:"groupId,omitempty"`
+}
+
+type DeleteGroupOutput struct{}
+
+type StatusGroupInput struct {
+ GroupID *string `json:"groupId,omitempty"`
+}
+
+type StatusGroupOutput struct {
+ Nodes []*Node `json:"nodes,omitempty"`
+}
+
+type ScaleGroupInput struct {
+ GroupID *string `json:"groupId,omitempty"`
+ ScaleType *string `json:"type,omitempty"`
+ Adjustment *int `json:"adjustment,omitempty"`
+}
+
+type ScaleGroupOutput struct{}
+
+type DetachGroupInput struct {
+ GroupID *string `json:"groupId,omitempty"`
+ InstanceIDs []string `json:"instancesToDetach,omitempty"`
+ ShouldDecrementTargetCapacity *bool `json:"shouldDecrementTargetCapacity,omitempty"`
+ ShouldTerminateInstances *bool `json:"shouldTerminateInstances,omitempty"`
+ DrainingTimeout *int `json:"drainingTimeout,omitempty"`
+}
+
+type DetachGroupOutput struct{}
+
+type RollGroupInput struct {
+ GroupID *string `json:"groupId,omitempty"`
+ BatchSizePercentage *int `json:"batchSizePercentage,omitempty"`
+ GracePeriod *int `json:"gracePeriod,omitempty"`
+ HealthCheckType *string `json:"healthCheckType,omitempty"`
+ Strategy *RollStrategy `json:"strategy,omitempty"`
+}
+
+type RollGroupOutput struct {
+ Items []*RollItem `json:"items"`
+}
+
+type Roll struct {
+ Status *string `json:"status,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type RollItem struct {
+ GroupID *string `json:"groupId,omitempty"`
+ RollID *string `json:"id,omitempty"`
+ Status *string `json:"status,omitempty"`
+ CurrentBatch *int `json:"currentBatch,omitempty"`
+ NumBatches *int `json:"numOfBatches,omitempty"`
+ Progress *RollProgress `json:"progress,omitempty"`
+}
+
+type RollStatus struct {
+ GroupID *string `json:"groupId,omitempty"`
+ RollID *string `json:"id,omitempty"`
+ Status *string `json:"status,omitempty"`
+ Progress *RollProgress `json:"progress,omitempty"`
+ CreatedAt *string `json:"createdAt,omitempty"`
+ UpdatedAt *string `json:"updatedAt,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type RollProgress struct {
+ Unit *string `json:"unit,omitempty"`
+ Value *int `json:"value,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type StopRollInput struct {
+ GroupID *string `json:"groupId,omitempty"`
+ RollID *string `json:"rollId,omitempty"`
+ Roll *Roll `json:"roll,omitempty"`
+}
+
+type StopRollOutput struct{}
+
+type RollStatusInput struct {
+ GroupID *string `json:"groupId,omitempty"`
+ RollID *string `json:"rollId,omitempty"`
+}
+
+type RollStatusOutput struct {
+ RollStatus *RollStatus `json:"rollStatus,omitempty"`
+}
+
+type ListRollStatusInput struct {
+ GroupID *string `json:"groupId,omitempty"`
+}
+
+type ListRollStatusOutput struct {
+ Items []*RollStatus `json:"items"`
+}
+
+type NodeSignal struct {
+ NodeID *string `json:"nodeId,omitempty"`
+ PoolID *string `json:"poolId,omitempty"`
+ Signal *string `json:"signal,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type NodeSignalInput struct {
+ NodeID *string `json:"nodeId,omitempty"`
+ PoolID *string `json:"poolId,omitempty"`
+ Signal *string `json:"signal,omitempty"`
+}
+
+type NodeSignalOutput struct{}
+
+type Task struct {
+ ID *string `json:"id,omitempty"`
+ Name *string `json:"name,omitempty"`
+ Description *string `json:"description,omitempty"`
+ Policies []*TaskPolicy `json:"policies,omitempty"`
+ Instances []*TaskInstance `json:"instances,omitempty"`
+ State *string `json:"state,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type TaskPolicy struct {
+ Cron *string `json:"cron,omitempty"`
+ Action *string `json:"action,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type TaskInstance struct {
+ VMName *string `json:"vmName,omitempty"`
+ ResourceGroupName *string `json:"resourceGroupName,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type ListTasksInput struct{}
+
+type ListTasksOutput struct {
+ Tasks []*Task `json:"tasks,omitempty"`
+}
+
+type CreateTaskInput struct {
+ Task *Task `json:"task,omitempty"`
+}
+
+type CreateTaskOutput struct {
+ Task *Task `json:"task,omitempty"`
+}
+
+type ReadTaskInput struct {
+ TaskID *string `json:"taskId,omitempty"`
+}
+
+type ReadTaskOutput struct {
+ Task *Task `json:"task,omitempty"`
+}
+
+type UpdateTaskInput struct {
+ Task *Task `json:"task,omitempty"`
+}
+
+type UpdateTaskOutput struct {
+ Task *Task `json:"task,omitempty"`
+}
+
+type DeleteTaskInput struct {
+ TaskID *string `json:"id,omitempty"`
+}
+
+type DeleteTaskOutput struct{}
+
+// region Unmarshallers
+
+func groupFromJSON(in []byte) (*Group, error) { // groupFromJSON decodes a single group JSON object.
+	b := new(Group)
+	if err := json.Unmarshal(in, b); err != nil {
+		return nil, err
+	}
+	return b, nil
+}
+
+func groupsFromJSON(in []byte) ([]*Group, error) { // groupsFromJSON decodes the API envelope's response.items into groups.
+	var rw client.Response
+	if err := json.Unmarshal(in, &rw); err != nil {
+		return nil, err
+	}
+	out := make([]*Group, len(rw.Response.Items))
+	if len(out) == 0 { // no items: return the empty (non-nil) slice
+		return out, nil
+	}
+	for i, rb := range rw.Response.Items {
+		b, err := groupFromJSON(rb)
+		if err != nil {
+			return nil, err
+		}
+		out[i] = b
+	}
+	return out, nil
+}
+
+func groupsFromHttpResponse(resp *http.Response) ([]*Group, error) { // groupsFromHttpResponse reads the whole body and decodes it; the caller closes resp.Body.
+	body, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return nil, err
+	}
+	return groupsFromJSON(body)
+}
+
+func nodeFromJSON(in []byte) (*Node, error) {
+ b := new(Node)
+ if err := json.Unmarshal(in, b); err != nil {
+ return nil, err
+ }
+ return b, nil
+}
+
+func nodesFromJSON(in []byte) ([]*Node, error) {
+ var rw client.Response
+ if err := json.Unmarshal(in, &rw); err != nil {
+ return nil, err
+ }
+ out := make([]*Node, len(rw.Response.Items))
+ if len(out) == 0 {
+ return out, nil
+ }
+ for i, rb := range rw.Response.Items {
+ b, err := nodeFromJSON(rb)
+ if err != nil {
+ return nil, err
+ }
+ out[i] = b
+ }
+ return out, nil
+}
+
+func nodesFromHttpResponse(resp *http.Response) ([]*Node, error) {
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, err
+ }
+ return nodesFromJSON(body)
+}
+
+func tasksFromHttpResponse(resp *http.Response) ([]*Task, error) {
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, err
+ }
+ return tasksFromJSON(body)
+}
+
+func taskFromJSON(in []byte) (*Task, error) {
+ b := new(Task)
+ if err := json.Unmarshal(in, b); err != nil {
+ return nil, err
+ }
+ return b, nil
+}
+
+func tasksFromJSON(in []byte) ([]*Task, error) {
+ var rw client.Response
+ if err := json.Unmarshal(in, &rw); err != nil {
+ return nil, err
+ }
+ out := make([]*Task, len(rw.Response.Items))
+ if len(out) == 0 {
+ return out, nil
+ }
+ for i, rb := range rw.Response.Items {
+ b, err := taskFromJSON(rb)
+ if err != nil {
+ return nil, err
+ }
+ out[i] = b
+ }
+ return out, nil
+}
+
+func rollResponseFromJSON(in []byte) (*RollGroupOutput, error) {
+ var rw client.Response
+ if err := json.Unmarshal(in, &rw); err != nil {
+ return nil, err
+ }
+
+ var retVal RollGroupOutput
+ retVal.Items = make([]*RollItem, len(rw.Response.Items))
+ for i, rb := range rw.Response.Items {
+ b, err := rollItemFromJSON(rb)
+ if err != nil {
+ return nil, err
+ }
+ retVal.Items[i] = b
+ }
+
+ return &retVal, nil
+}
+
+func rollItemFromJSON(in []byte) (*RollItem, error) {
+ var rw *RollItem
+ if err := json.Unmarshal(in, &rw); err != nil {
+ return nil, err
+ }
+ return rw, nil
+}
+
+func rollStatusFromJSON(in []byte) (*RollStatus, error) {
+ b := new(RollStatus)
+ if err := json.Unmarshal(in, b); err != nil {
+ return nil, err
+ }
+ return b, nil
+}
+
+func rollStatusesFromJSON(in []byte) ([]*RollStatus, error) {
+ var rw client.Response
+ if err := json.Unmarshal(in, &rw); err != nil {
+ return nil, err
+ }
+ out := make([]*RollStatus, len(rw.Response.Items))
+ if len(out) == 0 {
+ return out, nil
+ }
+ for i, rb := range rw.Response.Items {
+ b, err := rollStatusFromJSON(rb)
+ if err != nil {
+ return nil, err
+ }
+ out[i] = b
+ }
+ return out, nil
+}
+
+func rollFromHttpResponse(resp *http.Response) (*RollGroupOutput, error) {
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, err
+ }
+ return rollResponseFromJSON(body)
+}
+
+func rollStatusesFromHttpResponse(resp *http.Response) ([]*RollStatus, error) {
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, err
+ }
+ return rollStatusesFromJSON(body)
+}
+
+func nodeSignalFromJSON(in []byte) (*NodeSignal, error) {
+ b := new(NodeSignal)
+ if err := json.Unmarshal(in, b); err != nil {
+ return nil, err
+ }
+ return b, nil
+}
+
+func nodeSignalFromHttpResponse(resp *http.Response) (*NodeSignal, error) {
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, err
+ }
+ return nodeSignalFromJSON(body)
+}
+
+// endregion
+
+// region API requests
+
+func (s *ServiceOp) List(ctx context.Context, input *ListGroupsInput) (*ListGroupsOutput, error) { // List returns all Azure Elastigroups in the account; input is currently unused.
+	r := client.NewRequest(http.MethodGet, "/compute/azure/group")
+	resp, err := client.RequireOK(s.Client.Do(ctx, r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	gs, err := groupsFromHttpResponse(resp)
+	if err != nil {
+		return nil, err
+	}
+
+	return &ListGroupsOutput{Groups: gs}, nil
+}
+
+func (s *ServiceOp) Create(ctx context.Context, input *CreateGroupInput) (*CreateGroupOutput, error) { // Create provisions a new Azure Elastigroup and returns the created group.
+	r := client.NewRequest(http.MethodPost, "/compute/azure/group") // bug fix: was "/azure/compute/group"; every sibling endpoint (List/Read/Update/Delete/Status) uses the /compute/azure/group prefix
+	r.Obj = input
+
+	resp, err := client.RequireOK(s.Client.Do(ctx, r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	gs, err := groupsFromHttpResponse(resp)
+	if err != nil {
+		return nil, err
+	}
+
+	output := new(CreateGroupOutput)
+	if len(gs) > 0 { // API wraps the created group in an items array; take the first
+		output.Group = gs[0]
+	}
+
+	return output, nil
+}
+
+func (s *ServiceOp) Read(ctx context.Context, input *ReadGroupInput) (*ReadGroupOutput, error) { // Read fetches a single group by input.GroupID.
+	path, err := uritemplates.Expand("/compute/azure/group/{groupId}", uritemplates.Values{
+		"groupId": spotinst.StringValue(input.GroupID),
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	r := client.NewRequest(http.MethodGet, path)
+	resp, err := client.RequireOK(s.Client.Do(ctx, r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	gs, err := groupsFromHttpResponse(resp)
+	if err != nil {
+		return nil, err
+	}
+
+	output := new(ReadGroupOutput)
+	if len(gs) > 0 { // response is an items array; the single match is first
+		output.Group = gs[0]
+	}
+
+	return output, nil
+}
+
+func (s *ServiceOp) Update(ctx context.Context, input *UpdateGroupInput) (*UpdateGroupOutput, error) { // Update modifies the group identified by input.Group.ID and returns the updated group.
+	path, err := uritemplates.Expand("/compute/azure/group/{groupId}", uritemplates.Values{
+		"groupId": spotinst.StringValue(input.Group.ID),
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	// We do NOT need the ID anymore, so let's drop it (it is already in the URL path).
+	input.Group.ID = nil
+
+	r := client.NewRequest(http.MethodPut, path)
+	r.Obj = input
+
+	resp, err := client.RequireOK(s.Client.Do(ctx, r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	gs, err := groupsFromHttpResponse(resp)
+	if err != nil {
+		return nil, err
+	}
+
+	output := new(UpdateGroupOutput)
+	if len(gs) > 0 { // updated group comes back as the first (only) item
+		output.Group = gs[0]
+	}
+
+	return output, nil
+}
+
+// Delete deletes the group identified by input.GroupID. The response body is
+// not parsed; a non-2xx status surfaces as an error from RequireOK.
+func (s *ServiceOp) Delete(ctx context.Context, input *DeleteGroupInput) (*DeleteGroupOutput, error) {
+	path, err := uritemplates.Expand("/compute/azure/group/{groupId}", uritemplates.Values{
+		"groupId": spotinst.StringValue(input.GroupID),
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	r := client.NewRequest(http.MethodDelete, path)
+	resp, err := client.RequireOK(s.Client.Do(ctx, r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	return &DeleteGroupOutput{}, nil
+}
+
+// Status retrieves the status of the group identified by input.GroupID,
+// returning the group's nodes as parsed from the response.
+func (s *ServiceOp) Status(ctx context.Context, input *StatusGroupInput) (*StatusGroupOutput, error) {
+	path, err := uritemplates.Expand("/compute/azure/group/{groupId}/status", uritemplates.Values{
+		"groupId": spotinst.StringValue(input.GroupID),
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	r := client.NewRequest(http.MethodGet, path)
+	resp, err := client.RequireOK(s.Client.Do(ctx, r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	ns, err := nodesFromHttpResponse(resp)
+	if err != nil {
+		return nil, err
+	}
+
+	return &StatusGroupOutput{Nodes: ns}, nil
+}
+
+// Detach detaches nodes from the group identified by input.GroupID; the
+// remaining fields of input form the request body.
+// Side effect: input.GroupID is cleared before the request is sent.
+func (s *ServiceOp) Detach(ctx context.Context, input *DetachGroupInput) (*DetachGroupOutput, error) {
+	path, err := uritemplates.Expand("/compute/azure/group/{groupId}/detachNodes", uritemplates.Values{
+		"groupId": spotinst.StringValue(input.GroupID),
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	// We do not need the ID anymore so let's drop it.
+	input.GroupID = nil
+
+	r := client.NewRequest(http.MethodPut, path)
+	r.Obj = input
+
+	resp, err := client.RequireOK(s.Client.Do(ctx, r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	return &DetachGroupOutput{}, nil
+}
+
+// ListTasks returns all scheduled tasks. The input argument is accepted for
+// interface symmetry but does not contribute to the request. Note that the
+// task endpoints use the "/azure/compute/task" path family.
+func (s *ServiceOp) ListTasks(ctx context.Context, input *ListTasksInput) (*ListTasksOutput, error) {
+	r := client.NewRequest(http.MethodGet, "/azure/compute/task")
+	resp, err := client.RequireOK(s.Client.Do(ctx, r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	tasks, err := tasksFromHttpResponse(resp)
+	if err != nil {
+		return nil, err
+	}
+
+	return &ListTasksOutput{Tasks: tasks}, nil
+}
+
+// CreateTask creates a scheduled task from input.Task and returns the task
+// echoed back by the API (if any).
+func (s *ServiceOp) CreateTask(ctx context.Context, input *CreateTaskInput) (*CreateTaskOutput, error) {
+	r := client.NewRequest(http.MethodPost, "/azure/compute/task")
+	r.Obj = input.Task
+
+	resp, err := client.RequireOK(s.Client.Do(ctx, r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	tasks, err := tasksFromHttpResponse(resp)
+	if err != nil {
+		return nil, err
+	}
+
+	output := new(CreateTaskOutput)
+	if len(tasks) > 0 {
+		output.Task = tasks[0]
+	}
+
+	return output, nil
+}
+
+// ReadTask fetches a single scheduled task by input.TaskID. The returned
+// output has a nil Task when the API response contains no tasks.
+func (s *ServiceOp) ReadTask(ctx context.Context, input *ReadTaskInput) (*ReadTaskOutput, error) {
+	path, err := uritemplates.Expand("/azure/compute/task/{taskId}", uritemplates.Values{
+		"taskId": spotinst.StringValue(input.TaskID),
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	r := client.NewRequest(http.MethodGet, path)
+	resp, err := client.RequireOK(s.Client.Do(ctx, r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	tasks, err := tasksFromHttpResponse(resp)
+	if err != nil {
+		return nil, err
+	}
+
+	output := new(ReadTaskOutput)
+	if len(tasks) > 0 {
+		output.Task = tasks[0]
+	}
+
+	return output, nil
+}
+
+// UpdateTask updates the task identified by input.Task.ID.
+// Side effect: input.Task.ID is cleared before the request is sent (the ID
+// travels in the URL, not the body).
+func (s *ServiceOp) UpdateTask(ctx context.Context, input *UpdateTaskInput) (*UpdateTaskOutput, error) {
+	path, err := uritemplates.Expand("/azure/compute/task/{taskId}", uritemplates.Values{
+		"taskId": spotinst.StringValue(input.Task.ID),
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	// We do not need the ID anymore so let's drop it.
+	input.Task.ID = nil
+
+	r := client.NewRequest(http.MethodPut, path)
+	r.Obj = input.Task
+
+	resp, err := client.RequireOK(s.Client.Do(ctx, r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	tasks, err := tasksFromHttpResponse(resp)
+	if err != nil {
+		return nil, err
+	}
+
+	output := new(UpdateTaskOutput)
+	if len(tasks) > 0 {
+		output.Task = tasks[0]
+	}
+
+	return output, nil
+}
+
+// DeleteTask deletes the task identified by input.TaskID. The response body
+// is not parsed; a non-2xx status surfaces as an error from RequireOK.
+func (s *ServiceOp) DeleteTask(ctx context.Context, input *DeleteTaskInput) (*DeleteTaskOutput, error) {
+	path, err := uritemplates.Expand("/azure/compute/task/{taskId}", uritemplates.Values{
+		"taskId": spotinst.StringValue(input.TaskID),
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	r := client.NewRequest(http.MethodDelete, path)
+	resp, err := client.RequireOK(s.Client.Do(ctx, r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	return &DeleteTaskOutput{}, nil
+}
+
+// Roll initiates a deployment roll on the group identified by input.GroupID;
+// the remaining roll parameters in input form the request body.
+// Side effect: input.GroupID is cleared before the request is sent.
+func (s *ServiceOp) Roll(ctx context.Context, input *RollGroupInput) (*RollGroupOutput, error) {
+	path, err := uritemplates.Expand("/compute/azure/group/{groupId}/roll", uritemplates.Values{
+		"groupId": spotinst.StringValue(input.GroupID),
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	// We do not need the ID anymore so let's drop it.
+	input.GroupID = nil
+
+	r := client.NewRequest(http.MethodPut, path)
+	r.Obj = input
+
+	resp, err := client.RequireOK(s.Client.Do(ctx, r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	output, err := rollFromHttpResponse(resp)
+	if err != nil {
+		return nil, err
+	}
+
+	return output, nil
+}
+
+// GetRollStatus fetches the status of a single roll identified by
+// input.GroupID and input.RollID.
+// Side effect: input.GroupID is cleared before the request is sent.
+// NOTE(review): input is attached as the request body of a GET request —
+// confirm the client actually serializes a body for GET, or drop r.Obj.
+func (s *ServiceOp) GetRollStatus(ctx context.Context, input *RollStatusInput) (*RollStatusOutput, error) {
+	path, err := uritemplates.Expand("/compute/azure/group/{groupId}/roll/{rollId}", uritemplates.Values{
+		"groupId": spotinst.StringValue(input.GroupID),
+		"rollId":  spotinst.StringValue(input.RollID),
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	// We do not need the ID anymore so let's drop it.
+	input.GroupID = nil
+
+	r := client.NewRequest(http.MethodGet, path)
+	r.Obj = input
+
+	resp, err := client.RequireOK(s.Client.Do(ctx, r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	rolls, err := rollStatusesFromHttpResponse(resp)
+	if err != nil {
+		return nil, err
+	}
+
+	output := new(RollStatusOutput)
+	if len(rolls) > 0 {
+		output.RollStatus = rolls[0]
+	}
+
+	return output, nil
+}
+
+// ListRollStatus lists the statuses of all rolls of the group identified by
+// input.GroupID.
+// Side effect: input.GroupID is cleared before the request is sent.
+// NOTE(review): as in GetRollStatus, input is attached as the body of a GET
+// request — confirm intent.
+func (s *ServiceOp) ListRollStatus(ctx context.Context, input *ListRollStatusInput) (*ListRollStatusOutput, error) {
+	path, err := uritemplates.Expand("/compute/azure/group/{groupId}/roll", uritemplates.Values{
+		"groupId": spotinst.StringValue(input.GroupID),
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	// We do not need the ID anymore so let's drop it.
+	input.GroupID = nil
+
+	r := client.NewRequest(http.MethodGet, path)
+	r.Obj = input
+
+	resp, err := client.RequireOK(s.Client.Do(ctx, r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	rolls, err := rollStatusesFromHttpResponse(resp)
+	if err != nil {
+		return nil, err
+	}
+
+	return &ListRollStatusOutput{Items: rolls}, nil
+}
+
+// StopRoll stops an in-progress roll identified by input.GroupID and
+// input.RollID.
+// Side effect: both input.GroupID and input.RollID are cleared before the
+// request is sent; the remaining fields form the request body.
+func (s *ServiceOp) StopRoll(ctx context.Context, input *StopRollInput) (*StopRollOutput, error) {
+	path, err := uritemplates.Expand("/compute/azure/group/{groupId}/roll/{rollId}", uritemplates.Values{
+		"groupId": spotinst.StringValue(input.GroupID),
+		"rollId":  spotinst.StringValue(input.RollID),
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	// We do not need the IDs anymore so let's drop them.
+	input.GroupID = nil
+	input.RollID = nil
+
+	r := client.NewRequest(http.MethodPut, path)
+	r.Obj = input
+
+	resp, err := client.RequireOK(s.Client.Do(ctx, r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	return &StopRollOutput{}, nil
+}
+
+// CreateNodeSignal sends a node signal to the API. The response body is
+// decoded only to surface parse errors; the returned NodeSignalOutput
+// carries no data from the response.
+// NOTE(review): the path lacks the leading slash used by every other request
+// in this service — confirm client.NewRequest normalizes relative paths.
+func (s *ServiceOp) CreateNodeSignal(ctx context.Context, input *NodeSignalInput) (*NodeSignalOutput, error) {
+	r := client.NewRequest(http.MethodPost, "compute/azure/node/signal")
+	r.Obj = input
+
+	resp, err := client.RequireOK(s.Client.Do(ctx, r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	// Decode strictly for error detection. The original code bound the
+	// result to `ns` and tested it in an empty `if ns != nil {}` block —
+	// dead code, removed here.
+	if _, err := nodeSignalFromHttpResponse(resp); err != nil {
+		return nil, err
+	}
+
+	return new(NodeSignalOutput), nil
+}
+
+// Scale performs a manual scale of the type given by input.ScaleType on the
+// group identified by input.GroupID. An explicit Adjustment, when present,
+// is passed as the "adjustment" query parameter; the remaining fields form
+// the request body.
+// Side effect: input.GroupID is cleared before the request is sent.
+func (s *ServiceOp) Scale(ctx context.Context, input *ScaleGroupInput) (*ScaleGroupOutput, error) {
+	path, err := uritemplates.Expand("/compute/azure/group/{groupId}/scale/{type}", uritemplates.Values{
+		"groupId": spotinst.StringValue(input.GroupID),
+		"type":    spotinst.StringValue(input.ScaleType),
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	// We do not need the ID anymore so let's drop it.
+	input.GroupID = nil
+
+	r := client.NewRequest(http.MethodPut, path)
+
+	if input.Adjustment != nil {
+		r.Params.Set("adjustment", strconv.Itoa(*input.Adjustment))
+	}
+	r.Obj = input
+
+	resp, err := client.RequireOK(s.Client.Do(ctx, r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	// err is provably nil at this point; return literal nil instead of the
+	// stale variable (the original returned `err`, which misleadingly read
+	// as if it could still be non-nil).
+	return &ScaleGroupOutput{}, nil
+}
+
+// endregion
+
+// region Group
+
+// MarshalJSON serializes Group via jsonutil.MarshalJSON, which consults the
+// forceSendFields / nullFields lists maintained by the Set* methods below;
+// a field recorded in nullFields is emitted as an explicit JSON null.
+func (o Group) MarshalJSON() ([]byte, error) {
+	type noMethod Group
+	raw := noMethod(o)
+	return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+// The Set* methods below form a fluent builder: each assigns the field,
+// records the field name in nullFields when the value is nil (so that
+// MarshalJSON emits an explicit null), and returns the receiver so calls
+// can be chained.
+
+func (o *Group) SetId(v *string) *Group {
+	if o.ID = v; o.ID == nil {
+		o.nullFields = append(o.nullFields, "ID")
+	}
+	return o
+}
+
+func (o *Group) SetName(v *string) *Group {
+	if o.Name = v; o.Name == nil {
+		o.nullFields = append(o.nullFields, "Name")
+	}
+	return o
+}
+
+func (o *Group) SetResourceGroupName(v *string) *Group {
+	if o.ResourceGroupName = v; o.ResourceGroupName == nil {
+		o.nullFields = append(o.nullFields, "ResourceGroupName")
+	}
+	return o
+}
+
+func (o *Group) SetDescription(v *string) *Group {
+	if o.Description = v; o.Description == nil {
+		o.nullFields = append(o.nullFields, "Description")
+	}
+	return o
+}
+
+func (o *Group) SetCapacity(v *Capacity) *Group {
+	if o.Capacity = v; o.Capacity == nil {
+		o.nullFields = append(o.nullFields, "Capacity")
+	}
+	return o
+}
+
+func (o *Group) SetCompute(v *Compute) *Group {
+	if o.Compute = v; o.Compute == nil {
+		o.nullFields = append(o.nullFields, "Compute")
+	}
+	return o
+}
+
+func (o *Group) SetStrategy(v *Strategy) *Group {
+	if o.Strategy = v; o.Strategy == nil {
+		o.nullFields = append(o.nullFields, "Strategy")
+	}
+	return o
+}
+
+func (o *Group) SetScaling(v *Scaling) *Group {
+	if o.Scaling = v; o.Scaling == nil {
+		o.nullFields = append(o.nullFields, "Scaling")
+	}
+	return o
+}
+
+func (o *Group) SetScheduling(v *Scheduling) *Group {
+	if o.Scheduling = v; o.Scheduling == nil {
+		o.nullFields = append(o.nullFields, "Scheduling")
+	}
+	return o
+}
+
+func (o *Group) SetIntegration(v *Integration) *Group {
+	if o.Integration = v; o.Integration == nil {
+		o.nullFields = append(o.nullFields, "Integration")
+	}
+	return o
+}
+
+func (o *Group) SetRegion(v *string) *Group {
+	if o.Region = v; o.Region == nil {
+		o.nullFields = append(o.nullFields, "Region")
+	}
+	return o
+}
+
+// endregion
+
+// region Scheduling
+
+// The types in this section follow the same pattern as Group: a MarshalJSON
+// implemented via jsonutil with forceSendFields / nullFields, plus fluent
+// Set* methods that record nil assignments in nullFields so they serialize
+// as explicit JSON nulls.
+func (o Scheduling) MarshalJSON() ([]byte, error) {
+	type noMethod Scheduling
+	raw := noMethod(o)
+	return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *Scheduling) SetTasks(v []*ScheduledTask) *Scheduling {
+	if o.Tasks = v; o.Tasks == nil {
+		o.nullFields = append(o.nullFields, "Tasks")
+	}
+	return o
+}
+
+// endregion
+
+// region Integration
+
+func (o Integration) MarshalJSON() ([]byte, error) {
+	type noMethod Integration
+	raw := noMethod(o)
+	return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *Integration) SetKubernetes(v *KubernetesIntegration) *Integration {
+	if o.Kubernetes = v; o.Kubernetes == nil {
+		o.nullFields = append(o.nullFields, "Kubernetes")
+	}
+	return o
+}
+
+func (o *Integration) SetMultai(v *MultaiIntegration) *Integration {
+	if o.Multai = v; o.Multai == nil {
+		o.nullFields = append(o.nullFields, "Multai")
+	}
+	return o
+}
+
+func (o *Integration) SetRancher(v *RancherIntegration) *Integration {
+	if o.Rancher = v; o.Rancher == nil {
+		o.nullFields = append(o.nullFields, "Rancher")
+	}
+	return o
+}
+
+// endregion
+
+// region KubernetesIntegration
+
+func (o KubernetesIntegration) MarshalJSON() ([]byte, error) {
+	type noMethod KubernetesIntegration
+	raw := noMethod(o)
+	return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *KubernetesIntegration) SetClusterIdentifier(v *string) *KubernetesIntegration {
+	if o.ClusterIdentifier = v; o.ClusterIdentifier == nil {
+		o.nullFields = append(o.nullFields, "ClusterIdentifier")
+	}
+	return o
+}
+
+// endregion
+
+// region MultaiIntegration
+
+func (o MultaiIntegration) MarshalJSON() ([]byte, error) {
+	type noMethod MultaiIntegration
+	raw := noMethod(o)
+	return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *MultaiIntegration) SetDeploymentId(v *string) *MultaiIntegration {
+	if o.DeploymentID = v; o.DeploymentID == nil {
+		o.nullFields = append(o.nullFields, "DeploymentID")
+	}
+	return o
+}
+
+// endregion
+
+// region RancherIntegration
+
+func (o RancherIntegration) MarshalJSON() ([]byte, error) {
+	type noMethod RancherIntegration
+	raw := noMethod(o)
+	return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *RancherIntegration) SetMasterHost(v *string) *RancherIntegration {
+	if o.MasterHost = v; o.MasterHost == nil {
+		o.nullFields = append(o.nullFields, "MasterHost")
+	}
+	return o
+}
+
+func (o *RancherIntegration) SetAccessKey(v *string) *RancherIntegration {
+	if o.AccessKey = v; o.AccessKey == nil {
+		o.nullFields = append(o.nullFields, "AccessKey")
+	}
+	return o
+}
+
+func (o *RancherIntegration) SetSecretKey(v *string) *RancherIntegration {
+	if o.SecretKey = v; o.SecretKey == nil {
+		o.nullFields = append(o.nullFields, "SecretKey")
+	}
+	return o
+}
+
+// endregion
+
+// region ScheduledTask
+
+func (o ScheduledTask) MarshalJSON() ([]byte, error) {
+	type noMethod ScheduledTask
+	raw := noMethod(o)
+	return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *ScheduledTask) SetIsEnabled(v *bool) *ScheduledTask {
+	if o.IsEnabled = v; o.IsEnabled == nil {
+		o.nullFields = append(o.nullFields, "IsEnabled")
+	}
+	return o
+}
+
+func (o *ScheduledTask) SetFrequency(v *string) *ScheduledTask {
+	if o.Frequency = v; o.Frequency == nil {
+		o.nullFields = append(o.nullFields, "Frequency")
+	}
+	return o
+}
+
+func (o *ScheduledTask) SetCronExpression(v *string) *ScheduledTask {
+	if o.CronExpression = v; o.CronExpression == nil {
+		o.nullFields = append(o.nullFields, "CronExpression")
+	}
+	return o
+}
+
+func (o *ScheduledTask) SetTaskType(v *string) *ScheduledTask {
+	if o.TaskType = v; o.TaskType == nil {
+		o.nullFields = append(o.nullFields, "TaskType")
+	}
+	return o
+}
+
+func (o *ScheduledTask) SetScaleTargetCapacity(v *int) *ScheduledTask {
+	if o.ScaleTargetCapacity = v; o.ScaleTargetCapacity == nil {
+		o.nullFields = append(o.nullFields, "ScaleTargetCapacity")
+	}
+	return o
+}
+
+func (o *ScheduledTask) SetScaleMinCapacity(v *int) *ScheduledTask {
+	if o.ScaleMinCapacity = v; o.ScaleMinCapacity == nil {
+		o.nullFields = append(o.nullFields, "ScaleMinCapacity")
+	}
+	return o
+}
+
+func (o *ScheduledTask) SetScaleMaxCapacity(v *int) *ScheduledTask {
+	if o.ScaleMaxCapacity = v; o.ScaleMaxCapacity == nil {
+		o.nullFields = append(o.nullFields, "ScaleMaxCapacity")
+	}
+	return o
+}
+
+func (o *ScheduledTask) SetBatchSizePercentage(v *int) *ScheduledTask {
+	if o.BatchSizePercentage = v; o.BatchSizePercentage == nil {
+		o.nullFields = append(o.nullFields, "BatchSizePercentage")
+	}
+	return o
+}
+
+func (o *ScheduledTask) SetGracePeriod(v *int) *ScheduledTask {
+	if o.GracePeriod = v; o.GracePeriod == nil {
+		o.nullFields = append(o.nullFields, "GracePeriod")
+	}
+	return o
+}
+
+func (o *ScheduledTask) SetAdjustment(v *int) *ScheduledTask {
+	if o.Adjustment = v; o.Adjustment == nil {
+		o.nullFields = append(o.nullFields, "Adjustment")
+	}
+	return o
+}
+
+func (o *ScheduledTask) SetAdjustmentPercentage(v *int) *ScheduledTask {
+	if o.AdjustmentPercentage = v; o.AdjustmentPercentage == nil {
+		o.nullFields = append(o.nullFields, "AdjustmentPercentage")
+	}
+	return o
+}
+
+// endregion
+
+// region Scaling
+
+// Scaling, ScalingPolicy, Action and Dimension below use the same
+// MarshalJSON + fluent-setter pattern as Group: nil assignments are recorded
+// in nullFields so they serialize as explicit JSON nulls.
+func (o Scaling) MarshalJSON() ([]byte, error) {
+	type noMethod Scaling
+	raw := noMethod(o)
+	return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *Scaling) SetUp(v []*ScalingPolicy) *Scaling {
+	if o.Up = v; o.Up == nil {
+		o.nullFields = append(o.nullFields, "Up")
+	}
+	return o
+}
+
+func (o *Scaling) SetDown(v []*ScalingPolicy) *Scaling {
+	if o.Down = v; o.Down == nil {
+		o.nullFields = append(o.nullFields, "Down")
+	}
+	return o
+}
+
+// endregion
+
+// region ScalingPolicy
+
+func (o ScalingPolicy) MarshalJSON() ([]byte, error) {
+	type noMethod ScalingPolicy
+	raw := noMethod(o)
+	return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *ScalingPolicy) SetPolicyName(v *string) *ScalingPolicy {
+	if o.PolicyName = v; o.PolicyName == nil {
+		o.nullFields = append(o.nullFields, "PolicyName")
+	}
+	return o
+}
+
+func (o *ScalingPolicy) SetMetricName(v *string) *ScalingPolicy {
+	if o.MetricName = v; o.MetricName == nil {
+		o.nullFields = append(o.nullFields, "MetricName")
+	}
+	return o
+}
+
+func (o *ScalingPolicy) SetStatistic(v *string) *ScalingPolicy {
+	if o.Statistic = v; o.Statistic == nil {
+		o.nullFields = append(o.nullFields, "Statistic")
+	}
+	return o
+}
+
+func (o *ScalingPolicy) SetUnit(v *string) *ScalingPolicy {
+	if o.Unit = v; o.Unit == nil {
+		o.nullFields = append(o.nullFields, "Unit")
+	}
+	return o
+}
+
+func (o *ScalingPolicy) SetThreshold(v *float64) *ScalingPolicy {
+	if o.Threshold = v; o.Threshold == nil {
+		o.nullFields = append(o.nullFields, "Threshold")
+	}
+	return o
+}
+
+func (o *ScalingPolicy) SetAdjustment(v *int) *ScalingPolicy {
+	if o.Adjustment = v; o.Adjustment == nil {
+		o.nullFields = append(o.nullFields, "Adjustment")
+	}
+	return o
+}
+
+func (o *ScalingPolicy) SetMinTargetCapacity(v *int) *ScalingPolicy {
+	if o.MinTargetCapacity = v; o.MinTargetCapacity == nil {
+		o.nullFields = append(o.nullFields, "MinTargetCapacity")
+	}
+	return o
+}
+
+func (o *ScalingPolicy) SetMaxTargetCapacity(v *int) *ScalingPolicy {
+	if o.MaxTargetCapacity = v; o.MaxTargetCapacity == nil {
+		o.nullFields = append(o.nullFields, "MaxTargetCapacity")
+	}
+	return o
+}
+
+func (o *ScalingPolicy) SetNamespace(v *string) *ScalingPolicy {
+	if o.Namespace = v; o.Namespace == nil {
+		o.nullFields = append(o.nullFields, "Namespace")
+	}
+	return o
+}
+
+func (o *ScalingPolicy) SetEvaluationPeriods(v *int) *ScalingPolicy {
+	if o.EvaluationPeriods = v; o.EvaluationPeriods == nil {
+		o.nullFields = append(o.nullFields, "EvaluationPeriods")
+	}
+	return o
+}
+
+func (o *ScalingPolicy) SetPeriod(v *int) *ScalingPolicy {
+	if o.Period = v; o.Period == nil {
+		o.nullFields = append(o.nullFields, "Period")
+	}
+	return o
+}
+
+func (o *ScalingPolicy) SetCooldown(v *int) *ScalingPolicy {
+	if o.Cooldown = v; o.Cooldown == nil {
+		o.nullFields = append(o.nullFields, "Cooldown")
+	}
+	return o
+}
+
+func (o *ScalingPolicy) SetOperator(v *string) *ScalingPolicy {
+	if o.Operator = v; o.Operator == nil {
+		o.nullFields = append(o.nullFields, "Operator")
+	}
+	return o
+}
+
+func (o *ScalingPolicy) SetDimensions(v []*Dimension) *ScalingPolicy {
+	if o.Dimensions = v; o.Dimensions == nil {
+		o.nullFields = append(o.nullFields, "Dimensions")
+	}
+	return o
+}
+
+func (o *ScalingPolicy) SetAction(v *Action) *ScalingPolicy {
+	if o.Action = v; o.Action == nil {
+		o.nullFields = append(o.nullFields, "Action")
+	}
+	return o
+}
+
+// endregion
+
+// region Action
+
+func (o Action) MarshalJSON() ([]byte, error) {
+	type noMethod Action
+	raw := noMethod(o)
+	return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *Action) SetType(v *string) *Action {
+	if o.Type = v; o.Type == nil {
+		o.nullFields = append(o.nullFields, "Type")
+	}
+	return o
+}
+
+func (o *Action) SetAdjustment(v *string) *Action {
+	if o.Adjustment = v; o.Adjustment == nil {
+		o.nullFields = append(o.nullFields, "Adjustment")
+	}
+	return o
+}
+
+func (o *Action) SetMinTargetCapacity(v *string) *Action {
+	if o.MinTargetCapacity = v; o.MinTargetCapacity == nil {
+		o.nullFields = append(o.nullFields, "MinTargetCapacity")
+	}
+	return o
+}
+
+func (o *Action) SetMaxTargetCapacity(v *string) *Action {
+	if o.MaxTargetCapacity = v; o.MaxTargetCapacity == nil {
+		o.nullFields = append(o.nullFields, "MaxTargetCapacity")
+	}
+	return o
+}
+
+func (o *Action) SetMaximum(v *string) *Action {
+	if o.Maximum = v; o.Maximum == nil {
+		o.nullFields = append(o.nullFields, "Maximum")
+	}
+	return o
+}
+
+func (o *Action) SetMinimum(v *string) *Action {
+	if o.Minimum = v; o.Minimum == nil {
+		o.nullFields = append(o.nullFields, "Minimum")
+	}
+	return o
+}
+
+func (o *Action) SetTarget(v *string) *Action {
+	if o.Target = v; o.Target == nil {
+		o.nullFields = append(o.nullFields, "Target")
+	}
+	return o
+}
+
+// endregion
+
+// region Dimension
+
+func (o Dimension) MarshalJSON() ([]byte, error) {
+	type noMethod Dimension
+	raw := noMethod(o)
+	return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *Dimension) SetName(v *string) *Dimension {
+	if o.Name = v; o.Name == nil {
+		o.nullFields = append(o.nullFields, "Name")
+	}
+	return o
+}
+
+func (o *Dimension) SetValue(v *string) *Dimension {
+	if o.Value = v; o.Value == nil {
+		o.nullFields = append(o.nullFields, "Value")
+	}
+	return o
+}
+
+// endregion
+
+// region Strategy
+
+// Strategy, Signal, Capacity, Compute and VMSizes below use the same
+// MarshalJSON + fluent-setter pattern as Group: nil assignments are recorded
+// in nullFields so they serialize as explicit JSON nulls.
+func (o Strategy) MarshalJSON() ([]byte, error) {
+	type noMethod Strategy
+	raw := noMethod(o)
+	return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *Strategy) SetLowPriorityPercentage(v *int) *Strategy {
+	if o.LowPriorityPercentage = v; o.LowPriorityPercentage == nil {
+		o.nullFields = append(o.nullFields, "LowPriorityPercentage")
+	}
+	return o
+}
+
+func (o *Strategy) SetOnDemandCount(v *int) *Strategy {
+	if o.OnDemandCount = v; o.OnDemandCount == nil {
+		o.nullFields = append(o.nullFields, "OnDemandCount")
+	}
+	return o
+}
+
+func (o *Strategy) SetDrainingTimeout(v *int) *Strategy {
+	if o.DrainingTimeout = v; o.DrainingTimeout == nil {
+		o.nullFields = append(o.nullFields, "DrainingTimeout")
+	}
+	return o
+}
+
+func (o *Strategy) SetSignals(v []*Signal) *Strategy {
+	if o.Signals = v; o.Signals == nil {
+		o.nullFields = append(o.nullFields, "Signals")
+	}
+	return o
+}
+
+// endregion
+
+// region Signal
+
+func (o Signal) MarshalJSON() ([]byte, error) {
+	type noMethod Signal
+	raw := noMethod(o)
+	return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *Signal) SetName(v *string) *Signal {
+	if o.Name = v; o.Name == nil {
+		o.nullFields = append(o.nullFields, "Name")
+	}
+	return o
+}
+
+func (o *Signal) SetTimeout(v *int) *Signal {
+	if o.Timeout = v; o.Timeout == nil {
+		o.nullFields = append(o.nullFields, "Timeout")
+	}
+	return o
+}
+
+// endregion
+
+// region Capacity
+
+func (o Capacity) MarshalJSON() ([]byte, error) {
+	type noMethod Capacity
+	raw := noMethod(o)
+	return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *Capacity) SetMinimum(v *int) *Capacity {
+	if o.Minimum = v; o.Minimum == nil {
+		o.nullFields = append(o.nullFields, "Minimum")
+	}
+	return o
+}
+
+func (o *Capacity) SetMaximum(v *int) *Capacity {
+	if o.Maximum = v; o.Maximum == nil {
+		o.nullFields = append(o.nullFields, "Maximum")
+	}
+	return o
+}
+
+func (o *Capacity) SetTarget(v *int) *Capacity {
+	if o.Target = v; o.Target == nil {
+		o.nullFields = append(o.nullFields, "Target")
+	}
+	return o
+}
+
+// endregion
+
+// region Compute
+
+func (o Compute) MarshalJSON() ([]byte, error) {
+	type noMethod Compute
+	raw := noMethod(o)
+	return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *Compute) SetRegion(v *string) *Compute {
+	if o.Region = v; o.Region == nil {
+		o.nullFields = append(o.nullFields, "Region")
+	}
+	return o
+}
+
+func (o *Compute) SetProduct(v *string) *Compute {
+	if o.Product = v; o.Product == nil {
+		o.nullFields = append(o.nullFields, "Product")
+	}
+	return o
+}
+
+func (o *Compute) SetResourceGroupName(v *string) *Compute {
+	if o.ResourceGroupName = v; o.ResourceGroupName == nil {
+		o.nullFields = append(o.nullFields, "ResourceGroupName")
+	}
+	return o
+}
+
+func (o *Compute) SetVMSizes(v *VMSizes) *Compute {
+	if o.VMSizes = v; o.VMSizes == nil {
+		o.nullFields = append(o.nullFields, "VMSizes")
+	}
+	return o
+}
+
+func (o *Compute) SetLaunchSpecification(v *LaunchSpecification) *Compute {
+	if o.LaunchSpecification = v; o.LaunchSpecification == nil {
+		o.nullFields = append(o.nullFields, "LaunchSpecification")
+	}
+	return o
+}
+
+func (o *Compute) SetHealth(v *Health) *Compute {
+	if o.Health = v; o.Health == nil {
+		o.nullFields = append(o.nullFields, "Health")
+	}
+	return o
+}
+
+// endregion
+
+// region VMSize
+
+func (o VMSizes) MarshalJSON() ([]byte, error) {
+	type noMethod VMSizes
+	raw := noMethod(o)
+	return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *VMSizes) SetOnDemand(v []string) *VMSizes {
+	if o.OnDemand = v; o.OnDemand == nil {
+		o.nullFields = append(o.nullFields, "OnDemand")
+	}
+	return o
+}
+
+func (o *VMSizes) SetLowPriority(v []string) *VMSizes {
+	if o.LowPriority = v; o.LowPriority == nil {
+		o.nullFields = append(o.nullFields, "LowPriority")
+	}
+	return o
+}
+
+// endregion
+
+// region LaunchSpecification
+
+// The remaining types below use the same MarshalJSON + fluent-setter pattern
+// as Group: nil assignments are recorded in nullFields so they serialize as
+// explicit JSON nulls.
+func (o LaunchSpecification) MarshalJSON() ([]byte, error) {
+	type noMethod LaunchSpecification
+	raw := noMethod(o)
+	return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *LaunchSpecification) SetLoadBalancersConfig(v *LoadBalancersConfig) *LaunchSpecification {
+	if o.LoadBalancersConfig = v; o.LoadBalancersConfig == nil {
+		o.nullFields = append(o.nullFields, "LoadBalancersConfig")
+	}
+	return o
+}
+
+func (o *LaunchSpecification) SetImage(v *Image) *LaunchSpecification {
+	if o.Image = v; o.Image == nil {
+		o.nullFields = append(o.nullFields, "Image")
+	}
+	return o
+}
+
+func (o *LaunchSpecification) SetUserData(v *string) *LaunchSpecification {
+	if o.UserData = v; o.UserData == nil {
+		o.nullFields = append(o.nullFields, "UserData")
+	}
+	return o
+}
+
+func (o *LaunchSpecification) SetCustomData(v *string) *LaunchSpecification {
+	if o.CustomData = v; o.CustomData == nil {
+		o.nullFields = append(o.nullFields, "CustomData")
+	}
+	return o
+}
+
+func (o *LaunchSpecification) SetManagedServiceIdentities(v []*ManagedServiceIdentity) *LaunchSpecification {
+	if o.ManagedServiceIdentities = v; o.ManagedServiceIdentities == nil {
+		o.nullFields = append(o.nullFields, "ManagedServiceIdentities")
+	}
+	return o
+}
+
+// SetShutdownScript sets the shutdown script used when draining instances
+func (o *LaunchSpecification) SetShutdownScript(v *string) *LaunchSpecification {
+	if o.ShutdownScript = v; o.ShutdownScript == nil {
+		o.nullFields = append(o.nullFields, "ShutdownScript")
+	}
+	return o
+}
+
+func (o *LaunchSpecification) SetStorage(v *Storage) *LaunchSpecification {
+	if o.Storage = v; o.Storage == nil {
+		o.nullFields = append(o.nullFields, "Storage")
+	}
+	return o
+}
+
+func (o *LaunchSpecification) SetNetwork(v *Network) *LaunchSpecification {
+	if o.Network = v; o.Network == nil {
+		o.nullFields = append(o.nullFields, "Network")
+	}
+	return o
+}
+
+func (o *LaunchSpecification) SetLogin(v *Login) *LaunchSpecification {
+	if o.Login = v; o.Login == nil {
+		o.nullFields = append(o.nullFields, "Login")
+	}
+	return o
+}
+
+// endregion
+
+// region LoadBalancersConfig
+
+func (o LoadBalancersConfig) MarshalJSON() ([]byte, error) {
+	type noMethod LoadBalancersConfig
+	raw := noMethod(o)
+	return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *LoadBalancersConfig) SetLoadBalancers(v []*LoadBalancer) *LoadBalancersConfig {
+	if o.LoadBalancers = v; o.LoadBalancers == nil {
+		o.nullFields = append(o.nullFields, "LoadBalancers")
+	}
+	return o
+}
+
+// endregion
+
+// region LoadBalancer
+
+func (o LoadBalancer) MarshalJSON() ([]byte, error) {
+	type noMethod LoadBalancer
+	raw := noMethod(o)
+	return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *LoadBalancer) SetType(v *string) *LoadBalancer {
+	if o.Type = v; o.Type == nil {
+		o.nullFields = append(o.nullFields, "Type")
+	}
+	return o
+}
+
+func (o *LoadBalancer) SetBalancerId(v *string) *LoadBalancer {
+	if o.BalancerID = v; o.BalancerID == nil {
+		o.nullFields = append(o.nullFields, "BalancerID")
+	}
+	return o
+}
+
+func (o *LoadBalancer) SetTargetSetId(v *string) *LoadBalancer {
+	if o.TargetSetID = v; o.TargetSetID == nil {
+		o.nullFields = append(o.nullFields, "TargetSetID")
+	}
+	return o
+}
+
+func (o *LoadBalancer) SetAutoWeight(v *bool) *LoadBalancer {
+	if o.AutoWeight = v; o.AutoWeight == nil {
+		o.nullFields = append(o.nullFields, "AutoWeight")
+	}
+	return o
+}
+
+// endregion
+
+// region ManagedServiceIdentity
+
+func (o ManagedServiceIdentity) MarshalJSON() ([]byte, error) {
+	type noMethod ManagedServiceIdentity
+	raw := noMethod(o)
+	return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *ManagedServiceIdentity) SetResourceGroupName(v *string) *ManagedServiceIdentity {
+	if o.ResourceGroupName = v; o.ResourceGroupName == nil {
+		o.nullFields = append(o.nullFields, "ResourceGroupName")
+	}
+	return o
+}
+
+func (o *ManagedServiceIdentity) SetName(v *string) *ManagedServiceIdentity {
+	if o.Name = v; o.Name == nil {
+		o.nullFields = append(o.nullFields, "Name")
+	}
+	return o
+}
+
+// endregion
+
+// region Image
+
+func (o Image) MarshalJSON() ([]byte, error) {
+	type noMethod Image
+	raw := noMethod(o)
+	return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+// SetMarketPlaceImage sets the MarketPlace field (note the method/field name
+// difference).
+func (o *Image) SetMarketPlaceImage(v *MarketPlaceImage) *Image {
+	if o.MarketPlace = v; o.MarketPlace == nil {
+		o.nullFields = append(o.nullFields, "MarketPlace")
+	}
+	return o
+}
+
+func (o *Image) SetCustom(v *CustomImage) *Image {
+	if o.Custom = v; o.Custom == nil {
+		o.nullFields = append(o.nullFields, "Custom")
+	}
+	return o
+}
+
+// endregion
+
+// region MarketPlaceImage
+
+func (o MarketPlaceImage) MarshalJSON() ([]byte, error) {
+	type noMethod MarketPlaceImage
+	raw := noMethod(o)
+	return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *MarketPlaceImage) SetPublisher(v *string) *MarketPlaceImage {
+	if o.Publisher = v; o.Publisher == nil {
+		o.nullFields = append(o.nullFields, "Publisher")
+	}
+	return o
+}
+
+func (o *MarketPlaceImage) SetOffer(v *string) *MarketPlaceImage {
+	if o.Offer = v; o.Offer == nil {
+		o.nullFields = append(o.nullFields, "Offer")
+	}
+	return o
+}
+
+func (o *MarketPlaceImage) SetSKU(v *string) *MarketPlaceImage {
+	if o.SKU = v; o.SKU == nil {
+		o.nullFields = append(o.nullFields, "SKU")
+	}
+	return o
+}
+
+// endregion
+
+// region CustomImage
+
+func (o CustomImage) MarshalJSON() ([]byte, error) {
+	type noMethod CustomImage
+	raw := noMethod(o)
+	return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *CustomImage) SetResourceGroupName(v *string) *CustomImage {
+	if o.ResourceGroupName = v; o.ResourceGroupName == nil {
+		o.nullFields = append(o.nullFields, "ResourceGroupName")
+	}
+	return o
+}
+
+func (o *CustomImage) SetImageName(v *string) *CustomImage {
+	if o.ImageName = v; o.ImageName == nil {
+		o.nullFields = append(o.nullFields, "ImageName")
+	}
+	return o
+}
+
+// endregion
+
+// region ResourceFile
+
+func (o ResourceFile) MarshalJSON() ([]byte, error) {
+	type noMethod ResourceFile
+	raw := noMethod(o)
+	return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *ResourceFile) SetURL(v *string) *ResourceFile {
+	if o.URL = v; o.URL == nil {
+		o.nullFields = append(o.nullFields, "URL")
+	}
+	return o
+}
+
+func (o *ResourceFile) SetTargetPath(v *string) *ResourceFile {
+	if o.TargetPath = v; o.TargetPath == nil {
+		o.nullFields = append(o.nullFields, "TargetPath")
+	}
+	return o
+}
+
+// endregion
+
+// region Storage
+
+func (o Storage) MarshalJSON() ([]byte, error) {
+	type noMethod Storage
+	raw := noMethod(o)
+	return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *Storage) SetAccountName(v *string) *Storage {
+	if o.AccountName = v; o.AccountName == nil {
+		o.nullFields = append(o.nullFields, "AccountName")
+	}
+	return o
+}
+
+// endregion
+
+// region Network
+
+func (o Network) MarshalJSON() ([]byte, error) {
+	type noMethod Network
+	raw := noMethod(o)
+	return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *Network) SetVirtualNetworkName(v *string) *Network {
+	if o.VirtualNetworkName = v; o.VirtualNetworkName == nil {
+		o.nullFields = append(o.nullFields, "VirtualNetworkName")
+	}
+	return o
+}
+
+func (o *Network) SetSubnetName(v *string) *Network {
+	if o.SubnetName = v; o.SubnetName == nil {
+		o.nullFields = append(o.nullFields, "SubnetName")
+	}
+	return o
+}
+
+func (o *Network) SetResourceGroupName(v *string) *Network {
+	if o.ResourceGroupName = v; o.ResourceGroupName == nil {
+		o.nullFields = append(o.nullFields, "ResourceGroupName")
+	}
+	return o
+}
+
+func (o *Network) SetAssignPublicIP(v *bool) *Network {
+	if o.AssignPublicIP = v; o.AssignPublicIP == nil {
+		o.nullFields = append(o.nullFields, "AssignPublicIP")
+	}
+	return o
+}
+
+// SetAdditionalIPConfigs sets the additional IP configurations
+func (o *Network) SetAdditionalIPConfigs(v []*AdditionalIPConfigs) *Network {
+	if o.AdditionalIPConfigs = v; o.AdditionalIPConfigs == nil {
+		o.nullFields = append(o.nullFields, "AdditionalIPConfigs")
+	}
+	return o
+}
+
+// endregion
+
+// region Login
+
+func (o Login) MarshalJSON() ([]byte, error) {
+ type noMethod Login
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *Login) SetUserName(v *string) *Login {
+ if o.UserName = v; o.UserName == nil {
+ o.nullFields = append(o.nullFields, "UserName")
+ }
+ return o
+}
+
+func (o *Login) SetSSHPublicKey(v *string) *Login {
+ if o.SSHPublicKey = v; o.SSHPublicKey == nil {
+ o.nullFields = append(o.nullFields, "SSHPublicKey")
+ }
+ return o
+}
+
+func (o *Login) SetPassword(v *string) *Login {
+ if o.Password = v; o.Password == nil {
+ o.nullFields = append(o.nullFields, "Password")
+ }
+ return o
+}
+
+// endregion
+
+// region AdditionalIPConfigs
+
+func (o AdditionalIPConfigs) MarshalJSON() ([]byte, error) {
+ type noMethod AdditionalIPConfigs
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+// SetName sets the name
+func (o *AdditionalIPConfigs) SetName(v *string) *AdditionalIPConfigs {
+ if o.Name = v; o.Name == nil {
+ o.nullFields = append(o.nullFields, "Name")
+ }
+ return o
+}
+
+// SetPrivateIPAddressVersion sets the ip address version
+func (o *AdditionalIPConfigs) SetPrivateIPAddressVersion(v *string) *AdditionalIPConfigs {
+ if o.PrivateIPAddressVersion = v; o.PrivateIPAddressVersion == nil {
+ o.nullFields = append(o.nullFields, "PrivateIPAddressVersion")
+ }
+ return o
+}
+
+// endregion
+
+// region Health
+
+func (o Health) MarshalJSON() ([]byte, error) {
+ type noMethod Health
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *Health) SetHealthCheckType(v *string) *Health {
+ if o.HealthCheckType = v; o.HealthCheckType == nil {
+ o.nullFields = append(o.nullFields, "HealthCheckType")
+ }
+ return o
+}
+
+func (o *Health) SetAutoHealing(v *bool) *Health {
+ if o.AutoHealing = v; o.AutoHealing == nil {
+ o.nullFields = append(o.nullFields, "AutoHealing")
+ }
+ return o
+}
+
+func (o *Health) SetGracePeriod(v *int) *Health {
+ if o.GracePeriod = v; o.GracePeriod == nil {
+ o.nullFields = append(o.nullFields, "GracePeriod")
+ }
+ return o
+}
+
+// endregion
+
+// region NodeSignal
+
+func (o NodeSignal) MarshalJSON() ([]byte, error) {
+ type noMethod NodeSignal
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *NodeSignal) SetNodeID(v *string) *NodeSignal {
+ if o.NodeID = v; o.NodeID == nil {
+ o.nullFields = append(o.nullFields, "NodeID")
+ }
+ return o
+}
+
+func (o *NodeSignal) SetPoolID(v *string) *NodeSignal {
+ if o.PoolID = v; o.PoolID == nil {
+ o.nullFields = append(o.nullFields, "PoolID")
+ }
+ return o
+}
+
+func (o *NodeSignal) SetSignal(v *string) *NodeSignal {
+ if o.Signal = v; o.Signal == nil {
+ o.nullFields = append(o.nullFields, "Signal")
+ }
+ return o
+}
+
+// endregion
+
+// region Roll Group
+
+func (o RollStatus) MarshalJSON() ([]byte, error) {
+ type noMethod RollStatus
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *RollStatus) SetGroupID(v *string) *RollStatus {
+ if o.GroupID = v; o.GroupID == nil {
+ o.nullFields = append(o.nullFields, "GroupID")
+ }
+ return o
+}
+
+func (o *RollStatus) SetRollID(v *string) *RollStatus {
+ if o.RollID = v; o.RollID == nil {
+ o.nullFields = append(o.nullFields, "RollID")
+ }
+ return o
+}
+
+func (o *RollStatus) SetStatus(v *string) *RollStatus {
+ if o.Status = v; o.Status == nil {
+ o.nullFields = append(o.nullFields, "Status")
+ }
+ return o
+}
+
+func (o *RollStatus) SetProgress(v *RollProgress) *RollStatus {
+ if o.Progress = v; o.Progress == nil {
+ o.nullFields = append(o.nullFields, "Progress")
+ }
+ return o
+}
+
+// endregion
+
+// region RollProgress
+
+func (o RollProgress) MarshalJSON() ([]byte, error) {
+ type noMethod RollProgress
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *RollProgress) SetUnit(v *string) *RollProgress {
+ if o.Unit = v; o.Unit == nil {
+ o.nullFields = append(o.nullFields, "Unit")
+ }
+ return o
+}
+
+func (o *RollProgress) SetValue(v *int) *RollProgress {
+ if o.Value = v; o.Value == nil {
+ o.nullFields = append(o.nullFields, "Value")
+ }
+ return o
+}
+
+// endregion
+
+// region Roll
+
+func (o Roll) MarshalJSON() ([]byte, error) {
+ type noMethod Roll
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *Roll) SetStatus(v *string) *Roll {
+ if o.Status = v; o.Status == nil {
+ o.nullFields = append(o.nullFields, "Status")
+ }
+ return o
+}
+
+// endregion
+
+// region Tasks
+
+func (o Task) MarshalJSON() ([]byte, error) {
+ type noMethod Task
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *Task) SetId(v *string) *Task {
+ if o.ID = v; o.ID == nil {
+ o.nullFields = append(o.nullFields, "ID")
+ }
+ return o
+}
+
+func (o *Task) SetName(v *string) *Task {
+ if o.Name = v; o.Name == nil {
+ o.nullFields = append(o.nullFields, "Name")
+ }
+ return o
+}
+
+func (o *Task) SetDescription(v *string) *Task {
+ if o.Description = v; o.Description == nil {
+ o.nullFields = append(o.nullFields, "Description")
+ }
+ return o
+}
+
+func (o *Task) SetState(v *string) *Task {
+ if o.State = v; o.State == nil {
+ o.nullFields = append(o.nullFields, "State")
+ }
+ return o
+}
+
+func (o *Task) SetPolicies(v []*TaskPolicy) *Task {
+ if o.Policies = v; o.Policies == nil {
+ o.nullFields = append(o.nullFields, "Policies")
+ }
+ return o
+}
+
+func (o *Task) SetInstances(v []*TaskInstance) *Task {
+ if o.Instances = v; o.Instances == nil {
+ o.nullFields = append(o.nullFields, "Instances")
+ }
+ return o
+}
+
+// endregion
+
+// region TaskPolicy
+
+func (o TaskPolicy) MarshalJSON() ([]byte, error) {
+ type noMethod TaskPolicy
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *TaskPolicy) SetCron(v *string) *TaskPolicy {
+ if o.Cron = v; o.Cron == nil {
+ o.nullFields = append(o.nullFields, "Cron")
+ }
+ return o
+}
+
+func (o *TaskPolicy) SetAction(v *string) *TaskPolicy {
+ if o.Action = v; o.Action == nil {
+ o.nullFields = append(o.nullFields, "Action")
+ }
+ return o
+}
+
+// endregion
+
+// region TaskInstance
+
+func (o TaskInstance) MarshalJSON() ([]byte, error) {
+ type noMethod TaskInstance
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *TaskInstance) SetVMName(v *string) *TaskInstance {
+ if o.VMName = v; o.VMName == nil {
+ o.nullFields = append(o.nullFields, "VMName")
+ }
+ return o
+}
+
+func (o *TaskInstance) SetResourceGroupName(v *string) *TaskInstance {
+ if o.ResourceGroupName = v; o.ResourceGroupName == nil {
+ o.nullFields = append(o.nullFields, "ResourceGroupName")
+ }
+ return o
+}
+
+// endregion
diff --git a/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/azure/service.go b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/azure/service.go
new file mode 100644
index 000000000000..f15a3b0ebba9
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/azure/service.go
@@ -0,0 +1,51 @@
+package azure
+
+import (
+ "context"
+
+ "github.com/spotinst/spotinst-sdk-go/spotinst"
+ "github.com/spotinst/spotinst-sdk-go/spotinst/client"
+ "github.com/spotinst/spotinst-sdk-go/spotinst/session"
+)
+
+// Service provides the API operation methods for making requests to endpoints
+// of the Spotinst API. See this package's package overview docs for details on
+// the service.
+type Service interface {
+ List(context.Context, *ListGroupsInput) (*ListGroupsOutput, error)
+ Create(context.Context, *CreateGroupInput) (*CreateGroupOutput, error)
+ Read(context.Context, *ReadGroupInput) (*ReadGroupOutput, error)
+ Update(context.Context, *UpdateGroupInput) (*UpdateGroupOutput, error)
+ Delete(context.Context, *DeleteGroupInput) (*DeleteGroupOutput, error)
+ Status(context.Context, *StatusGroupInput) (*StatusGroupOutput, error)
+ Detach(context.Context, *DetachGroupInput) (*DetachGroupOutput, error)
+ Scale(context.Context, *ScaleGroupInput) (*ScaleGroupOutput, error)
+ CreateNodeSignal(context.Context, *NodeSignalInput) (*NodeSignalOutput, error)
+
+ Roll(context.Context, *RollGroupInput) (*RollGroupOutput, error)
+ GetRollStatus(context.Context, *RollStatusInput) (*RollStatusOutput, error)
+ ListRollStatus(context.Context, *ListRollStatusInput) (*ListRollStatusOutput, error)
+ StopRoll(context.Context, *StopRollInput) (*StopRollOutput, error)
+
+ ListTasks(context.Context, *ListTasksInput) (*ListTasksOutput, error)
+ CreateTask(context.Context, *CreateTaskInput) (*CreateTaskOutput, error)
+ ReadTask(context.Context, *ReadTaskInput) (*ReadTaskOutput, error)
+ UpdateTask(context.Context, *UpdateTaskInput) (*UpdateTaskOutput, error)
+ DeleteTask(context.Context, *DeleteTaskInput) (*DeleteTaskOutput, error)
+}
+
+type ServiceOp struct {
+ Client *client.Client
+}
+
+var _ Service = &ServiceOp{}
+
+func New(sess *session.Session, cfgs ...*spotinst.Config) *ServiceOp {
+ cfg := &spotinst.Config{}
+ cfg.Merge(sess.Config)
+ cfg.Merge(cfgs...)
+
+ return &ServiceOp{
+ Client: client.New(sess.Config),
+ }
+}
diff --git a/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/azure/tag.go b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/azure/tag.go
new file mode 100644
index 000000000000..b70da049949d
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/azure/tag.go
@@ -0,0 +1,31 @@
+package azure
+
+import "github.com/spotinst/spotinst-sdk-go/spotinst/util/jsonutil"
+
+type Tag struct {
+ Key *string `json:"tagKey,omitempty"`
+ Value *string `json:"tagValue,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+func (o Tag) MarshalJSON() ([]byte, error) {
+ type noMethod Tag
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *Tag) SetKey(v *string) *Tag {
+ if o.Key = v; o.Key == nil {
+ o.nullFields = append(o.nullFields, "Key")
+ }
+ return o
+}
+
+func (o *Tag) SetValue(v *string) *Tag {
+ if o.Value = v; o.Value == nil {
+ o.nullFields = append(o.nullFields, "Value")
+ }
+ return o
+}
diff --git a/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/azure/v3/azure.go b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/azure/v3/azure.go
new file mode 100644
index 000000000000..8f53835ca377
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/azure/v3/azure.go
@@ -0,0 +1,997 @@
+package v3
+
+import (
+ "context"
+ "encoding/json"
+ "io/ioutil"
+ "net/http"
+ "time"
+
+ "github.com/spotinst/spotinst-sdk-go/spotinst"
+ "github.com/spotinst/spotinst-sdk-go/spotinst/client"
+ "github.com/spotinst/spotinst-sdk-go/spotinst/util/jsonutil"
+ "github.com/spotinst/spotinst-sdk-go/spotinst/util/uritemplates"
+)
+
+type Group struct {
+ ID *string `json:"id,omitempty"`
+ Name *string `json:"name,omitempty"`
+ ResourceGroupName *string `json:"resourceGroupName,omitempty"`
+ Region *string `json:"region,omitempty"`
+ Capacity *Capacity `json:"capacity,omitempty"`
+ Compute *Compute `json:"compute,omitempty"`
+ Strategy *Strategy `json:"strategy,omitempty"`
+
+ // Read-only fields.
+ CreatedAt *time.Time `json:"createdAt,omitempty"`
+ UpdatedAt *time.Time `json:"updatedAt,omitempty"`
+
+ // forceSendFields is a list of field names (e.g. "Keys") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ forceSendFields []string
+
+ // nullFields is a list of field names (e.g. "Keys") to include in API
+ // requests with the JSON null value. By default, fields with empty
+ // values are omitted from API requests. However, any field with an
+ // empty value appearing in NullFields will be sent to the server as
+ // null. It is an error if a field in this list has a non-empty value.
+ // This may be used to include null fields in Patch requests.
+ nullFields []string
+}
+
+type Strategy struct {
+ OnDemandCount *int `json:"onDemandCount,omitempty"`
+ DrainingTimeout *int `json:"drainingTimeout,omitempty"`
+ SpotPercentage *int `json:"spotPercentage,omitempty"`
+ FallbackToOnDemand *bool `json:"fallbackToOd,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type Capacity struct {
+ Minimum *int `json:"minimum,omitempty"`
+ Maximum *int `json:"maximum,omitempty"`
+ Target *int `json:"target,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type Compute struct {
+ VMSizes *VMSizes `json:"vmSizes,omitempty"`
+ OS *string `json:"os,omitempty"`
+ LaunchSpecification *LaunchSpecification `json:"launchSpecification,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type VMSizes struct {
+ OnDemandSizes []string `json:"odSizes,omitempty"`
+ SpotSizes []string `json:"spotSizes,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type LaunchSpecification struct {
+ Image *Image `json:"image,omitempty"`
+ Network *Network `json:"network,omitempty"`
+ Login *Login `json:"login,omitempty"`
+ CustomData *string `json:"customData,omitempty"`
+ ManagedServiceIdentities []*ManagedServiceIdentity `json:"managedServiceIdentities,omitempty"`
+ Tags []*Tags `json:"tags,omitempty"`
+ LoadBalancersConfig *LoadBalancersConfig `json:"loadBalancersConfig,omitempty"`
+ ShutdownScript *string `json:"shutdownScript,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type LoadBalancersConfig struct {
+ LoadBalancers []*LoadBalancer `json:"loadBalancers,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type LoadBalancer struct {
+ Type *string `json:"type,omitempty"`
+ ResourceGroupName *string `json:"resourceGroupName,omitempty"`
+ Name *string `json:"name,omitempty"`
+ SKU *string `json:"sku,omitempty"`
+ BackendPoolNames []string `json:"backendPoolNames,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type Image struct {
+ MarketPlace *MarketPlaceImage `json:"marketplace,omitempty"`
+ Custom *CustomImage `json:"custom,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type MarketPlaceImage struct {
+ Publisher *string `json:"publisher,omitempty"`
+ Offer *string `json:"offer,omitempty"`
+ SKU *string `json:"sku,omitempty"`
+ Version *string `json:"version,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type CustomImage struct {
+ ResourceGroupName *string `json:"resourceGroupName,omitempty"`
+ Name *string `json:"name,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type Network struct {
+ VirtualNetworkName *string `json:"virtualNetworkName,omitempty"`
+ ResourceGroupName *string `json:"resourceGroupName,omitempty"`
+ NetworkInterfaces []*NetworkInterface `json:"networkInterfaces,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type NetworkInterface struct {
+ SubnetName *string `json:"subnetName,omitempty"`
+ AssignPublicIP *bool `json:"assignPublicIp,omitempty"`
+ IsPrimary *bool `json:"isPrimary,omitempty"`
+ AdditionalIPConfigs []*AdditionalIPConfig `json:"additionalIpConfigurations,omitempty"`
+ ApplicationSecurityGroups []*ApplicationSecurityGroup `json:"applicationSecurityGroups,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type AdditionalIPConfig struct {
+ Name *string `json:"name,omitempty"`
+ PrivateIPAddressVersion *string `json:"privateIpAddressVersion,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type Login struct {
+ UserName *string `json:"userName,omitempty"`
+ SSHPublicKey *string `json:"sshPublicKey,omitempty"`
+ Password *string `json:"password,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type ApplicationSecurityGroup struct {
+ Name *string `json:"name,omitempty"`
+ ResourceGroupName *string `json:"resourceGroupName,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type ManagedServiceIdentity struct {
+ ResourceGroupName *string `json:"resourceGroupName,omitempty"`
+ Name *string `json:"name,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type Tags struct {
+ TagKey *string `json:"tagKey,omitempty"`
+ TagValue *string `json:"tagValue,omitempty"`
+
+ forceSendFields []string
+ nullFields []string
+}
+
+type CreateGroupInput struct {
+ Group *Group `json:"group,omitempty"`
+}
+
+type CreateGroupOutput struct {
+ Group *Group `json:"group,omitempty"`
+}
+
+type ReadGroupInput struct {
+ GroupID *string `json:"groupId,omitempty"`
+}
+
+type ReadGroupOutput struct {
+ Group *Group `json:"group,omitempty"`
+}
+
+type UpdateGroupInput struct {
+ Group *Group `json:"group,omitempty"`
+}
+
+type UpdateGroupOutput struct {
+ Group *Group `json:"group,omitempty"`
+}
+
+type DeleteGroupInput struct {
+ GroupID *string `json:"groupId,omitempty"`
+}
+
+type DeleteGroupOutput struct{}
+
+type ListGroupsInput struct{}
+
+type ListGroupsOutput struct {
+ Groups []*Group `json:"groups,omitempty"`
+}
+
+// region Unmarshallers
+
+func groupFromJSON(in []byte) (*Group, error) {
+ b := new(Group)
+ if err := json.Unmarshal(in, b); err != nil {
+ return nil, err
+ }
+ return b, nil
+}
+
+func groupsFromJSON(in []byte) ([]*Group, error) {
+ var rw client.Response
+ if err := json.Unmarshal(in, &rw); err != nil {
+ return nil, err
+ }
+ out := make([]*Group, len(rw.Response.Items))
+ if len(out) == 0 {
+ return out, nil
+ }
+ for i, rb := range rw.Response.Items {
+ b, err := groupFromJSON(rb)
+ if err != nil {
+ return nil, err
+ }
+ out[i] = b
+ }
+ return out, nil
+}
+
+func groupsFromHttpResponse(resp *http.Response) ([]*Group, error) {
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, err
+ }
+ return groupsFromJSON(body)
+}
+
+// endregion
+
+// region API requests
+
+func (s *ServiceOp) List(ctx context.Context, input *ListGroupsInput) (*ListGroupsOutput, error) {
+ r := client.NewRequest(http.MethodGet, "/azure/compute/group")
+ resp, err := client.RequireOK(s.Client.Do(ctx, r))
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ gs, err := groupsFromHttpResponse(resp)
+ if err != nil {
+ return nil, err
+ }
+
+ return &ListGroupsOutput{Groups: gs}, nil
+}
+
+func (s *ServiceOp) Create(ctx context.Context, input *CreateGroupInput) (*CreateGroupOutput, error) {
+ r := client.NewRequest(http.MethodPost, "/azure/compute/group")
+ r.Obj = input
+
+ resp, err := client.RequireOK(s.Client.Do(ctx, r))
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ gs, err := groupsFromHttpResponse(resp)
+ if err != nil {
+ return nil, err
+ }
+
+ output := new(CreateGroupOutput)
+ if len(gs) > 0 {
+ output.Group = gs[0]
+ }
+
+ return output, nil
+}
+
+func (s *ServiceOp) Read(ctx context.Context, input *ReadGroupInput) (*ReadGroupOutput, error) {
+ path, err := uritemplates.Expand("/azure/compute/group/{groupId}", uritemplates.Values{
+ "groupId": spotinst.StringValue(input.GroupID),
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ r := client.NewRequest(http.MethodGet, path)
+ resp, err := client.RequireOK(s.Client.Do(ctx, r))
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ gs, err := groupsFromHttpResponse(resp)
+ if err != nil {
+ return nil, err
+ }
+
+ output := new(ReadGroupOutput)
+ if len(gs) > 0 {
+ output.Group = gs[0]
+ }
+
+ return output, nil
+}
+
+func (s *ServiceOp) Update(ctx context.Context, input *UpdateGroupInput) (*UpdateGroupOutput, error) {
+ path, err := uritemplates.Expand("/azure/compute/group/{groupId}", uritemplates.Values{
+ "groupId": spotinst.StringValue(input.Group.ID),
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ // We do NOT need the ID anymore, so let's drop it.
+ input.Group.ID = nil
+
+ r := client.NewRequest(http.MethodPut, path)
+ r.Obj = input
+
+ resp, err := client.RequireOK(s.Client.Do(ctx, r))
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ gs, err := groupsFromHttpResponse(resp)
+ if err != nil {
+ return nil, err
+ }
+
+ output := new(UpdateGroupOutput)
+ if len(gs) > 0 {
+ output.Group = gs[0]
+ }
+
+ return output, nil
+}
+
+func (s *ServiceOp) Delete(ctx context.Context, input *DeleteGroupInput) (*DeleteGroupOutput, error) {
+ path, err := uritemplates.Expand("/azure/compute/group/{groupId}", uritemplates.Values{
+ "groupId": spotinst.StringValue(input.GroupID),
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ r := client.NewRequest(http.MethodDelete, path)
+ resp, err := client.RequireOK(s.Client.Do(ctx, r))
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ return &DeleteGroupOutput{}, nil
+}
+
+// endregion
+
+// region Group
+
+func (o Group) MarshalJSON() ([]byte, error) {
+ type noMethod Group
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *Group) SetId(v *string) *Group {
+ if o.ID = v; o.ID == nil {
+ o.nullFields = append(o.nullFields, "ID")
+ }
+ return o
+}
+
+func (o *Group) SetName(v *string) *Group {
+ if o.Name = v; o.Name == nil {
+ o.nullFields = append(o.nullFields, "Name")
+ }
+ return o
+}
+
+func (o *Group) SetResourceGroupName(v *string) *Group {
+ if o.ResourceGroupName = v; o.ResourceGroupName == nil {
+ o.nullFields = append(o.nullFields, "ResourceGroupName")
+ }
+ return o
+}
+
+func (o *Group) SetCapacity(v *Capacity) *Group {
+ if o.Capacity = v; o.Capacity == nil {
+ o.nullFields = append(o.nullFields, "Capacity")
+ }
+ return o
+}
+
+func (o *Group) SetCompute(v *Compute) *Group {
+ if o.Compute = v; o.Compute == nil {
+ o.nullFields = append(o.nullFields, "Compute")
+ }
+ return o
+}
+
+func (o *Group) SetStrategy(v *Strategy) *Group {
+ if o.Strategy = v; o.Strategy == nil {
+ o.nullFields = append(o.nullFields, "Strategy")
+ }
+ return o
+}
+
+func (o *Group) SetRegion(v *string) *Group {
+ if o.Region = v; o.Region == nil {
+ o.nullFields = append(o.nullFields, "Region")
+ }
+ return o
+}
+
+// endregion
+
+// region Strategy
+
+func (o Strategy) MarshalJSON() ([]byte, error) {
+ type noMethod Strategy
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *Strategy) SetOnDemandCount(v *int) *Strategy {
+ if o.OnDemandCount = v; o.OnDemandCount == nil {
+ o.nullFields = append(o.nullFields, "OnDemandCount")
+ }
+ return o
+}
+
+func (o *Strategy) SetDrainingTimeout(v *int) *Strategy {
+ if o.DrainingTimeout = v; o.DrainingTimeout == nil {
+ o.nullFields = append(o.nullFields, "DrainingTimeout")
+ }
+ return o
+}
+
+func (o *Strategy) SetSpotPercentage(v *int) *Strategy {
+ if o.SpotPercentage = v; o.SpotPercentage == nil {
+ o.nullFields = append(o.nullFields, "SpotPercentage")
+ }
+ return o
+}
+
+func (o *Strategy) SetFallbackToOnDemand(v *bool) *Strategy {
+ if o.FallbackToOnDemand = v; o.FallbackToOnDemand == nil {
+ o.nullFields = append(o.nullFields, "FallbackToOnDemand")
+ }
+ return o
+}
+
+// endregion
+
+// region Capacity
+
+func (o Capacity) MarshalJSON() ([]byte, error) {
+ type noMethod Capacity
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *Capacity) SetMinimum(v *int) *Capacity {
+ if o.Minimum = v; o.Minimum == nil {
+ o.nullFields = append(o.nullFields, "Minimum")
+ }
+ return o
+}
+
+func (o *Capacity) SetMaximum(v *int) *Capacity {
+ if o.Maximum = v; o.Maximum == nil {
+ o.nullFields = append(o.nullFields, "Maximum")
+ }
+ return o
+}
+
+func (o *Capacity) SetTarget(v *int) *Capacity {
+ if o.Target = v; o.Target == nil {
+ o.nullFields = append(o.nullFields, "Target")
+ }
+ return o
+}
+
+// endregion
+
+// region Compute
+
+func (o Compute) MarshalJSON() ([]byte, error) {
+ type noMethod Compute
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *Compute) SetVMSizes(v *VMSizes) *Compute {
+ if o.VMSizes = v; o.VMSizes == nil {
+ o.nullFields = append(o.nullFields, "VMSizes")
+ }
+ return o
+}
+
+func (o *Compute) SetOS(v *string) *Compute {
+ if o.OS = v; o.OS == nil {
+ o.nullFields = append(o.nullFields, "OS")
+ }
+ return o
+}
+
+func (o *Compute) SetLaunchSpecification(v *LaunchSpecification) *Compute {
+ if o.LaunchSpecification = v; o.LaunchSpecification == nil {
+ o.nullFields = append(o.nullFields, "LaunchSpecification")
+ }
+ return o
+}
+
+// endregion
+
+// region VMSize
+
+func (o VMSizes) MarshalJSON() ([]byte, error) {
+ type noMethod VMSizes
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *VMSizes) SetOnDemandSizes(v []string) *VMSizes {
+ if o.OnDemandSizes = v; o.OnDemandSizes == nil {
+ o.nullFields = append(o.nullFields, "OnDemandSizes")
+ }
+ return o
+}
+
+func (o *VMSizes) SetSpotSizes(v []string) *VMSizes {
+ if o.SpotSizes = v; o.SpotSizes == nil {
+ o.nullFields = append(o.nullFields, "SpotSizes")
+ }
+ return o
+}
+
+// endregion
+
+// region LaunchSpecification
+
+func (o LaunchSpecification) MarshalJSON() ([]byte, error) {
+ type noMethod LaunchSpecification
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *LaunchSpecification) SetImage(v *Image) *LaunchSpecification {
+ if o.Image = v; o.Image == nil {
+ o.nullFields = append(o.nullFields, "Image")
+ }
+ return o
+}
+
+func (o *LaunchSpecification) SetNetwork(v *Network) *LaunchSpecification {
+ if o.Network = v; o.Network == nil {
+ o.nullFields = append(o.nullFields, "Network")
+ }
+ return o
+}
+
+func (o *LaunchSpecification) SetLogin(v *Login) *LaunchSpecification {
+ if o.Login = v; o.Login == nil {
+ o.nullFields = append(o.nullFields, "Login")
+ }
+ return o
+}
+
+func (o *LaunchSpecification) SetCustomData(v *string) *LaunchSpecification {
+ if o.CustomData = v; o.CustomData == nil {
+ o.nullFields = append(o.nullFields, "CustomData")
+ }
+ return o
+}
+
+func (o *LaunchSpecification) SetManagedServiceIdentities(v []*ManagedServiceIdentity) *LaunchSpecification {
+ if o.ManagedServiceIdentities = v; o.ManagedServiceIdentities == nil {
+ o.nullFields = append(o.nullFields, "ManagedServiceIdentities")
+ }
+ return o
+}
+
+func (o *LaunchSpecification) SetLoadBalancersConfig(v *LoadBalancersConfig) *LaunchSpecification {
+ if o.LoadBalancersConfig = v; o.LoadBalancersConfig == nil {
+ o.nullFields = append(o.nullFields, "LoadBalancersConfig")
+ }
+ return o
+}
+
+// SetShutdownScript sets the shutdown script used when draining instances
+func (o *LaunchSpecification) SetShutdownScript(v *string) *LaunchSpecification {
+ if o.ShutdownScript = v; o.ShutdownScript == nil {
+ o.nullFields = append(o.nullFields, "ShutdownScript")
+ }
+ return o
+}
+
+func (o *LaunchSpecification) SetTags(v []*Tags) *LaunchSpecification {
+ if o.Tags = v; o.Tags == nil {
+ o.nullFields = append(o.nullFields, "Tags")
+ }
+ return o
+}
+
+// endregion
+
+// region Image
+
+func (o Image) MarshalJSON() ([]byte, error) {
+ type noMethod Image
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *Image) SetMarketPlaceImage(v *MarketPlaceImage) *Image {
+ if o.MarketPlace = v; o.MarketPlace == nil {
+ o.nullFields = append(o.nullFields, "MarketPlace")
+ }
+ return o
+}
+
+func (o *Image) SetCustom(v *CustomImage) *Image {
+ if o.Custom = v; o.Custom == nil {
+ o.nullFields = append(o.nullFields, "Custom")
+ }
+ return o
+}
+
+// endregion
+
+// region MarketPlaceImage
+
+func (o MarketPlaceImage) MarshalJSON() ([]byte, error) {
+ type noMethod MarketPlaceImage
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *MarketPlaceImage) SetPublisher(v *string) *MarketPlaceImage {
+ if o.Publisher = v; o.Publisher == nil {
+ o.nullFields = append(o.nullFields, "Publisher")
+ }
+ return o
+}
+
+func (o *MarketPlaceImage) SetOffer(v *string) *MarketPlaceImage {
+ if o.Offer = v; o.Offer == nil {
+ o.nullFields = append(o.nullFields, "Offer")
+ }
+ return o
+}
+
+func (o *MarketPlaceImage) SetSKU(v *string) *MarketPlaceImage {
+ if o.SKU = v; o.SKU == nil {
+ o.nullFields = append(o.nullFields, "SKU")
+ }
+ return o
+}
+
+func (o *MarketPlaceImage) SetVersion(v *string) *MarketPlaceImage {
+ if o.Version = v; o.Version == nil {
+ o.nullFields = append(o.nullFields, "Version")
+ }
+ return o
+}
+
+// endregion
+
+// region Tags
+
+func (o Tags) MarshalJSON() ([]byte, error) {
+ type noMethod Tags
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *Tags) SetTagKey(v *string) *Tags {
+ if o.TagKey = v; o.TagKey == nil {
+ o.nullFields = append(o.nullFields, "TagKey")
+ }
+ return o
+}
+
+func (o *Tags) SetTagValue(v *string) *Tags {
+ if o.TagValue = v; o.TagValue == nil {
+ o.nullFields = append(o.nullFields, "TagValue")
+ }
+ return o
+}
+
+// endregion
+
+// region CustomImage
+
+func (o CustomImage) MarshalJSON() ([]byte, error) {
+ type noMethod CustomImage
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+func (o *CustomImage) SetResourceGroupName(v *string) *CustomImage {
+ if o.ResourceGroupName = v; o.ResourceGroupName == nil {
+ o.nullFields = append(o.nullFields, "ResourceGroupName")
+ }
+ return o
+}
+
+func (o *CustomImage) SetName(v *string) *CustomImage {
+ if o.Name = v; o.Name == nil {
+ o.nullFields = append(o.nullFields, "Name")
+ }
+ return o
+}
+
+// endregion
+
+// region Network
+
+// MarshalJSON serializes Network honoring forceSendFields and nullFields.
+func (o Network) MarshalJSON() ([]byte, error) {
+	type noMethod Network
+	raw := noMethod(o)
+	return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+// SetVirtualNetworkName sets the virtual network name; nil marks the field
+// as JSON null.
+func (o *Network) SetVirtualNetworkName(v *string) *Network {
+	if o.VirtualNetworkName = v; o.VirtualNetworkName == nil {
+		o.nullFields = append(o.nullFields, "VirtualNetworkName")
+	}
+	return o
+}
+
+// SetResourceGroupName sets the network's resource group name; nil marks the
+// field as JSON null.
+func (o *Network) SetResourceGroupName(v *string) *Network {
+	if o.ResourceGroupName = v; o.ResourceGroupName == nil {
+		o.nullFields = append(o.nullFields, "ResourceGroupName")
+	}
+	return o
+}
+
+// SetNetworkInterfaces sets the list of network interfaces; nil marks the
+// field as JSON null.
+func (o *Network) SetNetworkInterfaces(v []*NetworkInterface) *Network {
+	if o.NetworkInterfaces = v; o.NetworkInterfaces == nil {
+		o.nullFields = append(o.nullFields, "NetworkInterfaces")
+	}
+	return o
+}
+
+// endregion
+
+// region NetworkInterface
+
+// MarshalJSON serializes NetworkInterface honoring forceSendFields and nullFields.
+func (o NetworkInterface) MarshalJSON() ([]byte, error) {
+	type noMethod NetworkInterface
+	raw := noMethod(o)
+	return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+// SetSubnetName sets the subnet name; nil marks the field as JSON null.
+func (o *NetworkInterface) SetSubnetName(v *string) *NetworkInterface {
+	if o.SubnetName = v; o.SubnetName == nil {
+		o.nullFields = append(o.nullFields, "SubnetName")
+	}
+	return o
+}
+
+// SetAdditionalIPConfigs sets the additional IP configurations; nil marks the
+// field as JSON null.
+func (o *NetworkInterface) SetAdditionalIPConfigs(v []*AdditionalIPConfig) *NetworkInterface {
+	if o.AdditionalIPConfigs = v; o.AdditionalIPConfigs == nil {
+		o.nullFields = append(o.nullFields, "AdditionalIPConfigs")
+	}
+	return o
+}
+
+// SetAssignPublicIP sets whether a public IP is assigned; nil marks the field
+// as JSON null.
+func (o *NetworkInterface) SetAssignPublicIP(v *bool) *NetworkInterface {
+	if o.AssignPublicIP = v; o.AssignPublicIP == nil {
+		o.nullFields = append(o.nullFields, "AssignPublicIP")
+	}
+	return o
+}
+
+// SetIsPrimary sets whether this is the primary interface; nil marks the
+// field as JSON null.
+func (o *NetworkInterface) SetIsPrimary(v *bool) *NetworkInterface {
+	if o.IsPrimary = v; o.IsPrimary == nil {
+		o.nullFields = append(o.nullFields, "IsPrimary")
+	}
+	return o
+}
+
+// SetApplicationSecurityGroups sets the application security groups; nil
+// marks the field as JSON null.
+func (o *NetworkInterface) SetApplicationSecurityGroups(v []*ApplicationSecurityGroup) *NetworkInterface {
+	if o.ApplicationSecurityGroups = v; o.ApplicationSecurityGroups == nil {
+		o.nullFields = append(o.nullFields, "ApplicationSecurityGroups")
+	}
+	return o
+}
+
+// endregion
+
+// region AdditionalIPConfig
+
+// MarshalJSON serializes AdditionalIPConfig honoring forceSendFields and nullFields.
+func (o AdditionalIPConfig) MarshalJSON() ([]byte, error) {
+	type noMethod AdditionalIPConfig
+	raw := noMethod(o)
+	return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+// SetName sets the IP configuration name; nil marks the field as JSON null.
+func (o *AdditionalIPConfig) SetName(v *string) *AdditionalIPConfig {
+	if o.Name = v; o.Name == nil {
+		o.nullFields = append(o.nullFields, "Name")
+	}
+	return o
+}
+
+// SetPrivateIPAddressVersion sets the private IP address version; nil marks
+// the field as JSON null.
+func (o *AdditionalIPConfig) SetPrivateIPAddressVersion(v *string) *AdditionalIPConfig {
+	if o.PrivateIPAddressVersion = v; o.PrivateIPAddressVersion == nil {
+		o.nullFields = append(o.nullFields, "PrivateIPAddressVersion")
+	}
+	return o
+}
+
+// endregion
+
+// region Login
+
+// MarshalJSON serializes Login honoring forceSendFields and nullFields.
+func (o Login) MarshalJSON() ([]byte, error) {
+	type noMethod Login
+	raw := noMethod(o)
+	return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+// SetUserName sets the login user name; nil marks the field as JSON null.
+func (o *Login) SetUserName(v *string) *Login {
+	if o.UserName = v; o.UserName == nil {
+		o.nullFields = append(o.nullFields, "UserName")
+	}
+	return o
+}
+
+// SetSSHPublicKey sets the SSH public key; nil marks the field as JSON null.
+func (o *Login) SetSSHPublicKey(v *string) *Login {
+	if o.SSHPublicKey = v; o.SSHPublicKey == nil {
+		o.nullFields = append(o.nullFields, "SSHPublicKey")
+	}
+	return o
+}
+
+// SetPassword sets the login password; nil marks the field as JSON null.
+func (o *Login) SetPassword(v *string) *Login {
+	if o.Password = v; o.Password == nil {
+		o.nullFields = append(o.nullFields, "Password")
+	}
+	return o
+}
+
+// endregion
+
+// region ApplicationSecurityGroup
+
+// MarshalJSON serializes ApplicationSecurityGroup honoring forceSendFields
+// and nullFields.
+func (o ApplicationSecurityGroup) MarshalJSON() ([]byte, error) {
+	type noMethod ApplicationSecurityGroup
+	raw := noMethod(o)
+	return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+// SetName sets the security group name; nil marks the field as JSON null.
+func (o *ApplicationSecurityGroup) SetName(v *string) *ApplicationSecurityGroup {
+	if o.Name = v; o.Name == nil {
+		o.nullFields = append(o.nullFields, "Name")
+	}
+	return o
+}
+
+// SetResourceGroupName sets the security group's resource group name; nil
+// marks the field as JSON null.
+func (o *ApplicationSecurityGroup) SetResourceGroupName(v *string) *ApplicationSecurityGroup {
+	if o.ResourceGroupName = v; o.ResourceGroupName == nil {
+		o.nullFields = append(o.nullFields, "ResourceGroupName")
+	}
+	return o
+}
+
+// endregion
+
+// region ManagedServiceIdentity
+
+// MarshalJSON serializes ManagedServiceIdentity honoring forceSendFields and
+// nullFields.
+func (o ManagedServiceIdentity) MarshalJSON() ([]byte, error) {
+	type noMethod ManagedServiceIdentity
+	raw := noMethod(o)
+	return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+// SetResourceGroupName sets the identity's resource group name; nil marks the
+// field as JSON null.
+func (o *ManagedServiceIdentity) SetResourceGroupName(v *string) *ManagedServiceIdentity {
+	if o.ResourceGroupName = v; o.ResourceGroupName == nil {
+		o.nullFields = append(o.nullFields, "ResourceGroupName")
+	}
+	return o
+}
+
+// SetName sets the identity name; nil marks the field as JSON null.
+func (o *ManagedServiceIdentity) SetName(v *string) *ManagedServiceIdentity {
+	if o.Name = v; o.Name == nil {
+		o.nullFields = append(o.nullFields, "Name")
+	}
+	return o
+}
+
+// endregion
+
+// region LoadBalancersConfig
+
+// MarshalJSON serializes LoadBalancersConfig honoring forceSendFields and
+// nullFields.
+func (o LoadBalancersConfig) MarshalJSON() ([]byte, error) {
+	type noMethod LoadBalancersConfig
+	raw := noMethod(o)
+	return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+// SetLoadBalancers sets the list of load balancers; nil marks the field as
+// JSON null.
+func (o *LoadBalancersConfig) SetLoadBalancers(v []*LoadBalancer) *LoadBalancersConfig {
+	if o.LoadBalancers = v; o.LoadBalancers == nil {
+		o.nullFields = append(o.nullFields, "LoadBalancers")
+	}
+	return o
+}
+
+// endregion
+
+// region LoadBalancer
+
+// MarshalJSON serializes LoadBalancer honoring forceSendFields and nullFields.
+func (o LoadBalancer) MarshalJSON() ([]byte, error) {
+	type noMethod LoadBalancer
+	raw := noMethod(o)
+	return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+// SetType sets the load balancer type; nil marks the field as JSON null.
+func (o *LoadBalancer) SetType(v *string) *LoadBalancer {
+	if o.Type = v; o.Type == nil {
+		o.nullFields = append(o.nullFields, "Type")
+	}
+	return o
+}
+
+// SetResourceGroupName sets the load balancer's resource group name; nil
+// marks the field as JSON null.
+func (o *LoadBalancer) SetResourceGroupName(v *string) *LoadBalancer {
+	if o.ResourceGroupName = v; o.ResourceGroupName == nil {
+		o.nullFields = append(o.nullFields, "ResourceGroupName")
+	}
+	return o
+}
+
+// SetName sets the load balancer name; nil marks the field as JSON null.
+func (o *LoadBalancer) SetName(v *string) *LoadBalancer {
+	if o.Name = v; o.Name == nil {
+		o.nullFields = append(o.nullFields, "Name")
+	}
+	return o
+}
+
+// SetSKU sets the load balancer SKU; nil marks the field as JSON null.
+func (o *LoadBalancer) SetSKU(v *string) *LoadBalancer {
+	if o.SKU = v; o.SKU == nil {
+		o.nullFields = append(o.nullFields, "SKU")
+	}
+	return o
+}
+
+// SeBackendPoolNames sets the backend pool names; nil marks the field as JSON
+// null.
+// NOTE(review): the method name is missing a 't' ("SetBackendPoolNames")
+// upstream; it is kept as-is because it is part of the exported vendored API.
+func (o *LoadBalancer) SeBackendPoolNames(v []string) *LoadBalancer {
+	if o.BackendPoolNames = v; o.BackendPoolNames == nil {
+		o.nullFields = append(o.nullFields, "BackendPoolNames")
+	}
+	return o
+}
+
+// endregion
diff --git a/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/azure/v3/service.go b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/azure/v3/service.go
new file mode 100644
index 000000000000..8af9d2699699
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/azure/v3/service.go
@@ -0,0 +1,36 @@
+package v3
+
+import (
+ "context"
+
+ "github.com/spotinst/spotinst-sdk-go/spotinst"
+ "github.com/spotinst/spotinst-sdk-go/spotinst/client"
+ "github.com/spotinst/spotinst-sdk-go/spotinst/session"
+)
+
+// Service provides the API operation methods for making requests to endpoints
+// of the Spotinst API. See this package's package overview docs for details on
+// the service.
+type Service interface {
+	// Create creates a new Elastigroup.
+	Create(context.Context, *CreateGroupInput) (*CreateGroupOutput, error)
+	// Read returns the configuration of a single existing Elastigroup.
+	Read(context.Context, *ReadGroupInput) (*ReadGroupOutput, error)
+	// Update modifies the configuration of a single existing Elastigroup.
+	Update(context.Context, *UpdateGroupInput) (*UpdateGroupOutput, error)
+	// Delete removes a single existing Elastigroup.
+	Delete(context.Context, *DeleteGroupInput) (*DeleteGroupOutput, error)
+	// List returns the configuration of all existing Elastigroups.
+	List(context.Context, *ListGroupsInput) (*ListGroupsOutput, error)
+}
+
+// ServiceOp implements Service using a Spotinst API client.
+type ServiceOp struct {
+	Client *client.Client
+}
+
+// Compile-time check that ServiceOp satisfies the Service interface.
+var _ Service = &ServiceOp{}
+
+// New returns a new Elastigroup Azure v3 service. The service is configured
+// with the session's config merged with any additional configs, where later
+// configs override earlier ones.
+func New(sess *session.Session, cfgs ...*spotinst.Config) *ServiceOp {
+	cfg := &spotinst.Config{}
+	cfg.Merge(sess.Config)
+	cfg.Merge(cfgs...)
+
+	return &ServiceOp{
+		// BUG FIX: pass the merged cfg; previously sess.Config was passed
+		// directly, silently discarding any caller-supplied cfgs overrides.
+		Client: client.New(cfg),
+	}
+}
diff --git a/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/gcp/gcp.go b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/gcp/gcp.go
new file mode 100644
index 000000000000..19384b6a9873
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/gcp/gcp.go
@@ -0,0 +1,2184 @@
+package gcp
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "strconv"
+ "time"
+
+ "github.com/spotinst/spotinst-sdk-go/spotinst"
+ "github.com/spotinst/spotinst-sdk-go/spotinst/client"
+ "github.com/spotinst/spotinst-sdk-go/spotinst/util/jsonutil"
+ "github.com/spotinst/spotinst-sdk-go/spotinst/util/uritemplates"
+)
+
+// Group defines a GCP Elastigroup.
+type Group struct {
+	ID          *string     `json:"id,omitempty"`
+	Name        *string     `json:"name,omitempty"`
+	Description *string     `json:"description,omitempty"`
+	NodeImage   *string     `json:"nodeImage,omitempty"`
+	Capacity    *Capacity   `json:"capacity,omitempty"`
+	Compute     *Compute    `json:"compute,omitempty"`
+	Scaling     *Scaling    `json:"scaling,omitempty"`
+	Scheduling  *Scheduling `json:"scheduling,omitempty"`
+	Strategy    *Strategy   `json:"strategy,omitempty"`
+	// Note: serialized under the "thirdPartiesIntegration" key.
+	Integration *Integration `json:"thirdPartiesIntegration,omitempty"`
+
+	// Read-only fields.
+	CreatedAt *time.Time `json:"createdAt,omitempty"`
+	UpdatedAt *time.Time `json:"updatedAt,omitempty"`
+
+	// forceSendFields is a list of field names (e.g. "Keys") to
+	// unconditionally include in API requests. By default, fields with
+	// empty values are omitted from API requests. However, any non-pointer,
+	// non-interface field appearing in ForceSendFields will be sent to the
+	// server regardless of whether the field is empty or not. This may be
+	// used to include empty fields in Patch requests.
+	forceSendFields []string
+
+	// nullFields is a list of field names (e.g. "Keys") to include in API
+	// requests with the JSON null value. By default, fields with empty
+	// values are omitted from API requests. However, any field with an
+	// empty value appearing in NullFields will be sent to the server as
+	// null. It is an error if a field in this list has a non-empty value.
+	// This may be used to include null fields in Patch requests.
+	nullFields []string
+}
+
+// region AutoScale structs
+
+// AutoScale holds the common autoscaler settings shared by integrations.
+type AutoScale struct {
+	IsEnabled    *bool              `json:"isEnabled,omitempty"`
+	IsAutoConfig *bool              `json:"isAutoConfig,omitempty"`
+	Cooldown     *int               `json:"cooldown,omitempty"`
+	Headroom     *AutoScaleHeadroom `json:"headroom,omitempty"`
+	Down         *AutoScaleDown     `json:"down,omitempty"`
+
+	forceSendFields []string
+	nullFields      []string
+}
+
+// AutoScaleDown holds the scale-down evaluation settings.
+type AutoScaleDown struct {
+	EvaluationPeriods *int `json:"evaluationPeriods,omitempty"`
+
+	forceSendFields []string
+	nullFields      []string
+}
+
+// AutoScaleHeadroom describes spare capacity (CPU, memory, units) to reserve.
+type AutoScaleHeadroom struct {
+	CPUPerUnit    *int `json:"cpuPerUnit,omitempty"`
+	MemoryPerUnit *int `json:"memoryPerUnit,omitempty"`
+	NumOfUnits    *int `json:"numOfUnits,omitempty"`
+
+	forceSendFields []string
+	nullFields      []string
+}
+
+// AutoScaleLabel is a key/value pair used by the autoscaler.
+type AutoScaleLabel struct {
+	Key   *string `json:"key,omitempty"`
+	Value *string `json:"value,omitempty"`
+
+	forceSendFields []string
+	nullFields      []string
+}
+
+// endregion
+
+// region Capacity structs
+
+// Capacity defines the capacity attributes (instance counts) of a Group.
+type Capacity struct {
+	Maximum *int `json:"maximum,omitempty"`
+	Minimum *int `json:"minimum,omitempty"`
+	Target  *int `json:"target,omitempty"`
+
+	forceSendFields []string
+	nullFields      []string
+}
+
+// endregion
+
+// region Compute structs
+
+// AccessConfig defines the access configuration for a network. AccessConfig is an element of NetworkInterface.
+type AccessConfig struct {
+	Name *string `json:"name,omitempty"`
+	Type *string `json:"type,omitempty"`
+
+	forceSendFields []string
+	nullFields      []string
+}
+
+// AliasIPRange defines the alias ip range for a network. AliasIPRange is an element of NetworkInterface.
+type AliasIPRange struct {
+	IPCIDRRange         *string `json:"ipCidrRange,omitempty"`
+	SubnetworkRangeName *string `json:"subnetworkRangeName,omitempty"`
+
+	forceSendFields []string
+	nullFields      []string
+}
+
+// BackendServiceConfig contains a list of backend service configurations.
+type BackendServiceConfig struct {
+	BackendServices []*BackendService `json:"backendServices,omitempty"`
+	forceSendFields []string
+	nullFields      []string
+}
+
+// BackendService defines the configuration for a single backend service.
+type BackendService struct {
+	BackendServiceName *string     `json:"backendServiceName,omitempty"`
+	LocationType       *string     `json:"locationType,omitempty"`
+	Scheme             *string     `json:"scheme,omitempty"`
+	NamedPorts         *NamedPorts `json:"namedPorts,omitempty"`
+
+	forceSendFields []string
+	nullFields      []string
+}
+
+// Compute defines the compute attributes of a Group.
+type Compute struct {
+	AvailabilityZones   []string             `json:"availabilityZones,omitempty"`
+	GPU                 *GPU                 `json:"gpu,omitempty"`
+	Health              *Health              `json:"health,omitempty"`
+	InstanceTypes       *InstanceTypes       `json:"instanceTypes,omitempty"`
+	LaunchSpecification *LaunchSpecification `json:"launchSpecification,omitempty"`
+	Subnets             []*Subnet            `json:"subnets,omitempty"`
+
+	forceSendFields []string
+	nullFields      []string
+}
+
+// CustomInstance defines the memory and vCPU constraints of an instance.
+type CustomInstance struct {
+	VCPU      *int `json:"vCPU,omitempty"`
+	MemoryGiB *int `json:"memoryGiB,omitempty"`
+
+	forceSendFields []string
+	nullFields      []string
+}
+
+// Disk defines a block storage device for the group. Stored in an array of Disks, as defined in LaunchSpecification.
+type Disk struct {
+	AutoDelete       *bool             `json:"autoDelete,omitempty"`
+	Boot             *bool             `json:"boot,omitempty"`
+	DeviceName       *string           `json:"deviceName,omitempty"`
+	InitializeParams *InitializeParams `json:"initializeParams,omitempty"`
+	Interface        *string           `json:"interface,omitempty"`
+	Mode             *string           `json:"mode,omitempty"`
+	Source           *string           `json:"source,omitempty"`
+	Type             *string           `json:"type,omitempty"`
+
+	forceSendFields []string
+	nullFields      []string
+}
+
+// GPU defines the kind and number of GPUs to use with the group. GPU is an element of Compute.
+type GPU struct {
+	Type  *string `json:"type,omitempty"`
+	Count *int    `json:"count,omitempty"`
+
+	forceSendFields []string
+	nullFields      []string
+}
+
+// Health defines the healthcheck attributes for the group. Health is an element of Compute.
+type Health struct {
+	AutoHealing       *bool   `json:"autoHealing,omitempty"`
+	GracePeriod       *int    `json:"gracePeriod,omitempty"`
+	HealthCheckType   *string `json:"healthCheckType,omitempty"`
+	UnhealthyDuration *int    `json:"unhealthyDuration,omitempty"`
+
+	forceSendFields []string
+	nullFields      []string
+}
+
+// InitializeParams defines the initialization parameters for a Disk object.
+type InitializeParams struct {
+	DiskSizeGB  *int    `json:"diskSizeGb,omitempty"`
+	DiskType    *string `json:"diskType,omitempty"`
+	SourceImage *string `json:"sourceImage,omitempty"`
+
+	forceSendFields []string
+	nullFields      []string
+}
+
+// InstanceTypes defines the type of instances to use with the group. InstanceTypes is an element of Compute.
+type InstanceTypes struct {
+	// Note: serialized under the lowercase "ondemand" key (not "onDemand").
+	OnDemand    *string           `json:"ondemand,omitempty"`
+	Preemptible []string          `json:"preemptible,omitempty"`
+	Custom      []*CustomInstance `json:"custom,omitempty"`
+
+	forceSendFields []string
+	nullFields      []string
+}
+
+// Label defines an object holding a key:value pair. Label is an element of LaunchSpecification.
+type Label struct {
+	Key   *string `json:"key,omitempty"`
+	Value *string `json:"value,omitempty"`
+
+	forceSendFields []string
+	nullFields      []string
+}
+
+// LaunchSpecification defines launch attributes for the Group. LaunchSpecification is an element of Compute.
+type LaunchSpecification struct {
+	BackendServiceConfig *BackendServiceConfig `json:"backendServiceConfig,omitempty"`
+	Disks                []*Disk               `json:"disks,omitempty"`
+	Labels               []*Label              `json:"labels,omitempty"`
+	IPForwarding         *bool                 `json:"ipForwarding,omitempty"`
+	NetworkInterfaces    []*NetworkInterface   `json:"networkInterfaces,omitempty"`
+	Metadata             []*Metadata           `json:"metadata,omitempty"`
+	ServiceAccount       *string               `json:"serviceAccount,omitempty"`
+	StartupScript        *string               `json:"startupScript,omitempty"`
+	ShutdownScript       *string               `json:"shutdownScript,omitempty"`
+	Tags                 []string              `json:"tags,omitempty"`
+	InstanceNamePrefix   *string               `json:"instanceNamePrefix,omitempty"`
+
+	forceSendFields []string
+	nullFields      []string
+}
+
+// Metadata defines an object holding a key:value pair. Metadata is an element of LaunchSpecification.
+type Metadata struct {
+	Key   *string `json:"key,omitempty"`
+	Value *string `json:"value,omitempty"`
+
+	forceSendFields []string
+	nullFields      []string
+}
+
+// NamedPorts describes the name and list of ports to use with the backend service.
+type NamedPorts struct {
+	Name  *string `json:"name,omitempty"`
+	Ports []int   `json:"ports,omitempty"`
+
+	forceSendFields []string
+	nullFields      []string
+}
+
+// NetworkInterface defines the network configuration for a Group. NetworkInterface is an element of LaunchSpecification.
+type NetworkInterface struct {
+	AccessConfigs []*AccessConfig `json:"accessConfigs,omitempty"`
+	AliasIPRanges []*AliasIPRange `json:"aliasIpRanges,omitempty"`
+	Network       *string         `json:"network,omitempty"`
+	ProjectID     *string         `json:"projectId,omitempty"`
+
+	forceSendFields []string
+	nullFields      []string
+}
+
+// Subnet defines the attributes of a single Subnet. The Subnets list is an element of Compute.
+type Subnet struct {
+	Region      *string  `json:"region,omitempty"`
+	SubnetNames []string `json:"subnetNames,omitempty"`
+
+	forceSendFields []string
+	nullFields      []string
+}
+
+// endregion
+
+// region GKE structs
+
+// ImportGKEGroup contains a modified group struct used for overriding cluster parameters on import.
+type ImportGKEGroup struct {
+	AvailabilityZones     []string          `json:"availabilityZones,omitempty"`
+	Capacity              *CapacityGKE      `json:"capacity,omitempty"`
+	Name                  *string           `json:"name,omitempty"`
+	InstanceTypes         *InstanceTypesGKE `json:"instanceTypes,omitempty"`
+	PreemptiblePercentage *int              `json:"preemptiblePercentage,omitempty"`
+	NodeImage             *string           `json:"nodeImage,omitempty"`
+
+	forceSendFields []string
+	nullFields      []string
+}
+
+// CapacityGKE is the GKE-specific capacity configuration; it embeds Capacity.
+type CapacityGKE struct {
+	Capacity // embedding
+
+	forceSendFields []string
+	nullFields      []string
+}
+
+// InstanceTypesGKE describes the instance types used by an imported GKE group.
+type InstanceTypesGKE struct {
+	// Note: serialized under the lowercase "ondemand" key (not "onDemand").
+	OnDemand    *string  `json:"ondemand,omitempty"`
+	Preemptible []string `json:"preemptible,omitempty"`
+
+	forceSendFields []string
+	nullFields      []string
+}
+
+// endregion
+
+// region Scaling structs
+
+// Action defines the action attributes of a ScalingPolicy.
+type Action struct {
+	Adjustment *int    `json:"adjustment,omitempty"`
+	Type       *string `json:"type,omitempty"`
+
+	forceSendFields []string
+	nullFields      []string
+}
+
+// Dimension defines the attributes for the dimensions of a ScalingPolicy.
+type Dimension struct {
+	Name  *string `json:"name,omitempty"`
+	Value *string `json:"value,omitempty"`
+
+	forceSendFields []string
+	nullFields      []string
+}
+
+// Scaling defines the scaling attributes of a Group.
+type Scaling struct {
+	Up   []*ScalingPolicy `json:"up,omitempty"`
+	Down []*ScalingPolicy `json:"down,omitempty"`
+
+	forceSendFields []string
+	nullFields      []string
+}
+
+// ScalingPolicy defines the scaling attributes for both up and down policies. ScalingPolicy is an element of Scaling.
+type ScalingPolicy struct {
+	Action            *Action      `json:"action,omitempty"`
+	Cooldown          *int         `json:"cooldown,omitempty"`
+	Dimensions        []*Dimension `json:"dimensions,omitempty"`
+	EvaluationPeriods *int         `json:"evaluationPeriods,omitempty"`
+	MetricName        *string      `json:"metricName,omitempty"`
+	Namespace         *string      `json:"namespace,omitempty"`
+	Operator          *string      `json:"operator,omitempty"`
+	Period            *int         `json:"period,omitempty"`
+	PolicyName        *string      `json:"policyName,omitempty"`
+	Source            *string      `json:"source,omitempty"`
+	Statistic         *string      `json:"statistic,omitempty"`
+	Threshold         *float64     `json:"threshold,omitempty"`
+	Unit              *string      `json:"unit,omitempty"`
+
+	forceSendFields []string
+	nullFields      []string
+}
+
+// endregion
+
+// region Strategy structs
+
+// Strategy defines the strategy attributes of a Group.
+type Strategy struct {
+	DrainingTimeout    *int  `json:"drainingTimeout,omitempty"`
+	// Note: serialized under the "fallbackToOd" key.
+	FallbackToOnDemand    *bool   `json:"fallbackToOd,omitempty"`
+	PreemptiblePercentage *int    `json:"preemptiblePercentage,omitempty"`
+	OnDemandCount         *int    `json:"onDemandCount,omitempty"`
+	ProvisioningModel     *string `json:"provisioningModel,omitempty"`
+
+	forceSendFields []string
+	nullFields      []string
+}
+
+// endregion
+
+// region Scheduling
+
+// Scheduling holds the group's scheduled tasks.
+type Scheduling struct {
+	Tasks []*Task `json:"tasks,omitempty"`
+
+	forceSendFields []string
+	nullFields      []string
+}
+
+// Task describes a single scheduled task. Scheduling is driven by the cron
+// expression; the capacity fields describe the target state to apply.
+type Task struct {
+	IsEnabled *bool `json:"isEnabled,omitempty"`
+	// Note: serialized under the "taskType" key.
+	Type           *string `json:"taskType,omitempty"`
+	CronExpression *string `json:"cronExpression,omitempty"`
+	TargetCapacity *int    `json:"targetCapacity,omitempty"`
+	MinCapacity    *int    `json:"minCapacity,omitempty"`
+	MaxCapacity    *int    `json:"maxCapacity,omitempty"`
+
+	forceSendFields []string
+	nullFields      []string
+}
+
+// endregion
+
+// region Integration structs
+
+// Integration holds the group's third-party integrations (GKE, Docker Swarm).
+type Integration struct {
+	GKE         *GKEIntegration         `json:"gke,omitempty"`
+	DockerSwarm *DockerSwarmIntegration `json:"dockerSwarm,omitempty"`
+
+	forceSendFields []string
+	nullFields      []string
+}
+
+// region GKEIntegration structs
+
+// GKEIntegration describes the GKE cluster integration for the group.
+type GKEIntegration struct {
+	// Note: serialized under the "clusterIdentifier" key.
+	ClusterID       *string       `json:"clusterIdentifier,omitempty"`
+	ClusterZoneName *string       `json:"clusterZoneName,omitempty"`
+	AutoUpdate      *bool         `json:"autoUpdate,omitempty"`
+	AutoScale       *AutoScaleGKE `json:"autoScale,omitempty"`
+	Location        *string       `json:"location,omitempty"`
+
+	forceSendFields []string
+	nullFields      []string
+}
+
+// AutoScaleGKE extends AutoScale with GKE-specific labels.
+type AutoScaleGKE struct {
+	AutoScale // embedding
+	Labels    []*AutoScaleLabel `json:"labels,omitempty"`
+
+	forceSendFields []string
+	nullFields      []string
+}
+
+// endregion
+
+// region DockerSwarmIntegration structs
+
+// DockerSwarmIntegration describes the Docker Swarm master endpoint.
+type DockerSwarmIntegration struct {
+	MasterHost *string `json:"masterHost,omitempty"`
+	MasterPort *int    `json:"masterPort,omitempty"`
+
+	forceSendFields []string
+	nullFields      []string
+}
+
+// endregion
+
+// endregion
+
+// region API Operation structs
+
+// CreateGroupInput contains the Elastigroup description required when making a request to create an Elastigroup.
+type CreateGroupInput struct {
+	Group *Group `json:"group,omitempty"`
+}
+
+// CreateGroupOutput contains a definition of the created Elastigroup, including the generated Group ID.
+type CreateGroupOutput struct {
+	Group *Group `json:"group,omitempty"`
+}
+
+// DeleteGroupInput contains the required input to delete an existing Elastigroup.
+type DeleteGroupInput struct {
+	GroupID *string `json:"groupId,omitempty"`
+}
+
+// DeleteGroupOutput describes the response of a deleted group. Empty at this time.
+type DeleteGroupOutput struct{}
+
+// ImportGKEClusterInput describes the input required when importing an existing GKE cluster into Elastigroup.
+type ImportGKEClusterInput struct {
+	ClusterID       *string         `json:"clusterID,omitempty"`
+	ClusterZoneName *string         `json:"clusterZoneName,omitempty"`
+	DryRun          *bool           `json:"dryRun,omitempty"`
+	Group           *ImportGKEGroup `json:"group,omitempty"`
+}
+
+// ImportGKEClusterOutput contains a description of the Elastigroup and the imported GKE cluster.
+type ImportGKEClusterOutput struct {
+	Group *Group `json:"group,omitempty"`
+}
+
+// Instance describes an individual instance's status and is returned by a Status request.
+type Instance struct {
+	CreatedAt    *time.Time `json:"createdAt,omitempty"`
+	InstanceName *string    `json:"instanceName,omitempty"`
+	LifeCycle    *string    `json:"lifeCycle,omitempty"`
+	MachineType  *string    `json:"machineType,omitempty"`
+	PrivateIP    *string    `json:"privateIpAddress,omitempty"`
+	PublicIP     *string    `json:"publicIpAddress,omitempty"`
+	StatusName   *string    `json:"statusName,omitempty"`
+	UpdatedAt    *time.Time `json:"updatedAt,omitempty"`
+	Zone         *string    `json:"zone,omitempty"`
+}
+
+// ListGroupsInput describes the input required when making a request to list all groups in an account.
+type ListGroupsInput struct{}
+
+// ListGroupsOutput contains an array of groups.
+type ListGroupsOutput struct {
+	Groups []*Group `json:"groups,omitempty"`
+}
+
+// ReadGroupInput describes the input required when making a request to list a single Elastigroup.
+type ReadGroupInput struct {
+	GroupID *string `json:"groupId,omitempty"`
+}
+
+// ReadGroupOutput contains a description of the requested Elastigroup, if it exists.
+type ReadGroupOutput struct {
+	Group *Group `json:"group,omitempty"`
+}
+
+// StatusGroupInput describes the required input when making a request to see an Elastigroup's status.
+type StatusGroupInput struct {
+	GroupID *string `json:"groupId,omitempty"`
+}
+
+// StatusGroupOutput describes the status of the instances in the Elastigroup.
+type StatusGroupOutput struct {
+	Instances []*Instance `json:"instances,omitempty"`
+}
+
+// UpdateGroupInput contains a description of one or more valid attributes that will be applied to an existing Elastigroup.
+type UpdateGroupInput struct {
+	Group *Group `json:"group,omitempty"`
+}
+
+// UpdateGroupOutput contains a description of the updated Elastigroup, if successful.
+type UpdateGroupOutput struct {
+	Group *Group `json:"group,omitempty"`
+}
+
+// endregion
+
+// region API Operations
+
+// Create creates a new Elastigroup using GCE resources.
+func (s *ServiceOp) Create(ctx context.Context, input *CreateGroupInput) (*CreateGroupOutput, error) {
+	r := client.NewRequest(http.MethodPost, "/gcp/gce/group")
+	r.Obj = input
+
+	resp, err := client.RequireOK(s.Client.Do(ctx, r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	gs, err := groupsFromHttpResponse(resp)
+	if err != nil {
+		return nil, err
+	}
+
+	output := new(CreateGroupOutput)
+	// The API wraps results in a response envelope; the created group is the
+	// first (and only) item.
+	if len(gs) > 0 {
+		output.Group = gs[0]
+	}
+
+	return output, nil
+}
+
+// Read returns the configuration of a single existing Elastigroup.
+func (s *ServiceOp) Read(ctx context.Context, input *ReadGroupInput) (*ReadGroupOutput, error) {
+	// The group ID is passed in the URL path, not the request body.
+	path, err := uritemplates.Expand("/gcp/gce/group/{groupId}", uritemplates.Values{
+		"groupId": spotinst.StringValue(input.GroupID),
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	r := client.NewRequest(http.MethodGet, path)
+	resp, err := client.RequireOK(s.Client.Do(ctx, r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	gs, err := groupsFromHttpResponse(resp)
+	if err != nil {
+		return nil, err
+	}
+
+	output := new(ReadGroupOutput)
+	if len(gs) > 0 {
+		output.Group = gs[0]
+	}
+
+	return output, nil
+}
+
+// Update modifies the configuration of a single existing Elastigroup.
+func (s *ServiceOp) Update(ctx context.Context, input *UpdateGroupInput) (*UpdateGroupOutput, error) {
+	path, err := uritemplates.Expand("/gcp/gce/group/{groupId}", uritemplates.Values{
+		"groupId": spotinst.StringValue(input.Group.ID),
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	// The ID is sent in the URL path only, so drop it from the payload.
+	// NOTE(review): this mutates the caller's input.Group.
+	input.Group.ID = nil
+
+	r := client.NewRequest(http.MethodPut, path)
+	r.Obj = input
+
+	resp, err := client.RequireOK(s.Client.Do(ctx, r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	gs, err := groupsFromHttpResponse(resp)
+	if err != nil {
+		return nil, err
+	}
+
+	output := new(UpdateGroupOutput)
+	if len(gs) > 0 {
+		output.Group = gs[0]
+	}
+
+	return output, nil
+}
+
+// Delete removes a single existing Elastigroup and destroys all associated GCE resources.
+func (s *ServiceOp) Delete(ctx context.Context, input *DeleteGroupInput) (*DeleteGroupOutput, error) {
+	path, err := uritemplates.Expand("/gcp/gce/group/{groupId}", uritemplates.Values{
+		"groupId": spotinst.StringValue(input.GroupID),
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	r := client.NewRequest(http.MethodDelete, path)
+
+	resp, err := client.RequireOK(s.Client.Do(ctx, r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	// The response body carries no useful payload for a delete.
+	return &DeleteGroupOutput{}, nil
+}
+
+// List returns the configuration of all existing Elastigroups in a given Spotinst GCE account.
+func (s *ServiceOp) List(ctx context.Context, input *ListGroupsInput) (*ListGroupsOutput, error) {
+	r := client.NewRequest(http.MethodGet, "/gcp/gce/group")
+	resp, err := client.RequireOK(s.Client.Do(ctx, r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	gs, err := groupsFromHttpResponse(resp)
+	if err != nil {
+		return nil, err
+	}
+
+	return &ListGroupsOutput{Groups: gs}, nil
+}
+
+// ImportGKECluster imports an existing GKE cluster into Elastigroup.
+func (s *ServiceOp) ImportGKECluster(ctx context.Context, input *ImportGKEClusterInput) (*ImportGKEClusterOutput, error) {
+	r := client.NewRequest(http.MethodPost, "/gcp/gce/group/gke/import")
+
+	// Cluster identification is sent via query parameters; only the group
+	// overrides travel in the request body.
+	r.Params["clusterId"] = []string{spotinst.StringValue(input.ClusterID)}
+	r.Params["zone"] = []string{spotinst.StringValue(input.ClusterZoneName)}
+	r.Params["dryRun"] = []string{strconv.FormatBool(spotinst.BoolValue(input.DryRun))}
+
+	body := &ImportGKEClusterInput{Group: input.Group}
+	r.Obj = body
+
+	resp, err := client.RequireOK(s.Client.Do(ctx, r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	gs, err := groupsFromHttpResponse(resp)
+	if err != nil {
+		return nil, err
+	}
+
+	output := new(ImportGKEClusterOutput)
+	if len(gs) > 0 {
+		output.Group = gs[0]
+	}
+
+	return output, nil
+}
+
+// Status describes the current status of the instances in a specific Elastigroup.
+func (s *ServiceOp) Status(ctx context.Context, input *StatusGroupInput) (*StatusGroupOutput, error) {
+	path, err := uritemplates.Expand("/gcp/gce/group/{groupId}/status", uritemplates.Values{
+		"groupId": spotinst.StringValue(input.GroupID),
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	r := client.NewRequest(http.MethodGet, path)
+	resp, err := client.RequireOK(s.Client.Do(ctx, r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	is, err := instancesFromHttpResponse(resp)
+	if err != nil {
+		return nil, err
+	}
+
+	return &StatusGroupOutput{Instances: is}, nil
+}
+
+// endregion
+
+// region Unmarshallers
+
+// groupFromJSON unmarshalls a single group.
+func groupFromJSON(in []byte) (*Group, error) {
+	b := new(Group)
+	if err := json.Unmarshal(in, b); err != nil {
+		return nil, err
+	}
+	return b, nil
+}
+
+// groupsFromJSON unmarshalls an array of groups from the API response
+// envelope (client.Response), decoding each raw item individually.
+func groupsFromJSON(in []byte) ([]*Group, error) {
+	var rw client.Response
+	if err := json.Unmarshal(in, &rw); err != nil {
+		return nil, err
+	}
+	out := make([]*Group, len(rw.Response.Items))
+	if len(out) == 0 {
+		return out, nil
+	}
+	for i, rb := range rw.Response.Items {
+		b, err := groupFromJSON(rb)
+		if err != nil {
+			return nil, err
+		}
+		out[i] = b
+	}
+	return out, nil
+}
+
+// groupsFromHttpResponse reads a list of one or more groups from an http response.
+func groupsFromHttpResponse(resp *http.Response) ([]*Group, error) {
+	body, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return nil, err
+	}
+	return groupsFromJSON(body)
+}
+
+// instanceFromJSON unmarshalls a single instance.
+func instanceFromJSON(in []byte) (*Instance, error) {
+	b := new(Instance)
+	if err := json.Unmarshal(in, b); err != nil {
+		return nil, err
+	}
+	return b, nil
+}
+
+// instancesFromJSON unmarshalls an array of instances from the API response
+// envelope (client.Response), decoding each raw item individually.
+func instancesFromJSON(in []byte) ([]*Instance, error) {
+	var rw client.Response
+	if err := json.Unmarshal(in, &rw); err != nil {
+		return nil, err
+	}
+	out := make([]*Instance, len(rw.Response.Items))
+	if len(out) == 0 {
+		return out, nil
+	}
+	for i, rb := range rw.Response.Items {
+		b, err := instanceFromJSON(rb)
+		if err != nil {
+			return nil, err
+		}
+		out[i] = b
+	}
+	return out, nil
+}
+
+// instancesFromHttpResponse reads a list of one or more instances from an http response.
+func instancesFromHttpResponse(resp *http.Response) ([]*Instance, error) {
+	body, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return nil, err
+	}
+	return instancesFromJSON(body)
+}
+
+// endregion
+
+// region Group setters
+
+// MarshalJSON serializes Group honoring forceSendFields and nullFields. The
+// local noMethod alias drops this MarshalJSON method to avoid infinite
+// recursion when jsonutil re-marshals the value.
+func (o Group) MarshalJSON() ([]byte, error) {
+	type noMethod Group
+	raw := noMethod(o)
+	return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+// SetID sets the group ID attribute; a nil value records the field in
+// nullFields so it is serialized as an explicit JSON null.
+func (o *Group) SetID(v *string) *Group {
+	if o.ID = v; o.ID == nil {
+		o.nullFields = append(o.nullFields, "ID")
+	}
+	return o
+}
+
+// SetName sets the group name; nil marks the field as JSON null.
+func (o *Group) SetName(v *string) *Group {
+	if o.Name = v; o.Name == nil {
+		o.nullFields = append(o.nullFields, "Name")
+	}
+	return o
+}
+
+// SetDescription sets the description for the group; nil marks the field as
+// JSON null.
+func (o *Group) SetDescription(v *string) *Group {
+	if o.Description = v; o.Description == nil {
+		o.nullFields = append(o.nullFields, "Description")
+	}
+	return o
+}
+
+// SetNodeImage sets the image that will be used for the node VMs; nil marks
+// the field as JSON null.
+func (o *Group) SetNodeImage(v *string) *Group {
+	if o.NodeImage = v; o.NodeImage == nil {
+		o.nullFields = append(o.nullFields, "NodeImage")
+	}
+	return o
+}
+
+// SetCapacity sets the Capacity object; nil marks the field as JSON null.
+func (o *Group) SetCapacity(v *Capacity) *Group {
+	if o.Capacity = v; o.Capacity == nil {
+		o.nullFields = append(o.nullFields, "Capacity")
+	}
+	return o
+}
+
+// SetCompute sets the Compute object; nil marks the field as JSON null.
+func (o *Group) SetCompute(v *Compute) *Group {
+	if o.Compute = v; o.Compute == nil {
+		o.nullFields = append(o.nullFields, "Compute")
+	}
+	return o
+}
+
+// SetScaling sets the Scaling object; nil marks the field as JSON null.
+func (o *Group) SetScaling(v *Scaling) *Group {
+	if o.Scaling = v; o.Scaling == nil {
+		o.nullFields = append(o.nullFields, "Scaling")
+	}
+	return o
+}
+
+// SetScheduling sets the Scheduling object; nil marks the field as JSON null.
+func (o *Group) SetScheduling(v *Scheduling) *Group {
+	if o.Scheduling = v; o.Scheduling == nil {
+		o.nullFields = append(o.nullFields, "Scheduling")
+	}
+	return o
+}
+
+// SetStrategy sets the Strategy object; nil marks the field as JSON null.
+func (o *Group) SetStrategy(v *Strategy) *Group {
+	if o.Strategy = v; o.Strategy == nil {
+		o.nullFields = append(o.nullFields, "Strategy")
+	}
+	return o
+}
+
+// SetIntegration sets the integrations for the group
+func (o *Group) SetIntegration(v *Integration) *Group {
+ if o.Integration = v; o.Integration == nil {
+ o.nullFields = append(o.nullFields, "Integration")
+ }
+ return o
+}
+
+// endregion
+
+// region AutoScale setters
+
+// MarshalJSON marshals the AutoScale, honoring its forceSendFields and nullFields.
+func (o AutoScale) MarshalJSON() ([]byte, error) {
+ type noMethod AutoScale
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+// SetIsEnabled sets whether autoscaling is enabled
+func (o *AutoScale) SetIsEnabled(v *bool) *AutoScale {
+ if o.IsEnabled = v; o.IsEnabled == nil {
+ o.nullFields = append(o.nullFields, "IsEnabled")
+ }
+ return o
+}
+
+// SetIsAutoConfig sets the auto-config flag
+func (o *AutoScale) SetIsAutoConfig(v *bool) *AutoScale {
+ if o.IsAutoConfig = v; o.IsAutoConfig == nil {
+ o.nullFields = append(o.nullFields, "IsAutoConfig")
+ }
+ return o
+}
+
+// SetCooldown sets the autoscaling cooldown
+func (o *AutoScale) SetCooldown(v *int) *AutoScale {
+ if o.Cooldown = v; o.Cooldown == nil {
+ o.nullFields = append(o.nullFields, "Cooldown")
+ }
+ return o
+}
+
+// SetHeadroom sets the AutoScaleHeadroom object
+func (o *AutoScale) SetHeadroom(v *AutoScaleHeadroom) *AutoScale {
+ if o.Headroom = v; o.Headroom == nil {
+ o.nullFields = append(o.nullFields, "Headroom")
+ }
+ return o
+}
+
+// SetDown sets the AutoScaleDown object
+func (o *AutoScale) SetDown(v *AutoScaleDown) *AutoScale {
+ if o.Down = v; o.Down == nil {
+ o.nullFields = append(o.nullFields, "Down")
+ }
+ return o
+}
+
+// region AutoScaleDown
+
+// MarshalJSON marshals the AutoScaleDown, honoring its forceSendFields and nullFields.
+func (o AutoScaleDown) MarshalJSON() ([]byte, error) {
+ type noMethod AutoScaleDown
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+// SetEvaluationPeriods sets the number of evaluation periods before scaling down
+func (o *AutoScaleDown) SetEvaluationPeriods(v *int) *AutoScaleDown {
+ if o.EvaluationPeriods = v; o.EvaluationPeriods == nil {
+ o.nullFields = append(o.nullFields, "EvaluationPeriods")
+ }
+ return o
+}
+
+// endregion
+
+// region AutoScaleHeadroom
+
+// MarshalJSON marshals the AutoScaleHeadroom, honoring its forceSendFields and nullFields.
+func (o AutoScaleHeadroom) MarshalJSON() ([]byte, error) {
+ type noMethod AutoScaleHeadroom
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+// SetCPUPerUnit sets the CPU per headroom unit
+func (o *AutoScaleHeadroom) SetCPUPerUnit(v *int) *AutoScaleHeadroom {
+ if o.CPUPerUnit = v; o.CPUPerUnit == nil {
+ o.nullFields = append(o.nullFields, "CPUPerUnit")
+ }
+ return o
+}
+
+// SetMemoryPerUnit sets the memory per headroom unit
+func (o *AutoScaleHeadroom) SetMemoryPerUnit(v *int) *AutoScaleHeadroom {
+ if o.MemoryPerUnit = v; o.MemoryPerUnit == nil {
+ o.nullFields = append(o.nullFields, "MemoryPerUnit")
+ }
+ return o
+}
+
+// SetNumOfUnits sets the number of headroom units
+func (o *AutoScaleHeadroom) SetNumOfUnits(v *int) *AutoScaleHeadroom {
+ if o.NumOfUnits = v; o.NumOfUnits == nil {
+ o.nullFields = append(o.nullFields, "NumOfUnits")
+ }
+ return o
+}
+
+// endregion
+
+// region AutoScaleLabel
+
+// MarshalJSON marshals the AutoScaleLabel, honoring its forceSendFields and nullFields.
+func (o AutoScaleLabel) MarshalJSON() ([]byte, error) {
+ type noMethod AutoScaleLabel
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+// SetKey sets the label key
+func (o *AutoScaleLabel) SetKey(v *string) *AutoScaleLabel {
+ if o.Key = v; o.Key == nil {
+ o.nullFields = append(o.nullFields, "Key")
+ }
+ return o
+}
+
+// SetValue sets the label value
+func (o *AutoScaleLabel) SetValue(v *string) *AutoScaleLabel {
+ if o.Value = v; o.Value == nil {
+ o.nullFields = append(o.nullFields, "Value")
+ }
+ return o
+}
+
+// endregion
+
+// endregion
+
+// region Capacity setters
+
+// MarshalJSON marshals the Capacity, honoring its forceSendFields and nullFields.
+func (o Capacity) MarshalJSON() ([]byte, error) {
+ type noMethod Capacity
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+// SetMaximum sets the maximum number of VMs in the group.
+func (o *Capacity) SetMaximum(v *int) *Capacity {
+ if o.Maximum = v; o.Maximum == nil {
+ o.nullFields = append(o.nullFields, "Maximum")
+ }
+ return o
+}
+
+// SetMinimum sets the minimum number of VMs in the group
+func (o *Capacity) SetMinimum(v *int) *Capacity {
+ if o.Minimum = v; o.Minimum == nil {
+ o.nullFields = append(o.nullFields, "Minimum")
+ }
+ return o
+}
+
+// SetTarget sets the desired number of running VMs in the group.
+func (o *Capacity) SetTarget(v *int) *Capacity {
+ if o.Target = v; o.Target == nil {
+ o.nullFields = append(o.nullFields, "Target")
+ }
+ return o
+}
+
+// endregion
+
+// region Compute setters
+
+// MarshalJSON marshals the Compute, honoring its forceSendFields and nullFields.
+func (o Compute) MarshalJSON() ([]byte, error) {
+ type noMethod Compute
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+// SetAvailabilityZones sets the list of availability zones for group resources.
+func (o *Compute) SetAvailabilityZones(v []string) *Compute {
+ if o.AvailabilityZones = v; o.AvailabilityZones == nil {
+ o.nullFields = append(o.nullFields, "AvailabilityZones")
+ }
+ return o
+}
+
+// SetGPU sets the GPU object
+func (o *Compute) SetGPU(v *GPU) *Compute {
+ if o.GPU = v; o.GPU == nil {
+ o.nullFields = append(o.nullFields, "GPU")
+ }
+ return o
+}
+
+// SetHealth sets the health check attributes for the group
+func (o *Compute) SetHealth(v *Health) *Compute {
+ if o.Health = v; o.Health == nil {
+ o.nullFields = append(o.nullFields, "Health")
+ }
+ return o
+}
+
+// SetInstanceTypes sets the instance types for the group.
+func (o *Compute) SetInstanceTypes(v *InstanceTypes) *Compute {
+ if o.InstanceTypes = v; o.InstanceTypes == nil {
+ o.nullFields = append(o.nullFields, "InstanceTypes")
+ }
+ return o
+}
+
+// SetLaunchConfiguration sets the launch configuration of the group.
+func (o *Compute) SetLaunchConfiguration(v *LaunchSpecification) *Compute {
+ if o.LaunchSpecification = v; o.LaunchSpecification == nil {
+ o.nullFields = append(o.nullFields, "LaunchSpecification")
+ }
+ return o
+}
+
+// SetSubnets sets the subnets used by the group.
+func (o *Compute) SetSubnets(v []*Subnet) *Compute {
+ if o.Subnets = v; o.Subnets == nil {
+ o.nullFields = append(o.nullFields, "Subnets")
+ }
+ return o
+}
+
+// region GPU Setters
+
+// MarshalJSON marshals the GPU, honoring its forceSendFields and nullFields.
+func (o GPU) MarshalJSON() ([]byte, error) {
+ type noMethod GPU
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+// SetType sets the type of gpu
+func (o *GPU) SetType(v *string) *GPU {
+ if o.Type = v; o.Type == nil {
+ o.nullFields = append(o.nullFields, "Type")
+ }
+ return o
+}
+
+// SetCount sets the number of this type of gpu
+func (o *GPU) SetCount(v *int) *GPU {
+ if o.Count = v; o.Count == nil {
+ o.nullFields = append(o.nullFields, "Count")
+ }
+ return o
+}
+
+// endregion
+
+// region Health setters
+
+// MarshalJSON marshals the Health, honoring its forceSendFields and nullFields.
+func (o Health) MarshalJSON() ([]byte, error) {
+ type noMethod Health
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+// SetGracePeriod sets the grace period time for the groups health check
+func (o *Health) SetGracePeriod(v *int) *Health {
+ if o.GracePeriod = v; o.GracePeriod == nil {
+ o.nullFields = append(o.nullFields, "GracePeriod")
+ }
+ return o
+}
+
+// SetHealthCheckType sets the type of health check to perform
+func (o *Health) SetHealthCheckType(v *string) *Health {
+ if o.HealthCheckType = v; o.HealthCheckType == nil {
+ o.nullFields = append(o.nullFields, "HealthCheckType")
+ }
+ return o
+}
+
+// SetAutoHealing sets autohealing to true or false
+func (o *Health) SetAutoHealing(v *bool) *Health {
+ if o.AutoHealing = v; o.AutoHealing == nil {
+ o.nullFields = append(o.nullFields, "AutoHealing")
+ }
+ return o
+}
+
+// SetUnhealthyDuration sets the duration an instance may stay unhealthy
+func (o *Health) SetUnhealthyDuration(v *int) *Health {
+ if o.UnhealthyDuration = v; o.UnhealthyDuration == nil {
+ o.nullFields = append(o.nullFields, "UnhealthyDuration")
+ }
+ return o
+}
+
+// endregion
+
+// region InstanceTypes setters
+
+// MarshalJSON marshals the InstanceTypes, honoring its forceSendFields and nullFields.
+func (o InstanceTypes) MarshalJSON() ([]byte, error) {
+ type noMethod InstanceTypes
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+// SetCustom sets the memory and vCPU attributes for Custom Instance types
+func (o *InstanceTypes) SetCustom(v []*CustomInstance) *InstanceTypes {
+ if o.Custom = v; o.Custom == nil {
+ o.nullFields = append(o.nullFields, "Custom")
+ }
+ return o
+}
+
+// SetMemoryGiB sets the memory amount for a Custom Instance in intervals of 2, min 10
+func (o *CustomInstance) SetMemoryGiB(v *int) *CustomInstance {
+ if o.MemoryGiB = v; o.MemoryGiB == nil {
+ o.nullFields = append(o.nullFields, "MemoryGiB")
+ }
+ return o
+}
+
+// SetVCPU sets the number of vCPUs to use in a Custom instance type
+func (o *CustomInstance) SetVCPU(v *int) *CustomInstance {
+ if o.VCPU = v; o.VCPU == nil {
+ o.nullFields = append(o.nullFields, "VCPU")
+ }
+ return o
+}
+
+// SetOnDemand sets the kind of on demand instances to use for the group.
+func (o *InstanceTypes) SetOnDemand(v *string) *InstanceTypes {
+ if o.OnDemand = v; o.OnDemand == nil {
+ o.nullFields = append(o.nullFields, "OnDemand")
+ }
+ return o
+}
+
+// SetPreemptible sets the kind of preemptible instances to use with the group.
+func (o *InstanceTypes) SetPreemptible(v []string) *InstanceTypes {
+ if o.Preemptible = v; o.Preemptible == nil {
+ o.nullFields = append(o.nullFields, "Preemptible")
+ }
+ return o
+}
+
+// endregion
+
+// region LaunchSpecification setters
+
+// MarshalJSON marshals the LaunchSpecification, honoring its forceSendFields and nullFields.
+func (o LaunchSpecification) MarshalJSON() ([]byte, error) {
+ type noMethod LaunchSpecification
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+// SetBackendServiceConfig sets the backend service configuration to use with the group.
+func (o *LaunchSpecification) SetBackendServiceConfig(v *BackendServiceConfig) *LaunchSpecification {
+ if o.BackendServiceConfig = v; o.BackendServiceConfig == nil {
+ o.nullFields = append(o.nullFields, "BackendServiceConfig")
+ }
+ return o
+}
+
+// SetDisks sets the list of disks used by the group
+func (o *LaunchSpecification) SetDisks(v []*Disk) *LaunchSpecification {
+ if o.Disks = v; o.Disks == nil {
+ o.nullFields = append(o.nullFields, "Disks")
+ }
+ return o
+}
+
+// SetLabels sets the labels to be used with the group
+func (o *LaunchSpecification) SetLabels(v []*Label) *LaunchSpecification {
+ if o.Labels = v; o.Labels == nil {
+ o.nullFields = append(o.nullFields, "Labels")
+ }
+ return o
+}
+
+// SetIPForwarding sets whether to use IP forwarding for this group.
+func (o *LaunchSpecification) SetIPForwarding(v *bool) *LaunchSpecification {
+ if o.IPForwarding = v; o.IPForwarding == nil {
+ o.nullFields = append(o.nullFields, "IPForwarding")
+ }
+ return o
+}
+
+// SetNetworkInterfaces sets number and kinds of network interfaces used by the group.
+func (o *LaunchSpecification) SetNetworkInterfaces(v []*NetworkInterface) *LaunchSpecification {
+ if o.NetworkInterfaces = v; o.NetworkInterfaces == nil {
+ o.nullFields = append(o.nullFields, "NetworkInterfaces")
+ }
+ return o
+}
+
+// SetMetadata sets metadata for the group.
+func (o *LaunchSpecification) SetMetadata(v []*Metadata) *LaunchSpecification {
+ if o.Metadata = v; o.Metadata == nil {
+ o.nullFields = append(o.nullFields, "Metadata")
+ }
+ return o
+}
+
+// SetServiceAccount sets the service account used by the instances in the group
+func (o *LaunchSpecification) SetServiceAccount(v *string) *LaunchSpecification {
+ if o.ServiceAccount = v; o.ServiceAccount == nil {
+ o.nullFields = append(o.nullFields, "ServiceAccount")
+ }
+ return o
+}
+
+// SetStartupScript sets the startup script to be executed when the instance launches.
+func (o *LaunchSpecification) SetStartupScript(v *string) *LaunchSpecification {
+ if o.StartupScript = v; o.StartupScript == nil {
+ o.nullFields = append(o.nullFields, "StartupScript")
+ }
+ return o
+}
+
+// SetShutdownScript sets the script that will run when draining instances before termination
+func (o *LaunchSpecification) SetShutdownScript(v *string) *LaunchSpecification {
+ if o.ShutdownScript = v; o.ShutdownScript == nil {
+ o.nullFields = append(o.nullFields, "ShutdownScript")
+ }
+ return o
+}
+
+// SetTags sets the list of tags
+func (o *LaunchSpecification) SetTags(v []string) *LaunchSpecification {
+ if o.Tags = v; o.Tags == nil {
+ o.nullFields = append(o.nullFields, "Tags")
+ }
+ return o
+}
+
+// SetInstanceNamePrefix sets an instance name prefix to be used for all launched instances and their boot disk.
+func (o *LaunchSpecification) SetInstanceNamePrefix(v *string) *LaunchSpecification {
+ if o.InstanceNamePrefix = v; o.InstanceNamePrefix == nil {
+ o.nullFields = append(o.nullFields, "InstanceNamePrefix")
+ }
+ return o
+}
+
+// region BackendServiceConfig setters
+
+// MarshalJSON marshals the BackendServiceConfig, honoring its forceSendFields and nullFields.
+func (o BackendServiceConfig) MarshalJSON() ([]byte, error) {
+ type noMethod BackendServiceConfig
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+// SetBackendServices sets the backend service list
+func (o *BackendServiceConfig) SetBackendServices(v []*BackendService) *BackendServiceConfig {
+ if o.BackendServices = v; o.BackendServices == nil {
+ o.nullFields = append(o.nullFields, "BackendServices")
+ }
+ return o
+}
+
+// region Backend Service setters
+
+// MarshalJSON marshals the BackendService, honoring its forceSendFields and nullFields.
+func (o BackendService) MarshalJSON() ([]byte, error) {
+ type noMethod BackendService
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+// SetBackendServiceName sets the name of the backend service.
+func (o *BackendService) SetBackendServiceName(v *string) *BackendService {
+ if o.BackendServiceName = v; o.BackendServiceName == nil {
+ o.nullFields = append(o.nullFields, "BackendServiceName")
+ }
+ return o
+}
+
+// SetLocationType sets the location type
+func (o *BackendService) SetLocationType(v *string) *BackendService {
+ if o.LocationType = v; o.LocationType == nil {
+ o.nullFields = append(o.nullFields, "LocationType")
+ }
+ return o
+}
+
+// SetScheme sets the scheme
+func (o *BackendService) SetScheme(v *string) *BackendService {
+ if o.Scheme = v; o.Scheme == nil {
+ o.nullFields = append(o.nullFields, "Scheme")
+ }
+ return o
+}
+
+// SetNamedPorts sets the named port object
+func (o *BackendService) SetNamedPorts(v *NamedPorts) *BackendService {
+ if o.NamedPorts = v; o.NamedPorts == nil {
+ o.nullFields = append(o.nullFields, "NamedPorts")
+ }
+ return o
+}
+
+// region NamedPort setters
+
+// MarshalJSON marshals the NamedPorts, honoring its forceSendFields and nullFields.
+func (o NamedPorts) MarshalJSON() ([]byte, error) {
+ type noMethod NamedPorts
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+// SetName sets the name of the NamedPorts
+func (o *NamedPorts) SetName(v *string) *NamedPorts {
+ if o.Name = v; o.Name == nil {
+ o.nullFields = append(o.nullFields, "Name")
+ }
+ return o
+}
+
+// SetPorts sets the list of ports in the NamedPorts
+func (o *NamedPorts) SetPorts(v []int) *NamedPorts {
+ if o.Ports = v; o.Ports == nil {
+ o.nullFields = append(o.nullFields, "Ports")
+ }
+ return o
+}
+
+// endregion
+
+// endregion
+
+// endregion
+
+// region Disk setters
+
+// MarshalJSON marshals the Disk, honoring its forceSendFields and nullFields.
+func (o Disk) MarshalJSON() ([]byte, error) {
+ type noMethod Disk
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+// SetAutoDelete sets option to have disks autodelete
+func (o *Disk) SetAutoDelete(v *bool) *Disk {
+ if o.AutoDelete = v; o.AutoDelete == nil {
+ o.nullFields = append(o.nullFields, "AutoDelete")
+ }
+ return o
+}
+
+// SetBoot sets the boot option
+func (o *Disk) SetBoot(v *bool) *Disk {
+ if o.Boot = v; o.Boot == nil {
+ o.nullFields = append(o.nullFields, "Boot")
+ }
+ return o
+}
+
+// SetDeviceName sets the device name
+func (o *Disk) SetDeviceName(v *string) *Disk {
+ if o.DeviceName = v; o.DeviceName == nil {
+ o.nullFields = append(o.nullFields, "DeviceName")
+ }
+ return o
+}
+
+// SetInitializeParams sets the initialization parameters object
+func (o *Disk) SetInitializeParams(v *InitializeParams) *Disk {
+ if o.InitializeParams = v; o.InitializeParams == nil {
+ o.nullFields = append(o.nullFields, "InitializeParams")
+ }
+ return o
+}
+
+// SetInterface sets the interface
+func (o *Disk) SetInterface(v *string) *Disk {
+ if o.Interface = v; o.Interface == nil {
+ o.nullFields = append(o.nullFields, "Interface")
+ }
+ return o
+}
+
+// SetMode sets the mode
+func (o *Disk) SetMode(v *string) *Disk {
+ if o.Mode = v; o.Mode == nil {
+ o.nullFields = append(o.nullFields, "Mode")
+ }
+ return o
+}
+
+// SetSource sets the source
+func (o *Disk) SetSource(v *string) *Disk {
+ if o.Source = v; o.Source == nil {
+ o.nullFields = append(o.nullFields, "Source")
+ }
+ return o
+}
+
+// SetType sets the type of disk
+func (o *Disk) SetType(v *string) *Disk {
+ if o.Type = v; o.Type == nil {
+ o.nullFields = append(o.nullFields, "Type")
+ }
+ return o
+}
+
+// region InitializeParams setters
+
+// MarshalJSON marshals the InitializeParams, honoring its forceSendFields and nullFields.
+func (o InitializeParams) MarshalJSON() ([]byte, error) {
+ type noMethod InitializeParams
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+// SetDiskSizeGB sets the disk size in gigabytes, in multiples of 2
+func (o *InitializeParams) SetDiskSizeGB(v *int) *InitializeParams {
+ if o.DiskSizeGB = v; o.DiskSizeGB == nil {
+ o.nullFields = append(o.nullFields, "DiskSizeGB")
+ }
+ return o
+}
+
+// SetDiskType sets the type of disk
+func (o *InitializeParams) SetDiskType(v *string) *InitializeParams {
+ if o.DiskType = v; o.DiskType == nil {
+ o.nullFields = append(o.nullFields, "DiskType")
+ }
+ return o
+}
+
+// SetSourceImage sets the source image to use
+func (o *InitializeParams) SetSourceImage(v *string) *InitializeParams {
+ if o.SourceImage = v; o.SourceImage == nil {
+ o.nullFields = append(o.nullFields, "SourceImage")
+ }
+ return o
+}
+
+// endregion
+
+// endregion
+
+// region Label setters
+
+// MarshalJSON marshals the Label, honoring its forceSendFields and nullFields.
+func (o Label) MarshalJSON() ([]byte, error) {
+ type noMethod Label
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+// SetKey sets the key for the label
+func (o *Label) SetKey(v *string) *Label {
+ if o.Key = v; o.Key == nil {
+ o.nullFields = append(o.nullFields, "Key")
+ }
+ return o
+}
+
+// SetValue sets the value for the label
+func (o *Label) SetValue(v *string) *Label {
+ if o.Value = v; o.Value == nil {
+ o.nullFields = append(o.nullFields, "Value")
+ }
+ return o
+}
+
+// endregion
+
+// region NetworkInterface setters
+
+// MarshalJSON marshals the NetworkInterface, honoring its forceSendFields and nullFields.
+func (o NetworkInterface) MarshalJSON() ([]byte, error) {
+ type noMethod NetworkInterface
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+// SetAccessConfigs creates a list of one or more access configuration objects
+func (o *NetworkInterface) SetAccessConfigs(v []*AccessConfig) *NetworkInterface {
+ if o.AccessConfigs = v; o.AccessConfigs == nil {
+ o.nullFields = append(o.nullFields, "AccessConfigs")
+ }
+ return o
+}
+
+// SetAliasIPRanges sets a list of alias IP range objects
+func (o *NetworkInterface) SetAliasIPRanges(v []*AliasIPRange) *NetworkInterface {
+ if o.AliasIPRanges = v; o.AliasIPRanges == nil {
+ o.nullFields = append(o.nullFields, "AliasIPRanges")
+ }
+ return o
+}
+
+// SetNetwork sets the name of the network
+func (o *NetworkInterface) SetNetwork(v *string) *NetworkInterface {
+ if o.Network = v; o.Network == nil {
+ o.nullFields = append(o.nullFields, "Network")
+ }
+ return o
+}
+
+// SetProjectId sets the project identifier of the network.
+func (o *NetworkInterface) SetProjectId(v *string) *NetworkInterface {
+ if o.ProjectID = v; o.ProjectID == nil {
+ o.nullFields = append(o.nullFields, "ProjectID")
+ }
+ return o
+}
+
+// region AccessConfig setters
+
+// MarshalJSON marshals the AccessConfig, honoring its forceSendFields and nullFields.
+func (o AccessConfig) MarshalJSON() ([]byte, error) {
+ type noMethod AccessConfig
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+// SetName sets the name of the access configuration
+func (o *AccessConfig) SetName(v *string) *AccessConfig {
+ if o.Name = v; o.Name == nil {
+ o.nullFields = append(o.nullFields, "Name")
+ }
+ return o
+}
+
+// SetType sets the type of access configuration
+func (o *AccessConfig) SetType(v *string) *AccessConfig {
+ if o.Type = v; o.Type == nil {
+ o.nullFields = append(o.nullFields, "Type")
+ }
+ return o
+}
+
+// endregion
+
+// region AliasIPRange setters
+
+// MarshalJSON marshals the AliasIPRange, honoring its forceSendFields and nullFields.
+func (o AliasIPRange) MarshalJSON() ([]byte, error) {
+ type noMethod AliasIPRange
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+// SetIPCIDRRange sets the ip/cidr range
+func (o *AliasIPRange) SetIPCIDRRange(v *string) *AliasIPRange {
+ if o.IPCIDRRange = v; o.IPCIDRRange == nil {
+ o.nullFields = append(o.nullFields, "IPCIDRRange")
+ }
+ return o
+}
+
+// SetSubnetworkRangeName sets the name of the subnetwork range
+func (o *AliasIPRange) SetSubnetworkRangeName(v *string) *AliasIPRange {
+ if o.SubnetworkRangeName = v; o.SubnetworkRangeName == nil {
+ o.nullFields = append(o.nullFields, "SubnetworkRangeName")
+ }
+ return o
+}
+
+// endregion
+
+// endregion
+
+// region Metadata setters
+
+// MarshalJSON marshals the Metadata, honoring its forceSendFields and nullFields.
+func (o Metadata) MarshalJSON() ([]byte, error) {
+ type noMethod Metadata
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+// SetKey sets the metadata key
+func (o *Metadata) SetKey(v *string) *Metadata {
+ if o.Key = v; o.Key == nil {
+ o.nullFields = append(o.nullFields, "Key")
+ }
+ return o
+}
+
+// SetValue sets the metadata value
+func (o *Metadata) SetValue(v *string) *Metadata {
+ if o.Value = v; o.Value == nil {
+ o.nullFields = append(o.nullFields, "Value")
+ }
+ return o
+}
+
+// endregion
+
+// endregion
+
+// region Subnet setters
+
+// MarshalJSON marshals the Subnet, honoring its forceSendFields and nullFields.
+func (o Subnet) MarshalJSON() ([]byte, error) {
+ type noMethod Subnet
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+// SetRegion sets the region the subnet is in.
+func (o *Subnet) SetRegion(v *string) *Subnet {
+ if o.Region = v; o.Region == nil {
+ o.nullFields = append(o.nullFields, "Region")
+ }
+ return o
+}
+
+// SetSubnetNames sets the list of subnets names to use
+func (o *Subnet) SetSubnetNames(v []string) *Subnet {
+ if o.SubnetNames = v; o.SubnetNames == nil {
+ o.nullFields = append(o.nullFields, "SubnetNames")
+ }
+ return o
+}
+
+// endregion
+
+// endregion
+
+// region ImportGKE setters
+
+// MarshalJSON marshals the ImportGKEGroup, honoring its forceSendFields and nullFields.
+func (o ImportGKEGroup) MarshalJSON() ([]byte, error) {
+ type noMethod ImportGKEGroup
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+// SetAvailabilityZones sets the availability zones for the gke group
+func (o *ImportGKEGroup) SetAvailabilityZones(v []string) *ImportGKEGroup {
+ if o.AvailabilityZones = v; o.AvailabilityZones == nil {
+ o.nullFields = append(o.nullFields, "AvailabilityZones")
+ }
+ return o
+}
+
+// SetCapacity sets the capacity for a gke group
+func (o *ImportGKEGroup) SetCapacity(v *CapacityGKE) *ImportGKEGroup {
+ if o.Capacity = v; o.Capacity == nil {
+ o.nullFields = append(o.nullFields, "Capacity")
+ }
+ return o
+}
+
+// SetInstanceTypes sets the instance types for the group.
+func (o *ImportGKEGroup) SetInstanceTypes(v *InstanceTypesGKE) *ImportGKEGroup {
+ if o.InstanceTypes = v; o.InstanceTypes == nil {
+ o.nullFields = append(o.nullFields, "InstanceTypes")
+ }
+ return o
+}
+
+// SetName sets the group name
+func (o *ImportGKEGroup) SetName(v *string) *ImportGKEGroup {
+ if o.Name = v; o.Name == nil {
+ o.nullFields = append(o.nullFields, "Name")
+ }
+ return o
+}
+
+// SetPreemptiblePercentage sets the preemptible percentage when importing a gke cluster into Elastigroup.
+func (o *ImportGKEGroup) SetPreemptiblePercentage(v *int) *ImportGKEGroup {
+ if o.PreemptiblePercentage = v; o.PreemptiblePercentage == nil {
+ o.nullFields = append(o.nullFields, "PreemptiblePercentage")
+ }
+ return o
+}
+
+// SetNodeImage sets the node image for the imported gke group.
+func (o *ImportGKEGroup) SetNodeImage(v *string) *ImportGKEGroup {
+ if o.NodeImage = v; o.NodeImage == nil {
+ o.nullFields = append(o.nullFields, "NodeImage")
+ }
+ return o
+}
+
+// MarshalJSON marshals the InstanceTypesGKE, honoring its forceSendFields and nullFields.
+func (o InstanceTypesGKE) MarshalJSON() ([]byte, error) {
+ type noMethod InstanceTypesGKE
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+// SetOnDemand sets the instance types when importing a gke group
+func (o *InstanceTypesGKE) SetOnDemand(v *string) *InstanceTypesGKE {
+ if o.OnDemand = v; o.OnDemand == nil {
+ o.nullFields = append(o.nullFields, "OnDemand")
+ }
+ return o
+}
+
+// SetPreemptible sets the list of preemptible instance types
+func (o *InstanceTypesGKE) SetPreemptible(v []string) *InstanceTypesGKE {
+ if o.Preemptible = v; o.Preemptible == nil {
+ o.nullFields = append(o.nullFields, "Preemptible")
+ }
+ return o
+}
+
+// endregion
+
+// region Integration setters
+
+// MarshalJSON marshals the Integration, honoring its forceSendFields and nullFields.
+func (o Integration) MarshalJSON() ([]byte, error) {
+ type noMethod Integration
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+// SetGKE sets the GKE integration
+func (o *Integration) SetGKE(v *GKEIntegration) *Integration {
+ if o.GKE = v; o.GKE == nil {
+ o.nullFields = append(o.nullFields, "GKE")
+ }
+ return o
+}
+
+// SetDockerSwarm sets the DockerSwarm integration
+func (o *Integration) SetDockerSwarm(v *DockerSwarmIntegration) *Integration {
+ if o.DockerSwarm = v; o.DockerSwarm == nil {
+ o.nullFields = append(o.nullFields, "DockerSwarm")
+ }
+ return o
+}
+
+// region GKE integration setters
+
+// MarshalJSON marshals the GKEIntegration, honoring its forceSendFields and nullFields.
+func (o GKEIntegration) MarshalJSON() ([]byte, error) {
+ type noMethod GKEIntegration
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+// SetAutoUpdate sets the autoupdate flag
+func (o *GKEIntegration) SetAutoUpdate(v *bool) *GKEIntegration {
+ if o.AutoUpdate = v; o.AutoUpdate == nil {
+ o.nullFields = append(o.nullFields, "AutoUpdate")
+ }
+ return o
+}
+
+// SetAutoScale sets the AutoScale configuration used with the GKE integration
+func (o *GKEIntegration) SetAutoScale(v *AutoScaleGKE) *GKEIntegration {
+ if o.AutoScale = v; o.AutoScale == nil {
+ o.nullFields = append(o.nullFields, "AutoScale")
+ }
+ return o
+}
+
+// SetLocation sets the location that the cluster is located in
+func (o *GKEIntegration) SetLocation(v *string) *GKEIntegration {
+ if o.Location = v; o.Location == nil {
+ o.nullFields = append(o.nullFields, "Location")
+ }
+ return o
+}
+
+// SetClusterID sets the cluster ID
+func (o *GKEIntegration) SetClusterID(v *string) *GKEIntegration {
+ if o.ClusterID = v; o.ClusterID == nil {
+ o.nullFields = append(o.nullFields, "ClusterID")
+ }
+ return o
+}
+
+// region GKE AutoScaling setters
+
+// MarshalJSON marshals the AutoScaleGKE, honoring its forceSendFields and nullFields.
+func (o AutoScaleGKE) MarshalJSON() ([]byte, error) {
+ type noMethod AutoScaleGKE
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+// SetLabels sets the AutoScale labels for the GKE integration
+func (o *AutoScaleGKE) SetLabels(v []*AutoScaleLabel) *AutoScaleGKE {
+ if o.Labels = v; o.Labels == nil {
+ o.nullFields = append(o.nullFields, "Labels")
+ }
+ return o
+}
+
+// endregion
+
+// endregion
+
+// region DockerSwarm integration setters
+
+// MarshalJSON marshals the DockerSwarmIntegration, honoring its forceSendFields and nullFields.
+func (o DockerSwarmIntegration) MarshalJSON() ([]byte, error) {
+ type noMethod DockerSwarmIntegration
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+// SetMasterPort sets the master port
+func (o *DockerSwarmIntegration) SetMasterPort(v *int) *DockerSwarmIntegration {
+ if o.MasterPort = v; o.MasterPort == nil {
+ o.nullFields = append(o.nullFields, "MasterPort")
+ }
+ return o
+}
+
+// SetMasterHost sets the master host
+func (o *DockerSwarmIntegration) SetMasterHost(v *string) *DockerSwarmIntegration {
+ if o.MasterHost = v; o.MasterHost == nil {
+ o.nullFields = append(o.nullFields, "MasterHost")
+ }
+ return o
+}
+
+// endregion
+
+// endregion
+
+// region Scaling Policy setters
+
+// MarshalJSON marshals the Scaling, honoring its forceSendFields and nullFields.
+func (o Scaling) MarshalJSON() ([]byte, error) {
+ type noMethod Scaling
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+// SetUp sets the scaling policy to use when increasing the number of instances in a group.
+func (o *Scaling) SetUp(v []*ScalingPolicy) *Scaling {
+ if o.Up = v; o.Up == nil {
+ o.nullFields = append(o.nullFields, "Up")
+ }
+ return o
+}
+
+// SetDown sets the scaling policy to use when decreasing the number of instances in a group.
+func (o *Scaling) SetDown(v []*ScalingPolicy) *Scaling {
+ if o.Down = v; o.Down == nil {
+ o.nullFields = append(o.nullFields, "Down")
+ }
+ return o
+}
+
+// region ScalingPolicy setters
+
+// MarshalJSON marshals the ScalingPolicy, honoring its forceSendFields and nullFields.
+func (o ScalingPolicy) MarshalJSON() ([]byte, error) {
+ type noMethod ScalingPolicy
+ raw := noMethod(o)
+ return jsonutil.MarshalJSON(raw, o.forceSendFields, o.nullFields)
+}
+
+// SetAction sets the action to perform when scaling
+func (o *ScalingPolicy) SetAction(v *Action) *ScalingPolicy {
+ if o.Action = v; o.Action == nil {
+ o.nullFields = append(o.nullFields, "Action")
+ }
+ return o
+}
+
+// SetCooldown sets the cooldown time in seconds before triggered events can start
+func (o *ScalingPolicy) SetCooldown(v *int) *ScalingPolicy {
+ if o.Cooldown = v; o.Cooldown == nil {
+ o.nullFields = append(o.nullFields, "Cooldown")
+ }
+ return o
+}
+
+// SetDimensions sets the list of dimension objects
+func (o *ScalingPolicy) SetDimensions(v []*Dimension) *ScalingPolicy {
+ if o.Dimensions = v; o.Dimensions == nil {
+ o.nullFields = append(o.nullFields, "Dimensions")
+ }
+ return o
+}
+
+// SetEvaluationPeriods sets the number of periods over which data is compared
+func (o *ScalingPolicy) SetEvaluationPeriods(v *int) *ScalingPolicy {
+ if o.EvaluationPeriods = v; o.EvaluationPeriods == nil {
+ o.nullFields = append(o.nullFields, "EvaluationPeriods")
+ }
+ return o
+}
+
+// SetMetricName sets the name of the metric to compare
+func (o *ScalingPolicy) SetMetricName(v *string) *ScalingPolicy {
+ if o.MetricName = v; o.MetricName == nil {
+ o.nullFields = append(o.nullFields, "MetricName")
+ }
+ return o
+}
+
+// SetNamespace sets the namespace for the associated metric
+func (o *ScalingPolicy) SetNamespace(v *string) *ScalingPolicy {
+ if o.Namespace = v; o.Namespace == nil {
+ o.nullFields = append(o.nullFields, "Namespace")
+ }
+ return o
+}
+
+// SetOperator sets the operator (gte, lte)
+func (o *ScalingPolicy) SetOperator(v *string) *ScalingPolicy {
+ if o.Operator = v; o.Operator == nil {
+ o.nullFields = append(o.nullFields, "Operator")
+ }
+ return o
+}
+
+// SetPeriod sets the period in seconds over which the statistic is applied
+func (o *ScalingPolicy) SetPeriod(v *int) *ScalingPolicy {
+ if o.Period = v; o.Period == nil {
+ o.nullFields = append(o.nullFields, "Period")
+ }
+ return o
+}
+
+// SetPolicyName sets the name of the scaling policy
+func (o *ScalingPolicy) SetPolicyName(v *string) *ScalingPolicy {
+ if o.PolicyName = v; o.PolicyName == nil {
+ o.nullFields = append(o.nullFields, "PolicyName")
+ }
+ return o
+}
+
+// SetSource sets the source of the metric (spectrum, stackdriver)
+func (o *ScalingPolicy) SetSource(v *string) *ScalingPolicy {
+ if o.Source = v; o.Source == nil {
+ o.nullFields = append(o.nullFields, "Source")
+ }
+ return o
+}
+
+// SetStatistic sets the metric aggregator to return (average, sum, min, max)
+func (o *ScalingPolicy) SetStatistic(v *string) *ScalingPolicy {
+ if o.Statistic = v; o.Statistic == nil {
+ o.nullFields = append(o.nullFields, "Statistic")
+ }
+ return o
+}
+
+// SetThreshold sets the value against which the metric is compared
+func (o *ScalingPolicy) SetThreshold(v *float64) *ScalingPolicy {
+ if o.Threshold = v; o.Threshold == nil {
+ o.nullFields = append(o.nullFields, "Threshold")
+ }
+ return o
+}
+
+// SetUnit sets the unit for the associated metric
+func (o *ScalingPolicy) SetUnit(v *string) *ScalingPolicy {
+ if o.Unit = v; o.Unit == nil {
+ o.nullFields = append(o.nullFields, "Unit")
+ }
+ return o
+}
+
+// region Action setters
+
+// MarshalJSON serializes the Action, honoring the forceSendFields and
+// nullFields bookkeeping maintained by the setters.
+func (o Action) MarshalJSON() ([]byte, error) {
+	type plain Action
+	return jsonutil.MarshalJSON(plain(o), o.forceSendFields, o.nullFields)
+}
+
+// SetAdjustment assigns the number associated with the action type; a nil
+// value marks the field for explicit JSON null serialization.
+func (o *Action) SetAdjustment(v *int) *Action {
+	o.Adjustment = v
+	if v == nil {
+		o.nullFields = append(o.nullFields, "Adjustment")
+	}
+	return o
+}
+
+// SetType assigns the type of action to take when scaling (adjustment).
+func (o *Action) SetType(v *string) *Action {
+	o.Type = v
+	if v == nil {
+		o.nullFields = append(o.nullFields, "Type")
+	}
+	return o
+}
+
+// endregion
+
+// region Dimension setters
+
+// MarshalJSON serializes the Dimension, honoring forceSendFields/nullFields.
+func (o Dimension) MarshalJSON() ([]byte, error) {
+	type plain Dimension
+	return jsonutil.MarshalJSON(plain(o), o.forceSendFields, o.nullFields)
+}
+
+// SetName assigns the name of the dimension.
+func (o *Dimension) SetName(v *string) *Dimension {
+	o.Name = v
+	if v == nil {
+		o.nullFields = append(o.nullFields, "Name")
+	}
+	return o
+}
+
+// SetValue assigns the value of the dimension.
+func (o *Dimension) SetValue(v *string) *Dimension {
+	o.Value = v
+	if v == nil {
+		o.nullFields = append(o.nullFields, "Value")
+	}
+	return o
+}
+
+// endregion
+
+// endregion
+
+// endregion
+
+// region Scheduling
+
+// MarshalJSON serializes the Scheduling block, honoring the
+// forceSendFields/nullFields bookkeeping maintained by the setters.
+func (o Scheduling) MarshalJSON() ([]byte, error) {
+	type plain Scheduling
+	return jsonutil.MarshalJSON(plain(o), o.forceSendFields, o.nullFields)
+}
+
+// SetTasks assigns the list of scheduled tasks; a nil slice marks the
+// field for explicit JSON null serialization.
+func (o *Scheduling) SetTasks(v []*Task) *Scheduling {
+	o.Tasks = v
+	if v == nil {
+		o.nullFields = append(o.nullFields, "Tasks")
+	}
+	return o
+}
+
+// endregion
+
+// region Task
+
+// MarshalJSON serializes the Task, honoring forceSendFields/nullFields.
+func (o Task) MarshalJSON() ([]byte, error) {
+	type plain Task
+	return jsonutil.MarshalJSON(plain(o), o.forceSendFields, o.nullFields)
+}
+
+// SetIsEnabled toggles whether the scheduled task is active.
+func (o *Task) SetIsEnabled(v *bool) *Task {
+	o.IsEnabled = v
+	if v == nil {
+		o.nullFields = append(o.nullFields, "IsEnabled")
+	}
+	return o
+}
+
+// SetType assigns the task type.
+func (o *Task) SetType(v *string) *Task {
+	o.Type = v
+	if v == nil {
+		o.nullFields = append(o.nullFields, "Type")
+	}
+	return o
+}
+
+// SetCronExpression assigns the cron expression controlling when the task
+// runs.
+func (o *Task) SetCronExpression(v *string) *Task {
+	o.CronExpression = v
+	if v == nil {
+		o.nullFields = append(o.nullFields, "CronExpression")
+	}
+	return o
+}
+
+// SetTargetCapacity assigns the target capacity the task applies.
+func (o *Task) SetTargetCapacity(v *int) *Task {
+	o.TargetCapacity = v
+	if v == nil {
+		o.nullFields = append(o.nullFields, "TargetCapacity")
+	}
+	return o
+}
+
+// SetMinCapacity assigns the minimum capacity the task applies.
+func (o *Task) SetMinCapacity(v *int) *Task {
+	o.MinCapacity = v
+	if v == nil {
+		o.nullFields = append(o.nullFields, "MinCapacity")
+	}
+	return o
+}
+
+// SetMaxCapacity assigns the maximum capacity the task applies.
+func (o *Task) SetMaxCapacity(v *int) *Task {
+	o.MaxCapacity = v
+	if v == nil {
+		o.nullFields = append(o.nullFields, "MaxCapacity")
+	}
+	return o
+}
+
+// endregion
+
+// region Strategy setters
+
+// MarshalJSON serializes the Strategy, honoring forceSendFields/nullFields.
+func (o Strategy) MarshalJSON() ([]byte, error) {
+	type plain Strategy
+	return jsonutil.MarshalJSON(plain(o), o.forceSendFields, o.nullFields)
+}
+
+// SetDrainingTimeout assigns the time to keep an instance alive after
+// detaching it from the group; a nil value marks the field for explicit
+// JSON null serialization.
+func (o *Strategy) SetDrainingTimeout(v *int) *Strategy {
+	o.DrainingTimeout = v
+	if v == nil {
+		o.nullFields = append(o.nullFields, "DrainingTimeout")
+	}
+	return o
+}
+
+// SetFallbackToOnDemand toggles falling back to on-demand instances when
+// preemptible instances are not available.
+func (o *Strategy) SetFallbackToOnDemand(v *bool) *Strategy {
+	o.FallbackToOnDemand = v
+	if v == nil {
+		o.nullFields = append(o.nullFields, "FallbackToOnDemand")
+	}
+	return o
+}
+
+// SetPreemptiblePercentage assigns the ratio of preemptible instances to
+// use in the group.
+func (o *Strategy) SetPreemptiblePercentage(v *int) *Strategy {
+	o.PreemptiblePercentage = v
+	if v == nil {
+		o.nullFields = append(o.nullFields, "PreemptiblePercentage")
+	}
+	return o
+}
+
+// SetOnDemandCount assigns the number of on-demand instances to use in
+// the group.
+func (o *Strategy) SetOnDemandCount(v *int) *Strategy {
+	o.OnDemandCount = v
+	if v == nil {
+		o.nullFields = append(o.nullFields, "OnDemandCount")
+	}
+	return o
+}
+
+// SetProvisioningModel assigns the provisioning model of the group.
+func (o *Strategy) SetProvisioningModel(v *string) *Strategy {
+	o.ProvisioningModel = v
+	if v == nil {
+		o.nullFields = append(o.nullFields, "ProvisioningModel")
+	}
+	return o
+}
+
+// endregion
diff --git a/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/gcp/service.go b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/gcp/service.go
new file mode 100644
index 000000000000..cb14107364f7
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/gcp/service.go
@@ -0,0 +1,38 @@
+package gcp
+
+import (
+ "context"
+
+ "github.com/spotinst/spotinst-sdk-go/spotinst"
+ "github.com/spotinst/spotinst-sdk-go/spotinst/client"
+ "github.com/spotinst/spotinst-sdk-go/spotinst/session"
+)
+
+// Service provides the API operation methods for making requests to endpoints
+// of the Spotinst API. See this package's package overview docs for details on
+// the service.
+type Service interface {
+	Create(context.Context, *CreateGroupInput) (*CreateGroupOutput, error)
+	Read(context.Context, *ReadGroupInput) (*ReadGroupOutput, error)
+	Update(context.Context, *UpdateGroupInput) (*UpdateGroupOutput, error)
+	Delete(context.Context, *DeleteGroupInput) (*DeleteGroupOutput, error)
+	List(context.Context, *ListGroupsInput) (*ListGroupsOutput, error)
+	ImportGKECluster(context.Context, *ImportGKEClusterInput) (*ImportGKEClusterOutput, error)
+	Status(context.Context, *StatusGroupInput) (*StatusGroupOutput, error)
+}
+
+// ServiceOp is the concrete implementation of Service, backed by an HTTP
+// client for the Spotinst API.
+type ServiceOp struct {
+	Client *client.Client
+}
+
+// Compile-time check that ServiceOp satisfies the Service interface.
+var _ Service = &ServiceOp{}
+
+// New returns a new GCP Elastigroup service client. The session's config is
+// used as the base, with any additional configs merged on top (later configs
+// override earlier ones).
+func New(sess *session.Session, cfgs ...*spotinst.Config) *ServiceOp {
+	cfg := &spotinst.Config{}
+	cfg.Merge(sess.Config)
+	cfg.Merge(cfgs...)
+
+	return &ServiceOp{
+		// Use the merged config here; passing sess.Config would silently
+		// discard the caller-supplied cfgs overrides.
+		Client: client.New(cfg),
+	}
+}
diff --git a/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/gcp/tag.go b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/gcp/tag.go
new file mode 100644
index 000000000000..b39119548e03
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/gcp/tag.go
@@ -0,0 +1,31 @@
+package gcp
+
+import "github.com/spotinst/spotinst-sdk-go/spotinst/util/jsonutil"
+
+// Tag is a GCP instance tag, expressed as a key/value pair.
+type Tag struct {
+	Key   *string `json:"tagKey,omitempty"`
+	Value *string `json:"tagValue,omitempty"`
+
+	forceSendFields []string
+	nullFields      []string
+}
+
+// MarshalJSON serializes the Tag, honoring forceSendFields/nullFields.
+func (o Tag) MarshalJSON() ([]byte, error) {
+	type plain Tag
+	return jsonutil.MarshalJSON(plain(o), o.forceSendFields, o.nullFields)
+}
+
+// SetKey assigns the tag key; a nil value marks the field for explicit
+// JSON null serialization.
+func (o *Tag) SetKey(v *string) *Tag {
+	o.Key = v
+	if v == nil {
+		o.nullFields = append(o.nullFields, "Key")
+	}
+	return o
+}
+
+// SetValue assigns the tag value.
+func (o *Tag) SetValue(v *string) *Tag {
+	o.Value = v
+	if v == nil {
+		o.nullFields = append(o.nullFields, "Value")
+	}
+	return o
+}
diff --git a/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/client/client.go b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/client/client.go
new file mode 100644
index 000000000000..4e2db7c1cb7c
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/client/client.go
@@ -0,0 +1,93 @@
+package client
+
+import (
+ "context"
+ "net/http"
+ "net/http/httputil"
+ "net/url"
+
+ "github.com/spotinst/spotinst-sdk-go/spotinst"
+)
+
+// Client is an HTTP client for the Spotinst API, carrying the effective
+// configuration (base URL, credentials, logger, HTTP transport).
+type Client struct {
+	config *spotinst.Config
+}
+
+// New returns a new client; a nil cfg falls back to spotinst.DefaultConfig.
+func New(cfg *spotinst.Config) *Client {
+	if cfg == nil {
+		cfg = spotinst.DefaultConfig()
+	}
+	return &Client{config: cfg}
+}
+
+// NewRequest builds an empty API request for the given HTTP method and path.
+func NewRequest(method, path string) *Request {
+	r := &Request{
+		method: method,
+		url:    &url.URL{Path: path},
+		header: make(http.Header),
+		Params: make(url.Values),
+	}
+	return r
+}
+
+// Do runs a request with our client. The request and response are dumped to
+// the configured logger, if any.
+func (c *Client) Do(ctx context.Context, r *Request) (*http.Response, error) {
+	req, err := r.toHTTP(ctx, c.config)
+	if err != nil {
+		return nil, err
+	}
+	c.logRequest(req)
+	resp, err := c.config.HTTPClient.Do(req)
+	c.logResponse(resp)
+	return resp, err
+}
+
+// logf writes a formatted message to the configured logger; it is a no-op
+// when no logger is set.
+func (c *Client) logf(format string, args ...interface{}) {
+	if c.config.Logger != nil {
+		c.config.Logger.Printf(format, args...)
+	}
+}
+
+const logReqMsg = `SPOTINST: Request "%s %s" details:
+---[ REQUEST ]---------------------------------------
+%s
+-----------------------------------------------------`
+
+// logRequest dumps the outgoing request (including body) to the logger.
+func (c *Client) logRequest(req *http.Request) {
+	if c.config.Logger != nil && req != nil {
+		out, err := httputil.DumpRequestOut(req, true)
+		if err == nil {
+			c.logf(logReqMsg, req.Method, req.URL, string(out))
+		}
+	}
+}
+
+const logRespMsg = `SPOTINST: Response "%s %s" details:
+---[ RESPONSE ]----------------------------------------
+%s
+-------------------------------------------------------`
+
+// logResponse dumps the received response (including body) to the logger.
+func (c *Client) logResponse(resp *http.Response) {
+	if c.config.Logger != nil && resp != nil {
+		out, err := httputil.DumpResponse(resp, true)
+		if err == nil {
+			c.logf(logRespMsg, resp.Request.Method, resp.Request.URL, string(out))
+		}
+	}
+}
+
+// DoOrg runs a request with our client, like Do, but builds the HTTP request
+// via toHTTPOrg, which does not inject the accountId query parameter (used
+// for organization-scoped endpoints).
+func (c *Client) DoOrg(ctx context.Context, r *Request) (*http.Response, error) {
+	req, err := r.toHTTPOrg(ctx, c.config)
+	if err != nil {
+		return nil, err
+	}
+	c.logRequest(req)
+	resp, err := c.config.HTTPClient.Do(req)
+	c.logResponse(resp)
+	return resp, err
+}
diff --git a/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/client/request.go b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/client/request.go
new file mode 100644
index 000000000000..f00a68fcf8c9
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/client/request.go
@@ -0,0 +1,119 @@
+package client
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "io"
+ "net/http"
+ "net/url"
+
+ "github.com/spotinst/spotinst-sdk-go/spotinst"
+)
+
+// Request describes a single Spotinst API call before it is converted into
+// a standard *http.Request.
+type Request struct {
+	// Obj, when non-nil and body is nil, is JSON-encoded as the request body.
+	Obj interface{}
+	// Params holds query-string parameters.
+	Params url.Values
+	url    *url.URL
+	method string
+	body   io.Reader
+	header http.Header
+}
+
+// toHTTP converts the request to an HTTP request. It injects the caller's
+// credentials: the token as a Bearer Authorization header and, when set,
+// the account ID as the accountId query parameter.
+func (r *Request) toHTTP(ctx context.Context, cfg *spotinst.Config) (*http.Request, error) {
+	// Set the user credentials.
+	creds, err := cfg.Credentials.Get()
+	if err != nil {
+		return nil, err
+	}
+	if creds.Token != "" {
+		r.header.Set("Authorization", "Bearer "+creds.Token)
+	}
+	if creds.Account != "" {
+		r.Params.Set("accountId", creds.Account)
+	}
+
+	// Encode the query parameters. Must happen after the accountId param is
+	// set above so it is included in the final query string.
+	r.url.RawQuery = r.Params.Encode()
+
+	// Check if we should encode the body. An explicitly supplied body takes
+	// precedence over Obj.
+	if r.body == nil && r.Obj != nil {
+		if b, err := EncodeBody(r.Obj); err != nil {
+			return nil, err
+		} else {
+			r.body = b
+		}
+	}
+
+	// Create the HTTP request.
+	req, err := http.NewRequest(r.method, r.url.RequestURI(), r.body)
+	if err != nil {
+		return nil, err
+	}
+
+	// Set request base URL. The path/query came from r.url; host and scheme
+	// come from the configured base URL.
+	req.URL.Host = cfg.BaseURL.Host
+	req.URL.Scheme = cfg.BaseURL.Scheme
+
+	// Set request headers.
+	req.Host = cfg.BaseURL.Host
+	req.Header = r.header
+	req.Header.Set("Content-Type", cfg.ContentType)
+	req.Header.Add("Accept", cfg.ContentType)
+	req.Header.Add("User-Agent", cfg.UserAgent)
+
+	return req.WithContext(ctx), nil
+}
+
+// EncodeBody is used to encode a request body as JSON. Note that
+// json.Encoder appends a trailing newline to the output.
+func EncodeBody(obj interface{}) (io.Reader, error) {
+	buf := bytes.NewBuffer(nil)
+	if err := json.NewEncoder(buf).Encode(obj); err != nil {
+		return nil, err
+	}
+	return buf, nil
+}
+
+// toHTTPOrg converts the request to an HTTP request. It is identical to
+// toHTTP except that it does NOT inject the accountId query parameter, so
+// it is used for organization-scoped endpoints.
+func (r *Request) toHTTPOrg(ctx context.Context, cfg *spotinst.Config) (*http.Request, error) {
+	// Set the user credentials (token only; no account ID here).
+	creds, err := cfg.Credentials.Get()
+	if err != nil {
+		return nil, err
+	}
+	if creds.Token != "" {
+		r.header.Set("Authorization", "Bearer "+creds.Token)
+	}
+
+	// Encode the query parameters.
+	r.url.RawQuery = r.Params.Encode()
+
+	// Check if we should encode the body. An explicitly supplied body takes
+	// precedence over Obj.
+	if r.body == nil && r.Obj != nil {
+		if b, err := EncodeBody(r.Obj); err != nil {
+			return nil, err
+		} else {
+			r.body = b
+		}
+	}
+
+	// Create the HTTP request.
+	req, err := http.NewRequest(r.method, r.url.RequestURI(), r.body)
+	if err != nil {
+		return nil, err
+	}
+
+	// Set request base URL.
+	req.URL.Host = cfg.BaseURL.Host
+	req.URL.Scheme = cfg.BaseURL.Scheme
+
+	// Set request headers.
+	req.Host = cfg.BaseURL.Host
+	req.Header = r.header
+	req.Header.Set("Content-Type", cfg.ContentType)
+	req.Header.Add("Accept", cfg.ContentType)
+	req.Header.Add("User-Agent", cfg.UserAgent)
+
+	return req.WithContext(ctx), nil
+}
diff --git a/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/client/response.go b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/client/response.go
new file mode 100644
index 000000000000..0956ea42b9d0
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/client/response.go
@@ -0,0 +1,110 @@
+package client
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "strconv"
+)
+
+// Response mirrors the JSON envelope returned by the Spotinst API: request
+// metadata plus either a list of items or a list of errors.
+type Response struct {
+	Request struct {
+		ID string `json:"id"`
+	} `json:"request"`
+	Response struct {
+		Errors []responseError `json:"errors"`
+		Items  []json.RawMessage `json:"items"`
+	} `json:"response"`
+}
+
+// responseError is a single error entry inside the API response envelope.
+type responseError struct {
+	Code    string `json:"code"`
+	Message string `json:"message"`
+	Field   string `json:"field"`
+}
+
+// Error is a single API error enriched with the HTTP response it came from
+// and the server-assigned request ID.
+type Error struct {
+	Response  *http.Response `json:"-"`
+	Code      string         `json:"code"`
+	Message   string         `json:"message"`
+	Field     string         `json:"field"`
+	RequestID string         `json:"requestId"`
+}
+
+// Error formats the API error as "METHOD URL: status (request: id) code:
+// message", with the offending field appended when present.
+func (e Error) Error() string {
+	req := e.Response.Request
+	msg := fmt.Sprintf("%v %v: %d (request: %q) %v: %v",
+		req.Method, req.URL,
+		e.Response.StatusCode, e.RequestID, e.Code, e.Message)
+
+	if e.Field == "" {
+		return msg
+	}
+	return fmt.Sprintf("%s (field: %v)", msg, e.Field)
+}
+
+// Errors is a collection of API errors that itself satisfies error.
+type Errors []Error
+
+// Error concatenates the messages of all errors, one per line.
+func (es Errors) Error() string {
+	var stack string
+	for _, e := range es {
+		stack += e.Error() + "\n"
+	}
+	return stack
+}
+
+// DecodeBody JSON-decodes the response body into out.
+func DecodeBody(resp *http.Response, out interface{}) error {
+	return json.NewDecoder(resp.Body).Decode(out)
+}
+
+// RequireOK verifies that the call succeeded and the API answered with
+// 200 OK; any other status is converted into a typed error via extractError.
+func RequireOK(resp *http.Response, err error) (*http.Response, error) {
+	switch {
+	case err != nil:
+		return nil, err
+	case resp.StatusCode != http.StatusOK:
+		return nil, extractError(resp)
+	default:
+		return resp, nil
+	}
+}
+
+// extractError is used to extract inner/logical errors from the response.
+// It returns an Errors value built from the response envelope, or a single
+// synthesized entry from the HTTP status when the envelope carries no errors.
+func extractError(resp *http.Response) error {
+	buf := bytes.NewBuffer(nil)
+
+	// TeeReader returns a Reader that writes to b what it reads from r.Body.
+	// The body is then replaced with the buffered copy so callers can still
+	// read it after this function has consumed the stream.
+	reader := io.TeeReader(resp.Body, buf)
+	defer resp.Body.Close()
+	resp.Body = ioutil.NopCloser(buf)
+
+	var out Response
+	if err := json.NewDecoder(reader).Decode(&out); err != nil {
+		return err
+	}
+
+	var errors Errors
+	if errs := out.Response.Errors; len(errs) > 0 {
+		// Envelope carried explicit API errors; surface each one.
+		for _, err := range errs {
+			errors = append(errors, Error{
+				Response:  resp,
+				RequestID: out.Request.ID,
+				Code:      err.Code,
+				Message:   err.Message,
+				Field:     err.Field,
+			})
+		}
+	} else {
+		// No logical errors in the body; fall back to the HTTP status.
+		errors = append(errors, Error{
+			Response:  resp,
+			RequestID: out.Request.ID,
+			Code:      strconv.Itoa(resp.StatusCode),
+			Message:   http.StatusText(resp.StatusCode),
+		})
+	}
+
+	return errors
+}
diff --git a/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/config.go b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/config.go
new file mode 100644
index 000000000000..7639fe3c3a07
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/config.go
@@ -0,0 +1,183 @@
+package spotinst
+
+import (
+ "net"
+ "net/http"
+ "net/url"
+ "runtime"
+ "strings"
+ "time"
+
+ "github.com/spotinst/spotinst-sdk-go/spotinst/credentials"
+ "github.com/spotinst/spotinst-sdk-go/spotinst/log"
+ "github.com/spotinst/spotinst-sdk-go/spotinst/util/useragent"
+)
+
+const (
+	// defaultBaseURL is the default base URL of the Spotinst API.
+	// It is used e.g. when initializing a new Client without a specific address.
+	defaultBaseURL = "https://api.spotinst.io"
+
+	// defaultContentType is the default content type to use when making HTTP calls.
+	defaultContentType = "application/json"
+)
+
+// A Config provides Configuration to a service client instance.
+type Config struct {
+	// The base URL the SDK's HTTP client will use when invoking HTTP requests.
+	BaseURL *url.URL
+
+	// The HTTP Client the SDK's API clients will use to invoke HTTP requests.
+	//
+	// Defaults to a DefaultHTTPClient allowing API clients to create copies of
+	// the HTTP client for service specific customizations.
+	HTTPClient *http.Client
+
+	// The credentials object to use when signing requests.
+	//
+	// Defaults to a chain of credential providers to search for credentials in
+	// environment variables and shared credential file.
+	Credentials *credentials.Credentials
+
+	// The logger writer interface to write logging messages to.
+	//
+	// Nil by default; when unset, request/response logging is disabled.
+	Logger log.Logger
+
+	// The User-Agent and Content-Type HTTP headers to set when invoking HTTP
+	// requests.
+	UserAgent, ContentType string
+}
+
+// DefaultBaseURL returns the SDK's default API endpoint as a parsed URL.
+func DefaultBaseURL() *url.URL {
+	u, _ := url.Parse(defaultBaseURL) // constant input; parse cannot fail
+	return u
+}
+
+// DefaultUserAgent returns the default User-Agent header value, combining
+// the SDK name/version with the Go runtime and platform.
+func DefaultUserAgent() string {
+	ua := useragent.New(
+		SDKName,
+		SDKVersion,
+		runtime.Version(),
+		runtime.GOOS,
+		runtime.GOARCH)
+	return ua.String()
+}
+
+// DefaultContentType returns the default Content-Type header value.
+func DefaultContentType() string {
+	return defaultContentType
+}
+
+// DefaultTransport returns a new http.Transport with similar default values to
+// http.DefaultTransport. Do not use this for transient transports as it can
+// leak file descriptors over time. Only use this for transports that will be
+// re-used for the same host(s).
+func DefaultTransport() *http.Transport {
+	return &http.Transport{
+		Proxy: http.ProxyFromEnvironment,
+		DialContext: (&net.Dialer{
+			Timeout:   30 * time.Second,
+			KeepAlive: 30 * time.Second,
+		}).DialContext,
+		TLSHandshakeTimeout: 10 * time.Second,
+		DisableKeepAlives:   false,
+		MaxIdleConnsPerHost: 1,
+	}
+}
+
+// DefaultHTTPClient returns a new http.Client using DefaultTransport: a
+// non-shared transport with keep-alives enabled but at most one idle
+// connection kept per host.
+func DefaultHTTPClient() *http.Client {
+	return &http.Client{
+		Transport: DefaultTransport(),
+	}
+}
+
+// DefaultConfig returns a default configuration for the client. By default this
+// will pool and reuse idle connections to API. If you have a long-lived client
+// object, this is the desired behavior and should make the most efficient use
+// of the connections to API.
+func DefaultConfig() *Config {
+	return &Config{
+		BaseURL:     DefaultBaseURL(),
+		HTTPClient:  DefaultHTTPClient(),
+		UserAgent:   DefaultUserAgent(),
+		ContentType: DefaultContentType(),
+		// Credentials are resolved from the environment first, then from the
+		// shared credentials file.
+		Credentials: credentials.NewChainCredentials(
+			new(credentials.EnvProvider),
+			new(credentials.FileProvider),
+		),
+	}
+}
+
+// WithBaseURL sets the base URL of the Spotinst API and returns the config
+// for chaining. An unparsable URL leaves BaseURL nil.
+func (c *Config) WithBaseURL(rawurl string) *Config {
+	u, _ := url.Parse(rawurl)
+	c.BaseURL = u
+	return c
+}
+
+// WithHTTPClient sets the HTTP client and returns the config for chaining.
+func (c *Config) WithHTTPClient(client *http.Client) *Config {
+	c.HTTPClient = client
+	return c
+}
+
+// WithCredentials sets the credentials and returns the config for chaining.
+func (c *Config) WithCredentials(creds *credentials.Credentials) *Config {
+	c.Credentials = creds
+	return c
+}
+
+// WithUserAgent prepends ua to the existing User-Agent value and returns
+// the config for chaining.
+func (c *Config) WithUserAgent(ua string) *Config {
+	joined := strings.Join([]string{ua, c.UserAgent}, " ")
+	c.UserAgent = strings.TrimSpace(joined)
+	return c
+}
+
+// WithContentType sets the content type and returns the config for chaining.
+func (c *Config) WithContentType(ct string) *Config {
+	c.ContentType = ct
+	return c
+}
+
+// WithLogger sets the logger used for informational messages, e.g. requests
+// and their response times (nil by default), and returns the config.
+func (c *Config) WithLogger(logger log.Logger) *Config {
+	c.Logger = logger
+	return c
+}
+
+// Merge merges the passed in configs into the existing config object.
+func (c *Config) Merge(cfgs ...*Config) {
+	for _, cfg := range cfgs {
+		mergeConfigs(c, cfg)
+	}
+}
+
+// mergeConfigs copies every non-zero field of src over dst; zero-valued
+// fields in src leave dst untouched.
+func mergeConfigs(dst, src *Config) {
+	if src == nil {
+		return
+	}
+	if src.BaseURL != nil {
+		dst.BaseURL = src.BaseURL
+	}
+	if src.Credentials != nil {
+		dst.Credentials = src.Credentials
+	}
+	if src.HTTPClient != nil {
+		dst.HTTPClient = src.HTTPClient
+	}
+	if src.UserAgent != "" {
+		dst.UserAgent = src.UserAgent
+	}
+	if src.ContentType != "" {
+		dst.ContentType = src.ContentType
+	}
+	if src.Logger != nil {
+		dst.Logger = src.Logger
+	}
+}
diff --git a/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/credentials/credentials.go b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/credentials/credentials.go
new file mode 100644
index 000000000000..fcea42bd9e4b
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/credentials/credentials.go
@@ -0,0 +1,67 @@
+package credentials
+
+import (
+ "errors"
+ "sync"
+)
+
+// ErrNoValidTokenFound is returned when there is no valid token.
+var ErrNoValidTokenFound = errors.New("spotinst: no valid token found")
+
+// A Credentials provides synchronous safe retrieval of Spotinst credentials.
+// Credentials will cache the credentials value.
+//
+// Credentials is safe to use across multiple goroutines and will manage the
+// synchronous state so the Providers do not need to implement their own
+// synchronization.
+//
+// The first Credentials.Get() will always call Provider.Retrieve() to get the
+// first instance of the credentials Value. All calls to Get() after that will
+// return the cached credentials Value.
+type Credentials struct {
+	provider     Provider   // source of credential values
+	mu           sync.Mutex // guards creds and forceRefresh
+	forceRefresh bool       // when true, the next Get() bypasses the cache
+	creds        Value      // cached credentials; valid once Token is non-empty
+}
+
+// NewCredentials returns a pointer to a new Credentials with the provider
+// set. The first Get() always consults the provider.
+func NewCredentials(provider Provider) *Credentials {
+	c := &Credentials{provider: provider}
+	c.forceRefresh = true
+	return c
+}
+
+// Get returns the credentials value, or an error if retrieval failed.
+//
+// The cached value is returned when present; otherwise (or after Refresh)
+// the provider's Retrieve() is invoked to repopulate the cache.
+func (c *Credentials) Get() (Value, error) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	// Fast path: a cached token that nobody asked us to refresh.
+	if !c.forceRefresh && c.creds.Token != "" {
+		return c.creds, nil
+	}
+
+	fresh, err := c.provider.Retrieve()
+	if err != nil {
+		return Value{}, err
+	}
+	if fresh.Token == "" {
+		return Value{ProviderName: fresh.ProviderName}, ErrNoValidTokenFound
+	}
+
+	c.creds = fresh
+	c.forceRefresh = false
+	return c.creds, nil
+}
+
+// Refresh forces the credentials to be re-retrieved on the next Get().
+func (c *Credentials) Refresh() {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	c.forceRefresh = true
+}
diff --git a/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/credentials/provider.go b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/credentials/provider.go
new file mode 100644
index 000000000000..1468ff3fd85c
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/credentials/provider.go
@@ -0,0 +1,44 @@
+package credentials
+
+import "fmt"
+
+// A Value is the Spotinst credentials value for individual credential fields.
+type Value struct {
+	// Spotinst API token.
+	Token string `ini:"token" json:"token"`
+
+	// Spotinst account ID.
+	Account string `ini:"account" json:"account"`
+
+	// Provider used to get credentials.
+	ProviderName string `ini:"-" json:"-"`
+}
+
+// A Provider is the interface for any component which will provide credentials
+// Value.
+//
+// The Provider should not need to implement its own mutexes, because that will
+// be managed by Credentials.
+type Provider interface {
+	fmt.Stringer
+
+	// Retrieve returns nil if it successfully retrieved the value. Error is
+	// returned if the value were not obtainable, or empty.
+	Retrieve() (Value, error)
+}
+
+// IsEmpty reports whether both Token and Account are unset.
+func (v *Value) IsEmpty() bool {
+	return len(v.Token) == 0 && len(v.Account) == 0
+}
+
+// IsComplete reports whether both Token and Account are set.
+func (v *Value) IsComplete() bool {
+	return len(v.Token) > 0 && len(v.Account) > 0
+}
+
+// Merge fills v's empty fields from v2; existing values always win.
+func (v *Value) Merge(v2 Value) {
+	if len(v.Token) == 0 {
+		v.Token = v2.Token
+	}
+	if len(v.Account) == 0 {
+		v.Account = v2.Account
+	}
+}
diff --git a/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/credentials/provider_chain.go b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/credentials/provider_chain.go
new file mode 100644
index 000000000000..32b568048838
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/credentials/provider_chain.go
@@ -0,0 +1,117 @@
+package credentials
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/spotinst/spotinst-sdk-go/spotinst/featureflag"
+)
+
+// ErrNoValidProvidersFoundInChain is returned when there are no valid credentials
+// providers in the ChainProvider.
+var ErrNoValidProvidersFoundInChain = errors.New("spotinst: no valid " +
+	"credentials providers in chain")
+
+// A ChainProvider will search for a provider which returns credentials.
+//
+// The ChainProvider provides a way of chaining multiple providers together which
+// will pick the first available using priority order of the Providers in the list.
+//
+// If none of the Providers retrieve valid credentials Value, ChainProvider's
+// Retrieve() will return the error ErrNoValidProvidersFoundInChain.
+//
+// If a Provider is found which returns valid credentials Value ChainProvider
+// will cache that Provider for all calls until Retrieve is called again.
+//
+// Example of ChainProvider to be used with an EnvCredentialsProvider and
+// FileCredentialsProvider. In this example EnvProvider will first check if any
+// credentials are available via the environment variables. If there are none
+// ChainProvider will check the next Provider in the list, FileProvider in this
+// case. If FileCredentialsProvider does not return any credentials ChainProvider
+// will return the error ErrNoValidProvidersFoundInChain.
+//
+// creds := credentials.NewChainCredentials(
+// new(credentials.EnvProvider),
+// new(credentials.FileProvider),
+// )
+type ChainProvider struct {
+	// Providers are consulted in order; see Retrieve for the exact semantics.
+	Providers []Provider
+}
+
+// NewChainCredentials wraps the given providers in a ChainProvider and
+// returns a Credentials object backed by it.
+func NewChainCredentials(providers ...Provider) *Credentials {
+	chain := &ChainProvider{Providers: providers}
+	return NewCredentials(chain)
+}
+
+// Retrieve returns the credentials value or error if no provider returned
+// without error.
+//
+// With the MergeCredentialsChain feature flag enabled, partial values from
+// successive providers are merged until a complete Token+Account pair is
+// assembled; otherwise the first successful provider wins outright.
+func (c *ChainProvider) Retrieve() (Value, error) {
+	var value Value
+	var errs errorList
+
+	for _, p := range c.Providers {
+		v, err := p.Retrieve()
+		if err == nil {
+			if featureflag.MergeCredentialsChain.Enabled() {
+				// Merge mode: keep filling empty fields from later providers.
+				value.Merge(v)
+				if value.IsComplete() {
+					return value, nil
+				}
+			} else {
+				// First-wins mode: stop at the first successful provider.
+				value = v
+				break
+			}
+		} else {
+			errs = append(errs, err)
+		}
+	}
+
+	// A token is the minimum viable credential; without one, report failure
+	// with the accumulated provider errors when available.
+	if value.Token == "" {
+		err := ErrNoValidProvidersFoundInChain
+		if len(errs) > 0 {
+			err = errs
+		}
+
+		return Value{ProviderName: c.String()}, err
+	}
+
+	return value, nil
+}
+
+// String returns the space-separated names of all chained providers.
+func (c *ChainProvider) String() string {
+	var out string
+	for i, p := range c.Providers {
+		if i > 0 {
+			out += " "
+		}
+		out += p.String()
+	}
+	return out
+}
+
+// An error list that satisfies the error interface.
+type errorList []error
+
+// Error returns all messages joined with newlines, without a trailing
+// newline (callers and unit tests compare exact messages).
+func (e errorList) Error() string {
+	msg := ""
+	for i, err := range e {
+		msg += fmt.Sprintf("%s", err.Error())
+		if i+1 < len(e) {
+			msg += "\n"
+		}
+	}
+	return msg
+}
diff --git a/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/credentials/provider_env.go b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/credentials/provider_env.go
new file mode 100644
index 000000000000..92b1088ee052
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/credentials/provider_env.go
@@ -0,0 +1,56 @@
+package credentials
+
+import (
+ "fmt"
+ "os"
+)
+
+const (
+	// EnvCredentialsProviderName specifies the name of the Env provider.
+	EnvCredentialsProviderName = "EnvCredentialsProvider"
+
+	// EnvCredentialsVarToken specifies the name of the environment variable
+	// points to the Spotinst Token.
+	EnvCredentialsVarToken = "SPOTINST_TOKEN"
+
+	// EnvCredentialsVarAccount specifies the name of the environment variable
+	// points to the Spotinst account ID.
+	EnvCredentialsVarAccount = "SPOTINST_ACCOUNT"
+)
+
+// ErrEnvCredentialsNotFound is returned when no credentials can be found in the
+// process's environment.
+var ErrEnvCredentialsNotFound = fmt.Errorf("spotinst: %s and %s not found "+
+	"in environment", EnvCredentialsVarToken, EnvCredentialsVarAccount)
+
+// A EnvProvider retrieves credentials from the environment variables of the
+// running process.
+//
+// Environment variables used:
+// * Token : SPOTINST_TOKEN
+// * Account : SPOTINST_ACCOUNT
+type EnvProvider struct{}
+
+// NewEnvCredentials returns Credentials sourced from process environment
+// variables.
+func NewEnvCredentials() *Credentials {
+	return NewCredentials(new(EnvProvider))
+}
+
+// Retrieve reads SPOTINST_TOKEN and SPOTINST_ACCOUNT from the environment;
+// it fails with ErrEnvCredentialsNotFound when both are unset.
+func (e *EnvProvider) Retrieve() (Value, error) {
+	v := Value{
+		Token:        os.Getenv(EnvCredentialsVarToken),
+		Account:      os.Getenv(EnvCredentialsVarAccount),
+		ProviderName: EnvCredentialsProviderName,
+	}
+	if v.IsEmpty() {
+		return v, ErrEnvCredentialsNotFound
+	}
+	return v, nil
+}
+
+// String returns the provider's name.
+func (e *EnvProvider) String() string {
+	return EnvCredentialsProviderName
+}
diff --git a/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/credentials/provider_file.go b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/credentials/provider_file.go
new file mode 100644
index 000000000000..6b7236de3b9d
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/credentials/provider_file.go
@@ -0,0 +1,207 @@
+package credentials
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "runtime"
+
+ "gopkg.in/ini.v1"
+)
+
+const (
+ // FileCredentialsProviderName specifies the name of the File provider.
+ FileCredentialsProviderName = "FileCredentialsProvider"
+
+ // FileCredentialsEnvVarFile specifies the name of the environment variable
+// that points to the location of the credentials file.
+ FileCredentialsEnvVarFile = "SPOTINST_CREDENTIALS_FILE"
+
+ // FileCredentialsEnvVarProfile specifies the name of the environment variable
+ // points to a profile name to use when loading credentials.
+ FileCredentialsEnvVarProfile = "SPOTINST_CREDENTIALS_PROFILE"
+)
+
+var (
+ // ErrFileCredentialsLoadFailed is returned when the provider is unable to load
+ // credentials from the credentials file.
+ ErrFileCredentialsLoadFailed = errors.New("spotinst: failed to load credentials file")
+
+ // ErrFileCredentialsNotFound is returned when the loaded credentials
+ // are empty.
+ ErrFileCredentialsNotFound = errors.New("spotinst: credentials file or profile is empty")
+)
+
+// DefaultProfile returns the SDK's default profile name to use when loading
+// credentials.
+func DefaultProfile() string {
+ return "default"
+}
+
+// DefaultFilename returns the SDK's default file path for the credentials file.
+//
+// Builds the config file path based on the OS's platform.
+// - Linux/Unix : $HOME/.spotinst/credentials
+// - Windows : %USERPROFILE%\.spotinst\credentials
+func DefaultFilename() string {
+ return filepath.Join(userHomeDir(), ".spotinst", "credentials")
+}
+
+// A FileProvider retrieves credentials from the current user's home directory.
+type FileProvider struct {
+ // Profile to load.
+ Profile string
+
+ // Path to the credentials file.
+ //
+ // If empty will look for FileCredentialsEnvVarFile env variable. If the
+ // env value is empty will default to current user's home directory.
+ // - Linux/Unix : $HOME/.spotinst/credentials
+ // - Windows : %USERPROFILE%\.spotinst\credentials
+ Filename string
+
+ // retrieved states if the credentials have been successfully retrieved.
+ retrieved bool
+}
+
+// NewFileCredentials returns a pointer to a new Credentials object wrapping the
+// file provider.
+func NewFileCredentials(profile, filename string) *Credentials {
+ return NewCredentials(&FileProvider{
+ Profile: profile,
+ Filename: filename,
+ })
+}
+
+// Retrieve reads and extracts the shared credentials from the current user's home
+// directory.
+func (p *FileProvider) Retrieve() (Value, error) {
+ p.retrieved = false
+
+ value, err := p.loadCredentials(p.profile(), p.filename())
+ if err != nil {
+ return value, err
+ }
+
+ if len(value.ProviderName) == 0 {
+ value.ProviderName = FileCredentialsProviderName
+ }
+
+ p.retrieved = true
+ return value, nil
+}
+
+// String returns the string representation of the provider.
+func (p *FileProvider) String() string { return FileCredentialsProviderName }
+
+// profile returns the profile to use to read the user credentials.
+func (p *FileProvider) profile() string {
+ if p.Profile == "" {
+ if p.Profile = os.Getenv(FileCredentialsEnvVarProfile); p.Profile != "" {
+ return p.Profile
+ }
+
+ p.Profile = DefaultProfile()
+ }
+
+ return p.Profile
+}
+
+// filename returns the filename to use to read the user credentials.
+func (p *FileProvider) filename() string {
+ if p.Filename == "" {
+ if p.Filename = os.Getenv(FileCredentialsEnvVarFile); p.Filename != "" {
+ return p.Filename
+ }
+
+ p.Filename = DefaultFilename()
+ }
+
+ return p.Filename
+}
+
+// loadCredentials loads the credentials from the file pointed to by filename.
+// The credentials retrieved from the profile will be returned or error. Error
+// will be returned if it fails to read from the file, or the data is invalid.
+func (p *FileProvider) loadCredentials(profile, filename string) (Value, error) {
+ var value Value
+ var iniErr, jsonErr error
+
+ if value, iniErr = p.loadCredentialsINI(profile, filename); iniErr != nil {
+ if value, jsonErr = p.loadCredentialsJSON(profile, filename); jsonErr != nil {
+ return value, fmt.Errorf("%v: %v", ErrFileCredentialsLoadFailed, iniErr)
+ }
+ }
+
+ if value.IsEmpty() {
+ return value, ErrFileCredentialsNotFound
+ }
+
+ return value, nil
+}
+
+func (p *FileProvider) loadCredentialsINI(profile, filename string) (Value, error) {
+ var value Value
+
+ config, err := ini.Load(filename)
+ if err != nil {
+ return value, err
+ }
+
+ value, err = getCredentialsFromINIProfile(profile, config)
+ if err != nil {
+ return value, err
+ }
+
+ // Try to complete missing fields with default profile.
+ if profile != DefaultProfile() && !value.IsComplete() {
+ defaultValue, err := getCredentialsFromINIProfile(DefaultProfile(), config)
+ if err == nil {
+ value.Merge(defaultValue)
+ }
+ }
+
+ return value, nil
+}
+
+func getCredentialsFromINIProfile(profile string, config *ini.File) (Value, error) {
+ var value Value
+
+ section, err := config.GetSection(profile)
+ if err != nil {
+ return value, err
+ }
+
+ if err := section.StrictMapTo(&value); err != nil {
+ return value, err
+ }
+
+ return value, nil
+}
+
+func (p *FileProvider) loadCredentialsJSON(profile, filename string) (Value, error) {
+ var value Value
+
+ f, err := os.Open(filename)
+ if err != nil {
+ return value, err
+ }
+ defer f.Close()
+
+ if err := json.NewDecoder(f).Decode(&value); err != nil {
+ return value, err
+ }
+
+ return value, nil
+}
+
+func userHomeDir() string {
+ if runtime.GOOS == "windows" { // Windows
+ return os.Getenv("USERPROFILE")
+ }
+
+ // *nix
+ return os.Getenv("HOME")
+}
diff --git a/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/credentials/provider_static.go b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/credentials/provider_static.go
new file mode 100644
index 000000000000..2ed6b3db8572
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/credentials/provider_static.go
@@ -0,0 +1,38 @@
+package credentials
+
+import (
+ "errors"
+)
+
+// StaticCredentialsProviderName specifies the name of the Static provider.
+const StaticCredentialsProviderName = "StaticCredentialsProvider"
+
+// ErrStaticCredentialsEmpty is returned when static credentials are empty.
+var ErrStaticCredentialsEmpty = errors.New("spotinst: static credentials are empty")
+
+// A StaticProvider is a set of credentials which are set programmatically.
+type StaticProvider struct {
+ Value
+}
+
+// NewStaticCredentials returns a pointer to a new Credentials object wrapping
+// a static credentials value provider.
+func NewStaticCredentials(token, account string) *Credentials {
+ return NewCredentials(&StaticProvider{Value: Value{
+ ProviderName: StaticCredentialsProviderName,
+ Token: token,
+ Account: account,
+ }})
+}
+
+// Retrieve returns the credentials or error if the credentials are invalid.
+func (s *StaticProvider) Retrieve() (Value, error) {
+ if s.IsEmpty() {
+ return s.Value, ErrStaticCredentialsEmpty
+ }
+
+ return s.Value, nil
+}
+
+// String returns the string representation of the provider.
+func (s *StaticProvider) String() string { return StaticCredentialsProviderName }
diff --git a/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/featureflag/featureflag.go b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/featureflag/featureflag.go
new file mode 100644
index 000000000000..c065c35fd670
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/featureflag/featureflag.go
@@ -0,0 +1,119 @@
+package featureflag
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+// All registered feature flags.
+var (
+ flagsMutex sync.Mutex
+ flags = make(map[string]FeatureFlag)
+)
+
+// FeatureFlag indicates whether a given feature is enabled or not.
+type FeatureFlag interface {
+ fmt.Stringer
+
+ // Name returns the name of the feature flag.
+ Name() string
+
+ // Enabled returns true if the feature is enabled.
+ Enabled() bool
+}
+
+// featureFlag represents a feature being gated.
+type featureFlag struct {
+ name string
+ enabled bool
+}
+
+// New returns a new feature flag.
+func New(name string, enabled bool) FeatureFlag {
+ flagsMutex.Lock()
+ defer flagsMutex.Unlock()
+
+ ff, ok := flags[name]
+ if !ok {
+ ff = &featureFlag{name: name}
+ flags[name] = ff
+ }
+
+ ff.(*featureFlag).enabled = enabled
+ return ff
+}
+
+// Name returns the name of the feature flag.
+func (f *featureFlag) Name() string { return f.name }
+
+// Enabled returns true if the feature is enabled.
+func (f *featureFlag) Enabled() bool { return f.enabled }
+
+// String returns the string representation of the feature flag.
+func (f *featureFlag) String() string { return fmt.Sprintf("%s=%t", f.name, f.enabled) }
+
+// Set parses and stores features from a string like "feature1=true,feature2=false".
+func Set(features string) {
+ for _, s := range strings.Split(strings.TrimSpace(features), ",") {
+ if len(s) == 0 {
+ continue
+ }
+
+ segments := strings.SplitN(s, "=", 2)
+ name := strings.TrimSpace(segments[0])
+
+ enabled := true
+ if len(segments) > 1 {
+ value := strings.TrimSpace(segments[1])
+ enabled, _ = strconv.ParseBool(value) // ignore errors and fallback to `false`
+ }
+
+ New(name, enabled)
+ }
+}
+
+// Get returns a specific feature flag by name.
+func Get(name string) FeatureFlag {
+ flagsMutex.Lock()
+ defer flagsMutex.Unlock()
+
+ f, ok := flags[name]
+ if !ok {
+ f = new(featureFlag)
+ }
+
+ return &featureFlag{
+ name: name,
+ enabled: f.Enabled(),
+ }
+}
+
+// All returns a list of all known feature flags.
+func All() FeatureFlags {
+ flagsMutex.Lock()
+ defer flagsMutex.Unlock()
+
+ features := make(FeatureFlags, 0, len(flags))
+ for name, flag := range flags {
+ features = append(features, &featureFlag{
+ name: name,
+ enabled: flag.Enabled(),
+ })
+ }
+
+ return features
+}
+
+// FeatureFlags defines a list of feature flags.
+type FeatureFlags []FeatureFlag
+
+// String returns the string representation of a list of feature flags.
+func (f FeatureFlags) String() string {
+ features := make([]string, len(f))
+ for i, ff := range f {
+ features[i] = ff.String()
+ }
+ return strings.Join(features, ",")
+}
diff --git a/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/featureflag/features.go b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/featureflag/features.go
new file mode 100644
index 000000000000..04a5e132ff67
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/featureflag/features.go
@@ -0,0 +1,24 @@
+package featureflag
+
+import "os"
+
+// Default features.
+var (
+ // Toggle the usage of merging credentials in chain provider.
+ //
+ // This feature allows users to configure their credentials using multiple
+ // providers. For example, a token can be statically configured using a file,
+ // while the account can be dynamically configured via environment variables.
+ MergeCredentialsChain = New("MergeCredentialsChain", false)
+)
+
+// EnvVar is the name of the environment variable to read feature flags from.
+// The value should be a comma-separated list of K=V flags, while V is optional.
+const EnvVar = "SPOTINST_FEATURE_FLAGS"
+
+// setFromEnv reads an environment variable and sets features from its value.
+func setFromEnv() { Set(os.Getenv(EnvVar)) }
+
+func init() {
+ setFromEnv()
+}
diff --git a/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/log/log.go b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/log/log.go
new file mode 100644
index 000000000000..f207ac14f7fa
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/log/log.go
@@ -0,0 +1,25 @@
+package log
+
+import (
+ "log"
+ "os"
+)
+
+// DefaultStdLogger represents the default logger which will write log messages
+// to stderr, and use the same formatting runes as the stdlib log.Logger.
+var DefaultStdLogger Logger = log.New(os.Stderr, "", log.LstdFlags)
+
+// Logger specifies the interface for all log operations.
+type Logger interface {
+ Printf(format string, args ...interface{})
+}
+
+// The LoggerFunc type is an adapter to allow the use of ordinary functions as
+// Logger. If f is a function with the appropriate signature, LoggerFunc(f) is
+// a Logger that calls f.
+type LoggerFunc func(format string, args ...interface{})
+
+// Printf calls f(format, args).
+func (f LoggerFunc) Printf(format string, args ...interface{}) {
+ f(format, args...)
+}
diff --git a/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/session/session.go b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/session/session.go
new file mode 100644
index 000000000000..1911a4553185
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/session/session.go
@@ -0,0 +1,22 @@
+package session
+
+import (
+ "github.com/spotinst/spotinst-sdk-go/spotinst"
+)
+
+// A Session provides a central location to create service clients.
+//
+// Sessions are safe to create service clients concurrently, but it is not safe
+// to mutate the Session concurrently.
+type Session struct {
+ Config *spotinst.Config
+}
+
+// New creates a new instance of Session. Once the Session is created it
+// can be mutated to modify the Config. The Session is safe to be read
+// concurrently, but it should not be written to concurrently.
+func New(cfgs ...*spotinst.Config) *Session {
+ s := &Session{Config: spotinst.DefaultConfig()}
+ s.Config.Merge(cfgs...)
+ return s
+}
diff --git a/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/types.go b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/types.go
new file mode 100644
index 000000000000..2c10b87c0a7a
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/types.go
@@ -0,0 +1,357 @@
+package spotinst
+
+import "time"
+
+// String returns a pointer to the string value passed in.
+func String(v string) *string {
+ return &v
+}
+
+// StringValue returns the value of the string pointer passed in or
+// "" if the pointer is nil.
+func StringValue(v *string) string {
+ if v != nil {
+ return *v
+ }
+ return ""
+}
+
+// StringSlice converts a slice of string values into a slice of
+// string pointers
+func StringSlice(src []string) []*string {
+ dst := make([]*string, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// StringValueSlice converts a slice of string pointers into a slice of
+// string values
+func StringValueSlice(src []*string) []string {
+ dst := make([]string, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// StringMap converts a string map of string values into a string
+// map of string pointers
+func StringMap(src map[string]string) map[string]*string {
+ dst := make(map[string]*string)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// StringValueMap converts a string map of string pointers into a string
+// map of string values
+func StringValueMap(src map[string]*string) map[string]string {
+ dst := make(map[string]string)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Bool returns a pointer to the bool value passed in.
+func Bool(v bool) *bool {
+ return &v
+}
+
+// BoolValue returns the value of the bool pointer passed in or
+// false if the pointer is nil.
+func BoolValue(v *bool) bool {
+ if v != nil {
+ return *v
+ }
+ return false
+}
+
+// BoolSlice converts a slice of bool values into a slice of
+// bool pointers
+func BoolSlice(src []bool) []*bool {
+ dst := make([]*bool, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// BoolValueSlice converts a slice of bool pointers into a slice of
+// bool values
+func BoolValueSlice(src []*bool) []bool {
+ dst := make([]bool, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// BoolMap converts a string map of bool values into a string
+// map of bool pointers
+func BoolMap(src map[string]bool) map[string]*bool {
+ dst := make(map[string]*bool)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// BoolValueMap converts a string map of bool pointers into a string
+// map of bool values
+func BoolValueMap(src map[string]*bool) map[string]bool {
+ dst := make(map[string]bool)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Int returns a pointer to the int value passed in.
+func Int(v int) *int {
+ return &v
+}
+
+// IntValue returns the value of the int pointer passed in or
+// 0 if the pointer is nil.
+func IntValue(v *int) int {
+ if v != nil {
+ return *v
+ }
+ return 0
+}
+
+// IntSlice converts a slice of int values into a slice of
+// int pointers.
+func IntSlice(src []int) []*int {
+ dst := make([]*int, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// IntValueSlice converts a slice of int pointers into a slice of
+// int values.
+func IntValueSlice(src []*int) []int {
+ dst := make([]int, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// IntMap converts a string map of int values into a string
+// map of int pointers.
+func IntMap(src map[string]int) map[string]*int {
+ dst := make(map[string]*int)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// IntValueMap converts a string map of int pointers into a string
+// map of int values.
+func IntValueMap(src map[string]*int) map[string]int {
+ dst := make(map[string]int)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Int64 returns a pointer to the int64 value passed in.
+func Int64(v int64) *int64 {
+ return &v
+}
+
+// Int64Value returns the value of the int64 pointer passed in or
+// 0 if the pointer is nil.
+func Int64Value(v *int64) int64 {
+ if v != nil {
+ return *v
+ }
+ return 0
+}
+
+// Int64Slice converts a slice of int64 values into a slice of
+// int64 pointers.
+func Int64Slice(src []int64) []*int64 {
+ dst := make([]*int64, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// Int64ValueSlice converts a slice of int64 pointers into a slice of
+// int64 values.
+func Int64ValueSlice(src []*int64) []int64 {
+ dst := make([]int64, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// Int64Map converts a string map of int64 values into a string
+// map of int64 pointers.
+func Int64Map(src map[string]int64) map[string]*int64 {
+ dst := make(map[string]*int64)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// Int64ValueMap converts a string map of int64 pointers into a string
+// map of int64 values.
+func Int64ValueMap(src map[string]*int64) map[string]int64 {
+ dst := make(map[string]int64)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Float64 returns a pointer to the float64 value passed in.
+func Float64(v float64) *float64 {
+ return &v
+}
+
+// Float64Value returns the value of the float64 pointer passed in or
+// 0 if the pointer is nil.
+func Float64Value(v *float64) float64 {
+ if v != nil {
+ return *v
+ }
+ return 0
+}
+
+// Float64Slice converts a slice of float64 values into a slice of
+// float64 pointers.
+func Float64Slice(src []float64) []*float64 {
+ dst := make([]*float64, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// Float64ValueSlice converts a slice of float64 pointers into a slice of
+// float64 values.
+func Float64ValueSlice(src []*float64) []float64 {
+ dst := make([]float64, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// Float64Map converts a string map of float64 values into a string
+// map of float64 pointers.
+func Float64Map(src map[string]float64) map[string]*float64 {
+ dst := make(map[string]*float64)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// Float64ValueMap converts a string map of float64 pointers into a string
+// map of float64 values.
+func Float64ValueMap(src map[string]*float64) map[string]float64 {
+ dst := make(map[string]float64)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Time returns a pointer to the time.Time value passed in.
+func Time(v time.Time) *time.Time {
+ return &v
+}
+
+// TimeValue returns the value of the time.Time pointer passed in or
+// time.Time{} if the pointer is nil.
+func TimeValue(v *time.Time) time.Time {
+ if v != nil {
+ return *v
+ }
+ return time.Time{}
+}
+
+// TimeSlice converts a slice of time.Time values into a slice of
+// time.Time pointers.
+func TimeSlice(src []time.Time) []*time.Time {
+ dst := make([]*time.Time, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// TimeValueSlice converts a slice of time.Time pointers into a slice of
+// time.Time values.
+func TimeValueSlice(src []*time.Time) []time.Time {
+ dst := make([]time.Time, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// TimeMap converts a string map of time.Time values into a string
+// map of time.Time pointers.
+func TimeMap(src map[string]time.Time) map[string]*time.Time {
+ dst := make(map[string]*time.Time)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// TimeValueMap converts a string map of time.Time pointers into a string
+// map of time.Time values.
+func TimeValueMap(src map[string]*time.Time) map[string]time.Time {
+ dst := make(map[string]time.Time)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
diff --git a/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/jsonutil/json.go b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/jsonutil/json.go
new file mode 100644
index 000000000000..e6a3ae6642a8
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/jsonutil/json.go
@@ -0,0 +1,237 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package jsonutil
+
+import (
+ "encoding/json"
+ "fmt"
+ "reflect"
+ "strings"
+ "unsafe"
+)
+
+// MarshalJSON returns a JSON encoding of schema containing only selected fields.
+// A field is selected if any of the following is true:
+// - it has a non-empty value
+// - its field name is present in forceSendFields and it is not a nil pointer or nil interface
+// - its field name is present in nullFields.
+//
+// The JSON key for each selected field is taken from the field's json: struct tag.
+func MarshalJSON(schema interface{}, forceSendFields, nullFields []string) ([]byte, error) {
+ mustInclude := make(map[string]struct{})
+ for _, f := range forceSendFields {
+ mustInclude[f] = struct{}{}
+ }
+
+ useNull := make(map[string]struct{})
+ for _, f := range nullFields {
+ useNull[f] = struct{}{}
+ }
+
+ dataMap, err := schemaToMap(schema, mustInclude, useNull)
+ if err != nil {
+ return nil, err
+ }
+
+ return json.Marshal(dataMap)
+}
+
+func schemaToMap(schema interface{}, mustInclude, useNull map[string]struct{}) (map[string]interface{}, error) {
+ m := make(map[string]interface{})
+ s := reflect.ValueOf(schema)
+ st := s.Type()
+
+ for i := 0; i < s.NumField(); i++ {
+ sv := s.Field(i)
+ sf := st.Field(i)
+
+ isUnexported := sf.PkgPath != ""
+ if sf.Anonymous {
+ t := sf.Type
+ if t.Kind() == reflect.Ptr {
+ t = t.Elem()
+ }
+ if isUnexported && t.Kind() != reflect.Struct {
+ // Ignore embedded fields of unexported non-struct types.
+ continue
+ }
+
+ // Allow access to unexported fields by creating an addressable copy.
+ sfe := reflect.New(sf.Type).Elem()
+ sfe.Set(sv)
+
+ // Get a copy of `forceSendFields` slice.
+ var forceSendFields []string
+ if f := sfe.FieldByName("forceSendFields"); f.IsValid() {
+ i := reflect.NewAt(f.Type(), unsafe.Pointer(f.UnsafeAddr())).Elem().Interface()
+ if v, ok := i.([]string); ok {
+ forceSendFields = v
+ }
+ }
+
+ // Get a copy of `nullFields` slice.
+ var nullFields []string
+ if f := sfe.FieldByName("nullFields"); f.IsValid() {
+ i := reflect.NewAt(f.Type(), unsafe.Pointer(f.UnsafeAddr())).Elem().Interface()
+ if v, ok := i.([]string); ok {
+ nullFields = v
+ }
+ }
+
+ // Marshal the embedded field.
+ b, err := MarshalJSON(sv.Interface(), forceSendFields, nullFields)
+ if err != nil {
+ return nil, fmt.Errorf("failed to marshal anonymous field %q: %v", sf.Name, err)
+ }
+
+ // Append all fields to the output map.
+ im := make(map[string]interface{})
+ json.Unmarshal(b, &im)
+ for k, v := range im {
+ m[k] = v
+ }
+
+ // Nothing else to do.
+ continue
+ } else if isUnexported {
+ // Ignore unexported non-embedded fields.
+ continue
+ }
+
+ jsonTag := sf.Tag.Get("json")
+ if jsonTag == "" {
+ continue
+ }
+
+ tag, err := parseJSONTag(jsonTag)
+ if err != nil {
+ return nil, err
+ }
+ if tag.ignore {
+ continue
+ }
+
+ if _, ok := useNull[sf.Name]; ok {
+ if !isEmptyValue(sv) {
+ return nil, fmt.Errorf("field %q in `nullFields` has non-empty value", sf.Name)
+ }
+ m[tag.apiName] = nil
+ continue
+ }
+ if !includeField(sv, sf, mustInclude) {
+ continue
+ }
+
+ // nil maps are treated as empty maps.
+ if sf.Type.Kind() == reflect.Map && sv.IsNil() {
+ m[tag.apiName] = map[string]string{}
+ continue
+ }
+
+ // nil slices are treated as empty slices.
+ if sf.Type.Kind() == reflect.Slice && sv.IsNil() {
+ m[tag.apiName] = []bool{}
+ continue
+ }
+
+ if tag.stringFormat {
+ m[tag.apiName] = formatAsString(sv, sf.Type.Kind())
+ } else {
+ m[tag.apiName] = sv.Interface()
+ }
+ }
+
+ return m, nil
+}
+
+// formatAsString returns a string representation of v, dereferencing it first if possible.
+func formatAsString(v reflect.Value, kind reflect.Kind) string {
+ if kind == reflect.Ptr && !v.IsNil() {
+ v = v.Elem()
+ }
+
+ return fmt.Sprintf("%v", v.Interface())
+}
+
+// jsonTag represents a restricted version of the struct tag format used by encoding/json.
+// It is used to describe the JSON encoding of fields in a Schema struct.
+type jsonTag struct {
+ apiName string
+ stringFormat bool
+ ignore bool
+}
+
+// parseJSONTag parses a restricted version of the struct tag format used by encoding/json.
+// The format of the tag must match that generated by the Schema.writeSchemaStruct method
+// in the api generator.
+func parseJSONTag(val string) (*jsonTag, error) {
+ if val == "-" {
+ return &jsonTag{ignore: true}, nil
+ }
+
+ i := strings.Index(val, ",")
+ if i == -1 || val[:i] == "" {
+ return nil, fmt.Errorf("malformed json tag: %s", val)
+ }
+
+ tag := &jsonTag{
+ apiName: val[:i],
+ }
+
+ switch val[i+1:] {
+ case "omitempty":
+ case "omitempty,string":
+ tag.stringFormat = true
+ default:
+ return nil, fmt.Errorf("malformed json tag: %s", val)
+ }
+
+ return tag, nil
+}
+
+// Reports whether the struct field "f" with value "v" should be included in JSON output.
+func includeField(v reflect.Value, f reflect.StructField, mustInclude map[string]struct{}) bool {
+ // The regular JSON encoding of a nil pointer is "null", which means "delete this field".
+ // Therefore, we could enable field deletion by honoring pointer fields' presence in the mustInclude set.
+ // However, many fields are not pointers, so there would be no way to delete these fields.
+ // Rather than partially supporting field deletion, we ignore mustInclude for nil pointer fields.
+ // Deletion will be handled by a separate mechanism.
+ if f.Type.Kind() == reflect.Ptr && v.IsNil() {
+ return false
+ }
+
+ // The "any" type is represented as an interface{}. If this interface
+ // is nil, there is no reasonable representation to send. We ignore
+ // these fields, for the same reasons as given above for pointers.
+ if f.Type.Kind() == reflect.Interface && v.IsNil() {
+ return false
+ }
+
+ _, ok := mustInclude[f.Name]
+ return ok || !isEmptyValue(v)
+}
+
+// isEmptyValue reports whether v is the empty value for its type. This
+// implementation is based on that of the encoding/json package, but its
+// correctness does not depend on it being identical. What's important is that
+// this function return false in situations where v should not be sent as part
+// of a PATCH operation.
+func isEmptyValue(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+ return v.Len() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ }
+ return false
+}
diff --git a/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/stringutil/stringutil.go b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/stringutil/stringutil.go
new file mode 100644
index 000000000000..87d89e7f6afb
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/stringutil/stringutil.go
@@ -0,0 +1,69 @@
+package stringutil
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "reflect"
+)
+
+// Stringify attempts to create a reasonable string representation of types.
+// It does things like resolve pointers to their values and omits struct
+// fields with nil values.
+func Stringify(message interface{}) string {
+ var buf bytes.Buffer
+ v := reflect.ValueOf(message)
+ stringifyValue(&buf, v)
+ return buf.String()
+}
+
+// stringifyValue was heavily inspired by the goprotobuf library.
+func stringifyValue(w io.Writer, val reflect.Value) {
+ if val.Kind() == reflect.Ptr && val.IsNil() {
+ w.Write([]byte(""))
+ return
+ }
+ v := reflect.Indirect(val)
+ switch v.Kind() {
+ case reflect.String:
+ fmt.Fprintf(w, `"%s"`, v)
+ case reflect.Slice:
+ w.Write([]byte{'['})
+ for i := 0; i < v.Len(); i++ {
+ if i > 0 {
+ w.Write([]byte{' '})
+ }
+ stringifyValue(w, v.Index(i))
+ }
+ w.Write([]byte{']'})
+ return
+ case reflect.Struct:
+ if v.Type().Name() != "" {
+ w.Write([]byte(v.Type().String()))
+ }
+ w.Write([]byte{'{'})
+ var sep bool
+ for i := 0; i < v.NumField(); i++ {
+ fv := v.Field(i)
+ if fv.Kind() == reflect.Ptr && fv.IsNil() {
+ continue
+ }
+ if fv.Kind() == reflect.Slice && fv.IsNil() {
+ continue
+ }
+ if sep {
+ w.Write([]byte(", "))
+ } else {
+ sep = true
+ }
+ w.Write([]byte(v.Type().Field(i).Name))
+ w.Write([]byte{':'})
+ stringifyValue(w, fv)
+ }
+ w.Write([]byte{'}'})
+ default:
+ if v.CanInterface() {
+ fmt.Fprint(w, v.Interface())
+ }
+ }
+}
diff --git a/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/uritemplates/LICENSE b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/uritemplates/LICENSE
new file mode 100644
index 000000000000..de9c88cb65cb
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/uritemplates/LICENSE
@@ -0,0 +1,18 @@
+Copyright (c) 2013 Joshua Tacoma
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/uritemplates/uritemplates.go b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/uritemplates/uritemplates.go
new file mode 100644
index 000000000000..6f2e00ab20dc
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/uritemplates/uritemplates.go
@@ -0,0 +1,360 @@
+// Copyright 2013 Joshua Tacoma. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package uritemplates is a level 4 implementation of RFC 6570 (URI
+// Template, http://tools.ietf.org/html/rfc6570).
+//
+// To use uritemplates, parse a template string and expand it with a value
+// map:
+//
+// template, _ := uritemplates.Parse("https://api.github.com/repos{/user,repo}")
+// values := make(map[string]interface{})
+// values["user"] = "jtacoma"
+// values["repo"] = "uritemplates"
+// expanded, _ := template.ExpandString(values)
+// fmt.Printf(expanded)
+package uritemplates
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "reflect"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+var (
+ unreserved = regexp.MustCompile("[^A-Za-z0-9\\-._~]")
+ reserved = regexp.MustCompile("[^A-Za-z0-9\\-._~:/?#[\\]@!$&'()*+,;=]")
+ validname = regexp.MustCompile("^([A-Za-z0-9_\\.]|%[0-9A-Fa-f][0-9A-Fa-f])+$")
+ hex = []byte("0123456789ABCDEF")
+)
+
+func pctEncode(src []byte) []byte {
+ dst := make([]byte, len(src)*3)
+ for i, b := range src {
+ buf := dst[i*3 : i*3+3]
+ buf[0] = 0x25
+ buf[1] = hex[b/16]
+ buf[2] = hex[b%16]
+ }
+ return dst
+}
+
+func escape(s string, allowReserved bool) (escaped string) {
+ if allowReserved {
+ escaped = string(reserved.ReplaceAllFunc([]byte(s), pctEncode))
+ } else {
+ escaped = string(unreserved.ReplaceAllFunc([]byte(s), pctEncode))
+ }
+ return escaped
+}
+
+type Values map[string]interface{}
+
+// A Template is a parsed representation of a URI template.
+type Template struct {
+ raw string
+ parts []templatePart
+}
+
+// Parse parses a URI template string into a Template object.
+func Parse(rawtemplate string) (template *Template, err error) {
+ template = new(Template)
+ template.raw = rawtemplate
+ split := strings.Split(rawtemplate, "{")
+ template.parts = make([]templatePart, len(split)*2-1)
+ for i, s := range split {
+ if i == 0 {
+ if strings.Contains(s, "}") {
+ err = errors.New("unexpected }")
+ break
+ }
+ template.parts[i].raw = s
+ } else {
+ subsplit := strings.Split(s, "}")
+ if len(subsplit) != 2 {
+ err = errors.New("malformed template")
+ break
+ }
+ expression := subsplit[0]
+ template.parts[i*2-1], err = parseExpression(expression)
+ if err != nil {
+ break
+ }
+ template.parts[i*2].raw = subsplit[1]
+ }
+ }
+ if err != nil {
+ template = nil
+ }
+ return template, err
+}
+
+type templatePart struct {
+ raw string
+ terms []templateTerm
+ first string
+ sep string
+ named bool
+ ifemp string
+ allowReserved bool
+}
+
+type templateTerm struct {
+ name string
+ explode bool
+ truncate int
+}
+
+func parseExpression(expression string) (result templatePart, err error) {
+ switch expression[0] {
+ case '+':
+ result.sep = ","
+ result.allowReserved = true
+ expression = expression[1:]
+ case '.':
+ result.first = "."
+ result.sep = "."
+ expression = expression[1:]
+ case '/':
+ result.first = "/"
+ result.sep = "/"
+ expression = expression[1:]
+ case ';':
+ result.first = ";"
+ result.sep = ";"
+ result.named = true
+ expression = expression[1:]
+ case '?':
+ result.first = "?"
+ result.sep = "&"
+ result.named = true
+ result.ifemp = "="
+ expression = expression[1:]
+ case '&':
+ result.first = "&"
+ result.sep = "&"
+ result.named = true
+ result.ifemp = "="
+ expression = expression[1:]
+ case '#':
+ result.first = "#"
+ result.sep = ","
+ result.allowReserved = true
+ expression = expression[1:]
+ default:
+ result.sep = ","
+ }
+ rawterms := strings.Split(expression, ",")
+ result.terms = make([]templateTerm, len(rawterms))
+ for i, raw := range rawterms {
+ result.terms[i], err = parseTerm(raw)
+ if err != nil {
+ break
+ }
+ }
+ return result, err
+}
+
+func parseTerm(term string) (result templateTerm, err error) {
+ if strings.HasSuffix(term, "*") {
+ result.explode = true
+ term = term[:len(term)-1]
+ }
+ split := strings.Split(term, ":")
+ if len(split) == 1 {
+ result.name = term
+ } else if len(split) == 2 {
+ result.name = split[0]
+ var parsed int64
+ parsed, err = strconv.ParseInt(split[1], 10, 0)
+ result.truncate = int(parsed)
+ } else {
+ err = errors.New("multiple colons in same term")
+ }
+ if !validname.MatchString(result.name) {
+ err = errors.New("not a valid name: " + result.name)
+ }
+ if result.explode && result.truncate > 0 {
+ err = errors.New("both explode and prefix modifers on same term")
+ }
+ return result, err
+}
+
+// Expand expands a URI template with a set of values to produce a string.
+func (self *Template) Expand(value interface{}) (string, error) {
+ values, ismap := value.(Values)
+ if !ismap {
+ if m, ismap := struct2map(value); !ismap {
+ return "", errors.New("expected Values, struct, or pointer to struct")
+ } else {
+ return self.Expand(m)
+ }
+ }
+ var buf bytes.Buffer
+ for _, p := range self.parts {
+ err := p.expand(&buf, values)
+ if err != nil {
+ return "", err
+ }
+ }
+ return buf.String(), nil
+}
+
+func (self *templatePart) expand(buf *bytes.Buffer, values Values) error {
+ if len(self.raw) > 0 {
+ buf.WriteString(self.raw)
+ return nil
+ }
+ var zeroLen = buf.Len()
+ buf.WriteString(self.first)
+ var firstLen = buf.Len()
+ for _, term := range self.terms {
+ value, exists := values[term.name]
+ if !exists {
+ continue
+ }
+ if buf.Len() != firstLen {
+ buf.WriteString(self.sep)
+ }
+ switch v := value.(type) {
+ case string:
+ self.expandString(buf, term, v)
+ case []interface{}:
+ self.expandArray(buf, term, v)
+ case map[string]interface{}:
+ if term.truncate > 0 {
+ return errors.New("cannot truncate a map expansion")
+ }
+ self.expandMap(buf, term, v)
+ default:
+ if m, ismap := struct2map(value); ismap {
+ if term.truncate > 0 {
+ return errors.New("cannot truncate a map expansion")
+ }
+ self.expandMap(buf, term, m)
+ } else {
+ str := fmt.Sprintf("%v", value)
+ self.expandString(buf, term, str)
+ }
+ }
+ }
+ if buf.Len() == firstLen {
+ original := buf.Bytes()[:zeroLen]
+ buf.Reset()
+ buf.Write(original)
+ }
+ return nil
+}
+
+func (self *templatePart) expandName(buf *bytes.Buffer, name string, empty bool) {
+ if self.named {
+ buf.WriteString(name)
+ if empty {
+ buf.WriteString(self.ifemp)
+ } else {
+ buf.WriteString("=")
+ }
+ }
+}
+
+func (self *templatePart) expandString(buf *bytes.Buffer, t templateTerm, s string) {
+ if len(s) > t.truncate && t.truncate > 0 {
+ s = s[:t.truncate]
+ }
+ self.expandName(buf, t.name, len(s) == 0)
+ buf.WriteString(escape(s, self.allowReserved))
+}
+
+func (self *templatePart) expandArray(buf *bytes.Buffer, t templateTerm, a []interface{}) {
+ if len(a) == 0 {
+ return
+ } else if !t.explode {
+ self.expandName(buf, t.name, false)
+ }
+ for i, value := range a {
+ if t.explode && i > 0 {
+ buf.WriteString(self.sep)
+ } else if i > 0 {
+ buf.WriteString(",")
+ }
+ var s string
+ switch v := value.(type) {
+ case string:
+ s = v
+ default:
+ s = fmt.Sprintf("%v", v)
+ }
+ if len(s) > t.truncate && t.truncate > 0 {
+ s = s[:t.truncate]
+ }
+ if self.named && t.explode {
+ self.expandName(buf, t.name, len(s) == 0)
+ }
+ buf.WriteString(escape(s, self.allowReserved))
+ }
+}
+
+func (self *templatePart) expandMap(buf *bytes.Buffer, t templateTerm, m map[string]interface{}) {
+ if len(m) == 0 {
+ return
+ }
+ if !t.explode {
+ self.expandName(buf, t.name, len(m) == 0)
+ }
+ var firstLen = buf.Len()
+ for k, value := range m {
+ if firstLen != buf.Len() {
+ if t.explode {
+ buf.WriteString(self.sep)
+ } else {
+ buf.WriteString(",")
+ }
+ }
+ var s string
+ switch v := value.(type) {
+ case string:
+ s = v
+ default:
+ s = fmt.Sprintf("%v", v)
+ }
+ if t.explode {
+ buf.WriteString(escape(k, self.allowReserved))
+ buf.WriteRune('=')
+ buf.WriteString(escape(s, self.allowReserved))
+ } else {
+ buf.WriteString(escape(k, self.allowReserved))
+ buf.WriteRune(',')
+ buf.WriteString(escape(s, self.allowReserved))
+ }
+ }
+}
+
+func struct2map(v interface{}) (map[string]interface{}, bool) {
+ value := reflect.ValueOf(v)
+ switch value.Type().Kind() {
+ case reflect.Ptr:
+ return struct2map(value.Elem().Interface())
+ case reflect.Struct:
+ m := make(map[string]interface{})
+ for i := 0; i < value.NumField(); i++ {
+ tag := value.Type().Field(i).Tag
+ var name string
+ if strings.Contains(string(tag), ":") {
+ name = tag.Get("uri")
+ } else {
+ name = strings.TrimSpace(string(tag))
+ }
+ if len(name) == 0 {
+ name = value.Type().Field(i).Name
+ }
+ m[name] = value.Field(i).Interface()
+ }
+ return m, true
+ }
+ return nil, false
+}
diff --git a/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/uritemplates/utils.go b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/uritemplates/utils.go
new file mode 100644
index 000000000000..bde51c857bf6
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/uritemplates/utils.go
@@ -0,0 +1,9 @@
+package uritemplates
+
+func Expand(path string, values Values) (string, error) {
+ template, err := Parse(path)
+ if err != nil {
+ return "", err
+ }
+ return template.Expand(values)
+}
diff --git a/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/useragent/useragent.go b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/useragent/useragent.go
new file mode 100644
index 000000000000..7fd9561dfe6d
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/util/useragent/useragent.go
@@ -0,0 +1,50 @@
+package useragent
+
+import (
+ "fmt"
+ "strings"
+)
+
+// UserAgent represents a User-Agent header.
+type UserAgent struct {
+ // Product identifier; its name or development codename.
+ Product string `json:"product"`
+ // Version number of the product.
+ Version string `json:"version"`
+ // Zero or more comments containing more details.
+ Comment []string `json:"comment"`
+}
+
+// UserAgents represents one or more UserAgents.
+type UserAgents []UserAgent
+
+// New returns a UserAgent.
+func New(product, version string, comment ...string) UserAgent {
+ return UserAgent{
+ Product: product,
+ Version: version,
+ Comment: comment,
+ }
+}
+
+// String returns the string representation of UserAgent.
+func (ua UserAgent) String() string {
+ s := fmt.Sprintf("%s/%s", ua.Product, ua.Version)
+
+ if len(ua.Comment) > 0 {
+ s += fmt.Sprintf(" (%s)", strings.Join(ua.Comment, "; "))
+ }
+
+ return s
+}
+
+// String concatenates all the user-defined UserAgents.
+func (uas UserAgents) String() string {
+ ss := make([]string, len(uas))
+
+ for i, ua := range uas {
+ ss[i] = ua.String()
+ }
+
+ return strings.Join(ss, " ")
+}
diff --git a/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/version.go b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/version.go
new file mode 100644
index 000000000000..00c7c4c13432
--- /dev/null
+++ b/cluster-autoscaler/vendor/github.com/spotinst/spotinst-sdk-go/spotinst/version.go
@@ -0,0 +1,7 @@
+package spotinst
+
+// SDKVersion is the current version of the SDK.
+const SDKVersion = "1.181.0"
+
+// SDKName is the name of the SDK.
+const SDKName = "spotinst-sdk-go"
diff --git a/cluster-autoscaler/vendor/gopkg.in/ini.v1/.editorconfig b/cluster-autoscaler/vendor/gopkg.in/ini.v1/.editorconfig
new file mode 100644
index 000000000000..4a2d9180f96f
--- /dev/null
+++ b/cluster-autoscaler/vendor/gopkg.in/ini.v1/.editorconfig
@@ -0,0 +1,12 @@
+# http://editorconfig.org
+
+root = true
+
+[*]
+charset = utf-8
+end_of_line = lf
+insert_final_newline = true
+trim_trailing_whitespace = true
+
+[*_test.go]
+trim_trailing_whitespace = false
diff --git a/cluster-autoscaler/vendor/gopkg.in/ini.v1/.gitignore b/cluster-autoscaler/vendor/gopkg.in/ini.v1/.gitignore
new file mode 100644
index 000000000000..588388bda28d
--- /dev/null
+++ b/cluster-autoscaler/vendor/gopkg.in/ini.v1/.gitignore
@@ -0,0 +1,7 @@
+testdata/conf_out.ini
+ini.sublime-project
+ini.sublime-workspace
+testdata/conf_reflect.ini
+.idea
+/.vscode
+.DS_Store
diff --git a/cluster-autoscaler/vendor/gopkg.in/ini.v1/.golangci.yml b/cluster-autoscaler/vendor/gopkg.in/ini.v1/.golangci.yml
new file mode 100644
index 000000000000..631e369254d3
--- /dev/null
+++ b/cluster-autoscaler/vendor/gopkg.in/ini.v1/.golangci.yml
@@ -0,0 +1,27 @@
+linters-settings:
+ staticcheck:
+ checks: [
+ "all",
+ "-SA1019" # There are valid use cases of strings.Title
+ ]
+ nakedret:
+ max-func-lines: 0 # Disallow any unnamed return statement
+
+linters:
+ enable:
+ - deadcode
+ - errcheck
+ - gosimple
+ - govet
+ - ineffassign
+ - staticcheck
+ - structcheck
+ - typecheck
+ - unused
+ - varcheck
+ - nakedret
+ - gofmt
+ - rowserrcheck
+ - unconvert
+ - goimports
+ - unparam
diff --git a/cluster-autoscaler/vendor/gopkg.in/ini.v1/LICENSE b/cluster-autoscaler/vendor/gopkg.in/ini.v1/LICENSE
new file mode 100644
index 000000000000..d361bbcdf5c9
--- /dev/null
+++ b/cluster-autoscaler/vendor/gopkg.in/ini.v1/LICENSE
@@ -0,0 +1,191 @@
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and
+distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright
+owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities
+that control, are controlled by, or are under common control with that entity.
+For the purposes of this definition, "control" means (i) the power, direct or
+indirect, to cause the direction or management of such entity, whether by
+contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
+outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising
+permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including
+but not limited to software source code, documentation source, and configuration
+files.
+
+"Object" form shall mean any form resulting from mechanical transformation or
+translation of a Source form, including but not limited to compiled object code,
+generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form, made
+available under the License, as indicated by a copyright notice that is included
+in or attached to the work (an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form, that
+is based on (or derived from) the Work and for which the editorial revisions,
+annotations, elaborations, or other modifications represent, as a whole, an
+original work of authorship. For the purposes of this License, Derivative Works
+shall not include works that remain separable from, or merely link (or bind by
+name) to the interfaces of, the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original version
+of the Work and any modifications or additions to that Work or Derivative Works
+thereof, that is intentionally submitted to Licensor for inclusion in the Work
+by the copyright owner or by an individual or Legal Entity authorized to submit
+on behalf of the copyright owner. For the purposes of this definition,
+"submitted" means any form of electronic, verbal, or written communication sent
+to the Licensor or its representatives, including but not limited to
+communication on electronic mailing lists, source code control systems, and
+issue tracking systems that are managed by, or on behalf of, the Licensor for
+the purpose of discussing and improving the Work, but excluding communication
+that is conspicuously marked or otherwise designated in writing by the copyright
+owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
+of whom a Contribution has been received by Licensor and subsequently
+incorporated within the Work.
+
+2. Grant of Copyright License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable copyright license to reproduce, prepare Derivative Works of,
+publicly display, publicly perform, sublicense, and distribute the Work and such
+Derivative Works in Source or Object form.
+
+3. Grant of Patent License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable (except as stated in this section) patent license to make, have
+made, use, offer to sell, sell, import, and otherwise transfer the Work, where
+such license applies only to those patent claims licensable by such Contributor
+that are necessarily infringed by their Contribution(s) alone or by combination
+of their Contribution(s) with the Work to which such Contribution(s) was
+submitted. If You institute patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Work or a
+Contribution incorporated within the Work constitutes direct or contributory
+patent infringement, then any patent licenses granted to You under this License
+for that Work shall terminate as of the date such litigation is filed.
+
+4. Redistribution.
+
+You may reproduce and distribute copies of the Work or Derivative Works thereof
+in any medium, with or without modifications, and in Source or Object form,
+provided that You meet the following conditions:
+
+You must give any other recipients of the Work or Derivative Works a copy of
+this License; and
+You must cause any modified files to carry prominent notices stating that You
+changed the files; and
+You must retain, in the Source form of any Derivative Works that You distribute,
+all copyright, patent, trademark, and attribution notices from the Source form
+of the Work, excluding those notices that do not pertain to any part of the
+Derivative Works; and
+If the Work includes a "NOTICE" text file as part of its distribution, then any
+Derivative Works that You distribute must include a readable copy of the
+attribution notices contained within such NOTICE file, excluding those notices
+that do not pertain to any part of the Derivative Works, in at least one of the
+following places: within a NOTICE text file distributed as part of the
+Derivative Works; within the Source form or documentation, if provided along
+with the Derivative Works; or, within a display generated by the Derivative
+Works, if and wherever such third-party notices normally appear. The contents of
+the NOTICE file are for informational purposes only and do not modify the
+License. You may add Your own attribution notices within Derivative Works that
+You distribute, alongside or as an addendum to the NOTICE text from the Work,
+provided that such additional attribution notices cannot be construed as
+modifying the License.
+You may add Your own copyright statement to Your modifications and may provide
+additional or different license terms and conditions for use, reproduction, or
+distribution of Your modifications, or for any such Derivative Works as a whole,
+provided Your use, reproduction, and distribution of the Work otherwise complies
+with the conditions stated in this License.
+
+5. Submission of Contributions.
+
+Unless You explicitly state otherwise, any Contribution intentionally submitted
+for inclusion in the Work by You to the Licensor shall be under the terms and
+conditions of this License, without any additional terms or conditions.
+Notwithstanding the above, nothing herein shall supersede or modify the terms of
+any separate license agreement you may have executed with Licensor regarding
+such Contributions.
+
+6. Trademarks.
+
+This License does not grant permission to use the trade names, trademarks,
+service marks, or product names of the Licensor, except as required for
+reasonable and customary use in describing the origin of the Work and
+reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty.
+
+Unless required by applicable law or agreed to in writing, Licensor provides the
+Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
+including, without limitation, any warranties or conditions of TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
+solely responsible for determining the appropriateness of using or
+redistributing the Work and assume any risks associated with Your exercise of
+permissions under this License.
+
+8. Limitation of Liability.
+
+In no event and under no legal theory, whether in tort (including negligence),
+contract, or otherwise, unless required by applicable law (such as deliberate
+and grossly negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special, incidental,
+or consequential damages of any character arising as a result of this License or
+out of the use or inability to use the Work (including but not limited to
+damages for loss of goodwill, work stoppage, computer failure or malfunction, or
+any and all other commercial damages or losses), even if such Contributor has
+been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability.
+
+While redistributing the Work or Derivative Works thereof, You may choose to
+offer, and charge a fee for, acceptance of support, warranty, indemnity, or
+other liability obligations and/or rights consistent with this License. However,
+in accepting such obligations, You may act only on Your own behalf and on Your
+sole responsibility, not on behalf of any other Contributor, and only if You
+agree to indemnify, defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason of your
+accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work
+
+To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included on
+the same "printed page" as the copyright notice for easier identification within
+third-party archives.
+
+ Copyright 2014 Unknwon
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/cluster-autoscaler/vendor/gopkg.in/ini.v1/Makefile b/cluster-autoscaler/vendor/gopkg.in/ini.v1/Makefile
new file mode 100644
index 000000000000..f3b0dae2d298
--- /dev/null
+++ b/cluster-autoscaler/vendor/gopkg.in/ini.v1/Makefile
@@ -0,0 +1,15 @@
+.PHONY: build test bench vet coverage
+
+build: vet bench
+
+test:
+ go test -v -cover -race
+
+bench:
+ go test -v -cover -test.bench=. -test.benchmem
+
+vet:
+ go vet
+
+coverage:
+ go test -coverprofile=c.out && go tool cover -html=c.out && rm c.out
diff --git a/cluster-autoscaler/vendor/gopkg.in/ini.v1/README.md b/cluster-autoscaler/vendor/gopkg.in/ini.v1/README.md
new file mode 100644
index 000000000000..30606d9700a8
--- /dev/null
+++ b/cluster-autoscaler/vendor/gopkg.in/ini.v1/README.md
@@ -0,0 +1,43 @@
+# INI
+
+[![GitHub Workflow Status](https://img.shields.io/github/checks-status/go-ini/ini/main?logo=github&style=for-the-badge)](https://github.com/go-ini/ini/actions?query=branch%3Amain)
+[![codecov](https://img.shields.io/codecov/c/github/go-ini/ini/master?logo=codecov&style=for-the-badge)](https://codecov.io/gh/go-ini/ini)
+[![GoDoc](https://img.shields.io/badge/GoDoc-Reference-blue?style=for-the-badge&logo=go)](https://pkg.go.dev/github.com/go-ini/ini?tab=doc)
+[![Sourcegraph](https://img.shields.io/badge/view%20on-Sourcegraph-brightgreen.svg?style=for-the-badge&logo=sourcegraph)](https://sourcegraph.com/github.com/go-ini/ini)
+
+![](https://avatars0.githubusercontent.com/u/10216035?v=3&s=200)
+
+Package ini provides INI file read and write functionality in Go.
+
+## Features
+
+- Load from multiple data sources (file, `[]byte`, `io.Reader` and `io.ReadCloser`) with overwrites.
+- Read with recursion values.
+- Read with parent-child sections.
+- Read with auto-increment key names.
+- Read with multiple-line values.
+- Read with tons of helper methods.
+- Read and convert values to Go types.
+- Read and **WRITE** comments of sections and keys.
+- Manipulate sections, keys and comments with ease.
+- Keep sections and keys in order as you parse and save.
+
+## Installation
+
+The minimum requirement of Go is **1.13**.
+
+```sh
+$ go get gopkg.in/ini.v1
+```
+
+Please add `-u` flag to update in the future.
+
+## Getting Help
+
+- [Getting Started](https://ini.unknwon.io/docs/intro/getting_started)
+- [API Documentation](https://gowalker.org/gopkg.in/ini.v1)
+- 中国大陆镜像:https://ini.unknwon.cn
+
+## License
+
+This project is under Apache v2 License. See the [LICENSE](LICENSE) file for the full license text.
diff --git a/cluster-autoscaler/vendor/gopkg.in/ini.v1/codecov.yml b/cluster-autoscaler/vendor/gopkg.in/ini.v1/codecov.yml
new file mode 100644
index 000000000000..e02ec84bc05f
--- /dev/null
+++ b/cluster-autoscaler/vendor/gopkg.in/ini.v1/codecov.yml
@@ -0,0 +1,16 @@
+coverage:
+ range: "60...95"
+ status:
+ project:
+ default:
+ threshold: 1%
+ informational: true
+ patch:
+ default:
+ only_pulls: true
+ informational: true
+
+comment:
+ layout: 'diff'
+
+github_checks: false
diff --git a/cluster-autoscaler/vendor/gopkg.in/ini.v1/data_source.go b/cluster-autoscaler/vendor/gopkg.in/ini.v1/data_source.go
new file mode 100644
index 000000000000..c3a541f1d1b5
--- /dev/null
+++ b/cluster-autoscaler/vendor/gopkg.in/ini.v1/data_source.go
@@ -0,0 +1,76 @@
+// Copyright 2019 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+)
+
+var (
+ _ dataSource = (*sourceFile)(nil)
+ _ dataSource = (*sourceData)(nil)
+ _ dataSource = (*sourceReadCloser)(nil)
+)
+
+// dataSource is an interface that returns object which can be read and closed.
+type dataSource interface {
+ ReadCloser() (io.ReadCloser, error)
+}
+
+// sourceFile represents an object that contains content on the local file system.
+type sourceFile struct {
+ name string
+}
+
+func (s sourceFile) ReadCloser() (_ io.ReadCloser, err error) {
+ return os.Open(s.name)
+}
+
+// sourceData represents an object that contains content in memory.
+type sourceData struct {
+ data []byte
+}
+
+func (s *sourceData) ReadCloser() (io.ReadCloser, error) {
+ return ioutil.NopCloser(bytes.NewReader(s.data)), nil
+}
+
+// sourceReadCloser represents an input stream with Close method.
+type sourceReadCloser struct {
+ reader io.ReadCloser
+}
+
+func (s *sourceReadCloser) ReadCloser() (io.ReadCloser, error) {
+ return s.reader, nil
+}
+
+func parseDataSource(source interface{}) (dataSource, error) {
+ switch s := source.(type) {
+ case string:
+ return sourceFile{s}, nil
+ case []byte:
+ return &sourceData{s}, nil
+ case io.ReadCloser:
+ return &sourceReadCloser{s}, nil
+ case io.Reader:
+ return &sourceReadCloser{ioutil.NopCloser(s)}, nil
+ default:
+ return nil, fmt.Errorf("error parsing data source: unknown type %q", s)
+ }
+}
diff --git a/cluster-autoscaler/vendor/gopkg.in/ini.v1/deprecated.go b/cluster-autoscaler/vendor/gopkg.in/ini.v1/deprecated.go
new file mode 100644
index 000000000000..48b8e66d6d6f
--- /dev/null
+++ b/cluster-autoscaler/vendor/gopkg.in/ini.v1/deprecated.go
@@ -0,0 +1,22 @@
+// Copyright 2019 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+var (
+ // Deprecated: Use "DefaultSection" instead.
+ DEFAULT_SECTION = DefaultSection
+ // Deprecated: AllCapsUnderscore converts to format ALL_CAPS_UNDERSCORE.
+ AllCapsUnderscore = SnackCase
+)
diff --git a/cluster-autoscaler/vendor/gopkg.in/ini.v1/error.go b/cluster-autoscaler/vendor/gopkg.in/ini.v1/error.go
new file mode 100644
index 000000000000..f66bc94b8b69
--- /dev/null
+++ b/cluster-autoscaler/vendor/gopkg.in/ini.v1/error.go
@@ -0,0 +1,49 @@
+// Copyright 2016 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+ "fmt"
+)
+
+// ErrDelimiterNotFound indicates the error type of no delimiter is found which there should be one.
+type ErrDelimiterNotFound struct {
+ Line string
+}
+
+// IsErrDelimiterNotFound returns true if the given error is an instance of ErrDelimiterNotFound.
+func IsErrDelimiterNotFound(err error) bool {
+ _, ok := err.(ErrDelimiterNotFound)
+ return ok
+}
+
+func (err ErrDelimiterNotFound) Error() string {
+ return fmt.Sprintf("key-value delimiter not found: %s", err.Line)
+}
+
+// ErrEmptyKeyName indicates the error type of no key name is found which there should be one.
+type ErrEmptyKeyName struct {
+ Line string
+}
+
+// IsErrEmptyKeyName returns true if the given error is an instance of ErrEmptyKeyName.
+func IsErrEmptyKeyName(err error) bool {
+ _, ok := err.(ErrEmptyKeyName)
+ return ok
+}
+
+func (err ErrEmptyKeyName) Error() string {
+ return fmt.Sprintf("empty key name: %s", err.Line)
+}
diff --git a/cluster-autoscaler/vendor/gopkg.in/ini.v1/file.go b/cluster-autoscaler/vendor/gopkg.in/ini.v1/file.go
new file mode 100644
index 000000000000..f8b22408be51
--- /dev/null
+++ b/cluster-autoscaler/vendor/gopkg.in/ini.v1/file.go
@@ -0,0 +1,541 @@
+// Copyright 2017 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "strings"
+ "sync"
+)
+
+// File represents a combination of one or more INI files in memory.
+type File struct {
+ options LoadOptions
+ dataSources []dataSource
+
+ // Should make things safe, but sometimes doesn't matter.
+ BlockMode bool
+ lock sync.RWMutex
+
+ // To keep data in order.
+ sectionList []string
+ // To keep track of the index of a section with same name.
+ // This meta list is only used with non-unique section names are allowed.
+ sectionIndexes []int
+
+ // Actual data is stored here.
+ sections map[string][]*Section
+
+ NameMapper
+ ValueMapper
+}
+
+// newFile initializes File object with given data sources.
+func newFile(dataSources []dataSource, opts LoadOptions) *File {
+ if len(opts.KeyValueDelimiters) == 0 {
+ opts.KeyValueDelimiters = "=:"
+ }
+ if len(opts.KeyValueDelimiterOnWrite) == 0 {
+ opts.KeyValueDelimiterOnWrite = "="
+ }
+ if len(opts.ChildSectionDelimiter) == 0 {
+ opts.ChildSectionDelimiter = "."
+ }
+
+ return &File{
+ BlockMode: true,
+ dataSources: dataSources,
+ sections: make(map[string][]*Section),
+ options: opts,
+ }
+}
+
+// Empty returns an empty file object.
+func Empty(opts ...LoadOptions) *File {
+ var opt LoadOptions
+ if len(opts) > 0 {
+ opt = opts[0]
+ }
+
+ // Ignore error here, we are sure our data is good.
+ f, _ := LoadSources(opt, []byte(""))
+ return f
+}
+
+// NewSection creates a new section.
+func (f *File) NewSection(name string) (*Section, error) {
+ if len(name) == 0 {
+ return nil, errors.New("empty section name")
+ }
+
+ if (f.options.Insensitive || f.options.InsensitiveSections) && name != DefaultSection {
+ name = strings.ToLower(name)
+ }
+
+ if f.BlockMode {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+ }
+
+ if !f.options.AllowNonUniqueSections && inSlice(name, f.sectionList) {
+ return f.sections[name][0], nil
+ }
+
+ f.sectionList = append(f.sectionList, name)
+
+ // NOTE: Append to indexes must happen before appending to sections,
+ // otherwise index will have off-by-one problem.
+ f.sectionIndexes = append(f.sectionIndexes, len(f.sections[name]))
+
+ sec := newSection(f, name)
+ f.sections[name] = append(f.sections[name], sec)
+
+ return sec, nil
+}
+
+// NewRawSection creates a new section with an unparseable body.
+func (f *File) NewRawSection(name, body string) (*Section, error) {
+ section, err := f.NewSection(name)
+ if err != nil {
+ return nil, err
+ }
+
+ section.isRawSection = true
+ section.rawBody = body
+ return section, nil
+}
+
+// NewSections creates a list of sections.
+func (f *File) NewSections(names ...string) (err error) {
+ for _, name := range names {
+ if _, err = f.NewSection(name); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// GetSection returns section by given name.
+func (f *File) GetSection(name string) (*Section, error) {
+ secs, err := f.SectionsByName(name)
+ if err != nil {
+ return nil, err
+ }
+
+ return secs[0], err
+}
+
+// HasSection returns true if the file contains a section with given name.
+func (f *File) HasSection(name string) bool {
+ section, _ := f.GetSection(name)
+ return section != nil
+}
+
+// SectionsByName returns all sections with given name.
+func (f *File) SectionsByName(name string) ([]*Section, error) {
+ if len(name) == 0 {
+ name = DefaultSection
+ }
+ if f.options.Insensitive || f.options.InsensitiveSections {
+ name = strings.ToLower(name)
+ }
+
+ if f.BlockMode {
+ f.lock.RLock()
+ defer f.lock.RUnlock()
+ }
+
+ secs := f.sections[name]
+ if len(secs) == 0 {
+ return nil, fmt.Errorf("section %q does not exist", name)
+ }
+
+ return secs, nil
+}
+
+// Section assumes named section exists and returns a zero-value when not.
+func (f *File) Section(name string) *Section {
+ sec, err := f.GetSection(name)
+ if err != nil {
+ if name == "" {
+ name = DefaultSection
+ }
+ sec, _ = f.NewSection(name)
+ return sec
+ }
+ return sec
+}
+
+// SectionWithIndex assumes named section exists and returns a new section when not.
+func (f *File) SectionWithIndex(name string, index int) *Section {
+ secs, err := f.SectionsByName(name)
+ if err != nil || len(secs) <= index {
+ // NOTE: It's OK here because the only possible error is empty section name,
+ // but if it's empty, this piece of code won't be executed.
+ newSec, _ := f.NewSection(name)
+ return newSec
+ }
+
+ return secs[index]
+}
+
+// Sections returns a list of Section stored in the current instance.
+func (f *File) Sections() []*Section {
+ if f.BlockMode {
+ f.lock.RLock()
+ defer f.lock.RUnlock()
+ }
+
+ sections := make([]*Section, len(f.sectionList))
+ for i, name := range f.sectionList {
+ sections[i] = f.sections[name][f.sectionIndexes[i]]
+ }
+ return sections
+}
+
+// ChildSections returns a list of child sections of given section name.
+func (f *File) ChildSections(name string) []*Section {
+ return f.Section(name).ChildSections()
+}
+
+// SectionStrings returns list of section names.
+func (f *File) SectionStrings() []string {
+ list := make([]string, len(f.sectionList))
+ copy(list, f.sectionList)
+ return list
+}
+
+// DeleteSection deletes a section or all sections with given name.
+func (f *File) DeleteSection(name string) {
+ secs, err := f.SectionsByName(name)
+ if err != nil {
+ return
+ }
+
+ for i := 0; i < len(secs); i++ {
+ // For non-unique sections, it is always needed to remove the first one so
+ // in the next iteration, the subsequent section continue having index 0.
+ // Ignoring the error as index 0 never returns an error.
+ _ = f.DeleteSectionWithIndex(name, 0)
+ }
+}
+
+// DeleteSectionWithIndex deletes a section with given name and index.
+func (f *File) DeleteSectionWithIndex(name string, index int) error {
+ if !f.options.AllowNonUniqueSections && index != 0 {
+ return fmt.Errorf("delete section with non-zero index is only allowed when non-unique sections is enabled")
+ }
+
+ if len(name) == 0 {
+ name = DefaultSection
+ }
+ if f.options.Insensitive || f.options.InsensitiveSections {
+ name = strings.ToLower(name)
+ }
+
+ if f.BlockMode {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+ }
+
+ // Count occurrences of the sections
+ occurrences := 0
+
+ sectionListCopy := make([]string, len(f.sectionList))
+ copy(sectionListCopy, f.sectionList)
+
+ for i, s := range sectionListCopy {
+ if s != name {
+ continue
+ }
+
+ if occurrences == index {
+ if len(f.sections[name]) <= 1 {
+ delete(f.sections, name) // The last one in the map
+ } else {
+ f.sections[name] = append(f.sections[name][:index], f.sections[name][index+1:]...)
+ }
+
+ // Fix section lists
+ f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...)
+ f.sectionIndexes = append(f.sectionIndexes[:i], f.sectionIndexes[i+1:]...)
+
+ } else if occurrences > index {
+ // Fix the indices of all following sections with this name.
+ f.sectionIndexes[i-1]--
+ }
+
+ occurrences++
+ }
+
+ return nil
+}
+
+func (f *File) reload(s dataSource) error {
+ r, err := s.ReadCloser()
+ if err != nil {
+ return err
+ }
+ defer r.Close()
+
+ return f.parse(r)
+}
+
+// Reload reloads and parses all data sources.
+func (f *File) Reload() (err error) {
+ for _, s := range f.dataSources {
+ if err = f.reload(s); err != nil {
+ // In loose mode, we create an empty default section for nonexistent files.
+ if os.IsNotExist(err) && f.options.Loose {
+ _ = f.parse(bytes.NewBuffer(nil))
+ continue
+ }
+ return err
+ }
+ if f.options.ShortCircuit {
+ return nil
+ }
+ }
+ return nil
+}
+
+// Append appends one or more data sources and reloads automatically.
+func (f *File) Append(source interface{}, others ...interface{}) error {
+ ds, err := parseDataSource(source)
+ if err != nil {
+ return err
+ }
+ f.dataSources = append(f.dataSources, ds)
+ for _, s := range others {
+ ds, err = parseDataSource(s)
+ if err != nil {
+ return err
+ }
+ f.dataSources = append(f.dataSources, ds)
+ }
+ return f.Reload()
+}
+
+func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) {
+ equalSign := DefaultFormatLeft + f.options.KeyValueDelimiterOnWrite + DefaultFormatRight
+
+ if PrettyFormat || PrettyEqual {
+ equalSign = fmt.Sprintf(" %s ", f.options.KeyValueDelimiterOnWrite)
+ }
+
+ // Use buffer to make sure target is safe until finish encoding.
+ buf := bytes.NewBuffer(nil)
+ lastSectionIdx := len(f.sectionList) - 1
+ for i, sname := range f.sectionList {
+ sec := f.SectionWithIndex(sname, f.sectionIndexes[i])
+ if len(sec.Comment) > 0 {
+ // Support multiline comments
+ lines := strings.Split(sec.Comment, LineBreak)
+ for i := range lines {
+ if lines[i][0] != '#' && lines[i][0] != ';' {
+ lines[i] = "; " + lines[i]
+ } else {
+ lines[i] = lines[i][:1] + " " + strings.TrimSpace(lines[i][1:])
+ }
+
+ if _, err := buf.WriteString(lines[i] + LineBreak); err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ if i > 0 || DefaultHeader || (i == 0 && strings.ToUpper(sec.name) != DefaultSection) {
+ if _, err := buf.WriteString("[" + sname + "]" + LineBreak); err != nil {
+ return nil, err
+ }
+ } else {
+ // Write nothing if default section is empty
+ if len(sec.keyList) == 0 {
+ continue
+ }
+ }
+
+ isLastSection := i == lastSectionIdx
+ if sec.isRawSection {
+ if _, err := buf.WriteString(sec.rawBody); err != nil {
+ return nil, err
+ }
+
+ if PrettySection && !isLastSection {
+ // Put a line between sections
+ if _, err := buf.WriteString(LineBreak); err != nil {
+ return nil, err
+ }
+ }
+ continue
+ }
+
+ // Count and generate alignment length and buffer spaces using the
+ // longest key. Keys may be modified if they contain certain characters so
+ // we need to take that into account in our calculation.
+ alignLength := 0
+ if PrettyFormat {
+ for _, kname := range sec.keyList {
+ keyLength := len(kname)
+ // First case will surround key by ` and second by """
+ if strings.Contains(kname, "\"") || strings.ContainsAny(kname, f.options.KeyValueDelimiters) {
+ keyLength += 2
+ } else if strings.Contains(kname, "`") {
+ keyLength += 6
+ }
+
+ if keyLength > alignLength {
+ alignLength = keyLength
+ }
+ }
+ }
+ alignSpaces := bytes.Repeat([]byte(" "), alignLength)
+
+ KeyList:
+ for _, kname := range sec.keyList {
+ key := sec.Key(kname)
+ if len(key.Comment) > 0 {
+ if len(indent) > 0 && sname != DefaultSection {
+ buf.WriteString(indent)
+ }
+
+ // Support multiline comments
+ lines := strings.Split(key.Comment, LineBreak)
+ for i := range lines {
+ if lines[i][0] != '#' && lines[i][0] != ';' {
+ lines[i] = "; " + strings.TrimSpace(lines[i])
+ } else {
+ lines[i] = lines[i][:1] + " " + strings.TrimSpace(lines[i][1:])
+ }
+
+ if _, err := buf.WriteString(lines[i] + LineBreak); err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ if len(indent) > 0 && sname != DefaultSection {
+ buf.WriteString(indent)
+ }
+
+ switch {
+ case key.isAutoIncrement:
+ kname = "-"
+ case strings.Contains(kname, "\"") || strings.ContainsAny(kname, f.options.KeyValueDelimiters):
+ kname = "`" + kname + "`"
+ case strings.Contains(kname, "`"):
+ kname = `"""` + kname + `"""`
+ }
+
+ writeKeyValue := func(val string) (bool, error) {
+ if _, err := buf.WriteString(kname); err != nil {
+ return false, err
+ }
+
+ if key.isBooleanType {
+ buf.WriteString(LineBreak)
+ return true, nil
+ }
+
+ // Write out alignment spaces before "=" sign
+ if PrettyFormat {
+ buf.Write(alignSpaces[:alignLength-len(kname)])
+ }
+
+ // In case key value contains "\n", "`", "\"", "#" or ";"
+ if strings.ContainsAny(val, "\n`") {
+ val = `"""` + val + `"""`
+ } else if !f.options.IgnoreInlineComment && strings.ContainsAny(val, "#;") {
+ val = "`" + val + "`"
+ } else if len(strings.TrimSpace(val)) != len(val) {
+ val = `"` + val + `"`
+ }
+ if _, err := buf.WriteString(equalSign + val + LineBreak); err != nil {
+ return false, err
+ }
+ return false, nil
+ }
+
+ shadows := key.ValueWithShadows()
+ if len(shadows) == 0 {
+ if _, err := writeKeyValue(""); err != nil {
+ return nil, err
+ }
+ }
+
+ for _, val := range shadows {
+ exitLoop, err := writeKeyValue(val)
+ if err != nil {
+ return nil, err
+ } else if exitLoop {
+ continue KeyList
+ }
+ }
+
+ for _, val := range key.nestedValues {
+ if _, err := buf.WriteString(indent + " " + val + LineBreak); err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ if PrettySection && !isLastSection {
+ // Put a line between sections
+ if _, err := buf.WriteString(LineBreak); err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ return buf, nil
+}
+
+// WriteToIndent writes content into io.Writer with given indention.
+// If PrettyFormat has been set to be true,
+// it will align "=" sign with spaces under each section.
+func (f *File) WriteToIndent(w io.Writer, indent string) (int64, error) {
+ buf, err := f.writeToBuffer(indent)
+ if err != nil {
+ return 0, err
+ }
+ return buf.WriteTo(w)
+}
+
+// WriteTo writes file content into io.Writer.
+func (f *File) WriteTo(w io.Writer) (int64, error) {
+ return f.WriteToIndent(w, "")
+}
+
+// SaveToIndent writes content to file system with given value indention.
+func (f *File) SaveToIndent(filename, indent string) error {
+ // Note: Because we are truncating with os.Create,
+ // so it's safer to save to a temporary file location and rename after done.
+ buf, err := f.writeToBuffer(indent)
+ if err != nil {
+ return err
+ }
+
+ return ioutil.WriteFile(filename, buf.Bytes(), 0666)
+}
+
+// SaveTo writes content to file system.
+func (f *File) SaveTo(filename string) error {
+ return f.SaveToIndent(filename, "")
+}
diff --git a/cluster-autoscaler/vendor/gopkg.in/ini.v1/helper.go b/cluster-autoscaler/vendor/gopkg.in/ini.v1/helper.go
new file mode 100644
index 000000000000..f9d80a682a55
--- /dev/null
+++ b/cluster-autoscaler/vendor/gopkg.in/ini.v1/helper.go
@@ -0,0 +1,24 @@
+// Copyright 2019 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+func inSlice(str string, s []string) bool {
+ for _, v := range s {
+ if str == v {
+ return true
+ }
+ }
+ return false
+}
diff --git a/cluster-autoscaler/vendor/gopkg.in/ini.v1/ini.go b/cluster-autoscaler/vendor/gopkg.in/ini.v1/ini.go
new file mode 100644
index 000000000000..99e7f86511a4
--- /dev/null
+++ b/cluster-autoscaler/vendor/gopkg.in/ini.v1/ini.go
@@ -0,0 +1,176 @@
+// Copyright 2014 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+// Package ini provides INI file read and write functionality in Go.
+package ini
+
+import (
+ "os"
+ "regexp"
+ "runtime"
+ "strings"
+)
+
+const (
+ // Maximum allowed depth when recursively substituing variable names.
+ depthValues = 99
+)
+
+var (
+ // DefaultSection is the name of default section. You can use this var or the string literal.
+ // In most of cases, an empty string is all you need to access the section.
+ DefaultSection = "DEFAULT"
+
+ // LineBreak is the delimiter to determine or compose a new line.
+ // This variable will be changed to "\r\n" automatically on Windows at package init time.
+ LineBreak = "\n"
+
+ // Variable regexp pattern: %(variable)s
+ varPattern = regexp.MustCompile(`%\(([^)]+)\)s`)
+
+ // DefaultHeader explicitly writes default section header.
+ DefaultHeader = false
+
+ // PrettySection indicates whether to put a line between sections.
+ PrettySection = true
+ // PrettyFormat indicates whether to align "=" sign with spaces to produce pretty output
+ // or reduce all possible spaces for compact format.
+ PrettyFormat = true
+ // PrettyEqual places spaces around "=" sign even when PrettyFormat is false.
+ PrettyEqual = false
+ // DefaultFormatLeft places custom spaces on the left when PrettyFormat and PrettyEqual are both disabled.
+ DefaultFormatLeft = ""
+ // DefaultFormatRight places custom spaces on the right when PrettyFormat and PrettyEqual are both disabled.
+ DefaultFormatRight = ""
+)
+
+var inTest = len(os.Args) > 0 && strings.HasSuffix(strings.TrimSuffix(os.Args[0], ".exe"), ".test")
+
+func init() {
+ if runtime.GOOS == "windows" && !inTest {
+ LineBreak = "\r\n"
+ }
+}
+
+// LoadOptions contains all customized options used for load data source(s).
+type LoadOptions struct {
+ // Loose indicates whether the parser should ignore nonexistent files or return error.
+ Loose bool
+ // Insensitive indicates whether the parser forces all section and key names to lowercase.
+ Insensitive bool
+ // InsensitiveSections indicates whether the parser forces all section to lowercase.
+ InsensitiveSections bool
+ // InsensitiveKeys indicates whether the parser forces all key names to lowercase.
+ InsensitiveKeys bool
+ // IgnoreContinuation indicates whether to ignore continuation lines while parsing.
+ IgnoreContinuation bool
+ // IgnoreInlineComment indicates whether to ignore comments at the end of value and treat it as part of value.
+ IgnoreInlineComment bool
+ // SkipUnrecognizableLines indicates whether to skip unrecognizable lines that do not conform to key/value pairs.
+ SkipUnrecognizableLines bool
+ // ShortCircuit indicates whether to ignore other configuration sources after loaded the first available configuration source.
+ ShortCircuit bool
+ // AllowBooleanKeys indicates whether to allow boolean type keys or treat as value is missing.
+ // This type of keys are mostly used in my.cnf.
+ AllowBooleanKeys bool
+ // AllowShadows indicates whether to keep track of keys with same name under same section.
+ AllowShadows bool
+ // AllowNestedValues indicates whether to allow AWS-like nested values.
+ // Docs: http://docs.aws.amazon.com/cli/latest/topic/config-vars.html#nested-values
+ AllowNestedValues bool
+ // AllowPythonMultilineValues indicates whether to allow Python-like multi-line values.
+ // Docs: https://docs.python.org/3/library/configparser.html#supported-ini-file-structure
+ // Relevant quote: Values can also span multiple lines, as long as they are indented deeper
+ // than the first line of the value.
+ AllowPythonMultilineValues bool
+ // SpaceBeforeInlineComment indicates whether to allow comment symbols (\# and \;) inside value.
+ // Docs: https://docs.python.org/2/library/configparser.html
+ // Quote: Comments may appear on their own in an otherwise empty line, or may be entered in lines holding values or section names.
+ // In the latter case, they need to be preceded by a whitespace character to be recognized as a comment.
+ SpaceBeforeInlineComment bool
+ // UnescapeValueDoubleQuotes indicates whether to unescape double quotes inside value to regular format
+ // when value is surrounded by double quotes, e.g. key="a \"value\"" => key=a "value"
+ UnescapeValueDoubleQuotes bool
+ // UnescapeValueCommentSymbols indicates to unescape comment symbols (\# and \;) inside value to regular format
+ // when value is NOT surrounded by any quotes.
+ // Note: UNSTABLE, behavior might change to only unescape inside double quotes but may noy necessary at all.
+ UnescapeValueCommentSymbols bool
+ // UnparseableSections stores a list of blocks that are allowed with raw content which do not otherwise
+ // conform to key/value pairs. Specify the names of those blocks here.
+ UnparseableSections []string
+ // KeyValueDelimiters is the sequence of delimiters that are used to separate key and value. By default, it is "=:".
+ KeyValueDelimiters string
+ // KeyValueDelimiterOnWrite is the delimiter that are used to separate key and value output. By default, it is "=".
+ KeyValueDelimiterOnWrite string
+ // ChildSectionDelimiter is the delimiter that is used to separate child sections. By default, it is ".".
+ ChildSectionDelimiter string
+ // PreserveSurroundedQuote indicates whether to preserve surrounded quote (single and double quotes).
+ PreserveSurroundedQuote bool
+ // DebugFunc is called to collect debug information (currently only useful to debug parsing Python-style multiline values).
+ DebugFunc DebugFunc
+ // ReaderBufferSize is the buffer size of the reader in bytes.
+ ReaderBufferSize int
+ // AllowNonUniqueSections indicates whether to allow sections with the same name multiple times.
+ AllowNonUniqueSections bool
+ // AllowDuplicateShadowValues indicates whether values for shadowed keys should be deduplicated.
+ AllowDuplicateShadowValues bool
+}
+
+// DebugFunc is the type of function called to log parse events.
+type DebugFunc func(message string)
+
+// LoadSources allows caller to apply customized options for loading from data source(s).
+func LoadSources(opts LoadOptions, source interface{}, others ...interface{}) (_ *File, err error) {
+ sources := make([]dataSource, len(others)+1)
+ sources[0], err = parseDataSource(source)
+ if err != nil {
+ return nil, err
+ }
+ for i := range others {
+ sources[i+1], err = parseDataSource(others[i])
+ if err != nil {
+ return nil, err
+ }
+ }
+ f := newFile(sources, opts)
+ if err = f.Reload(); err != nil {
+ return nil, err
+ }
+ return f, nil
+}
+
+// Load loads and parses from INI data sources.
+// Arguments can be mixed of file name with string type, or raw data in []byte.
+// It will return error if list contains nonexistent files.
+func Load(source interface{}, others ...interface{}) (*File, error) {
+ return LoadSources(LoadOptions{}, source, others...)
+}
+
+// LooseLoad has exactly same functionality as Load function
+// except it ignores nonexistent files instead of returning error.
+func LooseLoad(source interface{}, others ...interface{}) (*File, error) {
+ return LoadSources(LoadOptions{Loose: true}, source, others...)
+}
+
+// InsensitiveLoad has exactly same functionality as Load function
+// except it forces all section and key names to be lowercased.
+func InsensitiveLoad(source interface{}, others ...interface{}) (*File, error) {
+ return LoadSources(LoadOptions{Insensitive: true}, source, others...)
+}
+
+// ShadowLoad has exactly same functionality as Load function
+// except it allows have shadow keys.
+func ShadowLoad(source interface{}, others ...interface{}) (*File, error) {
+ return LoadSources(LoadOptions{AllowShadows: true}, source, others...)
+}
diff --git a/cluster-autoscaler/vendor/gopkg.in/ini.v1/key.go b/cluster-autoscaler/vendor/gopkg.in/ini.v1/key.go
new file mode 100644
index 000000000000..a19d9f38ef14
--- /dev/null
+++ b/cluster-autoscaler/vendor/gopkg.in/ini.v1/key.go
@@ -0,0 +1,837 @@
+// Copyright 2014 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// Key represents a key under a section.
+type Key struct {
+ s *Section
+ Comment string
+ name string
+ value string
+ isAutoIncrement bool
+ isBooleanType bool
+
+ isShadow bool
+ shadows []*Key
+
+ nestedValues []string
+}
+
+// newKey simply return a key object with given values.
+func newKey(s *Section, name, val string) *Key {
+ return &Key{
+ s: s,
+ name: name,
+ value: val,
+ }
+}
+
+func (k *Key) addShadow(val string) error {
+ if k.isShadow {
+ return errors.New("cannot add shadow to another shadow key")
+ } else if k.isAutoIncrement || k.isBooleanType {
+ return errors.New("cannot add shadow to auto-increment or boolean key")
+ }
+
+ if !k.s.f.options.AllowDuplicateShadowValues {
+ // Deduplicate shadows based on their values.
+ if k.value == val {
+ return nil
+ }
+ for i := range k.shadows {
+ if k.shadows[i].value == val {
+ return nil
+ }
+ }
+ }
+
+ shadow := newKey(k.s, k.name, val)
+ shadow.isShadow = true
+ k.shadows = append(k.shadows, shadow)
+ return nil
+}
+
+// AddShadow adds a new shadow key to itself.
+func (k *Key) AddShadow(val string) error {
+ if !k.s.f.options.AllowShadows {
+ return errors.New("shadow key is not allowed")
+ }
+ return k.addShadow(val)
+}
+
+func (k *Key) addNestedValue(val string) error {
+ if k.isAutoIncrement || k.isBooleanType {
+ return errors.New("cannot add nested value to auto-increment or boolean key")
+ }
+
+ k.nestedValues = append(k.nestedValues, val)
+ return nil
+}
+
+// AddNestedValue adds a nested value to the key.
+func (k *Key) AddNestedValue(val string) error {
+ if !k.s.f.options.AllowNestedValues {
+ return errors.New("nested value is not allowed")
+ }
+ return k.addNestedValue(val)
+}
+
+// ValueMapper represents a mapping function for values, e.g. os.ExpandEnv
+type ValueMapper func(string) string
+
+// Name returns name of key.
+func (k *Key) Name() string {
+ return k.name
+}
+
+// Value returns raw value of key for performance purpose.
+func (k *Key) Value() string {
+ return k.value
+}
+
+// ValueWithShadows returns raw values of key and its shadows if any. Shadow
+// keys with empty values are ignored from the returned list.
+func (k *Key) ValueWithShadows() []string {
+ if len(k.shadows) == 0 {
+ if k.value == "" {
+ return []string{}
+ }
+ return []string{k.value}
+ }
+
+ vals := make([]string, 0, len(k.shadows)+1)
+ if k.value != "" {
+ vals = append(vals, k.value)
+ }
+ for _, s := range k.shadows {
+ if s.value != "" {
+ vals = append(vals, s.value)
+ }
+ }
+ return vals
+}
+
+// NestedValues returns nested values stored in the key.
+// It is possible returned value is nil if no nested values stored in the key.
+func (k *Key) NestedValues() []string {
+ return k.nestedValues
+}
+
+// transformValue takes a raw value and transforms to its final string.
+func (k *Key) transformValue(val string) string {
+ if k.s.f.ValueMapper != nil {
+ val = k.s.f.ValueMapper(val)
+ }
+
+ // Fail-fast if no indicate char found for recursive value
+ if !strings.Contains(val, "%") {
+ return val
+ }
+ for i := 0; i < depthValues; i++ {
+ vr := varPattern.FindString(val)
+ if len(vr) == 0 {
+ break
+ }
+
+ // Take off leading '%(' and trailing ')s'.
+ noption := vr[2 : len(vr)-2]
+
+ // Search in the same section.
+ // If not found or found the key itself, then search again in default section.
+ nk, err := k.s.GetKey(noption)
+ if err != nil || k == nk {
+ nk, _ = k.s.f.Section("").GetKey(noption)
+ if nk == nil {
+ // Stop when no results found in the default section,
+ // and returns the value as-is.
+ break
+ }
+ }
+
+ // Substitute by new value and take off leading '%(' and trailing ')s'.
+ val = strings.Replace(val, vr, nk.value, -1)
+ }
+ return val
+}
+
+// String returns string representation of value.
+func (k *Key) String() string {
+ return k.transformValue(k.value)
+}
+
+// Validate accepts a validate function which can
+// return modifed result as key value.
+func (k *Key) Validate(fn func(string) string) string {
+ return fn(k.String())
+}
+
+// parseBool returns the boolean value represented by the string.
+//
+// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On,
+// 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off.
+// Any other value returns an error.
+func parseBool(str string) (value bool, err error) {
+ switch str {
+ case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "y", "ON", "on", "On":
+ return true, nil
+ case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "n", "OFF", "off", "Off":
+ return false, nil
+ }
+ return false, fmt.Errorf("parsing \"%s\": invalid syntax", str)
+}
+
+// Bool returns bool type value.
+func (k *Key) Bool() (bool, error) {
+ return parseBool(k.String())
+}
+
+// Float64 returns float64 type value.
+func (k *Key) Float64() (float64, error) {
+ return strconv.ParseFloat(k.String(), 64)
+}
+
+// Int returns int type value.
+func (k *Key) Int() (int, error) {
+ v, err := strconv.ParseInt(k.String(), 0, 64)
+ return int(v), err
+}
+
+// Int64 returns int64 type value.
+func (k *Key) Int64() (int64, error) {
+ return strconv.ParseInt(k.String(), 0, 64)
+}
+
+// Uint returns uint type valued.
+func (k *Key) Uint() (uint, error) {
+ u, e := strconv.ParseUint(k.String(), 0, 64)
+ return uint(u), e
+}
+
+// Uint64 returns uint64 type value.
+func (k *Key) Uint64() (uint64, error) {
+ return strconv.ParseUint(k.String(), 0, 64)
+}
+
+// Duration returns time.Duration type value.
+func (k *Key) Duration() (time.Duration, error) {
+ return time.ParseDuration(k.String())
+}
+
+// TimeFormat parses with given format and returns time.Time type value.
+func (k *Key) TimeFormat(format string) (time.Time, error) {
+ return time.Parse(format, k.String())
+}
+
+// Time parses with RFC3339 format and returns time.Time type value.
+func (k *Key) Time() (time.Time, error) {
+ return k.TimeFormat(time.RFC3339)
+}
+
+// MustString returns default value if key value is empty.
+func (k *Key) MustString(defaultVal string) string {
+ val := k.String()
+ if len(val) == 0 {
+ k.value = defaultVal
+ return defaultVal
+ }
+ return val
+}
+
+// MustBool always returns value without error,
+// it returns false if error occurs.
+func (k *Key) MustBool(defaultVal ...bool) bool {
+ val, err := k.Bool()
+ if len(defaultVal) > 0 && err != nil {
+ k.value = strconv.FormatBool(defaultVal[0])
+ return defaultVal[0]
+ }
+ return val
+}
+
+// MustFloat64 always returns value without error,
+// it returns 0.0 if error occurs.
+func (k *Key) MustFloat64(defaultVal ...float64) float64 {
+ val, err := k.Float64()
+ if len(defaultVal) > 0 && err != nil {
+ k.value = strconv.FormatFloat(defaultVal[0], 'f', -1, 64)
+ return defaultVal[0]
+ }
+ return val
+}
+
+// MustInt always returns value without error,
+// it returns 0 if error occurs.
+func (k *Key) MustInt(defaultVal ...int) int {
+ val, err := k.Int()
+ if len(defaultVal) > 0 && err != nil {
+ k.value = strconv.FormatInt(int64(defaultVal[0]), 10)
+ return defaultVal[0]
+ }
+ return val
+}
+
+// MustInt64 always returns value without error,
+// it returns 0 if error occurs.
+func (k *Key) MustInt64(defaultVal ...int64) int64 {
+ val, err := k.Int64()
+ if len(defaultVal) > 0 && err != nil {
+ k.value = strconv.FormatInt(defaultVal[0], 10)
+ return defaultVal[0]
+ }
+ return val
+}
+
+// MustUint always returns value without error,
+// it returns 0 if error occurs.
+func (k *Key) MustUint(defaultVal ...uint) uint {
+ val, err := k.Uint()
+ if len(defaultVal) > 0 && err != nil {
+ k.value = strconv.FormatUint(uint64(defaultVal[0]), 10)
+ return defaultVal[0]
+ }
+ return val
+}
+
+// MustUint64 always returns value without error,
+// it returns 0 if error occurs.
+func (k *Key) MustUint64(defaultVal ...uint64) uint64 {
+ val, err := k.Uint64()
+ if len(defaultVal) > 0 && err != nil {
+ k.value = strconv.FormatUint(defaultVal[0], 10)
+ return defaultVal[0]
+ }
+ return val
+}
+
+// MustDuration always returns value without error,
+// it returns zero value if error occurs.
+func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration {
+ val, err := k.Duration()
+ if len(defaultVal) > 0 && err != nil {
+ k.value = defaultVal[0].String()
+ return defaultVal[0]
+ }
+ return val
+}
+
+// MustTimeFormat always parses with given format and returns value without error,
+// it returns zero value if error occurs.
+func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time {
+ val, err := k.TimeFormat(format)
+ if len(defaultVal) > 0 && err != nil {
+ k.value = defaultVal[0].Format(format)
+ return defaultVal[0]
+ }
+ return val
+}
+
+// MustTime always parses with RFC3339 format and returns value without error,
+// it returns zero value if error occurs.
+func (k *Key) MustTime(defaultVal ...time.Time) time.Time {
+ return k.MustTimeFormat(time.RFC3339, defaultVal...)
+}
+
+// In always returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) In(defaultVal string, candidates []string) string {
+ val := k.String()
+ for _, cand := range candidates {
+ if val == cand {
+ return val
+ }
+ }
+ return defaultVal
+}
+
+// InFloat64 always returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 {
+ val := k.MustFloat64()
+ for _, cand := range candidates {
+ if val == cand {
+ return val
+ }
+ }
+ return defaultVal
+}
+
+// InInt always returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InInt(defaultVal int, candidates []int) int {
+ val := k.MustInt()
+ for _, cand := range candidates {
+ if val == cand {
+ return val
+ }
+ }
+ return defaultVal
+}
+
+// InInt64 always returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 {
+ val := k.MustInt64()
+ for _, cand := range candidates {
+ if val == cand {
+ return val
+ }
+ }
+ return defaultVal
+}
+
+// InUint always returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InUint(defaultVal uint, candidates []uint) uint {
+ val := k.MustUint()
+ for _, cand := range candidates {
+ if val == cand {
+ return val
+ }
+ }
+ return defaultVal
+}
+
+// InUint64 always returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 {
+ val := k.MustUint64()
+ for _, cand := range candidates {
+ if val == cand {
+ return val
+ }
+ }
+ return defaultVal
+}
+
+// InTimeFormat always parses with given format and returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time {
+ val := k.MustTimeFormat(format)
+ for _, cand := range candidates {
+ if val == cand {
+ return val
+ }
+ }
+ return defaultVal
+}
+
+// InTime always parses with RFC3339 format and returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time {
+ return k.InTimeFormat(time.RFC3339, defaultVal, candidates)
+}
+
+// RangeFloat64 checks if value is in given range inclusively,
+// and returns default value if it's not.
+func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 {
+ val := k.MustFloat64()
+ if val < min || val > max {
+ return defaultVal
+ }
+ return val
+}
+
+// RangeInt checks if value is in given range inclusively,
+// and returns default value if it's not.
+func (k *Key) RangeInt(defaultVal, min, max int) int {
+ val := k.MustInt()
+ if val < min || val > max {
+ return defaultVal
+ }
+ return val
+}
+
+// RangeInt64 checks if value is in given range inclusively,
+// and returns default value if it's not.
+func (k *Key) RangeInt64(defaultVal, min, max int64) int64 {
+ val := k.MustInt64()
+ if val < min || val > max {
+ return defaultVal
+ }
+ return val
+}
+
+// RangeTimeFormat checks if value with given format is in given range inclusively,
+// and returns default value if it's not.
+func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time {
+ val := k.MustTimeFormat(format)
+ if val.Unix() < min.Unix() || val.Unix() > max.Unix() {
+ return defaultVal
+ }
+ return val
+}
+
+// RangeTime checks if value with RFC3339 format is in given range inclusively,
+// and returns default value if it's not.
+func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time {
+ return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max)
+}
+
+// Strings returns list of string divided by given delimiter.
+func (k *Key) Strings(delim string) []string {
+ str := k.String()
+ if len(str) == 0 {
+ return []string{}
+ }
+
+ runes := []rune(str)
+ vals := make([]string, 0, 2)
+ var buf bytes.Buffer
+ escape := false
+ idx := 0
+ for {
+ if escape {
+ escape = false
+ if runes[idx] != '\\' && !strings.HasPrefix(string(runes[idx:]), delim) {
+ buf.WriteRune('\\')
+ }
+ buf.WriteRune(runes[idx])
+ } else {
+ if runes[idx] == '\\' {
+ escape = true
+ } else if strings.HasPrefix(string(runes[idx:]), delim) {
+ idx += len(delim) - 1
+ vals = append(vals, strings.TrimSpace(buf.String()))
+ buf.Reset()
+ } else {
+ buf.WriteRune(runes[idx])
+ }
+ }
+ idx++
+ if idx == len(runes) {
+ break
+ }
+ }
+
+ if buf.Len() > 0 {
+ vals = append(vals, strings.TrimSpace(buf.String()))
+ }
+
+ return vals
+}
+
+// StringsWithShadows returns list of string divided by given delimiter.
+// Shadows will also be appended if any.
+func (k *Key) StringsWithShadows(delim string) []string {
+ vals := k.ValueWithShadows()
+ results := make([]string, 0, len(vals)*2)
+ for i := range vals {
+ if len(vals) == 0 {
+ continue
+ }
+
+ results = append(results, strings.Split(vals[i], delim)...)
+ }
+
+ for i := range results {
+ results[i] = k.transformValue(strings.TrimSpace(results[i]))
+ }
+ return results
+}
+
+// Float64s returns list of float64 divided by given delimiter. Any invalid input will be treated as zero value.
+func (k *Key) Float64s(delim string) []float64 {
+ vals, _ := k.parseFloat64s(k.Strings(delim), true, false)
+ return vals
+}
+
+// Ints returns list of int divided by given delimiter. Any invalid input will be treated as zero value.
+func (k *Key) Ints(delim string) []int {
+ vals, _ := k.parseInts(k.Strings(delim), true, false)
+ return vals
+}
+
+// Int64s returns list of int64 divided by given delimiter. Any invalid input will be treated as zero value.
+func (k *Key) Int64s(delim string) []int64 {
+ vals, _ := k.parseInt64s(k.Strings(delim), true, false)
+ return vals
+}
+
+// Uints returns list of uint divided by given delimiter. Any invalid input will be treated as zero value.
+func (k *Key) Uints(delim string) []uint {
+ vals, _ := k.parseUints(k.Strings(delim), true, false)
+ return vals
+}
+
+// Uint64s returns list of uint64 divided by given delimiter. Any invalid input will be treated as zero value.
+func (k *Key) Uint64s(delim string) []uint64 {
+ vals, _ := k.parseUint64s(k.Strings(delim), true, false)
+ return vals
+}
+
+// Bools returns list of bool divided by given delimiter. Any invalid input will be treated as zero value.
+func (k *Key) Bools(delim string) []bool {
+ vals, _ := k.parseBools(k.Strings(delim), true, false)
+ return vals
+}
+
+// TimesFormat parses with given format and returns list of time.Time divided by given delimiter.
+// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC).
+func (k *Key) TimesFormat(format, delim string) []time.Time {
+ vals, _ := k.parseTimesFormat(format, k.Strings(delim), true, false)
+ return vals
+}
+
+// Times parses with RFC3339 format and returns list of time.Time divided by given delimiter.
+// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC).
+func (k *Key) Times(delim string) []time.Time {
+ return k.TimesFormat(time.RFC3339, delim)
+}
+
+// ValidFloat64s returns list of float64 divided by given delimiter. If some value is not float, then
+// it will not be included to result list.
+func (k *Key) ValidFloat64s(delim string) []float64 {
+ vals, _ := k.parseFloat64s(k.Strings(delim), false, false)
+ return vals
+}
+
+// ValidInts returns list of int divided by given delimiter. If some value is not integer, then it will
+// not be included to result list.
+func (k *Key) ValidInts(delim string) []int {
+ vals, _ := k.parseInts(k.Strings(delim), false, false)
+ return vals
+}
+
+// ValidInt64s returns list of int64 divided by given delimiter. If some value is not 64-bit integer,
+// then it will not be included to result list.
+func (k *Key) ValidInt64s(delim string) []int64 {
+ vals, _ := k.parseInt64s(k.Strings(delim), false, false)
+ return vals
+}
+
+// ValidUints returns list of uint divided by given delimiter. If some value is not unsigned integer,
+// then it will not be included to result list.
+func (k *Key) ValidUints(delim string) []uint {
+ vals, _ := k.parseUints(k.Strings(delim), false, false)
+ return vals
+}
+
+// ValidUint64s returns list of uint64 divided by given delimiter. If some value is not 64-bit unsigned
+// integer, then it will not be included to result list.
+func (k *Key) ValidUint64s(delim string) []uint64 {
+ vals, _ := k.parseUint64s(k.Strings(delim), false, false)
+ return vals
+}
+
+// ValidBools returns list of bool divided by given delimiter. If some value is not a valid
+// boolean, then it will not be included to result list.
+func (k *Key) ValidBools(delim string) []bool {
+	vals, _ := k.parseBools(k.Strings(delim), false, false)
+	return vals
+}
+
+// ValidTimesFormat parses with given format and returns list of time.Time divided by given delimiter.
+func (k *Key) ValidTimesFormat(format, delim string) []time.Time {
+ vals, _ := k.parseTimesFormat(format, k.Strings(delim), false, false)
+ return vals
+}
+
+// ValidTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter.
+func (k *Key) ValidTimes(delim string) []time.Time {
+ return k.ValidTimesFormat(time.RFC3339, delim)
+}
+
+// StrictFloat64s returns list of float64 divided by given delimiter or error on first invalid input.
+func (k *Key) StrictFloat64s(delim string) ([]float64, error) {
+ return k.parseFloat64s(k.Strings(delim), false, true)
+}
+
+// StrictInts returns list of int divided by given delimiter or error on first invalid input.
+func (k *Key) StrictInts(delim string) ([]int, error) {
+ return k.parseInts(k.Strings(delim), false, true)
+}
+
+// StrictInt64s returns list of int64 divided by given delimiter or error on first invalid input.
+func (k *Key) StrictInt64s(delim string) ([]int64, error) {
+ return k.parseInt64s(k.Strings(delim), false, true)
+}
+
+// StrictUints returns list of uint divided by given delimiter or error on first invalid input.
+func (k *Key) StrictUints(delim string) ([]uint, error) {
+ return k.parseUints(k.Strings(delim), false, true)
+}
+
+// StrictUint64s returns list of uint64 divided by given delimiter or error on first invalid input.
+func (k *Key) StrictUint64s(delim string) ([]uint64, error) {
+ return k.parseUint64s(k.Strings(delim), false, true)
+}
+
+// StrictBools returns list of bool divided by given delimiter or error on first invalid input.
+func (k *Key) StrictBools(delim string) ([]bool, error) {
+ return k.parseBools(k.Strings(delim), false, true)
+}
+
+// StrictTimesFormat parses with given format and returns list of time.Time divided by given delimiter
+// or error on first invalid input.
+func (k *Key) StrictTimesFormat(format, delim string) ([]time.Time, error) {
+ return k.parseTimesFormat(format, k.Strings(delim), false, true)
+}
+
+// StrictTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter
+// or error on first invalid input.
+func (k *Key) StrictTimes(delim string) ([]time.Time, error) {
+ return k.StrictTimesFormat(time.RFC3339, delim)
+}
+
+// parseBools transforms strings to bools.
+func (k *Key) parseBools(strs []string, addInvalid, returnOnInvalid bool) ([]bool, error) {
+ vals := make([]bool, 0, len(strs))
+ parser := func(str string) (interface{}, error) {
+ val, err := parseBool(str)
+ return val, err
+ }
+ rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser)
+ if err == nil {
+ for _, val := range rawVals {
+ vals = append(vals, val.(bool))
+ }
+ }
+ return vals, err
+}
+
+// parseFloat64s transforms strings to float64s.
+func (k *Key) parseFloat64s(strs []string, addInvalid, returnOnInvalid bool) ([]float64, error) {
+ vals := make([]float64, 0, len(strs))
+ parser := func(str string) (interface{}, error) {
+ val, err := strconv.ParseFloat(str, 64)
+ return val, err
+ }
+ rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser)
+ if err == nil {
+ for _, val := range rawVals {
+ vals = append(vals, val.(float64))
+ }
+ }
+ return vals, err
+}
+
+// parseInts transforms strings to ints.
+func (k *Key) parseInts(strs []string, addInvalid, returnOnInvalid bool) ([]int, error) {
+ vals := make([]int, 0, len(strs))
+ parser := func(str string) (interface{}, error) {
+ val, err := strconv.ParseInt(str, 0, 64)
+ return val, err
+ }
+ rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser)
+ if err == nil {
+ for _, val := range rawVals {
+ vals = append(vals, int(val.(int64)))
+ }
+ }
+ return vals, err
+}
+
+// parseInt64s transforms strings to int64s.
+func (k *Key) parseInt64s(strs []string, addInvalid, returnOnInvalid bool) ([]int64, error) {
+ vals := make([]int64, 0, len(strs))
+ parser := func(str string) (interface{}, error) {
+ val, err := strconv.ParseInt(str, 0, 64)
+ return val, err
+ }
+
+ rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser)
+ if err == nil {
+ for _, val := range rawVals {
+ vals = append(vals, val.(int64))
+ }
+ }
+ return vals, err
+}
+
+// parseUints transforms strings to uints.
+func (k *Key) parseUints(strs []string, addInvalid, returnOnInvalid bool) ([]uint, error) {
+ vals := make([]uint, 0, len(strs))
+ parser := func(str string) (interface{}, error) {
+ val, err := strconv.ParseUint(str, 0, 64)
+ return val, err
+ }
+
+ rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser)
+ if err == nil {
+ for _, val := range rawVals {
+ vals = append(vals, uint(val.(uint64)))
+ }
+ }
+ return vals, err
+}
+
+// parseUint64s transforms strings to uint64s.
+func (k *Key) parseUint64s(strs []string, addInvalid, returnOnInvalid bool) ([]uint64, error) {
+ vals := make([]uint64, 0, len(strs))
+ parser := func(str string) (interface{}, error) {
+ val, err := strconv.ParseUint(str, 0, 64)
+ return val, err
+ }
+ rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser)
+ if err == nil {
+ for _, val := range rawVals {
+ vals = append(vals, val.(uint64))
+ }
+ }
+ return vals, err
+}
+
+type Parser func(str string) (interface{}, error)
+
+// parseTimesFormat transforms strings to times in given format.
+func (k *Key) parseTimesFormat(format string, strs []string, addInvalid, returnOnInvalid bool) ([]time.Time, error) {
+ vals := make([]time.Time, 0, len(strs))
+ parser := func(str string) (interface{}, error) {
+ val, err := time.Parse(format, str)
+ return val, err
+ }
+ rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser)
+ if err == nil {
+ for _, val := range rawVals {
+ vals = append(vals, val.(time.Time))
+ }
+ }
+ return vals, err
+}
+
+// doParse transforms strings to different types using the given parser.
+func (k *Key) doParse(strs []string, addInvalid, returnOnInvalid bool, parser Parser) ([]interface{}, error) {
+	vals := make([]interface{}, 0, len(strs))
+	for _, str := range strs {
+		val, err := parser(str)
+		if err != nil && returnOnInvalid {
+			return nil, err
+		}
+		if err == nil || addInvalid {
+			vals = append(vals, val)
+		}
+	}
+	return vals, nil
+}
+
+// SetValue changes key value.
+func (k *Key) SetValue(v string) {
+ if k.s.f.BlockMode {
+ k.s.f.lock.Lock()
+ defer k.s.f.lock.Unlock()
+ }
+
+ k.value = v
+ k.s.keysHash[k.name] = v
+}
diff --git a/cluster-autoscaler/vendor/gopkg.in/ini.v1/parser.go b/cluster-autoscaler/vendor/gopkg.in/ini.v1/parser.go
new file mode 100644
index 000000000000..44fc526c2cb6
--- /dev/null
+++ b/cluster-autoscaler/vendor/gopkg.in/ini.v1/parser.go
@@ -0,0 +1,520 @@
+// Copyright 2015 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "regexp"
+ "strconv"
+ "strings"
+ "unicode"
+)
+
+const minReaderBufferSize = 4096
+
+var pythonMultiline = regexp.MustCompile(`^([\t\f ]+)(.*)`)
+
+type parserOptions struct {
+ IgnoreContinuation bool
+ IgnoreInlineComment bool
+ AllowPythonMultilineValues bool
+ SpaceBeforeInlineComment bool
+ UnescapeValueDoubleQuotes bool
+ UnescapeValueCommentSymbols bool
+ PreserveSurroundedQuote bool
+ DebugFunc DebugFunc
+ ReaderBufferSize int
+}
+
+type parser struct {
+ buf *bufio.Reader
+ options parserOptions
+
+ isEOF bool
+ count int
+ comment *bytes.Buffer
+}
+
+func (p *parser) debug(format string, args ...interface{}) {
+ if p.options.DebugFunc != nil {
+ p.options.DebugFunc(fmt.Sprintf(format, args...))
+ }
+}
+
+func newParser(r io.Reader, opts parserOptions) *parser {
+ size := opts.ReaderBufferSize
+ if size < minReaderBufferSize {
+ size = minReaderBufferSize
+ }
+
+ return &parser{
+ buf: bufio.NewReaderSize(r, size),
+ options: opts,
+ count: 1,
+ comment: &bytes.Buffer{},
+ }
+}
+
+// BOM handles header of UTF-8, UTF-16 LE and UTF-16 BE's BOM format.
+// http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding
+func (p *parser) BOM() error {
+ mask, err := p.buf.Peek(2)
+ if err != nil && err != io.EOF {
+ return err
+ } else if len(mask) < 2 {
+ return nil
+ }
+
+ switch {
+ case mask[0] == 254 && mask[1] == 255:
+ fallthrough
+ case mask[0] == 255 && mask[1] == 254:
+ _, err = p.buf.Read(mask)
+ if err != nil {
+ return err
+ }
+ case mask[0] == 239 && mask[1] == 187:
+ mask, err := p.buf.Peek(3)
+ if err != nil && err != io.EOF {
+ return err
+ } else if len(mask) < 3 {
+ return nil
+ }
+ if mask[2] == 191 {
+ _, err = p.buf.Read(mask)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func (p *parser) readUntil(delim byte) ([]byte, error) {
+ data, err := p.buf.ReadBytes(delim)
+ if err != nil {
+ if err == io.EOF {
+ p.isEOF = true
+ } else {
+ return nil, err
+ }
+ }
+ return data, nil
+}
+
+func cleanComment(in []byte) ([]byte, bool) {
+ i := bytes.IndexAny(in, "#;")
+ if i == -1 {
+ return nil, false
+ }
+ return in[i:], true
+}
+
+func readKeyName(delimiters string, in []byte) (string, int, error) {
+ line := string(in)
+
+ // Check if key name surrounded by quotes.
+ var keyQuote string
+ if line[0] == '"' {
+ if len(line) > 6 && line[0:3] == `"""` {
+ keyQuote = `"""`
+ } else {
+ keyQuote = `"`
+ }
+ } else if line[0] == '`' {
+ keyQuote = "`"
+ }
+
+ // Get out key name
+ var endIdx int
+ if len(keyQuote) > 0 {
+ startIdx := len(keyQuote)
+ // FIXME: fail case -> """"""name"""=value
+ pos := strings.Index(line[startIdx:], keyQuote)
+ if pos == -1 {
+ return "", -1, fmt.Errorf("missing closing key quote: %s", line)
+ }
+ pos += startIdx
+
+ // Find key-value delimiter
+ i := strings.IndexAny(line[pos+startIdx:], delimiters)
+ if i < 0 {
+ return "", -1, ErrDelimiterNotFound{line}
+ }
+ endIdx = pos + i
+ return strings.TrimSpace(line[startIdx:pos]), endIdx + startIdx + 1, nil
+ }
+
+ endIdx = strings.IndexAny(line, delimiters)
+ if endIdx < 0 {
+ return "", -1, ErrDelimiterNotFound{line}
+ }
+ if endIdx == 0 {
+ return "", -1, ErrEmptyKeyName{line}
+ }
+
+ return strings.TrimSpace(line[0:endIdx]), endIdx + 1, nil
+}
+
+func (p *parser) readMultilines(line, val, valQuote string) (string, error) {
+ for {
+ data, err := p.readUntil('\n')
+ if err != nil {
+ return "", err
+ }
+ next := string(data)
+
+ pos := strings.LastIndex(next, valQuote)
+ if pos > -1 {
+ val += next[:pos]
+
+ comment, has := cleanComment([]byte(next[pos:]))
+ if has {
+ p.comment.Write(bytes.TrimSpace(comment))
+ }
+ break
+ }
+ val += next
+ if p.isEOF {
+ return "", fmt.Errorf("missing closing key quote from %q to %q", line, next)
+ }
+ }
+ return val, nil
+}
+
+func (p *parser) readContinuationLines(val string) (string, error) {
+ for {
+ data, err := p.readUntil('\n')
+ if err != nil {
+ return "", err
+ }
+ next := strings.TrimSpace(string(data))
+
+ if len(next) == 0 {
+ break
+ }
+ val += next
+ if val[len(val)-1] != '\\' {
+ break
+ }
+ val = val[:len(val)-1]
+ }
+ return val, nil
+}
+
+// hasSurroundedQuote check if and only if the first and last characters
+// are quotes \" or \'.
+// It returns false if any other parts also contain same kind of quotes.
+func hasSurroundedQuote(in string, quote byte) bool {
+ return len(in) >= 2 && in[0] == quote && in[len(in)-1] == quote &&
+ strings.IndexByte(in[1:], quote) == len(in)-2
+}
+
+func (p *parser) readValue(in []byte, bufferSize int) (string, error) {
+
+ line := strings.TrimLeftFunc(string(in), unicode.IsSpace)
+ if len(line) == 0 {
+ if p.options.AllowPythonMultilineValues && len(in) > 0 && in[len(in)-1] == '\n' {
+ return p.readPythonMultilines(line, bufferSize)
+ }
+ return "", nil
+ }
+
+ var valQuote string
+ if len(line) > 3 && line[0:3] == `"""` {
+ valQuote = `"""`
+ } else if line[0] == '`' {
+ valQuote = "`"
+ } else if p.options.UnescapeValueDoubleQuotes && line[0] == '"' {
+ valQuote = `"`
+ }
+
+ if len(valQuote) > 0 {
+ startIdx := len(valQuote)
+ pos := strings.LastIndex(line[startIdx:], valQuote)
+ // Check for multi-line value
+ if pos == -1 {
+ return p.readMultilines(line, line[startIdx:], valQuote)
+ }
+
+ if p.options.UnescapeValueDoubleQuotes && valQuote == `"` {
+ return strings.Replace(line[startIdx:pos+startIdx], `\"`, `"`, -1), nil
+ }
+ return line[startIdx : pos+startIdx], nil
+ }
+
+ lastChar := line[len(line)-1]
+ // Won't be able to reach here if value only contains whitespace
+ line = strings.TrimSpace(line)
+ trimmedLastChar := line[len(line)-1]
+
+ // Check continuation lines when desired
+ if !p.options.IgnoreContinuation && trimmedLastChar == '\\' {
+ return p.readContinuationLines(line[:len(line)-1])
+ }
+
+ // Check if ignore inline comment
+ if !p.options.IgnoreInlineComment {
+ var i int
+ if p.options.SpaceBeforeInlineComment {
+ i = strings.Index(line, " #")
+ if i == -1 {
+ i = strings.Index(line, " ;")
+ }
+
+ } else {
+ i = strings.IndexAny(line, "#;")
+ }
+
+ if i > -1 {
+ p.comment.WriteString(line[i:])
+ line = strings.TrimSpace(line[:i])
+ }
+
+ }
+
+ // Trim single and double quotes
+ if (hasSurroundedQuote(line, '\'') ||
+ hasSurroundedQuote(line, '"')) && !p.options.PreserveSurroundedQuote {
+ line = line[1 : len(line)-1]
+ } else if len(valQuote) == 0 && p.options.UnescapeValueCommentSymbols {
+ line = strings.ReplaceAll(line, `\;`, ";")
+ line = strings.ReplaceAll(line, `\#`, "#")
+ } else if p.options.AllowPythonMultilineValues && lastChar == '\n' {
+ return p.readPythonMultilines(line, bufferSize)
+ }
+
+ return line, nil
+}
+
+func (p *parser) readPythonMultilines(line string, bufferSize int) (string, error) {
+ parserBufferPeekResult, _ := p.buf.Peek(bufferSize)
+ peekBuffer := bytes.NewBuffer(parserBufferPeekResult)
+
+ for {
+ peekData, peekErr := peekBuffer.ReadBytes('\n')
+ if peekErr != nil && peekErr != io.EOF {
+ p.debug("readPythonMultilines: failed to peek with error: %v", peekErr)
+ return "", peekErr
+ }
+
+ p.debug("readPythonMultilines: parsing %q", string(peekData))
+
+ peekMatches := pythonMultiline.FindStringSubmatch(string(peekData))
+ p.debug("readPythonMultilines: matched %d parts", len(peekMatches))
+ for n, v := range peekMatches {
+ p.debug(" %d: %q", n, v)
+ }
+
+ // Return if not a Python multiline value.
+ if len(peekMatches) != 3 {
+ p.debug("readPythonMultilines: end of value, got: %q", line)
+ return line, nil
+ }
+
+ // Advance the parser reader (buffer) in-sync with the peek buffer.
+ _, err := p.buf.Discard(len(peekData))
+ if err != nil {
+ p.debug("readPythonMultilines: failed to skip to the end, returning error")
+ return "", err
+ }
+
+ line += "\n" + peekMatches[0]
+ }
+}
+
+// parse parses data through an io.Reader.
+func (f *File) parse(reader io.Reader) (err error) {
+ p := newParser(reader, parserOptions{
+ IgnoreContinuation: f.options.IgnoreContinuation,
+ IgnoreInlineComment: f.options.IgnoreInlineComment,
+ AllowPythonMultilineValues: f.options.AllowPythonMultilineValues,
+ SpaceBeforeInlineComment: f.options.SpaceBeforeInlineComment,
+ UnescapeValueDoubleQuotes: f.options.UnescapeValueDoubleQuotes,
+ UnescapeValueCommentSymbols: f.options.UnescapeValueCommentSymbols,
+ PreserveSurroundedQuote: f.options.PreserveSurroundedQuote,
+ DebugFunc: f.options.DebugFunc,
+ ReaderBufferSize: f.options.ReaderBufferSize,
+ })
+ if err = p.BOM(); err != nil {
+ return fmt.Errorf("BOM: %v", err)
+ }
+
+ // Ignore error because default section name is never empty string.
+ name := DefaultSection
+ if f.options.Insensitive || f.options.InsensitiveSections {
+ name = strings.ToLower(DefaultSection)
+ }
+ section, _ := f.NewSection(name)
+
+ // This "last" is not strictly equivalent to "previous one" if current key is not the first nested key
+ var isLastValueEmpty bool
+ var lastRegularKey *Key
+
+ var line []byte
+ var inUnparseableSection bool
+
+ // NOTE: Iterate and increase `currentPeekSize` until
+ // the size of the parser buffer is found.
+ // TODO(unknwon): When Golang 1.10 is the lowest version supported, replace with `parserBufferSize := p.buf.Size()`.
+ parserBufferSize := 0
+ // NOTE: Peek 4kb at a time.
+ currentPeekSize := minReaderBufferSize
+
+ if f.options.AllowPythonMultilineValues {
+ for {
+ peekBytes, _ := p.buf.Peek(currentPeekSize)
+ peekBytesLength := len(peekBytes)
+
+ if parserBufferSize >= peekBytesLength {
+ break
+ }
+
+ currentPeekSize *= 2
+ parserBufferSize = peekBytesLength
+ }
+ }
+
+ for !p.isEOF {
+ line, err = p.readUntil('\n')
+ if err != nil {
+ return err
+ }
+
+ if f.options.AllowNestedValues &&
+ isLastValueEmpty && len(line) > 0 {
+ if line[0] == ' ' || line[0] == '\t' {
+ err = lastRegularKey.addNestedValue(string(bytes.TrimSpace(line)))
+ if err != nil {
+ return err
+ }
+ continue
+ }
+ }
+
+ line = bytes.TrimLeftFunc(line, unicode.IsSpace)
+ if len(line) == 0 {
+ continue
+ }
+
+ // Comments
+ if line[0] == '#' || line[0] == ';' {
+ // Note: we do not care ending line break,
+ // it is needed for adding second line,
+ // so just clean it once at the end when set to value.
+ p.comment.Write(line)
+ continue
+ }
+
+ // Section
+ if line[0] == '[' {
+ // Read to the next ']' (TODO: support quoted strings)
+ closeIdx := bytes.LastIndexByte(line, ']')
+ if closeIdx == -1 {
+ return fmt.Errorf("unclosed section: %s", line)
+ }
+
+ name := string(line[1:closeIdx])
+ section, err = f.NewSection(name)
+ if err != nil {
+ return err
+ }
+
+ comment, has := cleanComment(line[closeIdx+1:])
+ if has {
+ p.comment.Write(comment)
+ }
+
+ section.Comment = strings.TrimSpace(p.comment.String())
+
+ // Reset auto-counter and comments
+ p.comment.Reset()
+ p.count = 1
+ // Nested values can't span sections
+ isLastValueEmpty = false
+
+ inUnparseableSection = false
+ for i := range f.options.UnparseableSections {
+ if f.options.UnparseableSections[i] == name ||
+ ((f.options.Insensitive || f.options.InsensitiveSections) && strings.EqualFold(f.options.UnparseableSections[i], name)) {
+ inUnparseableSection = true
+ continue
+ }
+ }
+ continue
+ }
+
+ if inUnparseableSection {
+ section.isRawSection = true
+ section.rawBody += string(line)
+ continue
+ }
+
+ kname, offset, err := readKeyName(f.options.KeyValueDelimiters, line)
+ if err != nil {
+ switch {
+ // Treat as boolean key when desired, and whole line is key name.
+ case IsErrDelimiterNotFound(err):
+ switch {
+ case f.options.AllowBooleanKeys:
+ kname, err := p.readValue(line, parserBufferSize)
+ if err != nil {
+ return err
+ }
+ key, err := section.NewBooleanKey(kname)
+ if err != nil {
+ return err
+ }
+ key.Comment = strings.TrimSpace(p.comment.String())
+ p.comment.Reset()
+ continue
+
+ case f.options.SkipUnrecognizableLines:
+ continue
+ }
+ case IsErrEmptyKeyName(err) && f.options.SkipUnrecognizableLines:
+ continue
+ }
+ return err
+ }
+
+ // Auto increment.
+ isAutoIncr := false
+ if kname == "-" {
+ isAutoIncr = true
+ kname = "#" + strconv.Itoa(p.count)
+ p.count++
+ }
+
+ value, err := p.readValue(line[offset:], parserBufferSize)
+ if err != nil {
+ return err
+ }
+ isLastValueEmpty = len(value) == 0
+
+ key, err := section.NewKey(kname, value)
+ if err != nil {
+ return err
+ }
+ key.isAutoIncrement = isAutoIncr
+ key.Comment = strings.TrimSpace(p.comment.String())
+ p.comment.Reset()
+ lastRegularKey = key
+ }
+ return nil
+}
diff --git a/cluster-autoscaler/vendor/gopkg.in/ini.v1/section.go b/cluster-autoscaler/vendor/gopkg.in/ini.v1/section.go
new file mode 100644
index 000000000000..a3615d820b7a
--- /dev/null
+++ b/cluster-autoscaler/vendor/gopkg.in/ini.v1/section.go
@@ -0,0 +1,256 @@
+// Copyright 2014 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+)
+
+// Section represents a config section.
+type Section struct {
+ f *File
+ Comment string
+ name string
+ keys map[string]*Key
+ keyList []string
+ keysHash map[string]string
+
+ isRawSection bool
+ rawBody string
+}
+
+func newSection(f *File, name string) *Section {
+ return &Section{
+ f: f,
+ name: name,
+ keys: make(map[string]*Key),
+ keyList: make([]string, 0, 10),
+ keysHash: make(map[string]string),
+ }
+}
+
+// Name returns name of Section.
+func (s *Section) Name() string {
+ return s.name
+}
+
+// Body returns rawBody of Section if the section was marked as unparseable.
+// It still follows the other rules of the INI format surrounding leading/trailing whitespace.
+func (s *Section) Body() string {
+ return strings.TrimSpace(s.rawBody)
+}
+
+// SetBody updates body content only if section is raw.
+func (s *Section) SetBody(body string) {
+ if !s.isRawSection {
+ return
+ }
+ s.rawBody = body
+}
+
+// NewKey creates a new key to given section.
+func (s *Section) NewKey(name, val string) (*Key, error) {
+ if len(name) == 0 {
+ return nil, errors.New("error creating new key: empty key name")
+ } else if s.f.options.Insensitive || s.f.options.InsensitiveKeys {
+ name = strings.ToLower(name)
+ }
+
+ if s.f.BlockMode {
+ s.f.lock.Lock()
+ defer s.f.lock.Unlock()
+ }
+
+ if inSlice(name, s.keyList) {
+ if s.f.options.AllowShadows {
+ if err := s.keys[name].addShadow(val); err != nil {
+ return nil, err
+ }
+ } else {
+ s.keys[name].value = val
+ s.keysHash[name] = val
+ }
+ return s.keys[name], nil
+ }
+
+ s.keyList = append(s.keyList, name)
+ s.keys[name] = newKey(s, name, val)
+ s.keysHash[name] = val
+ return s.keys[name], nil
+}
+
+// NewBooleanKey creates a new boolean type key to given section.
+func (s *Section) NewBooleanKey(name string) (*Key, error) {
+ key, err := s.NewKey(name, "true")
+ if err != nil {
+ return nil, err
+ }
+
+ key.isBooleanType = true
+ return key, nil
+}
+
+// GetKey returns key in section by given name.
+func (s *Section) GetKey(name string) (*Key, error) {
+ if s.f.BlockMode {
+ s.f.lock.RLock()
+ }
+ if s.f.options.Insensitive || s.f.options.InsensitiveKeys {
+ name = strings.ToLower(name)
+ }
+ key := s.keys[name]
+ if s.f.BlockMode {
+ s.f.lock.RUnlock()
+ }
+
+ if key == nil {
+ // Check if it is a child-section.
+ sname := s.name
+ for {
+ if i := strings.LastIndex(sname, s.f.options.ChildSectionDelimiter); i > -1 {
+ sname = sname[:i]
+ sec, err := s.f.GetSection(sname)
+ if err != nil {
+ continue
+ }
+ return sec.GetKey(name)
+ }
+ break
+ }
+ return nil, fmt.Errorf("error when getting key of section %q: key %q not exists", s.name, name)
+ }
+ return key, nil
+}
+
+// HasKey returns true if section contains a key with given name.
+func (s *Section) HasKey(name string) bool {
+ key, _ := s.GetKey(name)
+ return key != nil
+}
+
+// Deprecated: Use "HasKey" instead.
+func (s *Section) Haskey(name string) bool {
+ return s.HasKey(name)
+}
+
+// HasValue returns true if section contains given raw value.
+func (s *Section) HasValue(value string) bool {
+ if s.f.BlockMode {
+ s.f.lock.RLock()
+ defer s.f.lock.RUnlock()
+ }
+
+ for _, k := range s.keys {
+ if value == k.value {
+ return true
+ }
+ }
+ return false
+}
+
+// Key assumes named Key exists in section and returns a zero-value when not.
+func (s *Section) Key(name string) *Key {
+ key, err := s.GetKey(name)
+ if err != nil {
+ // It's OK here because the only possible error is empty key name,
+ // but if it's empty, this piece of code won't be executed.
+ key, _ = s.NewKey(name, "")
+ return key
+ }
+ return key
+}
+
+// Keys returns list of keys of section.
+func (s *Section) Keys() []*Key {
+ keys := make([]*Key, len(s.keyList))
+ for i := range s.keyList {
+ keys[i] = s.Key(s.keyList[i])
+ }
+ return keys
+}
+
+// ParentKeys returns list of keys of parent section.
+func (s *Section) ParentKeys() []*Key {
+ var parentKeys []*Key
+ sname := s.name
+ for {
+ if i := strings.LastIndex(sname, s.f.options.ChildSectionDelimiter); i > -1 {
+ sname = sname[:i]
+ sec, err := s.f.GetSection(sname)
+ if err != nil {
+ continue
+ }
+ parentKeys = append(parentKeys, sec.Keys()...)
+ } else {
+ break
+ }
+
+ }
+ return parentKeys
+}
+
+// KeyStrings returns list of key names of section.
+func (s *Section) KeyStrings() []string {
+ list := make([]string, len(s.keyList))
+ copy(list, s.keyList)
+ return list
+}
+
+// KeysHash returns keys hash consisting of names and values.
+func (s *Section) KeysHash() map[string]string {
+ if s.f.BlockMode {
+ s.f.lock.RLock()
+ defer s.f.lock.RUnlock()
+ }
+
+ hash := make(map[string]string, len(s.keysHash))
+ for key, value := range s.keysHash {
+ hash[key] = value
+ }
+ return hash
+}
+
+// DeleteKey deletes a key from section.
+func (s *Section) DeleteKey(name string) {
+ if s.f.BlockMode {
+ s.f.lock.Lock()
+ defer s.f.lock.Unlock()
+ }
+
+ for i, k := range s.keyList {
+ if k == name {
+ s.keyList = append(s.keyList[:i], s.keyList[i+1:]...)
+ delete(s.keys, name)
+ delete(s.keysHash, name)
+ return
+ }
+ }
+}
+
+// ChildSections returns a list of child sections of current section.
+// For example, "[parent.child1]" and "[parent.child12]" are child sections
+// of section "[parent]".
+func (s *Section) ChildSections() []*Section {
+ prefix := s.name + s.f.options.ChildSectionDelimiter
+ children := make([]*Section, 0, 3)
+ for _, name := range s.f.sectionList {
+ if strings.HasPrefix(name, prefix) {
+ children = append(children, s.f.sections[name]...)
+ }
+ }
+ return children
+}
diff --git a/cluster-autoscaler/vendor/gopkg.in/ini.v1/struct.go b/cluster-autoscaler/vendor/gopkg.in/ini.v1/struct.go
new file mode 100644
index 000000000000..a486b2fe0fdc
--- /dev/null
+++ b/cluster-autoscaler/vendor/gopkg.in/ini.v1/struct.go
@@ -0,0 +1,747 @@
+// Copyright 2014 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "reflect"
+ "strings"
+ "time"
+ "unicode"
+)
+
+// NameMapper represents a ini tag name mapper.
+type NameMapper func(string) string
+
+// Built-in name getters.
+var (
+ // SnackCase converts to format SNACK_CASE.
+ SnackCase NameMapper = func(raw string) string {
+ newstr := make([]rune, 0, len(raw))
+ for i, chr := range raw {
+ if isUpper := 'A' <= chr && chr <= 'Z'; isUpper {
+ if i > 0 {
+ newstr = append(newstr, '_')
+ }
+ }
+ newstr = append(newstr, unicode.ToUpper(chr))
+ }
+ return string(newstr)
+ }
+ // TitleUnderscore converts to format title_underscore.
+ TitleUnderscore NameMapper = func(raw string) string {
+ newstr := make([]rune, 0, len(raw))
+ for i, chr := range raw {
+ if isUpper := 'A' <= chr && chr <= 'Z'; isUpper {
+ if i > 0 {
+ newstr = append(newstr, '_')
+ }
+ chr -= 'A' - 'a'
+ }
+ newstr = append(newstr, chr)
+ }
+ return string(newstr)
+ }
+)
+
+func (s *Section) parseFieldName(raw, actual string) string {
+ if len(actual) > 0 {
+ return actual
+ }
+ if s.f.NameMapper != nil {
+ return s.f.NameMapper(raw)
+ }
+ return raw
+}
+
+func parseDelim(actual string) string {
+ if len(actual) > 0 {
+ return actual
+ }
+ return ","
+}
+
+var reflectTime = reflect.TypeOf(time.Now()).Kind()
+
+// setSliceWithProperType sets proper values to slice based on its type.
+func setSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error {
+ var strs []string
+ if allowShadow {
+ strs = key.StringsWithShadows(delim)
+ } else {
+ strs = key.Strings(delim)
+ }
+
+ numVals := len(strs)
+ if numVals == 0 {
+ return nil
+ }
+
+ var vals interface{}
+ var err error
+
+ sliceOf := field.Type().Elem().Kind()
+ switch sliceOf {
+ case reflect.String:
+ vals = strs
+ case reflect.Int:
+ vals, err = key.parseInts(strs, true, false)
+ case reflect.Int64:
+ vals, err = key.parseInt64s(strs, true, false)
+ case reflect.Uint:
+ vals, err = key.parseUints(strs, true, false)
+ case reflect.Uint64:
+ vals, err = key.parseUint64s(strs, true, false)
+ case reflect.Float64:
+ vals, err = key.parseFloat64s(strs, true, false)
+ case reflect.Bool:
+ vals, err = key.parseBools(strs, true, false)
+ case reflectTime:
+ vals, err = key.parseTimesFormat(time.RFC3339, strs, true, false)
+ default:
+ return fmt.Errorf("unsupported type '[]%s'", sliceOf)
+ }
+ if err != nil && isStrict {
+ return err
+ }
+
+ slice := reflect.MakeSlice(field.Type(), numVals, numVals)
+ for i := 0; i < numVals; i++ {
+ switch sliceOf {
+ case reflect.String:
+ slice.Index(i).Set(reflect.ValueOf(vals.([]string)[i]))
+ case reflect.Int:
+ slice.Index(i).Set(reflect.ValueOf(vals.([]int)[i]))
+ case reflect.Int64:
+ slice.Index(i).Set(reflect.ValueOf(vals.([]int64)[i]))
+ case reflect.Uint:
+ slice.Index(i).Set(reflect.ValueOf(vals.([]uint)[i]))
+ case reflect.Uint64:
+ slice.Index(i).Set(reflect.ValueOf(vals.([]uint64)[i]))
+ case reflect.Float64:
+ slice.Index(i).Set(reflect.ValueOf(vals.([]float64)[i]))
+ case reflect.Bool:
+ slice.Index(i).Set(reflect.ValueOf(vals.([]bool)[i]))
+ case reflectTime:
+ slice.Index(i).Set(reflect.ValueOf(vals.([]time.Time)[i]))
+ }
+ }
+ field.Set(slice)
+ return nil
+}
+
+func wrapStrictError(err error, isStrict bool) error {
+ if isStrict {
+ return err
+ }
+ return nil
+}
+
+// setWithProperType sets proper value to field based on its type,
+// but it does not return error for failing parsing,
+// because we want to use default value that is already assigned to struct.
+func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error {
+ vt := t
+ isPtr := t.Kind() == reflect.Ptr
+ if isPtr {
+ vt = t.Elem()
+ }
+ switch vt.Kind() {
+ case reflect.String:
+ stringVal := key.String()
+ if isPtr {
+ field.Set(reflect.ValueOf(&stringVal))
+ } else if len(stringVal) > 0 {
+ field.SetString(key.String())
+ }
+ case reflect.Bool:
+ boolVal, err := key.Bool()
+ if err != nil {
+ return wrapStrictError(err, isStrict)
+ }
+ if isPtr {
+ field.Set(reflect.ValueOf(&boolVal))
+ } else {
+ field.SetBool(boolVal)
+ }
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ // ParseDuration will not return err for `0`, so check the type name
+ if vt.Name() == "Duration" {
+ durationVal, err := key.Duration()
+ if err != nil {
+ if intVal, err := key.Int64(); err == nil {
+ field.SetInt(intVal)
+ return nil
+ }
+ return wrapStrictError(err, isStrict)
+ }
+ if isPtr {
+ field.Set(reflect.ValueOf(&durationVal))
+ } else if int64(durationVal) > 0 {
+ field.Set(reflect.ValueOf(durationVal))
+ }
+ return nil
+ }
+
+ intVal, err := key.Int64()
+ if err != nil {
+ return wrapStrictError(err, isStrict)
+ }
+ if isPtr {
+ pv := reflect.New(t.Elem())
+ pv.Elem().SetInt(intVal)
+ field.Set(pv)
+ } else {
+ field.SetInt(intVal)
+ }
+ // byte is an alias for uint8, so supporting uint8 breaks support for byte
+ case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ durationVal, err := key.Duration()
+ // Skip zero value
+ if err == nil && uint64(durationVal) > 0 {
+ if isPtr {
+ field.Set(reflect.ValueOf(&durationVal))
+ } else {
+ field.Set(reflect.ValueOf(durationVal))
+ }
+ return nil
+ }
+
+ uintVal, err := key.Uint64()
+ if err != nil {
+ return wrapStrictError(err, isStrict)
+ }
+ if isPtr {
+ pv := reflect.New(t.Elem())
+ pv.Elem().SetUint(uintVal)
+ field.Set(pv)
+ } else {
+ field.SetUint(uintVal)
+ }
+
+ case reflect.Float32, reflect.Float64:
+ floatVal, err := key.Float64()
+ if err != nil {
+ return wrapStrictError(err, isStrict)
+ }
+ if isPtr {
+ pv := reflect.New(t.Elem())
+ pv.Elem().SetFloat(floatVal)
+ field.Set(pv)
+ } else {
+ field.SetFloat(floatVal)
+ }
+ case reflectTime:
+ timeVal, err := key.Time()
+ if err != nil {
+ return wrapStrictError(err, isStrict)
+ }
+ if isPtr {
+ field.Set(reflect.ValueOf(&timeVal))
+ } else {
+ field.Set(reflect.ValueOf(timeVal))
+ }
+ case reflect.Slice:
+ return setSliceWithProperType(key, field, delim, allowShadow, isStrict)
+ default:
+ return fmt.Errorf("unsupported type %q", t)
+ }
+ return nil
+}
+
+func parseTagOptions(tag string) (rawName string, omitEmpty bool, allowShadow bool, allowNonUnique bool, extends bool) {
+ opts := strings.SplitN(tag, ",", 5)
+ rawName = opts[0]
+ for _, opt := range opts[1:] {
+ omitEmpty = omitEmpty || (opt == "omitempty")
+ allowShadow = allowShadow || (opt == "allowshadow")
+ allowNonUnique = allowNonUnique || (opt == "nonunique")
+ extends = extends || (opt == "extends")
+ }
+ return rawName, omitEmpty, allowShadow, allowNonUnique, extends
+}
+
+// mapToField maps the given value to the matching field of the given section.
+// The sectionIndex is the index (if non unique sections are enabled) to which the value should be added.
+func (s *Section) mapToField(val reflect.Value, isStrict bool, sectionIndex int, sectionName string) error {
+ if val.Kind() == reflect.Ptr {
+ val = val.Elem()
+ }
+ typ := val.Type()
+
+ for i := 0; i < typ.NumField(); i++ {
+ field := val.Field(i)
+ tpField := typ.Field(i)
+
+ tag := tpField.Tag.Get("ini")
+ if tag == "-" {
+ continue
+ }
+
+ rawName, _, allowShadow, allowNonUnique, extends := parseTagOptions(tag)
+ fieldName := s.parseFieldName(tpField.Name, rawName)
+ if len(fieldName) == 0 || !field.CanSet() {
+ continue
+ }
+
+ isStruct := tpField.Type.Kind() == reflect.Struct
+ isStructPtr := tpField.Type.Kind() == reflect.Ptr && tpField.Type.Elem().Kind() == reflect.Struct
+ isAnonymousPtr := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous
+ if isAnonymousPtr {
+ field.Set(reflect.New(tpField.Type.Elem()))
+ }
+
+ if extends && (isAnonymousPtr || (isStruct && tpField.Anonymous)) {
+ if isStructPtr && field.IsNil() {
+ field.Set(reflect.New(tpField.Type.Elem()))
+ }
+ fieldSection := s
+ if rawName != "" {
+ sectionName = s.name + s.f.options.ChildSectionDelimiter + rawName
+ if secs, err := s.f.SectionsByName(sectionName); err == nil && sectionIndex < len(secs) {
+ fieldSection = secs[sectionIndex]
+ }
+ }
+ if err := fieldSection.mapToField(field, isStrict, sectionIndex, sectionName); err != nil {
+ return fmt.Errorf("map to field %q: %v", fieldName, err)
+ }
+ } else if isAnonymousPtr || isStruct || isStructPtr {
+ if secs, err := s.f.SectionsByName(fieldName); err == nil {
+ if len(secs) <= sectionIndex {
+ return fmt.Errorf("there are not enough sections (%d <= %d) for the field %q", len(secs), sectionIndex, fieldName)
+ }
+ // Only set the field to non-nil struct value if we have a section for it.
+ // Otherwise, we end up with a non-nil struct ptr even though there is no data.
+ if isStructPtr && field.IsNil() {
+ field.Set(reflect.New(tpField.Type.Elem()))
+ }
+ if err = secs[sectionIndex].mapToField(field, isStrict, sectionIndex, fieldName); err != nil {
+ return fmt.Errorf("map to field %q: %v", fieldName, err)
+ }
+ continue
+ }
+ }
+
+ // Map non-unique sections
+ if allowNonUnique && tpField.Type.Kind() == reflect.Slice {
+ newField, err := s.mapToSlice(fieldName, field, isStrict)
+ if err != nil {
+ return fmt.Errorf("map to slice %q: %v", fieldName, err)
+ }
+
+ field.Set(newField)
+ continue
+ }
+
+ if key, err := s.GetKey(fieldName); err == nil {
+ delim := parseDelim(tpField.Tag.Get("delim"))
+ if err = setWithProperType(tpField.Type, key, field, delim, allowShadow, isStrict); err != nil {
+ return fmt.Errorf("set field %q: %v", fieldName, err)
+ }
+ }
+ }
+ return nil
+}
+
+// mapToSlice maps all sections with the same name and returns the new value.
+// The type of the Value must be a slice.
+func (s *Section) mapToSlice(secName string, val reflect.Value, isStrict bool) (reflect.Value, error) {
+ secs, err := s.f.SectionsByName(secName)
+ if err != nil {
+ return reflect.Value{}, err
+ }
+
+ typ := val.Type().Elem()
+ for i, sec := range secs {
+ elem := reflect.New(typ)
+ if err = sec.mapToField(elem, isStrict, i, sec.name); err != nil {
+ return reflect.Value{}, fmt.Errorf("map to field from section %q: %v", secName, err)
+ }
+
+ val = reflect.Append(val, elem.Elem())
+ }
+ return val, nil
+}
+
+// mapTo maps a section to object v.
+func (s *Section) mapTo(v interface{}, isStrict bool) error {
+ typ := reflect.TypeOf(v)
+ val := reflect.ValueOf(v)
+ if typ.Kind() == reflect.Ptr {
+ typ = typ.Elem()
+ val = val.Elem()
+ } else {
+ return errors.New("not a pointer to a struct")
+ }
+
+ if typ.Kind() == reflect.Slice {
+ newField, err := s.mapToSlice(s.name, val, isStrict)
+ if err != nil {
+ return err
+ }
+
+ val.Set(newField)
+ return nil
+ }
+
+ return s.mapToField(val, isStrict, 0, s.name)
+}
+
+// MapTo maps section to given struct.
+func (s *Section) MapTo(v interface{}) error {
+ return s.mapTo(v, false)
+}
+
+// StrictMapTo maps section to given struct in strict mode,
+// which returns all possible error including value parsing error.
+func (s *Section) StrictMapTo(v interface{}) error {
+ return s.mapTo(v, true)
+}
+
+// MapTo maps file to given struct.
+func (f *File) MapTo(v interface{}) error {
+ return f.Section("").MapTo(v)
+}
+
+// StrictMapTo maps file to given struct in strict mode,
+// which returns all possible error including value parsing error.
+func (f *File) StrictMapTo(v interface{}) error {
+ return f.Section("").StrictMapTo(v)
+}
+
+// MapToWithMapper maps data sources to given struct with name mapper.
+func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error {
+ cfg, err := Load(source, others...)
+ if err != nil {
+ return err
+ }
+ cfg.NameMapper = mapper
+ return cfg.MapTo(v)
+}
+
+// StrictMapToWithMapper maps data sources to given struct with name mapper in strict mode,
+// which returns all possible error including value parsing error.
+func StrictMapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error {
+ cfg, err := Load(source, others...)
+ if err != nil {
+ return err
+ }
+ cfg.NameMapper = mapper
+ return cfg.StrictMapTo(v)
+}
+
+// MapTo maps data sources to given struct.
+func MapTo(v, source interface{}, others ...interface{}) error {
+ return MapToWithMapper(v, nil, source, others...)
+}
+
+// StrictMapTo maps data sources to given struct in strict mode,
+// which returns all possible error including value parsing error.
+func StrictMapTo(v, source interface{}, others ...interface{}) error {
+ return StrictMapToWithMapper(v, nil, source, others...)
+}
+
+// reflectSliceWithProperType does the opposite thing as setSliceWithProperType.
+func reflectSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow bool) error {
+ slice := field.Slice(0, field.Len())
+ if field.Len() == 0 {
+ return nil
+ }
+ sliceOf := field.Type().Elem().Kind()
+
+ if allowShadow {
+ var keyWithShadows *Key
+ for i := 0; i < field.Len(); i++ {
+ var val string
+ switch sliceOf {
+ case reflect.String:
+ val = slice.Index(i).String()
+ case reflect.Int, reflect.Int64:
+ val = fmt.Sprint(slice.Index(i).Int())
+ case reflect.Uint, reflect.Uint64:
+ val = fmt.Sprint(slice.Index(i).Uint())
+ case reflect.Float64:
+ val = fmt.Sprint(slice.Index(i).Float())
+ case reflect.Bool:
+ val = fmt.Sprint(slice.Index(i).Bool())
+ case reflectTime:
+ val = slice.Index(i).Interface().(time.Time).Format(time.RFC3339)
+ default:
+ return fmt.Errorf("unsupported type '[]%s'", sliceOf)
+ }
+
+ if i == 0 {
+ keyWithShadows = newKey(key.s, key.name, val)
+ } else {
+ _ = keyWithShadows.AddShadow(val)
+ }
+ }
+ *key = *keyWithShadows
+ return nil
+ }
+
+ var buf bytes.Buffer
+ for i := 0; i < field.Len(); i++ {
+ switch sliceOf {
+ case reflect.String:
+ buf.WriteString(slice.Index(i).String())
+ case reflect.Int, reflect.Int64:
+ buf.WriteString(fmt.Sprint(slice.Index(i).Int()))
+ case reflect.Uint, reflect.Uint64:
+ buf.WriteString(fmt.Sprint(slice.Index(i).Uint()))
+ case reflect.Float64:
+ buf.WriteString(fmt.Sprint(slice.Index(i).Float()))
+ case reflect.Bool:
+ buf.WriteString(fmt.Sprint(slice.Index(i).Bool()))
+ case reflectTime:
+ buf.WriteString(slice.Index(i).Interface().(time.Time).Format(time.RFC3339))
+ default:
+ return fmt.Errorf("unsupported type '[]%s'", sliceOf)
+ }
+ buf.WriteString(delim)
+ }
+ key.SetValue(buf.String()[:buf.Len()-len(delim)])
+ return nil
+}
+
+// reflectWithProperType does the opposite thing as setWithProperType.
+func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow bool) error {
+ switch t.Kind() {
+ case reflect.String:
+ key.SetValue(field.String())
+ case reflect.Bool:
+ key.SetValue(fmt.Sprint(field.Bool()))
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ key.SetValue(fmt.Sprint(field.Int()))
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ key.SetValue(fmt.Sprint(field.Uint()))
+ case reflect.Float32, reflect.Float64:
+ key.SetValue(fmt.Sprint(field.Float()))
+ case reflectTime:
+ key.SetValue(fmt.Sprint(field.Interface().(time.Time).Format(time.RFC3339)))
+ case reflect.Slice:
+ return reflectSliceWithProperType(key, field, delim, allowShadow)
+ case reflect.Ptr:
+ if !field.IsNil() {
+ return reflectWithProperType(t.Elem(), key, field.Elem(), delim, allowShadow)
+ }
+ default:
+ return fmt.Errorf("unsupported type %q", t)
+ }
+ return nil
+}
+
+// CR: copied from encoding/json/encode.go with modifications of time.Time support.
+// TODO: add more test coverage.
+func isEmptyValue(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+ return v.Len() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ case reflectTime:
+ t, ok := v.Interface().(time.Time)
+ return ok && t.IsZero()
+ }
+ return false
+}
+
+// StructReflector is the interface implemented by struct types that can extract themselves into INI objects.
+type StructReflector interface {
+ ReflectINIStruct(*File) error
+}
+
+func (s *Section) reflectFrom(val reflect.Value) error {
+ if val.Kind() == reflect.Ptr {
+ val = val.Elem()
+ }
+ typ := val.Type()
+
+ for i := 0; i < typ.NumField(); i++ {
+ if !val.Field(i).CanInterface() {
+ continue
+ }
+
+ field := val.Field(i)
+ tpField := typ.Field(i)
+
+ tag := tpField.Tag.Get("ini")
+ if tag == "-" {
+ continue
+ }
+
+ rawName, omitEmpty, allowShadow, allowNonUnique, extends := parseTagOptions(tag)
+ if omitEmpty && isEmptyValue(field) {
+ continue
+ }
+
+ if r, ok := field.Interface().(StructReflector); ok {
+ return r.ReflectINIStruct(s.f)
+ }
+
+ fieldName := s.parseFieldName(tpField.Name, rawName)
+ if len(fieldName) == 0 || !field.CanSet() {
+ continue
+ }
+
+ if extends && tpField.Anonymous && (tpField.Type.Kind() == reflect.Ptr || tpField.Type.Kind() == reflect.Struct) {
+ if err := s.reflectFrom(field); err != nil {
+ return fmt.Errorf("reflect from field %q: %v", fieldName, err)
+ }
+ continue
+ }
+
+ if (tpField.Type.Kind() == reflect.Ptr && tpField.Type.Elem().Kind() == reflect.Struct) ||
+ (tpField.Type.Kind() == reflect.Struct && tpField.Type.Name() != "Time") {
+ // Note: The only error here is section doesn't exist.
+ sec, err := s.f.GetSection(fieldName)
+ if err != nil {
+ // Note: fieldName can never be empty here, ignore error.
+ sec, _ = s.f.NewSection(fieldName)
+ }
+
+ // Add comment from comment tag
+ if len(sec.Comment) == 0 {
+ sec.Comment = tpField.Tag.Get("comment")
+ }
+
+ if err = sec.reflectFrom(field); err != nil {
+ return fmt.Errorf("reflect from field %q: %v", fieldName, err)
+ }
+ continue
+ }
+
+ if allowNonUnique && tpField.Type.Kind() == reflect.Slice {
+ slice := field.Slice(0, field.Len())
+ if field.Len() == 0 {
+ return nil
+ }
+ sliceOf := field.Type().Elem().Kind()
+
+ for i := 0; i < field.Len(); i++ {
+ if sliceOf != reflect.Struct && sliceOf != reflect.Ptr {
+ return fmt.Errorf("field %q is not a slice of pointer or struct", fieldName)
+ }
+
+ sec, err := s.f.NewSection(fieldName)
+ if err != nil {
+ return err
+ }
+
+ // Add comment from comment tag
+ if len(sec.Comment) == 0 {
+ sec.Comment = tpField.Tag.Get("comment")
+ }
+
+ if err := sec.reflectFrom(slice.Index(i)); err != nil {
+ return fmt.Errorf("reflect from field %q: %v", fieldName, err)
+ }
+ }
+ continue
+ }
+
+ // Note: Same reason as section.
+ key, err := s.GetKey(fieldName)
+ if err != nil {
+ key, _ = s.NewKey(fieldName, "")
+ }
+
+ // Add comment from comment tag
+ if len(key.Comment) == 0 {
+ key.Comment = tpField.Tag.Get("comment")
+ }
+
+ delim := parseDelim(tpField.Tag.Get("delim"))
+ if err = reflectWithProperType(tpField.Type, key, field, delim, allowShadow); err != nil {
+ return fmt.Errorf("reflect field %q: %v", fieldName, err)
+ }
+
+ }
+ return nil
+}
+
+// ReflectFrom reflects section from given struct. It overwrites existing ones.
+func (s *Section) ReflectFrom(v interface{}) error {
+ typ := reflect.TypeOf(v)
+ val := reflect.ValueOf(v)
+
+ if s.name != DefaultSection && s.f.options.AllowNonUniqueSections &&
+ (typ.Kind() == reflect.Slice || typ.Kind() == reflect.Ptr) {
+ // Clear sections to make sure none exists before adding the new ones
+ s.f.DeleteSection(s.name)
+
+ if typ.Kind() == reflect.Ptr {
+ sec, err := s.f.NewSection(s.name)
+ if err != nil {
+ return err
+ }
+ return sec.reflectFrom(val.Elem())
+ }
+
+ slice := val.Slice(0, val.Len())
+ sliceOf := val.Type().Elem().Kind()
+ if sliceOf != reflect.Ptr {
+ return fmt.Errorf("not a slice of pointers")
+ }
+
+ for i := 0; i < slice.Len(); i++ {
+ sec, err := s.f.NewSection(s.name)
+ if err != nil {
+ return err
+ }
+
+ err = sec.reflectFrom(slice.Index(i))
+ if err != nil {
+ return fmt.Errorf("reflect from %dth field: %v", i, err)
+ }
+ }
+
+ return nil
+ }
+
+ if typ.Kind() == reflect.Ptr {
+ val = val.Elem()
+ } else {
+ return errors.New("not a pointer to a struct")
+ }
+
+ return s.reflectFrom(val)
+}
+
+// ReflectFrom reflects file from given struct.
+func (f *File) ReflectFrom(v interface{}) error {
+ return f.Section("").ReflectFrom(v)
+}
+
+// ReflectFromWithMapper reflects data sources from given struct with name mapper.
+func ReflectFromWithMapper(cfg *File, v interface{}, mapper NameMapper) error {
+ cfg.NameMapper = mapper
+ return cfg.ReflectFrom(v)
+}
+
+// ReflectFrom reflects data sources from given struct.
+func ReflectFrom(cfg *File, v interface{}) error {
+ return ReflectFromWithMapper(cfg, v, nil)
+}
diff --git a/cluster-autoscaler/vendor/modules.txt b/cluster-autoscaler/vendor/modules.txt
index 64e8c16e27f5..b14523b91c9b 100644
--- a/cluster-autoscaler/vendor/modules.txt
+++ b/cluster-autoscaler/vendor/modules.txt
@@ -582,6 +582,23 @@ github.com/spf13/cobra
# github.com/spf13/pflag v1.0.5
## explicit; go 1.12
github.com/spf13/pflag
+# github.com/spotinst/spotinst-sdk-go v1.181.0
+## explicit; go 1.20
+github.com/spotinst/spotinst-sdk-go/service/elastigroup
+github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/aws
+github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/azure
+github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/azure/v3
+github.com/spotinst/spotinst-sdk-go/service/elastigroup/providers/gcp
+github.com/spotinst/spotinst-sdk-go/spotinst
+github.com/spotinst/spotinst-sdk-go/spotinst/client
+github.com/spotinst/spotinst-sdk-go/spotinst/credentials
+github.com/spotinst/spotinst-sdk-go/spotinst/featureflag
+github.com/spotinst/spotinst-sdk-go/spotinst/log
+github.com/spotinst/spotinst-sdk-go/spotinst/session
+github.com/spotinst/spotinst-sdk-go/spotinst/util/jsonutil
+github.com/spotinst/spotinst-sdk-go/spotinst/util/stringutil
+github.com/spotinst/spotinst-sdk-go/spotinst/util/uritemplates
+github.com/spotinst/spotinst-sdk-go/spotinst/util/useragent
# github.com/stretchr/objx v0.5.0
## explicit; go 1.12
github.com/stretchr/objx
@@ -965,6 +982,9 @@ gopkg.in/gcfg.v1/types
# gopkg.in/inf.v0 v0.9.1
## explicit
gopkg.in/inf.v0
+# gopkg.in/ini.v1 v1.67.0
+## explicit
+gopkg.in/ini.v1
# gopkg.in/natefinch/lumberjack.v2 v2.0.0
## explicit
gopkg.in/natefinch/lumberjack.v2